1/*
2 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses.  You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 *     Redistribution and use in source and binary forms, with or
13 *     without modification, are permitted provided that the following
14 *     conditions are met:
15 *
16 *      - Redistributions of source code must retain the above
17 *        copyright notice, this list of conditions and the following
18 *        disclaimer.
19 *
20 *      - Redistributions in binary form must reproduce the above
21 *        copyright notice, this list of conditions and the following
22 *        disclaimer in the documentation and/or other materials
23 *        provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include "ipoib.h"
36
37#include <linux/module.h>
38
39#include <linux/init.h>
40#include <linux/slab.h>
41#include <linux/kernel.h>
42#include <linux/vmalloc.h>
43
44#include <linux/if_arp.h>	/* For ARPHRD_xxx */
45
46#include <linux/ip.h>
47#include <linux/in.h>
48
49#include <linux/jhash.h>
50#include <net/arp.h>
51#include <net/addrconf.h>
52#include <linux/inetdevice.h>
53#include <rdma/ib_cache.h>
54
55MODULE_AUTHOR("Roland Dreier");
56MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
57MODULE_LICENSE("Dual BSD/GPL");
58
59int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
60int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;
61
62module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
63MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
64module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
65MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
66
67#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
68int ipoib_debug_level;
69
70module_param_named(debug_level, ipoib_debug_level, int, 0644);
71MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
72#endif
73
74struct ipoib_path_iter {
75	struct net_device *dev;
76	struct ipoib_path  path;
77};
78
79static const u8 ipv4_bcast_addr[] = {
80	0x00, 0xff, 0xff, 0xff,
81	0xff, 0x12, 0x40, 0x1b,	0x00, 0x00, 0x00, 0x00,
82	0x00, 0x00, 0x00, 0x00,	0xff, 0xff, 0xff, 0xff
83};
84
85struct workqueue_struct *ipoib_workqueue;
86
87struct ib_sa_client ipoib_sa_client;
88
89static int ipoib_add_one(struct ib_device *device);
90static void ipoib_remove_one(struct ib_device *device, void *client_data);
91static void ipoib_neigh_reclaim(struct rcu_head *rp);
92static struct net_device *ipoib_get_net_dev_by_params(
93		struct ib_device *dev, u32 port, u16 pkey,
94		const union ib_gid *gid, const struct sockaddr *addr,
95		void *client_data);
96static int ipoib_set_mac(struct net_device *dev, void *addr);
97static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
98		       int cmd);
99
100static struct ib_client ipoib_client = {
101	.name   = "ipoib",
102	.add    = ipoib_add_one,
103	.remove = ipoib_remove_one,
104	.get_net_dev_by_params = ipoib_get_net_dev_by_params,
105};
106
107#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
108static int ipoib_netdev_event(struct notifier_block *this,
109			      unsigned long event, void *ptr)
110{
111	struct netdev_notifier_info *ni = ptr;
112	struct net_device *dev = ni->dev;
113
114	if (dev->netdev_ops->ndo_open != ipoib_open)
115		return NOTIFY_DONE;
116
117	switch (event) {
118	case NETDEV_REGISTER:
119		ipoib_create_debug_files(dev);
120		break;
121	case NETDEV_CHANGENAME:
122		ipoib_delete_debug_files(dev);
123		ipoib_create_debug_files(dev);
124		break;
125	case NETDEV_UNREGISTER:
126		ipoib_delete_debug_files(dev);
127		break;
128	}
129
130	return NOTIFY_DONE;
131}
132#endif
133
134int ipoib_open(struct net_device *dev)
135{
136	struct ipoib_dev_priv *priv = ipoib_priv(dev);
137
138	ipoib_dbg(priv, "bringing up interface\n");
139
140	netif_carrier_off(dev);
141
142	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
143
144	if (ipoib_ib_dev_open(dev)) {
145		if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
146			return 0;
147		goto err_disable;
148	}
149
150	ipoib_ib_dev_up(dev);
151
152	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
153		struct ipoib_dev_priv *cpriv;
154
155		/* Bring up any child interfaces too */
156		down_read(&priv->vlan_rwsem);
157		list_for_each_entry(cpriv, &priv->child_intfs, list) {
158			int flags;
159
160			flags = cpriv->dev->flags;
161			if (flags & IFF_UP)
162				continue;
163
164			dev_change_flags(cpriv->dev, flags | IFF_UP, NULL);
165		}
166		up_read(&priv->vlan_rwsem);
167	} else if (priv->parent) {
168		struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
169
170		if (!test_bit(IPOIB_FLAG_ADMIN_UP, &ppriv->flags))
			ipoib_dbg(priv, "parent device %s is not up, so the child device may not be functioning.\n",
172				  ppriv->dev->name);
173	}
174	netif_start_queue(dev);
175
176	return 0;
177
178err_disable:
179	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
180
181	return -EINVAL;
182}
183
184static int ipoib_stop(struct net_device *dev)
185{
186	struct ipoib_dev_priv *priv = ipoib_priv(dev);
187
188	ipoib_dbg(priv, "stopping interface\n");
189
190	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
191
192	netif_stop_queue(dev);
193
194	ipoib_ib_dev_down(dev);
195	ipoib_ib_dev_stop(dev);
196
197	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
198		struct ipoib_dev_priv *cpriv;
199
200		/* Bring down any child interfaces too */
201		down_read(&priv->vlan_rwsem);
202		list_for_each_entry(cpriv, &priv->child_intfs, list) {
203			int flags;
204
205			flags = cpriv->dev->flags;
206			if (!(flags & IFF_UP))
207				continue;
208
209			dev_change_flags(cpriv->dev, flags & ~IFF_UP, NULL);
210		}
211		up_read(&priv->vlan_rwsem);
212	}
213
214	return 0;
215}
216
217static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
218{
219	struct ipoib_dev_priv *priv = ipoib_priv(dev);
220
221	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
222		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
223
224	return features;
225}
226
227static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
228{
229	struct ipoib_dev_priv *priv = ipoib_priv(dev);
230	int ret = 0;
231
232	/* dev->mtu > 2K ==> connected mode */
233	if (ipoib_cm_admin_enabled(dev)) {
234		if (new_mtu > ipoib_cm_max_mtu(dev))
235			return -EINVAL;
236
237		if (new_mtu > priv->mcast_mtu)
238			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
239				   priv->mcast_mtu);
240
241		dev->mtu = new_mtu;
242		return 0;
243	}
244
245	if (new_mtu < (ETH_MIN_MTU + IPOIB_ENCAP_LEN) ||
246	    new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
247		return -EINVAL;
248
249	priv->admin_mtu = new_mtu;
250
251	if (priv->mcast_mtu < priv->admin_mtu)
252		ipoib_dbg(priv, "MTU must be smaller than the underlying "
253				"link layer MTU - 4 (%u)\n", priv->mcast_mtu);
254
255	new_mtu = min(priv->mcast_mtu, priv->admin_mtu);
256
257	if (priv->rn_ops->ndo_change_mtu) {
258		bool carrier_status = netif_carrier_ok(dev);
259
260		netif_carrier_off(dev);
261
		/* notify the lower level of the real MTU */
263		ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu);
264
265		if (carrier_status)
266			netif_carrier_on(dev);
267	} else {
268		dev->mtu = new_mtu;
269	}
270
271	return ret;
272}
273
274static void ipoib_get_stats(struct net_device *dev,
275			    struct rtnl_link_stats64 *stats)
276{
277	struct ipoib_dev_priv *priv = ipoib_priv(dev);
278
279	if (priv->rn_ops->ndo_get_stats64)
280		priv->rn_ops->ndo_get_stats64(dev, stats);
281	else
282		netdev_stats_to_stats64(stats, &dev->stats);
283}
284
285/* Called with an RCU read lock taken */
286static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
287					struct net_device *dev)
288{
289	struct net *net = dev_net(dev);
290	struct in_device *in_dev;
291	struct sockaddr_in *addr_in = (struct sockaddr_in *)addr;
292	struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr;
293	__be32 ret_addr;
294
295	switch (addr->sa_family) {
296	case AF_INET:
297		in_dev = in_dev_get(dev);
298		if (!in_dev)
299			return false;
300
301		ret_addr = inet_confirm_addr(net, in_dev, 0,
302					     addr_in->sin_addr.s_addr,
303					     RT_SCOPE_HOST);
304		in_dev_put(in_dev);
305		if (ret_addr)
306			return true;
307
308		break;
309	case AF_INET6:
310		if (IS_ENABLED(CONFIG_IPV6) &&
311		    ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1))
312			return true;
313
314		break;
315	}
316	return false;
317}
318
319/*
320 * Find the master net_device on top of the given net_device.
321 * @dev: base IPoIB net_device
322 *
323 * Returns the master net_device with a reference held, or the same net_device
324 * if no master exists.
325 */
326static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
327{
328	struct net_device *master;
329
330	rcu_read_lock();
331	master = netdev_master_upper_dev_get_rcu(dev);
332	if (master)
333		dev_hold(master);
334	rcu_read_unlock();
335
336	if (master)
337		return master;
338
339	dev_hold(dev);
340	return dev;
341}
342
343struct ipoib_walk_data {
344	const struct sockaddr *addr;
345	struct net_device *result;
346};
347
348static int ipoib_upper_walk(struct net_device *upper,
349			    struct netdev_nested_priv *priv)
350{
351	struct ipoib_walk_data *data = (struct ipoib_walk_data *)priv->data;
352	int ret = 0;
353
354	if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) {
355		dev_hold(upper);
356		data->result = upper;
357		ret = 1;
358	}
359
360	return ret;
361}
362
363/**
364 * ipoib_get_net_dev_match_addr - Find a net_device matching
365 * the given address, which is an upper device of the given net_device.
366 *
367 * @addr: IP address to look for.
368 * @dev: base IPoIB net_device
369 *
 * If found, returns the net_device with a reference held. Otherwise returns
 * NULL.
372 */
373static struct net_device *ipoib_get_net_dev_match_addr(
374		const struct sockaddr *addr, struct net_device *dev)
375{
376	struct netdev_nested_priv priv;
377	struct ipoib_walk_data data = {
378		.addr = addr,
379	};
380
381	priv.data = (void *)&data;
382	rcu_read_lock();
383	if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
384		dev_hold(dev);
385		data.result = dev;
386		goto out;
387	}
388
389	netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &priv);
390out:
391	rcu_read_unlock();
392	return data.result;
393}
394
/* returns the number of IPoIB netdevs on top of a given ipoib device matching a
396 * pkey_index and address, if one exists.
397 *
398 * @found_net_dev: contains a matching net_device if the return value >= 1,
399 * with a reference held. */
400static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
401				     const union ib_gid *gid,
402				     u16 pkey_index,
403				     const struct sockaddr *addr,
404				     int nesting,
405				     struct net_device **found_net_dev)
406{
407	struct ipoib_dev_priv *child_priv;
408	struct net_device *net_dev = NULL;
409	int matches = 0;
410
411	if (priv->pkey_index == pkey_index &&
412	    (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
413		if (!addr) {
414			net_dev = ipoib_get_master_net_dev(priv->dev);
415		} else {
416			/* Verify the net_device matches the IP address, as
417			 * IPoIB child devices currently share a GID. */
418			net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev);
419		}
420		if (net_dev) {
421			if (!*found_net_dev)
422				*found_net_dev = net_dev;
423			else
424				dev_put(net_dev);
425			++matches;
426		}
427	}
428
429	/* Check child interfaces */
430	down_read_nested(&priv->vlan_rwsem, nesting);
431	list_for_each_entry(child_priv, &priv->child_intfs, list) {
432		matches += ipoib_match_gid_pkey_addr(child_priv, gid,
433						    pkey_index, addr,
434						    nesting + 1,
435						    found_net_dev);
436		if (matches > 1)
437			break;
438	}
439	up_read(&priv->vlan_rwsem);
440
441	return matches;
442}
443
444/* Returns the number of matching net_devs found (between 0 and 2). Also
 * returns the matching net_device in the @net_dev parameter, holding a
446 * reference to the net_device, if the number of matches >= 1 */
447static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u32 port,
448					 u16 pkey_index,
449					 const union ib_gid *gid,
450					 const struct sockaddr *addr,
451					 struct net_device **net_dev)
452{
453	struct ipoib_dev_priv *priv;
454	int matches = 0;
455
456	*net_dev = NULL;
457
458	list_for_each_entry(priv, dev_list, list) {
459		if (priv->port != port)
460			continue;
461
462		matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index,
463						     addr, 0, net_dev);
464		if (matches > 1)
465			break;
466	}
467
468	return matches;
469}
470
471static struct net_device *ipoib_get_net_dev_by_params(
472		struct ib_device *dev, u32 port, u16 pkey,
473		const union ib_gid *gid, const struct sockaddr *addr,
474		void *client_data)
475{
476	struct net_device *net_dev;
477	struct list_head *dev_list = client_data;
478	u16 pkey_index;
479	int matches;
480	int ret;
481
482	if (!rdma_protocol_ib(dev, port))
483		return NULL;
484
485	ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index);
486	if (ret)
487		return NULL;
488
489	/* See if we can find a unique device matching the L2 parameters */
490	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
491						gid, NULL, &net_dev);
492
493	switch (matches) {
494	case 0:
495		return NULL;
496	case 1:
497		return net_dev;
498	}
499
500	dev_put(net_dev);
501
502	/* Couldn't find a unique device with L2 parameters only. Use L3
503	 * address to uniquely match the net device */
504	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
505						gid, addr, &net_dev);
506	switch (matches) {
507	case 0:
508		return NULL;
509	default:
510		dev_warn_ratelimited(&dev->dev,
511				     "duplicate IP address detected\n");
512		fallthrough;
513	case 1:
514		return net_dev;
515	}
516}
517
518int ipoib_set_mode(struct net_device *dev, const char *buf)
519{
520	struct ipoib_dev_priv *priv = ipoib_priv(dev);
521
522	if ((test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
523	     !strcmp(buf, "connected\n")) ||
524	     (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
525	     !strcmp(buf, "datagram\n"))) {
526		return 0;
527	}
528
529	/* flush paths if we switch modes so that connections are restarted */
530	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
531		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
532		ipoib_warn(priv, "enabling connected mode "
533			   "will cause multicast packet drops\n");
534		netdev_update_features(dev);
535		dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
536		netif_set_real_num_tx_queues(dev, 1);
537		rtnl_unlock();
538		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
539
540		ipoib_flush_paths(dev);
541		return (!rtnl_trylock()) ? -EBUSY : 0;
542	}
543
544	if (!strcmp(buf, "datagram\n")) {
545		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
546		netdev_update_features(dev);
547		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
548		netif_set_real_num_tx_queues(dev, dev->num_tx_queues);
549		rtnl_unlock();
550		ipoib_flush_paths(dev);
551		return (!rtnl_trylock()) ? -EBUSY : 0;
552	}
553
554	return -EINVAL;
555}
556
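/*
 * Look up a path by destination GID in the per-device red-black tree.
 * Callers are expected to hold priv->lock while searching.
 */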
557struct ipoib_path *__path_find(struct net_device *dev, void *gid)
558{
559	struct ipoib_dev_priv *priv = ipoib_priv(dev);
560	struct rb_node *n = priv->path_tree.rb_node;
561	struct ipoib_path *path;
562	int ret;
563
564	while (n) {
565		path = rb_entry(n, struct ipoib_path, rb_node);
566
567		ret = memcmp(gid, path->pathrec.dgid.raw,
568			     sizeof (union ib_gid));
569
570		if (ret < 0)
571			n = n->rb_left;
572		else if (ret > 0)
573			n = n->rb_right;
574		else
575			return path;
576	}
577
578	return NULL;
579}
580
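/*
 * Insert a path into the red-black tree and the flat path list, keyed by
 * destination GID. Returns -EEXIST if a path for that GID already exists.
 */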
581static int __path_add(struct net_device *dev, struct ipoib_path *path)
582{
583	struct ipoib_dev_priv *priv = ipoib_priv(dev);
584	struct rb_node **n = &priv->path_tree.rb_node;
585	struct rb_node *pn = NULL;
586	struct ipoib_path *tpath;
587	int ret;
588
589	while (*n) {
590		pn = *n;
591		tpath = rb_entry(pn, struct ipoib_path, rb_node);
592
593		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
594			     sizeof (union ib_gid));
595		if (ret < 0)
596			n = &pn->rb_left;
597		else if (ret > 0)
598			n = &pn->rb_right;
599		else
600			return -EEXIST;
601	}
602
603	rb_link_node(&path->rb_node, pn, n);
604	rb_insert_color(&path->rb_node, &priv->path_tree);
605
606	list_add_tail(&path->list, &priv->path_list);
607
608	return 0;
609}
610
611static void path_free(struct net_device *dev, struct ipoib_path *path)
612{
613	struct sk_buff *skb;
614
615	while ((skb = __skb_dequeue(&path->queue)))
616		dev_kfree_skb_irq(skb);
617
618	ipoib_dbg(ipoib_priv(dev), "%s\n", __func__);
619
620	/* remove all neigh connected to this path */
621	ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
622
623	if (path->ah)
624		ipoib_put_ah(path->ah);
625
626	kfree(path);
627}
628
629#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
630
631struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
632{
633	struct ipoib_path_iter *iter;
634
635	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
636	if (!iter)
637		return NULL;
638
639	iter->dev = dev;
640	memset(iter->path.pathrec.dgid.raw, 0, 16);
641
642	if (ipoib_path_iter_next(iter)) {
643		kfree(iter);
644		return NULL;
645	}
646
647	return iter;
648}
649
650int ipoib_path_iter_next(struct ipoib_path_iter *iter)
651{
652	struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
653	struct rb_node *n;
654	struct ipoib_path *path;
655	int ret = 1;
656
657	spin_lock_irq(&priv->lock);
658
659	n = rb_first(&priv->path_tree);
660
661	while (n) {
662		path = rb_entry(n, struct ipoib_path, rb_node);
663
664		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
665			   sizeof (union ib_gid)) < 0) {
666			iter->path = *path;
667			ret = 0;
668			break;
669		}
670
671		n = rb_next(n);
672	}
673
674	spin_unlock_irq(&priv->lock);
675
676	return ret;
677}
678
679void ipoib_path_iter_read(struct ipoib_path_iter *iter,
680			  struct ipoib_path *path)
681{
682	*path = iter->path;
683}
684
685#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
686
687void ipoib_mark_paths_invalid(struct net_device *dev)
688{
689	struct ipoib_dev_priv *priv = ipoib_priv(dev);
690	struct ipoib_path *path, *tp;
691
692	spin_lock_irq(&priv->lock);
693
694	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
695		ipoib_dbg(priv, "mark path LID 0x%08x GID %pI6 invalid\n",
696			  be32_to_cpu(sa_path_get_dlid(&path->pathrec)),
697			  path->pathrec.dgid.raw);
698		if (path->ah)
699			path->ah->valid = 0;
700	}
701
702	spin_unlock_irq(&priv->lock);
703}
704
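/*
 * Prepend the pseudo header that carries the destination hardware address,
 * so the transmit path can recover it after the network stack has built
 * the rest of the packet.
 */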
705static void push_pseudo_header(struct sk_buff *skb, const char *daddr)
706{
707	struct ipoib_pseudo_header *phdr;
708
709	phdr = skb_push(skb, sizeof(*phdr));
710	memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
711}
712
713void ipoib_flush_paths(struct net_device *dev)
714{
715	struct ipoib_dev_priv *priv = ipoib_priv(dev);
716	struct ipoib_path *path, *tp;
717	LIST_HEAD(remove_list);
718	unsigned long flags;
719
720	netif_tx_lock_bh(dev);
721	spin_lock_irqsave(&priv->lock, flags);
722
723	list_splice_init(&priv->path_list, &remove_list);
724
725	list_for_each_entry(path, &remove_list, list)
726		rb_erase(&path->rb_node, &priv->path_tree);
727
728	list_for_each_entry_safe(path, tp, &remove_list, list) {
729		if (path->query)
730			ib_sa_cancel_query(path->query_id, path->query);
731		spin_unlock_irqrestore(&priv->lock, flags);
732		netif_tx_unlock_bh(dev);
733		wait_for_completion(&path->done);
734		path_free(dev, path);
735		netif_tx_lock_bh(dev);
736		spin_lock_irqsave(&priv->lock, flags);
737	}
738
739	spin_unlock_irqrestore(&priv->lock, flags);
740	netif_tx_unlock_bh(dev);
741}
742
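/*
 * Completion handler for an SA path record query: create a new address
 * handle, point every neighbour waiting on this path at it, and re-queue
 * any packets that were held back while the lookup was in flight.
 */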
743static void path_rec_completion(int status,
744				struct sa_path_rec *pathrec,
745				unsigned int num_prs, void *path_ptr)
746{
747	struct ipoib_path *path = path_ptr;
748	struct net_device *dev = path->dev;
749	struct ipoib_dev_priv *priv = ipoib_priv(dev);
750	struct ipoib_ah *ah = NULL;
751	struct ipoib_ah *old_ah = NULL;
752	struct ipoib_neigh *neigh, *tn;
753	struct sk_buff_head skqueue;
754	struct sk_buff *skb;
755	unsigned long flags;
756
757	if (!status)
758		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
759			  be32_to_cpu(sa_path_get_dlid(pathrec)),
760			  pathrec->dgid.raw);
761	else
762		ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
763			  status, path->pathrec.dgid.raw);
764
765	skb_queue_head_init(&skqueue);
766
767	if (!status) {
768		struct rdma_ah_attr av;
769
770		if (!ib_init_ah_attr_from_path(priv->ca, priv->port,
771					       pathrec, &av, NULL)) {
772			ah = ipoib_create_ah(dev, priv->pd, &av);
773			rdma_destroy_ah_attr(&av);
774		}
775	}
776
777	spin_lock_irqsave(&priv->lock, flags);
778
779	if (!IS_ERR_OR_NULL(ah)) {
780		/*
		 * pathrec.dgid is used as the database key from the LLADDR;
782		 * it must remain unchanged even if the SA returns a different
783		 * GID to use in the AH.
784		 */
785		if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw,
786			   sizeof(union ib_gid))) {
787			ipoib_dbg(
788				priv,
789				"%s got PathRec for gid %pI6 while asked for %pI6\n",
790				dev->name, pathrec->dgid.raw,
791				path->pathrec.dgid.raw);
792			memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw,
793			       sizeof(union ib_gid));
794		}
795
796		path->pathrec = *pathrec;
797
798		old_ah   = path->ah;
799		path->ah = ah;
800
801		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
802			  ah, be32_to_cpu(sa_path_get_dlid(pathrec)),
803			  pathrec->sl);
804
805		while ((skb = __skb_dequeue(&path->queue)))
806			__skb_queue_tail(&skqueue, skb);
807
808		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
809			if (neigh->ah) {
810				WARN_ON(neigh->ah != old_ah);
811				/*
812				 * Dropping the ah reference inside
813				 * priv->lock is safe here, because we
814				 * will hold one more reference from
815				 * the original value of path->ah (ie
816				 * old_ah).
817				 */
818				ipoib_put_ah(neigh->ah);
819			}
820			kref_get(&path->ah->ref);
821			neigh->ah = path->ah;
822
823			if (ipoib_cm_enabled(dev, neigh->daddr)) {
824				if (!ipoib_cm_get(neigh))
825					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
826									       path,
827									       neigh));
828				if (!ipoib_cm_get(neigh)) {
829					ipoib_neigh_free(neigh);
830					continue;
831				}
832			}
833
834			while ((skb = __skb_dequeue(&neigh->queue)))
835				__skb_queue_tail(&skqueue, skb);
836		}
837		path->ah->valid = 1;
838	}
839
840	path->query = NULL;
841	complete(&path->done);
842
843	spin_unlock_irqrestore(&priv->lock, flags);
844
845	if (IS_ERR_OR_NULL(ah))
846		ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
847
848	if (old_ah)
849		ipoib_put_ah(old_ah);
850
851	while ((skb = __skb_dequeue(&skqueue))) {
852		int ret;
853		skb->dev = dev;
854		ret = dev_queue_xmit(skb);
855		if (ret)
856			ipoib_warn(priv, "%s: dev_queue_xmit failed to re-queue packet, ret:%d\n",
857				   __func__, ret);
858	}
859}
860
861static void init_path_rec(struct ipoib_dev_priv *priv, struct ipoib_path *path,
862			  void *gid)
863{
864	path->dev = priv->dev;
865
866	if (rdma_cap_opa_ah(priv->ca, priv->port))
867		path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
868	else
869		path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;
870
871	memcpy(path->pathrec.dgid.raw, gid, sizeof(union ib_gid));
872	path->pathrec.sgid	    = priv->local_gid;
873	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
874	path->pathrec.numb_path     = 1;
875	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
876}
877
878static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
879{
880	struct ipoib_dev_priv *priv = ipoib_priv(dev);
881	struct ipoib_path *path;
882
883	if (!priv->broadcast)
884		return NULL;
885
886	path = kzalloc(sizeof(*path), GFP_ATOMIC);
887	if (!path)
888		return NULL;
889
890	skb_queue_head_init(&path->queue);
891
892	INIT_LIST_HEAD(&path->neigh_list);
893
894	init_path_rec(priv, path, gid);
895
896	return path;
897}
898
899static int path_rec_start(struct net_device *dev,
900			  struct ipoib_path *path)
901{
902	struct ipoib_dev_priv *priv = ipoib_priv(dev);
903
904	ipoib_dbg(priv, "Start path record lookup for %pI6\n",
905		  path->pathrec.dgid.raw);
906
907	init_completion(&path->done);
908
909	path->query_id =
910		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
911				   &path->pathrec,
912				   IB_SA_PATH_REC_DGID		|
913				   IB_SA_PATH_REC_SGID		|
914				   IB_SA_PATH_REC_NUMB_PATH	|
915				   IB_SA_PATH_REC_TRAFFIC_CLASS |
916				   IB_SA_PATH_REC_PKEY,
917				   1000, GFP_ATOMIC,
918				   path_rec_completion,
919				   path, &path->query);
920	if (path->query_id < 0) {
921		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
922		path->query = NULL;
923		complete(&path->done);
924		return path->query_id;
925	}
926
927	return 0;
928}
929
930static void neigh_refresh_path(struct ipoib_neigh *neigh, u8 *daddr,
931			       struct net_device *dev)
932{
933	struct ipoib_dev_priv *priv = ipoib_priv(dev);
934	struct ipoib_path *path;
935	unsigned long flags;
936
937	spin_lock_irqsave(&priv->lock, flags);
938
939	path = __path_find(dev, daddr + 4);
940	if (!path)
941		goto out;
942	if (!path->query)
943		path_rec_start(dev, path);
944out:
945	spin_unlock_irqrestore(&priv->lock, flags);
946}
947
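/*
 * Bind an skb's destination to the neighbour cache: allocate (or reuse)
 * the neighbour, attach it to a path, and either transmit immediately when
 * a valid address handle exists or queue the skb until path resolution
 * completes. Returns a non-NULL neighbour only when the skb has not been
 * consumed and the caller should continue transmitting with it.
 */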
948static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
949					  struct net_device *dev)
950{
951	struct ipoib_dev_priv *priv = ipoib_priv(dev);
952	struct rdma_netdev *rn = netdev_priv(dev);
953	struct ipoib_path *path;
954	struct ipoib_neigh *neigh;
955	unsigned long flags;
956
957	spin_lock_irqsave(&priv->lock, flags);
958	neigh = ipoib_neigh_alloc(daddr, dev);
959	if (!neigh) {
960		spin_unlock_irqrestore(&priv->lock, flags);
961		++dev->stats.tx_dropped;
962		dev_kfree_skb_any(skb);
963		return NULL;
964	}
965
	/* To avoid a race condition, make sure that the
	 * neigh is added only once.
968	 */
969	if (unlikely(!list_empty(&neigh->list))) {
970		spin_unlock_irqrestore(&priv->lock, flags);
971		return neigh;
972	}
973
974	path = __path_find(dev, daddr + 4);
975	if (!path) {
976		path = path_rec_create(dev, daddr + 4);
977		if (!path)
978			goto err_path;
979
980		__path_add(dev, path);
981	}
982
983	list_add_tail(&neigh->list, &path->neigh_list);
984
985	if (path->ah && path->ah->valid) {
986		kref_get(&path->ah->ref);
987		neigh->ah = path->ah;
988
989		if (ipoib_cm_enabled(dev, neigh->daddr)) {
990			if (!ipoib_cm_get(neigh))
991				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
992			if (!ipoib_cm_get(neigh)) {
993				ipoib_neigh_free(neigh);
994				goto err_drop;
995			}
996			if (skb_queue_len(&neigh->queue) <
997			    IPOIB_MAX_PATH_REC_QUEUE) {
998				push_pseudo_header(skb, neigh->daddr);
999				__skb_queue_tail(&neigh->queue, skb);
1000			} else {
1001				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
1002					   skb_queue_len(&neigh->queue));
1003				goto err_drop;
1004			}
1005		} else {
1006			spin_unlock_irqrestore(&priv->lock, flags);
1007			path->ah->last_send = rn->send(dev, skb, path->ah->ah,
1008						       IPOIB_QPN(daddr));
1009			ipoib_neigh_put(neigh);
1010			return NULL;
1011		}
1012	} else {
1013		neigh->ah  = NULL;
1014
1015		if (!path->query && path_rec_start(dev, path))
1016			goto err_path;
1017		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
1018			push_pseudo_header(skb, neigh->daddr);
1019			__skb_queue_tail(&neigh->queue, skb);
1020		} else {
1021			goto err_drop;
1022		}
1023	}
1024
1025	spin_unlock_irqrestore(&priv->lock, flags);
1026	ipoib_neigh_put(neigh);
1027	return NULL;
1028
1029err_path:
1030	ipoib_neigh_free(neigh);
1031err_drop:
1032	++dev->stats.tx_dropped;
1033	dev_kfree_skb_any(skb);
1034
1035	spin_unlock_irqrestore(&priv->lock, flags);
1036	ipoib_neigh_put(neigh);
1037
1038	return NULL;
1039}
1040
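/*
 * Send a unicast ARP/RARP frame, creating or re-resolving the path to the
 * destination when no valid address handle is available yet; the skb is
 * queued on the path (or dropped) until resolution completes.
 */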
1041static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
1042			     struct ipoib_pseudo_header *phdr)
1043{
1044	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1045	struct rdma_netdev *rn = netdev_priv(dev);
1046	struct ipoib_path *path;
1047	unsigned long flags;
1048
1049	spin_lock_irqsave(&priv->lock, flags);
1050
	/* no broadcast group means that no path is (or will become) valid */
1052	if (!priv->broadcast)
1053		goto drop_and_unlock;
1054
1055	path = __path_find(dev, phdr->hwaddr + 4);
1056	if (!path || !path->ah || !path->ah->valid) {
1057		if (!path) {
1058			path = path_rec_create(dev, phdr->hwaddr + 4);
1059			if (!path)
1060				goto drop_and_unlock;
1061			__path_add(dev, path);
1062		} else {
1063			/*
1064			 * make sure there are no changes in the existing
1065			 * path record
1066			 */
1067			init_path_rec(priv, path, phdr->hwaddr + 4);
1068		}
1069		if (!path->query && path_rec_start(dev, path)) {
1070			goto drop_and_unlock;
1071		}
1072
1073		if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
1074			push_pseudo_header(skb, phdr->hwaddr);
1075			__skb_queue_tail(&path->queue, skb);
1076			goto unlock;
1077		} else {
1078			goto drop_and_unlock;
1079		}
1080	}
1081
1082	spin_unlock_irqrestore(&priv->lock, flags);
1083	ipoib_dbg(priv, "Send unicast ARP to %08x\n",
1084		  be32_to_cpu(sa_path_get_dlid(&path->pathrec)));
1085	path->ah->last_send = rn->send(dev, skb, path->ah->ah,
1086				       IPOIB_QPN(phdr->hwaddr));
1087	return;
1088
1089drop_and_unlock:
1090	++dev->stats.tx_dropped;
1091	dev_kfree_skb_any(skb);
1092unlock:
1093	spin_unlock_irqrestore(&priv->lock, flags);
1094}
1095
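/*
 * Main transmit handler: multicast frames go out through the multicast
 * code, unicast IP/IPv6/TIPC frames use the neighbour cache, and unicast
 * ARP/RARP frames always trigger a path lookup.
 */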
1096static netdev_tx_t ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
1097{
1098	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1099	struct rdma_netdev *rn = netdev_priv(dev);
1100	struct ipoib_neigh *neigh;
1101	struct ipoib_pseudo_header *phdr;
1102	struct ipoib_header *header;
1103	unsigned long flags;
1104
1105	phdr = (struct ipoib_pseudo_header *) skb->data;
1106	skb_pull(skb, sizeof(*phdr));
1107	header = (struct ipoib_header *) skb->data;
1108
1109	if (unlikely(phdr->hwaddr[4] == 0xff)) {
1110		/* multicast, arrange "if" according to probability */
1111		if ((header->proto != htons(ETH_P_IP)) &&
1112		    (header->proto != htons(ETH_P_IPV6)) &&
1113		    (header->proto != htons(ETH_P_ARP)) &&
1114		    (header->proto != htons(ETH_P_RARP)) &&
1115		    (header->proto != htons(ETH_P_TIPC))) {
1116			/* ethertype not supported by IPoIB */
1117			++dev->stats.tx_dropped;
1118			dev_kfree_skb_any(skb);
1119			return NETDEV_TX_OK;
1120		}
		/* Add in the P_Key for multicast */
1122		phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
1123		phdr->hwaddr[9] = priv->pkey & 0xff;
1124
1125		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
1126		if (likely(neigh))
1127			goto send_using_neigh;
1128		ipoib_mcast_send(dev, phdr->hwaddr, skb);
1129		return NETDEV_TX_OK;
1130	}
1131
1132	/* unicast, arrange "switch" according to probability */
1133	switch (header->proto) {
1134	case htons(ETH_P_IP):
1135	case htons(ETH_P_IPV6):
1136	case htons(ETH_P_TIPC):
1137		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
1138		if (unlikely(!neigh)) {
1139			neigh = neigh_add_path(skb, phdr->hwaddr, dev);
1140			if (likely(!neigh))
1141				return NETDEV_TX_OK;
1142		}
1143		break;
1144	case htons(ETH_P_ARP):
1145	case htons(ETH_P_RARP):
		/* for unicast ARP and RARP, always perform a path lookup */
1147		unicast_arp_send(skb, dev, phdr);
1148		return NETDEV_TX_OK;
1149	default:
1150		/* ethertype not supported by IPoIB */
1151		++dev->stats.tx_dropped;
1152		dev_kfree_skb_any(skb);
1153		return NETDEV_TX_OK;
1154	}
1155
1156send_using_neigh:
1157	/* note we now hold a ref to neigh */
1158	if (ipoib_cm_get(neigh)) {
1159		if (ipoib_cm_up(neigh)) {
1160			ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
1161			goto unref;
1162		}
1163	} else if (neigh->ah && neigh->ah->valid) {
1164		neigh->ah->last_send = rn->send(dev, skb, neigh->ah->ah,
1165						IPOIB_QPN(phdr->hwaddr));
1166		goto unref;
1167	} else if (neigh->ah) {
1168		neigh_refresh_path(neigh, phdr->hwaddr, dev);
1169	}
1170
1171	if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
1172		push_pseudo_header(skb, phdr->hwaddr);
1173		spin_lock_irqsave(&priv->lock, flags);
1174		__skb_queue_tail(&neigh->queue, skb);
1175		spin_unlock_irqrestore(&priv->lock, flags);
1176	} else {
1177		++dev->stats.tx_dropped;
1178		dev_kfree_skb_any(skb);
1179	}
1180
1181unref:
1182	ipoib_neigh_put(neigh);
1183
1184	return NETDEV_TX_OK;
1185}
1186
1187static void ipoib_timeout(struct net_device *dev, unsigned int txqueue)
1188{
1189	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1190	struct rdma_netdev *rn = netdev_priv(dev);
1191
1192	if (rn->tx_timeout) {
1193		rn->tx_timeout(dev, txqueue);
1194		return;
1195	}
1196	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
1197		   jiffies_to_msecs(jiffies - dev_trans_start(dev)));
1198	ipoib_warn(priv,
1199		   "queue stopped %d, tx_head %u, tx_tail %u, global_tx_head %u, global_tx_tail %u\n",
1200		   netif_queue_stopped(dev), priv->tx_head, priv->tx_tail,
1201		   priv->global_tx_head, priv->global_tx_tail);
1202
1204	schedule_work(&priv->tx_timeout_work);
1205}
1206
1207void ipoib_ib_tx_timeout_work(struct work_struct *work)
1208{
1209	struct ipoib_dev_priv *priv = container_of(work,
1210						   struct ipoib_dev_priv,
1211						   tx_timeout_work);
1212	int err;
1213
1214	rtnl_lock();
1215
1216	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
1217		goto unlock;
1218
1219	ipoib_stop(priv->dev);
1220	err = ipoib_open(priv->dev);
1221	if (err) {
1222		ipoib_warn(priv, "ipoib_open failed recovering from a tx_timeout, err(%d).\n",
1223				err);
1224		goto unlock;
1225	}
1226
1227	netif_tx_wake_all_queues(priv->dev);
1228unlock:
1229	rtnl_unlock();
1231}
1232
1233static int ipoib_hard_header(struct sk_buff *skb,
1234			     struct net_device *dev,
1235			     unsigned short type,
1236			     const void *daddr,
1237			     const void *saddr,
1238			     unsigned int len)
1239{
1240	struct ipoib_header *header;
1241
1242	header = skb_push(skb, sizeof(*header));
1243
1244	header->proto = htons(type);
1245	header->reserved = 0;
1246
1247	/*
	 * we don't rely on the dst_entry structure; always stuff the
	 * destination address into the skb hard header so we can figure out where
1250	 * to send the packet later.
1251	 */
1252	push_pseudo_header(skb, daddr);
1253
1254	return IPOIB_HARD_LEN;
1255}
1256
1257static void ipoib_set_mcast_list(struct net_device *dev)
1258{
1259	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1260
1261	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
1262		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
1263		return;
1264	}
1265
1266	queue_work(priv->wq, &priv->restart_task);
1267}
1268
1269static int ipoib_get_iflink(const struct net_device *dev)
1270{
1271	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1272
1273	/* parent interface */
1274	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
1275		return READ_ONCE(dev->ifindex);
1276
1277	/* child/vlan interface */
1278	return READ_ONCE(priv->parent->ifindex);
1279}
1280
1281static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
1282{
1283	/*
	 * Use only the address parts that contribute to spreading.
	 * The subnet prefix is not used, as one cannot connect to the
	 * same remote port (GUID) using the same remote QPN via two
1287	 * different subnets.
1288	 */
1289	 /* qpn octets[1:4) & port GUID octets[12:20) */
1290	u32 *d32 = (u32 *) daddr;
1291	u32 hv;
1292
1293	hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
1294	return hv & htbl->mask;
1295}
1296
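/*
 * Look up a neighbour by hardware address under the RCU-BH read lock.
 * On success a reference is taken on behalf of the caller, who must
 * release it with ipoib_neigh_put().
 */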
1297struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
1298{
1299	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1300	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1301	struct ipoib_neigh_hash *htbl;
1302	struct ipoib_neigh *neigh = NULL;
1303	u32 hash_val;
1304
1305	rcu_read_lock_bh();
1306
1307	htbl = rcu_dereference_bh(ntbl->htbl);
1308
1309	if (!htbl)
1310		goto out_unlock;
1311
1312	hash_val = ipoib_addr_hash(htbl, daddr);
1313	for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
1314	     neigh != NULL;
1315	     neigh = rcu_dereference_bh(neigh->hnext)) {
1316		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
1317			/* found, take one ref on behalf of the caller */
1318			if (!refcount_inc_not_zero(&neigh->refcnt)) {
1319				/* deleted */
1320				neigh = NULL;
1321				goto out_unlock;
1322			}
1323
1324			if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
1325				neigh->alive = jiffies;
1326			goto out_unlock;
1327		}
1328	}
1329
1330out_unlock:
1331	rcu_read_unlock_bh();
1332	return neigh;
1333}
1334
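/*
 * Garbage-collect neighbour entries that have been idle for at least two
 * ARP GC intervals, and collect any matching send-only multicast groups
 * for removal along the way.
 */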
1335static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
1336{
1337	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1338	struct ipoib_neigh_hash *htbl;
1339	unsigned long neigh_obsolete;
1340	unsigned long dt;
1341	unsigned long flags;
1342	int i;
1343	LIST_HEAD(remove_list);
1344
1345	spin_lock_irqsave(&priv->lock, flags);
1346
1347	htbl = rcu_dereference_protected(ntbl->htbl,
1348					 lockdep_is_held(&priv->lock));
1349
1350	if (!htbl)
1351		goto out_unlock;
1352
1353	/* neigh is obsolete if it was idle for two GC periods */
1354	dt = 2 * arp_tbl.gc_interval;
1355	neigh_obsolete = jiffies - dt;
1356
1357	for (i = 0; i < htbl->size; i++) {
1358		struct ipoib_neigh *neigh;
1359		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1360
1361		while ((neigh = rcu_dereference_protected(*np,
1362							  lockdep_is_held(&priv->lock))) != NULL) {
1363			/* was the neigh idle for two GC periods */
1364			if (time_after(neigh_obsolete, neigh->alive)) {
1365
1366				ipoib_check_and_add_mcast_sendonly(priv, neigh->daddr + 4, &remove_list);
1367
1368				rcu_assign_pointer(*np,
1369						   rcu_dereference_protected(neigh->hnext,
1370									     lockdep_is_held(&priv->lock)));
1371				/* remove from path/mc list */
1372				list_del_init(&neigh->list);
1373				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1374			} else {
1375				np = &neigh->hnext;
1376			}
1377
1378		}
1379	}
1380
1381out_unlock:
1382	spin_unlock_irqrestore(&priv->lock, flags);
1383	ipoib_mcast_remove_list(&remove_list);
1384}
1385
1386static void ipoib_reap_neigh(struct work_struct *work)
1387{
1388	struct ipoib_dev_priv *priv =
1389		container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);
1390
1391	__ipoib_reap_neigh(priv);
1392
1393	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
1394			   arp_tbl.gc_interval);
1395}
1396
1398static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
1399				      struct net_device *dev)
1400{
1401	struct ipoib_neigh *neigh;
1402
1403	neigh = kzalloc(sizeof(*neigh), GFP_ATOMIC);
1404	if (!neigh)
1405		return NULL;
1406
1407	neigh->dev = dev;
1408	memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
1409	skb_queue_head_init(&neigh->queue);
1410	INIT_LIST_HEAD(&neigh->list);
1411	ipoib_cm_set(neigh, NULL);
1412	/* one ref on behalf of the caller */
1413	refcount_set(&neigh->refcnt, 1);
1414
1415	return neigh;
1416}
1417
1418struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
1419				      struct net_device *dev)
1420{
1421	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1422	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1423	struct ipoib_neigh_hash *htbl;
1424	struct ipoib_neigh *neigh;
1425	u32 hash_val;
1426
1427	htbl = rcu_dereference_protected(ntbl->htbl,
1428					 lockdep_is_held(&priv->lock));
1429	if (!htbl) {
1430		neigh = NULL;
1431		goto out_unlock;
1432	}
1433
	/* We need to add a new neigh, but another thread may have beaten us
	 * to it; recompute the hash (a resize may have taken place) and
	 * search again before inserting.
	 */
1437	hash_val = ipoib_addr_hash(htbl, daddr);
1438	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
1439					       lockdep_is_held(&priv->lock));
1440	     neigh != NULL;
1441	     neigh = rcu_dereference_protected(neigh->hnext,
1442					       lockdep_is_held(&priv->lock))) {
1443		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
1444			/* found, take one ref on behalf of the caller */
1445			if (!refcount_inc_not_zero(&neigh->refcnt)) {
1446				/* deleted */
1447				neigh = NULL;
1448				break;
1449			}
1450			neigh->alive = jiffies;
1451			goto out_unlock;
1452		}
1453	}
1454
1455	neigh = ipoib_neigh_ctor(daddr, dev);
1456	if (!neigh)
1457		goto out_unlock;
1458
1459	/* one ref on behalf of the hash table */
1460	refcount_inc(&neigh->refcnt);
1461	neigh->alive = jiffies;
1462	/* put in hash */
1463	rcu_assign_pointer(neigh->hnext,
1464			   rcu_dereference_protected(htbl->buckets[hash_val],
1465						     lockdep_is_held(&priv->lock)));
1466	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
1467	atomic_inc(&ntbl->entries);
1468
1469out_unlock:
1470
1471	return neigh;
1472}
1473
1474void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
1475{
	/* neigh reference count was dropped to zero */
1477	struct net_device *dev = neigh->dev;
1478	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1479	struct sk_buff *skb;
1480	if (neigh->ah)
1481		ipoib_put_ah(neigh->ah);
1482	while ((skb = __skb_dequeue(&neigh->queue))) {
1483		++dev->stats.tx_dropped;
1484		dev_kfree_skb_any(skb);
1485	}
1486	if (ipoib_cm_get(neigh))
1487		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
1488	ipoib_dbg(ipoib_priv(dev),
1489		  "neigh free for %06x %pI6\n",
1490		  IPOIB_QPN(neigh->daddr),
1491		  neigh->daddr + 4);
1492	kfree(neigh);
1493	if (atomic_dec_and_test(&priv->ntbl.entries)) {
1494		if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
1495			complete(&priv->ntbl.flushed);
1496	}
1497}
1498
1499static void ipoib_neigh_reclaim(struct rcu_head *rp)
1500{
1501	/* Called as a result of removal from hash table */
1502	struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
1503	/* note TX context may hold another ref */
1504	ipoib_neigh_put(neigh);
1505}
1506
1507void ipoib_neigh_free(struct ipoib_neigh *neigh)
1508{
1509	struct net_device *dev = neigh->dev;
1510	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1511	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1512	struct ipoib_neigh_hash *htbl;
1513	struct ipoib_neigh __rcu **np;
1514	struct ipoib_neigh *n;
1515	u32 hash_val;
1516
1517	htbl = rcu_dereference_protected(ntbl->htbl,
1518					lockdep_is_held(&priv->lock));
1519	if (!htbl)
1520		return;
1521
1522	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
1523	np = &htbl->buckets[hash_val];
1524	for (n = rcu_dereference_protected(*np,
1525					    lockdep_is_held(&priv->lock));
1526	     n != NULL;
1527	     n = rcu_dereference_protected(*np,
1528					lockdep_is_held(&priv->lock))) {
1529		if (n == neigh) {
1530			/* found */
1531			rcu_assign_pointer(*np,
1532					   rcu_dereference_protected(neigh->hnext,
1533								     lockdep_is_held(&priv->lock)));
1534			/* remove from parent list */
1535			list_del_init(&neigh->list);
1536			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1537			return;
1538		} else {
1539			np = &n->hnext;
1540		}
1541	}
1542}
1543
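/*
 * Allocate the neighbour hash table, sized from the ARP gc_thresh3
 * parameter, and start the periodic neighbour garbage collection.
 */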
1544static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
1545{
1546	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1547	struct ipoib_neigh_hash *htbl;
1548	struct ipoib_neigh __rcu **buckets;
1549	u32 size;
1550
1551	clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
1552	ntbl->htbl = NULL;
1553	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
1554	if (!htbl)
1555		return -ENOMEM;
1556	size = roundup_pow_of_two(arp_tbl.gc_thresh3);
1557	buckets = kvcalloc(size, sizeof(*buckets), GFP_KERNEL);
1558	if (!buckets) {
1559		kfree(htbl);
1560		return -ENOMEM;
1561	}
1562	htbl->size = size;
1563	htbl->mask = (size - 1);
1564	htbl->buckets = buckets;
1565	RCU_INIT_POINTER(ntbl->htbl, htbl);
1566	htbl->ntbl = ntbl;
1567	atomic_set(&ntbl->entries, 0);
1568
1569	/* start garbage collection */
1570	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
1571			   arp_tbl.gc_interval);
1572
1573	return 0;
1574}
1575
1576static void neigh_hash_free_rcu(struct rcu_head *head)
1577{
1578	struct ipoib_neigh_hash *htbl = container_of(head,
1579						    struct ipoib_neigh_hash,
1580						    rcu);
1581	struct ipoib_neigh __rcu **buckets = htbl->buckets;
1582	struct ipoib_neigh_table *ntbl = htbl->ntbl;
1583
1584	kvfree(buckets);
1585	kfree(htbl);
1586	complete(&ntbl->deleted);
1587}
1588
1589void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
1590{
1591	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1592	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1593	struct ipoib_neigh_hash *htbl;
1594	unsigned long flags;
1595	int i;
1596
1597	/* remove all neigh connected to a given path or mcast */
1598	spin_lock_irqsave(&priv->lock, flags);
1599
1600	htbl = rcu_dereference_protected(ntbl->htbl,
1601					 lockdep_is_held(&priv->lock));
1602
1603	if (!htbl)
1604		goto out_unlock;
1605
1606	for (i = 0; i < htbl->size; i++) {
1607		struct ipoib_neigh *neigh;
1608		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1609
1610		while ((neigh = rcu_dereference_protected(*np,
1611							  lockdep_is_held(&priv->lock))) != NULL) {
			/* delete neighs belonging to this parent */
1613			if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
1614				rcu_assign_pointer(*np,
1615						   rcu_dereference_protected(neigh->hnext,
1616									     lockdep_is_held(&priv->lock)));
1617				/* remove from parent list */
1618				list_del_init(&neigh->list);
1619				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1620			} else {
1621				np = &neigh->hnext;
1622			}
1623
1624		}
1625	}
1626out_unlock:
1627	spin_unlock_irqrestore(&priv->lock, flags);
1628}
1629
1630static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
1631{
1632	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1633	struct ipoib_neigh_hash *htbl;
1634	unsigned long flags;
1635	int i, wait_flushed = 0;
1636
1637	init_completion(&priv->ntbl.flushed);
1638	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
1639
1640	spin_lock_irqsave(&priv->lock, flags);
1641
1642	htbl = rcu_dereference_protected(ntbl->htbl,
1643					lockdep_is_held(&priv->lock));
1644	if (!htbl)
1645		goto out_unlock;
1646
1647	wait_flushed = atomic_read(&priv->ntbl.entries);
1648	if (!wait_flushed)
1649		goto free_htbl;
1650
1651	for (i = 0; i < htbl->size; i++) {
1652		struct ipoib_neigh *neigh;
1653		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1654
1655		while ((neigh = rcu_dereference_protected(*np,
1656				       lockdep_is_held(&priv->lock))) != NULL) {
1657			rcu_assign_pointer(*np,
1658					   rcu_dereference_protected(neigh->hnext,
1659								     lockdep_is_held(&priv->lock)));
1660			/* remove from path/mc list */
1661			list_del_init(&neigh->list);
1662			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1663		}
1664	}
1665
1666free_htbl:
1667	rcu_assign_pointer(ntbl->htbl, NULL);
1668	call_rcu(&htbl->rcu, neigh_hash_free_rcu);
1669
1670out_unlock:
1671	spin_unlock_irqrestore(&priv->lock, flags);
1672	if (wait_flushed)
1673		wait_for_completion(&priv->ntbl.flushed);
1674}
1675
1676static void ipoib_neigh_hash_uninit(struct net_device *dev)
1677{
1678	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1679
1680	ipoib_dbg(priv, "%s\n", __func__);
1681	init_completion(&priv->ntbl.deleted);
1682
1683	cancel_delayed_work_sync(&priv->neigh_reap_task);
1684
1685	ipoib_flush_neighs(priv);
1686
1687	wait_for_completion(&priv->ntbl.deleted);
1688}
1689
1690static void ipoib_napi_add(struct net_device *dev)
1691{
1692	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1693
1694	netif_napi_add_weight(dev, &priv->recv_napi, ipoib_rx_poll,
1695			      IPOIB_NUM_WC);
1696	netif_napi_add_weight(dev, &priv->send_napi, ipoib_tx_poll,
1697			      MAX_SEND_CQE);
1698}
1699
1700static void ipoib_napi_del(struct net_device *dev)
1701{
1702	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1703
1704	netif_napi_del(&priv->recv_napi);
1705	netif_napi_del(&priv->send_napi);
1706}
1707
1708static void ipoib_dev_uninit_default(struct net_device *dev)
1709{
1710	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1711
1712	ipoib_transport_dev_cleanup(dev);
1713
1714	ipoib_napi_del(dev);
1715
1716	ipoib_cm_dev_cleanup(dev);
1717
1718	kfree(priv->rx_ring);
1719	vfree(priv->tx_ring);
1720
1721	priv->rx_ring = NULL;
1722	priv->tx_ring = NULL;
1723}
1724
1725static int ipoib_dev_init_default(struct net_device *dev)
1726{
1727	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1728	u8 addr_mod[3];
1729
1730	ipoib_napi_add(dev);
1731
1732	/* Allocate RX/TX "rings" to hold queued skbs */
1733	priv->rx_ring =	kcalloc(ipoib_recvq_size,
1734				       sizeof(*priv->rx_ring),
1735				       GFP_KERNEL);
1736	if (!priv->rx_ring)
1737		goto out;
1738
1739	priv->tx_ring = vzalloc(array_size(ipoib_sendq_size,
1740					   sizeof(*priv->tx_ring)));
1741	if (!priv->tx_ring) {
1742		pr_warn("%s: failed to allocate TX ring (%d entries)\n",
1743			priv->ca->name, ipoib_sendq_size);
1744		goto out_rx_ring_cleanup;
1745	}
1746
1747	/* priv->tx_head, tx_tail and global_tx_tail/head are already 0 */
1748
1749	if (ipoib_transport_dev_init(dev, priv->ca)) {
1750		pr_warn("%s: ipoib_transport_dev_init failed\n",
1751			priv->ca->name);
1752		goto out_tx_ring_cleanup;
1753	}
1754
	/* after the QP is created, set the device address */
1756	addr_mod[0] = (priv->qp->qp_num >> 16) & 0xff;
1757	addr_mod[1] = (priv->qp->qp_num >>  8) & 0xff;
1758	addr_mod[2] = (priv->qp->qp_num) & 0xff;
1759	dev_addr_mod(priv->dev, 1, addr_mod, sizeof(addr_mod));
1760
1761	return 0;
1762
1763out_tx_ring_cleanup:
1764	vfree(priv->tx_ring);
1765
1766out_rx_ring_cleanup:
1767	kfree(priv->rx_ring);
1768
1769out:
1770	ipoib_napi_del(dev);
1771	return -ENOMEM;
1772}
1773
1774static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
1775		       int cmd)
1776{
1777	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1778
1779	if (!priv->rn_ops->ndo_eth_ioctl)
1780		return -EOPNOTSUPP;
1781
1782	return priv->rn_ops->ndo_eth_ioctl(dev, ifr, cmd);
1783}
1784
1785static int ipoib_dev_init(struct net_device *dev)
1786{
1787	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1788	int ret = -ENOMEM;
1789
1790	priv->qp = NULL;
1791
1792	/*
1793	 * the various IPoIB tasks assume they will never race against
	 * themselves, so always use a single-threaded workqueue
1795	 */
1796	priv->wq = alloc_ordered_workqueue("ipoib_wq", WQ_MEM_RECLAIM);
1797	if (!priv->wq) {
1798		pr_warn("%s: failed to allocate device WQ\n", dev->name);
1799		goto out;
1800	}
1801
	/* create the PD, which is used for both the control and data paths */
1803	priv->pd = ib_alloc_pd(priv->ca, 0);
1804	if (IS_ERR(priv->pd)) {
1805		pr_warn("%s: failed to allocate PD\n", priv->ca->name);
1806		goto clean_wq;
1807	}
1808
1809	ret = priv->rn_ops->ndo_init(dev);
1810	if (ret) {
1811		pr_warn("%s failed to init HW resource\n", dev->name);
1812		goto out_free_pd;
1813	}
1814
1815	ret = ipoib_neigh_hash_init(priv);
1816	if (ret) {
1817		pr_warn("%s failed to init neigh hash\n", dev->name);
1818		goto out_dev_uninit;
1819	}
1820
1821	if (dev->flags & IFF_UP) {
1822		if (ipoib_ib_dev_open(dev)) {
1823			pr_warn("%s failed to open device\n", dev->name);
1824			ret = -ENODEV;
1825			goto out_hash_uninit;
1826		}
1827	}
1828
1829	return 0;
1830
1831out_hash_uninit:
1832	ipoib_neigh_hash_uninit(dev);
1833
1834out_dev_uninit:
1835	ipoib_ib_dev_cleanup(dev);
1836
1837out_free_pd:
1838	if (priv->pd) {
1839		ib_dealloc_pd(priv->pd);
1840		priv->pd = NULL;
1841	}
1842
1843clean_wq:
1844	if (priv->wq) {
1845		destroy_workqueue(priv->wq);
1846		priv->wq = NULL;
1847	}
1848
1849out:
1850	return ret;
1851}
1852
1853/*
 * This must be called before doing an unregister_netdev on a parent device to
 * shut down the IB event handler.
1856 */
1857static void ipoib_parent_unregister_pre(struct net_device *ndev)
1858{
1859	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
1860
1861	/*
	 * ipoib_set_mac checks netif_running before pushing work; clearing
	 * running ensures it will not add more work.
1864	 */
1865	rtnl_lock();
1866	dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP, NULL);
1867	rtnl_unlock();
1868
1869	/* ipoib_event() cannot be running once this returns */
1870	ib_unregister_event_handler(&priv->event_handler);
1871
1872	/*
1873	 * Work on the queue grabs the rtnl lock, so this cannot be done while
1874	 * also holding it.
1875	 */
1876	flush_workqueue(ipoib_workqueue);
1877}
1878
1879static void ipoib_set_dev_features(struct ipoib_dev_priv *priv)
1880{
1881	priv->hca_caps = priv->ca->attrs.device_cap_flags;
1882	priv->kernel_caps = priv->ca->attrs.kernel_cap_flags;
1883
1884	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
1885		priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
1886
1887		if (priv->kernel_caps & IBK_UD_TSO)
1888			priv->dev->hw_features |= NETIF_F_TSO;
1889
1890		priv->dev->features |= priv->dev->hw_features;
1891	}
1892}
1893
1894static int ipoib_parent_init(struct net_device *ndev)
1895{
1896	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
1897	struct ib_port_attr attr;
1898	int result;
1899
1900	result = ib_query_port(priv->ca, priv->port, &attr);
1901	if (result) {
1902		pr_warn("%s: ib_query_port %d failed\n", priv->ca->name,
1903			priv->port);
1904		return result;
1905	}
1906	priv->max_ib_mtu = rdma_mtu_from_attr(priv->ca, priv->port, &attr);
1907
1908	result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
1909	if (result) {
1910		pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n",
1911			priv->ca->name, priv->port, result);
1912		return result;
1913	}
1914
1915	result = rdma_query_gid(priv->ca, priv->port, 0, &priv->local_gid);
1916	if (result) {
1917		pr_warn("%s: rdma_query_gid port %d failed (ret = %d)\n",
1918			priv->ca->name, priv->port, result);
1919		return result;
1920	}
1921	dev_addr_mod(priv->dev, 4, priv->local_gid.raw, sizeof(union ib_gid));
1922
1923	SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent);
1924	priv->dev->dev_port = priv->port - 1;
1925	/* Let's set this one too for backwards compatibility. */
1926	priv->dev->dev_id = priv->port - 1;
1927
1928	return 0;
1929}
1930
1931static void ipoib_child_init(struct net_device *ndev)
1932{
1933	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
1934	struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
1935
1936	priv->max_ib_mtu = ppriv->max_ib_mtu;
1937	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
1938	if (memchr_inv(priv->dev->dev_addr, 0, INFINIBAND_ALEN))
1939		memcpy(&priv->local_gid, priv->dev->dev_addr + 4,
1940		       sizeof(priv->local_gid));
1941	else {
1942		__dev_addr_set(priv->dev, ppriv->dev->dev_addr,
1943			       INFINIBAND_ALEN);
1944		memcpy(&priv->local_gid, &ppriv->local_gid,
1945		       sizeof(priv->local_gid));
1946	}
1947}
1948
static int ipoib_ndo_init(struct net_device *ndev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
	int rc;
	struct rdma_netdev *rn = netdev_priv(ndev);

	if (priv->parent) {
		ipoib_child_init(ndev);
	} else {
		rc = ipoib_parent_init(ndev);
		if (rc)
			return rc;
	}

	/* MTU will be reset when mcast join happens */
	ndev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = ndev->mtu;
	rn->mtu = priv->mcast_mtu;
	ndev->max_mtu = IPOIB_CM_MTU;

	ndev->neigh_priv_len = sizeof(struct ipoib_neigh);

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	ndev->broadcast[8] = priv->pkey >> 8;
	ndev->broadcast[9] = priv->pkey & 0xff;
	set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);

	ipoib_set_dev_features(priv);

	rc = ipoib_dev_init(ndev);
	if (rc) {
		pr_warn("%s: failed to initialize device: %s port %d (ret = %d)\n",
			priv->ca->name, priv->dev->name, priv->port, rc);
		return rc;
	}

	if (priv->parent) {
		struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);

		dev_hold(priv->parent);

		down_write(&ppriv->vlan_rwsem);
		list_add_tail(&priv->list, &ppriv->child_intfs);
		up_write(&ppriv->vlan_rwsem);
	}

	return 0;
}

static void ipoib_ndo_uninit(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ASSERT_RTNL();

	/*
	 * ipoib_remove_one guarantees the children are removed before the
	 * parent, and that is the only place where a parent can be removed.
	 */
	WARN_ON(!list_empty(&priv->child_intfs));

	if (priv->parent) {
		struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);

		down_write(&ppriv->vlan_rwsem);
		list_del(&priv->list);
		up_write(&ppriv->vlan_rwsem);
	}

	ipoib_neigh_hash_uninit(dev);

	ipoib_ib_dev_cleanup(dev);

	/* no more works over the priv->wq */
	if (priv->wq) {
		/* See ipoib_mcast_carrier_on_task() */
		WARN_ON(test_bit(IPOIB_FLAG_OPER_UP, &priv->flags));
		destroy_workqueue(priv->wq);
		priv->wq = NULL;
	}

	dev_put(priv->parent);
}

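/*
 * SR-IOV related .ndo callbacks: thin wrappers that forward the request to
 * the ib_core ib_{set,get}_vf_* helpers for this port.
 */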
static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
}

static int ipoib_get_vf_config(struct net_device *dev, int vf,
			       struct ifla_vf_info *ivf)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int err;

	err = ib_get_vf_config(priv->ca, vf, priv->port, ivf);
	if (err)
		return err;

	ivf->vf = vf;
	memcpy(ivf->mac, dev->dev_addr, dev->addr_len);

	return 0;
}

static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID)
		return -EINVAL;

	return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
}

static int ipoib_get_vf_guid(struct net_device *dev, int vf,
			     struct ifla_vf_guid *node_guid,
			     struct ifla_vf_guid *port_guid)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	return ib_get_vf_guid(priv->ca, vf, priv->port, node_guid, port_guid);
}

static int ipoib_get_vf_stats(struct net_device *dev, int vf,
			      struct ifla_vf_stats *vf_stats)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats);
}

static const struct header_ops ipoib_header_ops = {
	.create	= ipoib_hard_header,
};

static const struct net_device_ops ipoib_netdev_ops_pf = {
	.ndo_init		 = ipoib_ndo_init,
	.ndo_uninit		 = ipoib_ndo_uninit,
	.ndo_open		 = ipoib_open,
	.ndo_stop		 = ipoib_stop,
	.ndo_change_mtu		 = ipoib_change_mtu,
	.ndo_fix_features	 = ipoib_fix_features,
	.ndo_start_xmit		 = ipoib_start_xmit,
	.ndo_tx_timeout		 = ipoib_timeout,
	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
	.ndo_get_iflink		 = ipoib_get_iflink,
	.ndo_set_vf_link_state	 = ipoib_set_vf_link_state,
	.ndo_get_vf_config	 = ipoib_get_vf_config,
	.ndo_get_vf_stats	 = ipoib_get_vf_stats,
	.ndo_get_vf_guid	 = ipoib_get_vf_guid,
	.ndo_set_vf_guid	 = ipoib_set_vf_guid,
	.ndo_set_mac_address	 = ipoib_set_mac,
	.ndo_get_stats64	 = ipoib_get_stats,
	.ndo_eth_ioctl		 = ipoib_ioctl,
};

static const struct net_device_ops ipoib_netdev_ops_vf = {
	.ndo_init		 = ipoib_ndo_init,
	.ndo_uninit		 = ipoib_ndo_uninit,
	.ndo_open		 = ipoib_open,
	.ndo_stop		 = ipoib_stop,
	.ndo_change_mtu		 = ipoib_change_mtu,
	.ndo_fix_features	 = ipoib_fix_features,
	.ndo_start_xmit		 = ipoib_start_xmit,
	.ndo_tx_timeout		 = ipoib_timeout,
	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
	.ndo_get_iflink		 = ipoib_get_iflink,
	.ndo_get_stats64	 = ipoib_get_stats,
	.ndo_eth_ioctl		 = ipoib_ioctl,
};

static const struct net_device_ops ipoib_netdev_default_pf = {
	.ndo_init		 = ipoib_dev_init_default,
	.ndo_uninit		 = ipoib_dev_uninit_default,
	.ndo_open		 = ipoib_ib_dev_open_default,
	.ndo_stop		 = ipoib_ib_dev_stop_default,
};

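/*
 * Common setup applied to every IPoIB netdev: link-layer parameters (type,
 * address length, broadcast address), default feature flags and the default
 * device ops; ipoib_intf_init() later overrides netdev_ops with the PF or VF
 * variants.
 */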
void ipoib_setup_common(struct net_device *dev)
{
	dev->header_ops		 = &ipoib_header_ops;
	dev->netdev_ops          = &ipoib_netdev_default_pf;

	ipoib_set_ethtool_ops(dev);

	dev->watchdog_timeo	 = 10 * HZ;

	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;

	dev->hard_header_len	 = IPOIB_HARD_LEN;
	dev->addr_len		 = INFINIBAND_ALEN;
	dev->type		 = ARPHRD_INFINIBAND;
	dev->tx_queue_len	 = ipoib_sendq_size * 2;
	dev->features		 = (NETIF_F_VLAN_CHALLENGED	|
				    NETIF_F_HIGHDMA);
	netif_keep_dst(dev);

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	/*
	 * unregister_netdev() always frees the netdev; we use this mode
	 * consistently to unify all the various unregister paths, including
	 * those connected to rtnl_link_ops which require it.
	 */
	dev->needs_free_netdev = true;
}

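/*
 * Initialize the driver-private part of the netdev: locks, list heads and
 * the work items used for multicast joins, flushes and periodic reaping.
 */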
static void ipoib_build_priv(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	priv->dev = dev;
	spin_lock_init(&priv->lock);
	init_rwsem(&priv->vlan_rwsem);
	mutex_init(&priv->mcast_mutex);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->reschedule_napi_work, ipoib_napi_schedule_work);
	INIT_WORK(&priv->flush_light,   ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal,   ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy,   ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_WORK(&priv->tx_timeout_work, ipoib_ib_tx_timeout_work);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
	INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}

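/*
 * Allocate the netdev, preferring a device-specific rdma_netdev from the HCA
 * driver and falling back to a plain alloc_netdev() when the driver returns
 * -EOPNOTSUPP.
 */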
static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u32 port,
					     const char *name)
{
	struct net_device *dev;

	dev = rdma_alloc_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
				NET_NAME_UNKNOWN, ipoib_setup_common);
	if (!IS_ERR(dev) || PTR_ERR(dev) != -EOPNOTSUPP)
		return dev;

	dev = alloc_netdev(sizeof(struct rdma_netdev), name, NET_NAME_UNKNOWN,
			   ipoib_setup_common);
	if (!dev)
		return ERR_PTR(-ENOMEM);
	return dev;
}

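/*
 * Bind an allocated netdev to an HCA port: allocate the IPoIB private data,
 * let the HCA driver initialize its rdma_netdev (or fall back to the
 * software datapath), and install the PF or VF net_device_ops.
 */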
int ipoib_intf_init(struct ib_device *hca, u32 port, const char *name,
		    struct net_device *dev)
{
	struct rdma_netdev *rn = netdev_priv(dev);
	struct ipoib_dev_priv *priv;
	int rc;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->ca = hca;
	priv->port = port;

	rc = rdma_init_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
			      NET_NAME_UNKNOWN, ipoib_setup_common, dev);
	if (rc) {
		if (rc != -EOPNOTSUPP)
			goto out;

		rn->send = ipoib_send;
		rn->attach_mcast = ipoib_mcast_attach;
		rn->detach_mcast = ipoib_mcast_detach;
		rn->hca = hca;

		rc = netif_set_real_num_tx_queues(dev, 1);
		if (rc)
			goto out;

		rc = netif_set_real_num_rx_queues(dev, 1);
		if (rc)
			goto out;
	}

	priv->rn_ops = dev->netdev_ops;

	if (hca->attrs.kernel_cap_flags & IBK_VIRTUAL_FUNCTION)
		dev->netdev_ops	= &ipoib_netdev_ops_vf;
	else
		dev->netdev_ops	= &ipoib_netdev_ops_pf;

	rn->clnt_priv = priv;
	/*
	 * Only the child register_netdev flows can handle priv_destructor
	 * being set, so we force it to NULL here and handle manually until it
	 * is safe to turn on.
	 */
	priv->next_priv_destructor = dev->priv_destructor;
	dev->priv_destructor = NULL;

	ipoib_build_priv(dev);

	return 0;

out:
	kfree(priv);
	return rc;
}

struct net_device *ipoib_intf_alloc(struct ib_device *hca, u32 port,
				    const char *name)
{
	struct net_device *dev;
	int rc;

	dev = ipoib_alloc_netdev(hca, port, name);
	if (IS_ERR(dev))
		return dev;

	rc = ipoib_intf_init(hca, port, name, dev);
	if (rc) {
		free_netdev(dev);
		return ERR_PTR(rc);
	}

	/*
	 * Upon success the caller must either call ipoib_intf_free() or
	 * ensure register_netdevice() succeeded and priv_destructor is set
	 * to ipoib_intf_free.
	 */
	return dev;
}

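/*
 * Destructor counterpart of ipoib_intf_init(): run any chained
 * priv_destructor from the lower driver and free the IPoIB private data.
 */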
void ipoib_intf_free(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);

	dev->priv_destructor = priv->next_priv_destructor;
	if (dev->priv_destructor)
		dev->priv_destructor(dev);

	/*
	 * There are some error flows around register_netdev failing that may
	 * attempt to call priv_destructor twice; prevent that from happening.
	 */
	dev->priv_destructor = NULL;

	/* unregister/destroy is very complicated. Make bugs more obvious. */
	rn->clnt_priv = NULL;

	kfree(priv);
}

static ssize_t pkey_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	struct ipoib_dev_priv *priv = ipoib_priv(ndev);

	return sysfs_emit(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR_RO(pkey);

static ssize_t umcast_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	struct ipoib_dev_priv *priv = ipoib_priv(ndev);

	return sysfs_emit(buf, "%d\n",
			  test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}

void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
{
	struct ipoib_dev_priv *priv = ipoib_priv(ndev);

	if (umcast_val > 0) {
		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
		ipoib_warn(priv, "ignoring multicast groups joined directly "
				"by userspace\n");
	} else
		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
}

static ssize_t umcast_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

	ipoib_set_umcast(to_net_dev(dev), umcast_val);

	return count;
}
static DEVICE_ATTR_RW(umcast);

int ipoib_add_umcast_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_umcast);
}

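/*
 * Replace the GUID (interface_id) part of the local GID and hardware
 * address, and recursively propagate the new GUID to all child interfaces
 * of a parent device.
 */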
static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
{
	struct ipoib_dev_priv *child_priv;
	struct net_device *netdev = priv->dev;

	netif_addr_lock_bh(netdev);

	memcpy(&priv->local_gid.global.interface_id,
	       &gid->global.interface_id,
	       sizeof(gid->global.interface_id));
	dev_addr_mod(netdev, 4, (u8 *)&priv->local_gid, sizeof(priv->local_gid));
	clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);

	netif_addr_unlock_bh(netdev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(child_priv, &priv->child_intfs, list)
			set_base_guid(child_priv, gid);
		up_read(&priv->vlan_rwsem);
	}
}

static int ipoib_check_lladdr(struct net_device *dev,
			      struct sockaddr_storage *ss)
{
	union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
	int ret = 0;

	netif_addr_lock_bh(dev);

	/* Make sure the QPN, reserved field and subnet prefix match the
	 * current lladdr; the check also ensures the lladdr is unicast.
	 */
	if (memcmp(dev->dev_addr, ss->__data,
		   4 + sizeof(gid->global.subnet_prefix)) ||
	    gid->global.interface_id == 0)
		ret = -EINVAL;

	netif_addr_unlock_bh(dev);

	return ret;
}

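/*
 * .ndo_set_mac_address callback: only the GUID part of the address may
 * change; after validation the new GUID is applied and a light flush is
 * queued on the global workqueue.
 */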
static int ipoib_set_mac(struct net_device *dev, void *addr)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct sockaddr_storage *ss = addr;
	int ret;

	if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
		return -EBUSY;

	ret = ipoib_check_lladdr(dev, ss);
	if (ret)
		return ret;

	set_base_guid(priv, (union ib_gid *)(ss->__data + 4));

	queue_work(ipoib_workqueue, &priv->flush_light);

	return 0;
}

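/*
 * Sysfs knobs for creating and removing child (PKey) interfaces, e.g.:
 *   echo 0x8001 > /sys/class/net/ib0/create_child
 *   echo 0x8001 > /sys/class/net/ib0/delete_child
 * create_child rejects 0x8000 (the bare full-membership bit) as a PKey.
 */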
static ssize_t create_child_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
		return -EINVAL;

	ret = ipoib_vlan_add(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR_WO(create_child);

static ssize_t delete_child_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR_WO(delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_pkey);
}

/*
 * We erroneously exposed the iface's port number in the dev_id
 * sysfs field long after dev_port was introduced for that purpose[1],
 * and we need to stop everyone from relying on that.
 * Let's overload the show routine for the dev_id file here
 * to gently bring the issue up.
 *
 * [1] https://www.spinics.net/lists/netdev/msg272123.html
 */
static ssize_t dev_id_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	/*
	 * ndev->dev_port will be 0 in old kernels prior to commit
	 * 9b8b2a323008 ("IB/ipoib: Use dev_port to expose network interface
	 * port numbers"). Zero was chosen as a special case so that user
	 * space applications can fall back to querying dev_id and check
	 * whether it holds a different value.
	 *
	 * Don't print a warning in that scenario.
	 *
	 * https://github.com/systemd/systemd/blob/master/src/udev/udev-builtin-net_id.c#L358
	 */
	if (ndev->dev_port && ndev->dev_id == ndev->dev_port)
		netdev_info_once(ndev,
			"\"%s\" wants to know my dev_id. Should it look at dev_port instead? See Documentation/ABI/testing/sysfs-class-net for more info.\n",
			current->comm);

	return sysfs_emit(buf, "%#x\n", ndev->dev_id);
}
static DEVICE_ATTR_RO(dev_id);

static int ipoib_intercept_dev_id_attr(struct net_device *dev)
{
	device_remove_file(&dev->dev, &dev_attr_dev_id);
	return device_create_file(&dev->dev, &dev_attr_dev_id);
}

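/*
 * Create and register one IPoIB netdev for a single IB port: allocate the
 * interface, register the IB event handler, register the netdev and hook up
 * the per-interface sysfs attributes.
 */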
static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u32 port)
{
	struct rtnl_link_ops *ops = ipoib_get_link_ops();
	struct rdma_netdev_alloc_params params;
	struct ipoib_dev_priv *priv;
	struct net_device *ndev;
	int result;

	ndev = ipoib_intf_alloc(hca, port, format);
	if (IS_ERR(ndev)) {
		pr_warn("%s, %d: ipoib_intf_alloc failed %ld\n", hca->name, port,
			PTR_ERR(ndev));
		return ndev;
	}
	priv = ipoib_priv(ndev);

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	ib_register_event_handler(&priv->event_handler);

	/* call event handler to ensure pkey in sync */
	queue_work(ipoib_workqueue, &priv->flush_heavy);

	ndev->rtnl_link_ops = ipoib_get_link_ops();

	result = register_netdev(ndev);
	if (result) {
		pr_warn("%s: couldn't register ipoib port %d; error %d\n",
			hca->name, port, result);

		ipoib_parent_unregister_pre(ndev);
		ipoib_intf_free(ndev);
		free_netdev(ndev);

		return ERR_PTR(result);
	}

	if (hca->ops.rdma_netdev_get_params) {
		int rc = hca->ops.rdma_netdev_get_params(hca, port,
						     RDMA_NETDEV_IPOIB,
						     &params);

		if (!rc && ops->priv_size < params.sizeof_priv)
			ops->priv_size = params.sizeof_priv;
	}
	/*
	 * We cannot set priv_destructor before register_netdev() because
	 * priv must remain valid throughout the error flow so that
	 * ipoib_parent_unregister_pre() can run. Instead, handle it manually
	 * and only switch to priv_destructor mode once we are completely
	 * registered.
	 */
	ndev->priv_destructor = ipoib_intf_free;

	if (ipoib_intercept_dev_id_attr(ndev))
		goto sysfs_failed;
	if (ipoib_cm_add_mode_attr(ndev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(ndev))
		goto sysfs_failed;
	if (ipoib_add_umcast_attr(ndev))
		goto sysfs_failed;
	if (device_create_file(&ndev->dev, &dev_attr_create_child))
		goto sysfs_failed;
	if (device_create_file(&ndev->dev, &dev_attr_delete_child))
		goto sysfs_failed;

	return ndev;

sysfs_failed:
	ipoib_parent_unregister_pre(ndev);
	unregister_netdev(ndev);
	return ERR_PTR(-ENOMEM);
}

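/*
 * ib_client "add" callback: create an IPoIB interface for every IB port of
 * the newly registered device and stash the list as client data.
 */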
static int ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	unsigned int p;
	int count = 0;

	dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
	if (!dev_list)
		return -ENOMEM;

	INIT_LIST_HEAD(dev_list);

	rdma_for_each_port (device, p) {
		if (!rdma_protocol_ib(device, p))
			continue;
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = ipoib_priv(dev);
			list_add_tail(&priv->list, dev_list);
			count++;
		}
	}

	if (!count) {
		kfree(dev_list);
		return -EOPNOTSUPP;
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
	return 0;
}

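/*
 * ib_client "remove" callback: unregister every child interface before its
 * parent, batching the unregisters with unregister_netdevice_many() under
 * rtnl_lock.
 */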
static void ipoib_remove_one(struct ib_device *device, void *client_data)
{
	struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
	struct list_head *dev_list = client_data;

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		LIST_HEAD(head);
		ipoib_parent_unregister_pre(priv->dev);

		rtnl_lock();

		list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs,
					 list)
			unregister_netdevice_queue(cpriv->dev, &head);
		unregister_netdevice_queue(priv->dev, &head);
		unregister_netdevice_many(&head);

		rtnl_unlock();
	}

	kfree(dev_list);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static struct notifier_block ipoib_netdev_notifier = {
	.notifier_call = ipoib_netdev_event,
};
#endif

static int __init ipoib_init_module(void)
{
	int ret;

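	/*
	 * Clamp the module-parameter queue sizes: round up to a power of two
	 * and keep them within [IPOIB_MIN_QUEUE_SIZE, IPOIB_MAX_QUEUE_SIZE]
	 * (the send queue must also hold at least 2 * MAX_SEND_CQE entries).
	 */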
	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
	ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
#endif

	/*
	 * When copying small received packets, we only copy from the
	 * linear data part of the SKB, so we rely on this condition.
	 */
	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

	ipoib_register_debugfs();

	/*
	 * We create a global workqueue here that is used for all flush
	 * operations.  However, if you attempt to flush a workqueue
	 * from a task on that same workqueue, it deadlocks the system.
	 * We want to be able to flush the tasks associated with a
	 * specific net device, so we also create a workqueue for each
	 * netdevice.  We queue up the tasks for that device only on
	 * its private workqueue, and we only queue up flush events
	 * on our global flush workqueue.  This avoids the deadlocks.
	 */
	ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush", 0);
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	ret = ipoib_netlink_init();
	if (ret)
		goto err_client;

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
	register_netdevice_notifier(&ipoib_netdev_notifier);
#endif
	return 0;

err_client:
	ib_unregister_client(&ipoib_client);

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}

static void __exit ipoib_cleanup_module(void)
{
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
	unregister_netdevice_notifier(&ipoib_netdev_notifier);
#endif
	ipoib_netlink_fini();
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);