main.c revision 255932
1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses.  You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 *     Redistribution and use in source and binary forms, with or
12 *     without modification, are permitted provided that the following
13 *     conditions are met:
14 *
15 *      - Redistributions of source code must retain the above
16 *        copyright notice, this list of conditions and the following
17 *        disclaimer.
18 *
19 *      - Redistributions in binary form must reproduce the above
20 *        copyright notice, this list of conditions and the following
21 *        disclaimer in the documentation and/or other materials
22 *        provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/module.h>
35
36#ifdef __linux__
37#include <linux/proc_fs.h>
38#endif
39
40#include <linux/init.h>
41#include <linux/slab.h>
42#include <linux/errno.h>
43#include <linux/netdevice.h>
44#include <linux/inetdevice.h>
45#include <linux/rtnetlink.h>
46#include <linux/if_vlan.h>
47#include <linux/bitops.h>
48#include <linux/if_ether.h>
49
50#include <rdma/ib_smi.h>
51#include <rdma/ib_user_verbs.h>
52#include <rdma/ib_addr.h>
53
54#include <linux/mlx4/driver.h>
55#include <linux/mlx4/cmd.h>
56#include <linux/sched.h>
57#include "mlx4_ib.h"
58#include "user.h"
59#include "wc.h"
60
61#define DRV_NAME	MLX4_IB_DRV_NAME
62#define DRV_VERSION	"1.0"
63#define DRV_RELDATE	"April 4, 2008"
64
65#define MLX4_IB_DRIVER_PROC_DIR_NAME "driver/mlx4_ib"
66#define MLX4_IB_MRS_PROC_DIR_NAME "mrs"
67
68MODULE_AUTHOR("Roland Dreier");
69MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
70MODULE_LICENSE("Dual BSD/GPL");
71MODULE_VERSION(DRV_VERSION);
72
73int mlx4_ib_sm_guid_assign = 1;
74
75#ifdef __linux__
76struct proc_dir_entry *mlx4_mrs_dir_entry;
77static struct proc_dir_entry *mlx4_ib_driver_dir_entry;
78#endif
79
80module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
81MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");
82
83static char dev_assign_str[512];
84//module_param_string(dev_assign_str, dev_assign_str, sizeof(dev_assign_str), 0644);
85MODULE_PARM_DESC(dev_assign_str, "Map device function numbers to "
86		 "IB device numbers following the pattern: "
87		 "bb:dd.f-0,bb:dd.f-1,... (all numbers are hexadecimal)."
88		 " At most 32 devices are supported");
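/*
 * For illustration only (hypothetical values): if the module_param_string()
 * registration above were re-enabled, a setting such as
 *	dev_assign_str="03:00.0-0,03:00.1-1"
 * would be parsed by init_dev_assign() into two dev_rec entries, and
 * mlx4_ib_dev_idx() would then name the matching HCAs "mlx4_0" and "mlx4_1".
 */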
89
90static const char mlx4_ib_version[] =
91	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
92	DRV_VERSION " (" DRV_RELDATE ")\n";
93
94struct update_gid_work {
95	struct work_struct	work;
96	union ib_gid		gids[128];
97	struct mlx4_ib_dev     *dev;
98	int			port;
99};
100
101struct dev_rec {
102	int	bus;
103	int	dev;
104	int	func;
105	int	nr;
106};
107
108#define MAX_DR 32
109static struct dev_rec dr[MAX_DR];
110
111static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
112
113static struct workqueue_struct *wq;
114
115static void init_query_mad(struct ib_smp *mad)
116{
117	mad->base_version  = 1;
118	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
119	mad->class_version = 1;
120	mad->method	   = IB_MGMT_METHOD_GET;
121}
122
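/* All-zero GID, used below to mark free entries in the RoCE GID table. */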
123static union ib_gid zgid;
124
125static int mlx4_ib_query_device(struct ib_device *ibdev,
126				struct ib_device_attr *props)
127{
128	struct mlx4_ib_dev *dev = to_mdev(ibdev);
129	struct ib_smp *in_mad  = NULL;
130	struct ib_smp *out_mad = NULL;
131	int err = -ENOMEM;
132
133	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
134	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
135	if (!in_mad || !out_mad)
136		goto out;
137
138	init_query_mad(in_mad);
139	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
140
141	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
142			   1, NULL, NULL, in_mad, out_mad);
143	if (err)
144		goto out;
145
146	memset(props, 0, sizeof *props);
147
148	props->fw_ver = dev->dev->caps.fw_ver;
149	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
150		IB_DEVICE_PORT_ACTIVE_EVENT		|
151		IB_DEVICE_SYS_IMAGE_GUID		|
152		IB_DEVICE_RC_RNR_NAK_GEN		|
153		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK	|
154		IB_DEVICE_SHARED_MR;
155
156	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
157		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
158	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
159		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
160	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
161		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
162	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
163		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
164	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
165		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
166	if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
167		props->device_cap_flags |= IB_DEVICE_UD_TSO;
168	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
169		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
170	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
171	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
172	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
173		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
174	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
175		props->device_cap_flags |= IB_DEVICE_XRC;
176
177	props->device_cap_flags |= IB_DEVICE_QPG;
178	if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
179		props->device_cap_flags |= IB_DEVICE_UD_RSS;
180		props->max_rss_tbl_sz = dev->dev->caps.max_rss_tbl_sz;
181	}
182	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
183		0xffffff;
184	props->vendor_part_id	   = dev->dev->pdev->device;
185	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
186	memcpy(&props->sys_image_guid, out_mad->data +	4, 8);
187
188	props->max_mr_size	   = ~0ull;
189	props->page_size_cap	   = dev->dev->caps.page_size_cap;
190	props->max_qp		   = dev->dev->quotas.qp;
191	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
192	props->max_sge		   = min(dev->dev->caps.max_sq_sg,
193					 dev->dev->caps.max_rq_sg);
194	props->max_cq		   = dev->dev->quotas.cq;
195	props->max_cqe		   = dev->dev->caps.max_cqes;
196	props->max_mr		   = dev->dev->quotas.mpt;
197	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
198	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
199	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
200	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
201	props->max_srq		   = dev->dev->quotas.srq;
202	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
203	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
204	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
205	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
206	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
207		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
208	props->masked_atomic_cap   = props->atomic_cap;
209	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
210	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
211	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
212	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
213					   props->max_mcast_grp;
214	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
215
216out:
217	kfree(in_mad);
218	kfree(out_mad);
219
220	return err;
221}
222
223static enum rdma_link_layer
224mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
225{
226	struct mlx4_dev *dev = to_mdev(device)->dev;
227
228	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
229		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
230}
231
232static int ib_link_query_port(struct ib_device *ibdev, u8 port,
233			      struct ib_port_attr *props, int netw_view)
234{
235	struct ib_smp *in_mad  = NULL;
236	struct ib_smp *out_mad = NULL;
237	int ext_active_speed;
238	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
239	int err = -ENOMEM;
240
241	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
242	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
243	if (!in_mad || !out_mad)
244		goto out;
245
246	init_query_mad(in_mad);
247	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
248	in_mad->attr_mod = cpu_to_be32(port);
249
250	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
251		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
252
253	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
254				in_mad, out_mad);
255	if (err)
256		goto out;
257
258
259	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
260	props->lmc		= out_mad->data[34] & 0x7;
261	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
262	props->sm_sl		= out_mad->data[36] & 0xf;
263	props->state		= out_mad->data[32] & 0xf;
264	props->phys_state	= out_mad->data[33] >> 4;
265	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
266	if (netw_view)
267		props->gid_tbl_len = out_mad->data[50];
268	else
269		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
270	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
271	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
272	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
273	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
274	props->active_width	= out_mad->data[31] & 0xf;
275	props->active_speed	= out_mad->data[35] >> 4;
276	props->max_mtu		= out_mad->data[41] & 0xf;
277	props->active_mtu	= out_mad->data[36] >> 4;
278	props->subnet_timeout	= out_mad->data[51] & 0x1f;
279	props->max_vl_num	= out_mad->data[37] >> 4;
280	props->init_type_reply	= out_mad->data[41] >> 4;
281
282	/* Check if extended speeds (EDR/FDR/...) are supported */
283	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
284		ext_active_speed = out_mad->data[62] >> 4;
285
286		switch (ext_active_speed) {
287		case 1:
288			props->active_speed = IB_SPEED_FDR;
289			break;
290		case 2:
291			props->active_speed = IB_SPEED_EDR;
292			break;
293		}
294	}
295
296	/* If reported active speed is QDR, check if is FDR-10 */
297	if (props->active_speed == IB_SPEED_QDR) {
298		init_query_mad(in_mad);
299		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
300		in_mad->attr_mod = cpu_to_be32(port);
301
302		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
303				   NULL, NULL, in_mad, out_mad);
304		if (err)
305			goto out;
306
307		/* Checking LinkSpeedActive for FDR-10 */
308		if (out_mad->data[15] & 0x1)
309			props->active_speed = IB_SPEED_FDR10;
310	}
311
312	/* Avoid wrong speed value returned by FW if the IB link is down. */
313	if (props->state == IB_PORT_DOWN)
314		 props->active_speed = IB_SPEED_SDR;
315
316out:
317	kfree(in_mad);
318	kfree(out_mad);
319	return err;
320}
321
322static u8 state_to_phys_state(enum ib_port_state state)
323{
324	return state == IB_PORT_ACTIVE ? 5 : 3;
325}
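/*
 * 5 and 3 are the PortInfo:PortPhysicalState encodings for "LinkUp" and
 * "Disabled", respectively, per the InfiniBand specification.
 */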
326
327static int eth_link_query_port(struct ib_device *ibdev, u8 port,
328			       struct ib_port_attr *props, int netw_view)
329{
330
331	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
332	struct mlx4_ib_iboe *iboe = &mdev->iboe;
333	struct net_device *ndev;
334	enum ib_mtu tmp;
335	struct mlx4_cmd_mailbox *mailbox;
336	int err = 0;
337
338	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
339	if (IS_ERR(mailbox))
340		return PTR_ERR(mailbox);
341
342	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
343			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
344			   MLX4_CMD_WRAPPED);
345	if (err)
346		goto out;
347
348	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ?
349						IB_WIDTH_4X : IB_WIDTH_1X;
350	props->active_speed	= IB_SPEED_QDR;
351	props->port_cap_flags	= IB_PORT_CM_SUP;
352	if (netw_view)
353		props->gid_tbl_len = MLX4_ROCE_MAX_GIDS;
354	else
355		props->gid_tbl_len   = mdev->dev->caps.gid_table_len[port];
356
357	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
358	props->pkey_tbl_len	= 1;
359	props->max_mtu		= IB_MTU_4096;
360	props->max_vl_num	= 2;
361	props->state		= IB_PORT_DOWN;
362	props->phys_state	= state_to_phys_state(props->state);
363	props->active_mtu	= IB_MTU_256;
364	spin_lock(&iboe->lock);
365	ndev = iboe->netdevs[port - 1];
366	if (!ndev)
367		goto out_unlock;
368
369	tmp = iboe_get_mtu(ndev->if_mtu);
370	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
371
372	props->state		= (netif_running(ndev) && netif_carrier_ok(ndev)) ?
373					IB_PORT_ACTIVE : IB_PORT_DOWN;
374	props->phys_state	= state_to_phys_state(props->state);
375out_unlock:
376	spin_unlock(&iboe->lock);
377out:
378	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
379	return err;
380}
381
382int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
383			 struct ib_port_attr *props, int netw_view)
384{
385	int err;
386
387	memset(props, 0, sizeof *props);
388
389	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
390		ib_link_query_port(ibdev, port, props, netw_view) :
391				eth_link_query_port(ibdev, port, props, netw_view);
392
393	return err;
394}
395
396static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
397			      struct ib_port_attr *props)
398{
399	/* returns host view */
400	return __mlx4_ib_query_port(ibdev, port, props, 0);
401}
402
403int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
404			union ib_gid *gid, int netw_view)
405{
406	struct ib_smp *in_mad  = NULL;
407	struct ib_smp *out_mad = NULL;
408	int err = -ENOMEM;
409	struct mlx4_ib_dev *dev = to_mdev(ibdev);
410	int clear = 0;
411	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
412
413	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
414	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
415	if (!in_mad || !out_mad)
416		goto out;
417
418	init_query_mad(in_mad);
419	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
420	in_mad->attr_mod = cpu_to_be32(port);
421
422	if (mlx4_is_mfunc(dev->dev) && netw_view)
423		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
424
425	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
426	if (err)
427		goto out;
428
429	memcpy(gid->raw, out_mad->data + 8, 8);
430
431	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
432		if (index) {
433			/* For any index > 0, return the null guid */
434			err = 0;
435			clear = 1;
436			goto out;
437		}
438	}
439
440	init_query_mad(in_mad);
441	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
442	in_mad->attr_mod = cpu_to_be32(index / 8);
443
444	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
445			   NULL, NULL, in_mad, out_mad);
446	if (err)
447		goto out;
448
449	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
450
451out:
452	if (clear)
453		memset(gid->raw + 8, 0, 8);
454	kfree(in_mad);
455	kfree(out_mad);
456	return err;
457}
458
459static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
460			  union ib_gid *gid)
461{
462	struct mlx4_ib_dev *dev = to_mdev(ibdev);
463
464	*gid = dev->iboe.gid_table[port - 1][index];
465
466	return 0;
467}
468
469static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
470			     union ib_gid *gid)
471{
472	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
473		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
474	else
475		return iboe_query_gid(ibdev, port, index, gid);
476}
477
478int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
479			 u16 *pkey, int netw_view)
480{
481	struct ib_smp *in_mad  = NULL;
482	struct ib_smp *out_mad = NULL;
483	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
484	int err = -ENOMEM;
485
486	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
487	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
488	if (!in_mad || !out_mad)
489		goto out;
490
491	init_query_mad(in_mad);
492	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
493	in_mad->attr_mod = cpu_to_be32(index / 32);
494
495	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
496		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
497
498	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
499			   in_mad, out_mad);
500	if (err)
501		goto out;
502
503	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
504
505out:
506	kfree(in_mad);
507	kfree(out_mad);
508	return err;
509}
510
511static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
512{
513	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
514}
515
516static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
517				 struct ib_device_modify *props)
518{
519	struct mlx4_cmd_mailbox *mailbox;
520	unsigned long flags;
521
522	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
523		return -EOPNOTSUPP;
524
525	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
526		return 0;
527
528	if (mlx4_is_slave(to_mdev(ibdev)->dev))
529		return -EOPNOTSUPP;
530
531	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
532	memcpy(ibdev->node_desc, props->node_desc, 64);
533	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
534
535	/*
536	 * If possible, pass the node description to the FW so it can generate
537	 * a 'Node Description changed' trap (trap 144).  If the command fails, just ignore it.
538	 */
539	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
540	if (IS_ERR(mailbox))
541		return 0;
542
543	memset(mailbox->buf, 0, 256);
544	memcpy(mailbox->buf, props->node_desc, 64);
545	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
546		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
547
548	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
549
550	return 0;
551}
552
553static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
554			 u32 cap_mask)
555{
556	struct mlx4_cmd_mailbox *mailbox;
557	int err;
558	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
559
560	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
561	if (IS_ERR(mailbox))
562		return PTR_ERR(mailbox);
563
564	memset(mailbox->buf, 0, 256);
565
566	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
567		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
568		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
569	} else {
570		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
571		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
572	}
573
574	err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
575		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
576
577	mlx4_free_cmd_mailbox(dev->dev, mailbox);
578	return err;
579}
580
581static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
582			       struct ib_port_modify *props)
583{
584	struct ib_port_attr attr;
585	u32 cap_mask;
586	int err;
587
588	mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);
589
590	err = mlx4_ib_query_port(ibdev, port, &attr);
591	if (err)
592		goto out;
593
594	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
595		~props->clr_port_cap_mask;
596
597	err = mlx4_SET_PORT(to_mdev(ibdev), port,
598			    !!(mask & IB_PORT_RESET_QKEY_CNTR),
599			    cap_mask);
600
601out:
602	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
603	return err;
604}
605
606static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
607						  struct ib_udata *udata)
608{
609	struct mlx4_ib_dev *dev = to_mdev(ibdev);
610	struct mlx4_ib_ucontext *context;
611	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
612	struct mlx4_ib_alloc_ucontext_resp resp;
613	int err;
614
615	if (!dev->ib_active)
616		return ERR_PTR(-EAGAIN);
617
618	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
619		resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
620		if (mlx4_wc_enabled()) {
621			resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
622			resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
623		} else {
624			resp_v3.bf_reg_size      = 0;
625			resp_v3.bf_regs_per_page = 0;
626		}
627	} else {
628		resp.dev_caps	      = dev->dev->caps.userspace_caps;
629		resp.qp_tab_size      = dev->dev->caps.num_qps;
630		if (mlx4_wc_enabled()) {
631			resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
632			resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
633		} else {
634			resp.bf_reg_size      = 0;
635			resp.bf_regs_per_page = 0;
636		}
637		resp.cqe_size	      = dev->dev->caps.cqe_size;
638	}
639
640	context = kmalloc(sizeof *context, GFP_KERNEL);
641	if (!context)
642		return ERR_PTR(-ENOMEM);
643
644	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
645	if (err) {
646		kfree(context);
647		return ERR_PTR(err);
648	}
649
650	INIT_LIST_HEAD(&context->db_page_list);
651	mutex_init(&context->db_page_mutex);
652
653	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
654		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
655	else
656		err = ib_copy_to_udata(udata, &resp, sizeof(resp));
657
658	if (err) {
659		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
660		kfree(context);
661		return ERR_PTR(-EFAULT);
662	}
663
664	return &context->ibucontext;
665}
666
667static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
668{
669	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
670
671	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
672	kfree(context);
673
674	return 0;
675}
676#ifdef __linux__
677static unsigned long mlx4_ib_get_unmapped_area(struct file *file,
678			unsigned long addr,
679			unsigned long len, unsigned long pgoff,
680			unsigned long flags)
681{
682	struct mm_struct *mm;
683	struct vm_area_struct *vma;
684	unsigned long start_addr;
685	unsigned long page_size_order;
686	unsigned long  command;
687
688	mm = current->mm;
689	if (addr)
690		return current->mm->get_unmapped_area(file, addr, len,
691						pgoff, flags);
692
693	/* The last 8 bits hold the command; the other bits are data for that command */
694	command = pgoff & MLX4_IB_MMAP_CMD_MASK;
695	if (command != MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES)
696		return current->mm->get_unmapped_area(file, addr, len,
697						pgoff, flags);
698
699	page_size_order = pgoff >> MLX4_IB_MMAP_CMD_BITS;
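	/*
	 * Example, assuming MLX4_IB_MMAP_CMD_BITS is 8 and
	 * MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES is 2 (as the comments in this
	 * file suggest): pgoff == 0x302 decodes to command 2 with a
	 * page_size_order of 3, which is then used as an alignment hint
	 * (1 << page_size_order) in the search below.
	 */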
700	/* code is based on the huge-pages get_unmapped_area code */
701	start_addr = mm->free_area_cache;
702
703	if (len <= mm->cached_hole_size)
704		start_addr = TASK_UNMAPPED_BASE;
705
706
707full_search:
708	addr = ALIGN(start_addr, 1 << page_size_order);
709
710	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
711		/* At this point:  (!vma || addr < vma->vm_end). */
712		if (TASK_SIZE - len < addr) {
713			/*
714			 * Start a new search - just in case we missed
715			 * some holes.
716			 */
717			if (start_addr != TASK_UNMAPPED_BASE) {
718				start_addr = TASK_UNMAPPED_BASE;
719				goto full_search;
720			}
721			return -ENOMEM;
722		}
723
724		if (!vma || addr + len <= vma->vm_start)
725			return addr;
726		addr = ALIGN(vma->vm_end, 1 << page_size_order);
727	}
728}
729
730static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
731{
732	struct mlx4_ib_dev *dev = to_mdev(context->device);
733	int err;
734
735	/* The last 8 bits hold the command; the other bits are data for that command */
736	unsigned long  command = vma->vm_pgoff & MLX4_IB_MMAP_CMD_MASK;
737
738	if (command < MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES) {
739		/* compatibility handling for commands 0 and 1 */
740		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
741			return -EINVAL;
742	}
743	if (command == MLX4_IB_MMAP_UAR_PAGE) {
744		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
745
746		if (io_remap_pfn_range(vma, vma->vm_start,
747				       to_mucontext(context)->uar.pfn,
748				       PAGE_SIZE, vma->vm_page_prot))
749			return -EAGAIN;
750	} else if (command == MLX4_IB_MMAP_BLUE_FLAME_PAGE &&
751			dev->dev->caps.bf_reg_size != 0) {
752		vma->vm_page_prot = pgprot_wc(vma->vm_page_prot);
753
754		if (io_remap_pfn_range(vma, vma->vm_start,
755				       to_mucontext(context)->uar.pfn +
756				       dev->dev->caps.num_uars,
757				       PAGE_SIZE, vma->vm_page_prot))
758			return -EAGAIN;
759	} else if (command == MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES) {
760		/* Getting contiguous physical pages */
761		unsigned long total_size = vma->vm_end - vma->vm_start;
762		unsigned long page_size_order = (vma->vm_pgoff) >>
763						MLX4_IB_MMAP_CMD_BITS;
764		struct ib_cmem *ib_cmem;
765		ib_cmem = ib_cmem_alloc_contiguous_pages(context, total_size,
766							page_size_order);
767		if (IS_ERR(ib_cmem)) {
768			err = PTR_ERR(ib_cmem);
769			return err;
770		}
771
772		err = ib_cmem_map_contiguous_pages_to_vma(ib_cmem, vma);
773		if (err) {
774			ib_cmem_release_contiguous_pages(ib_cmem);
775			return err;
776		}
777		return 0;
778	} else
779		return -EINVAL;
780
781	return 0;
782}
783#endif
784
785static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
786				      struct ib_ucontext *context,
787				      struct ib_udata *udata)
788{
789	struct mlx4_ib_pd *pd;
790	int err;
791
792	pd = kmalloc(sizeof *pd, GFP_KERNEL);
793	if (!pd)
794		return ERR_PTR(-ENOMEM);
795
796	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
797	if (err) {
798		kfree(pd);
799		return ERR_PTR(err);
800	}
801
802	if (context)
803		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
804			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
805			kfree(pd);
806			return ERR_PTR(-EFAULT);
807		}
808
809	return &pd->ibpd;
810}
811
812static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
813{
814	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
815	kfree(pd);
816
817	return 0;
818}
819
820static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
821					  struct ib_ucontext *context,
822					  struct ib_udata *udata)
823{
824	struct mlx4_ib_xrcd *xrcd;
825	int err;
826
827	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
828		return ERR_PTR(-ENOSYS);
829
830	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
831	if (!xrcd)
832		return ERR_PTR(-ENOMEM);
833
834	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
835	if (err)
836		goto err1;
837
838	xrcd->pd = ib_alloc_pd(ibdev);
839	if (IS_ERR(xrcd->pd)) {
840		err = PTR_ERR(xrcd->pd);
841		goto err2;
842	}
843
844	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0);
845	if (IS_ERR(xrcd->cq)) {
846		err = PTR_ERR(xrcd->cq);
847		goto err3;
848	}
849
850	return &xrcd->ibxrcd;
851
852err3:
853	ib_dealloc_pd(xrcd->pd);
854err2:
855	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
856err1:
857	kfree(xrcd);
858	return ERR_PTR(err);
859}
860
861static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
862{
863	ib_destroy_cq(to_mxrcd(xrcd)->cq);
864	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
865	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
866	kfree(xrcd);
867
868	return 0;
869}
870
871static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
872{
873	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
874	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
875	struct mlx4_ib_gid_entry *ge;
876
877	ge = kzalloc(sizeof *ge, GFP_KERNEL);
878	if (!ge)
879		return -ENOMEM;
880
881	ge->gid = *gid;
882	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
883		ge->port = mqp->port;
884		ge->added = 1;
885	}
886
887	mutex_lock(&mqp->mutex);
888	list_add_tail(&ge->list, &mqp->gid_list);
889	mutex_unlock(&mqp->mutex);
890
891	return 0;
892}
893
894int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
895		   union ib_gid *gid)
896{
897	u8 mac[6];
898	struct net_device *ndev;
899	int ret = 0;
900
901	if (!mqp->port)
902		return 0;
903
904	spin_lock(&mdev->iboe.lock);
905	ndev = mdev->iboe.netdevs[mqp->port - 1];
906	if (ndev)
907		dev_hold(ndev);
908	spin_unlock(&mdev->iboe.lock);
909
910	if (ndev) {
911		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
912		rtnl_lock();
913		dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac, 6, 0);
914		ret = 1;
915		rtnl_unlock();
916		dev_put(ndev);
917	}
918
919	return ret;
920}
921
922struct mlx4_ib_steering {
923	struct list_head list;
924	u64 reg_id;
925	union ib_gid gid;
926};
927
928static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
929{
930	int err;
931	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
932	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
933	u64 reg_id;
934	struct mlx4_ib_steering *ib_steering = NULL;
935
936	if (mdev->dev->caps.steering_mode ==
937	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
938		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
939		if (!ib_steering)
940			return -ENOMEM;
941	}
942
943	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
944				    !!(mqp->flags &
945				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
946				    MLX4_PROT_IB_IPV6, &reg_id);
947	if (err)
948		goto err_malloc;
949
950	err = add_gid_entry(ibqp, gid);
951	if (err)
952		goto err_add;
953
954	if (ib_steering) {
955		memcpy(ib_steering->gid.raw, gid->raw, 16);
956		ib_steering->reg_id = reg_id;
957		mutex_lock(&mqp->mutex);
958		list_add(&ib_steering->list, &mqp->steering_rules);
959		mutex_unlock(&mqp->mutex);
960	}
961	return 0;
962
963err_add:
964	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
965			      MLX4_PROT_IB_IPV6, reg_id);
966err_malloc:
967	kfree(ib_steering);
968
969	return err;
970}
971
972enum {
973	IBV_FLOW_L4_NONE = 0,
974	IBV_FLOW_L4_OTHER = 3,
975	IBV_FLOW_L4_UDP = 5,
976	IBV_FLOW_L4_TCP = 6
977};
978
979struct mlx4_cm_steering {
980	struct list_head list;
981	u64 reg_id;
982	struct ib_flow_spec spec;
983};
984
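/*
 * Translate a verbs ib_flow_spec into the list of mlx4_spec_list segments
 * (L2, optional IPv4 and L4 entries) that mlx4_flow_attach() expects.  The
 * caller owns rule_list_h and frees its entries on both success and error.
 */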
985static int flow_spec_to_net_rule(struct ib_device *dev, struct ib_flow_spec *flow_spec,
986				  struct list_head *rule_list_h)
987{
988	struct mlx4_spec_list *spec_l2, *spec_l3, *spec_l4;
989	u64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
990
991	spec_l2 = kzalloc(sizeof *spec_l2, GFP_KERNEL);
992	if (!spec_l2)
993		return -ENOMEM;
994
995	switch (flow_spec->type) {
996	case IB_FLOW_ETH:
997		spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
998		memcpy(spec_l2->eth.dst_mac, flow_spec->l2_id.eth.mac, ETH_ALEN);
999		memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
1000		spec_l2->eth.ether_type = flow_spec->l2_id.eth.ethertype;
1001		if (flow_spec->l2_id.eth.vlan_present) {
1002			spec_l2->eth.vlan_id = flow_spec->l2_id.eth.vlan;
1003			spec_l2->eth.vlan_id_msk = cpu_to_be16(0x0fff);
1004		}
1005		break;
1006	case IB_FLOW_IB_UC:
1007		spec_l2->id = MLX4_NET_TRANS_RULE_ID_IB;
1008		if (flow_spec->l2_id.ib_uc.qpn) {
1009			spec_l2->ib.r_u_qpn = cpu_to_be32(flow_spec->l2_id.ib_uc.qpn);
1010			spec_l2->ib.qpn_msk = cpu_to_be32(0xffffff);
1011		}
1012		break;
1013	case IB_FLOW_IB_MC_IPV4:
1014	case IB_FLOW_IB_MC_IPV6:
1015		spec_l2->id = MLX4_NET_TRANS_RULE_ID_IB;
1016		memcpy(spec_l2->ib.dst_gid, flow_spec->l2_id.ib_mc.mgid, 16);
1017		memset(spec_l2->ib.dst_gid_msk, 0xff, 16);
1018		break;
1019	}
1020
1021
1022	list_add_tail(&spec_l2->list, rule_list_h);
1023
1024	if (flow_spec->l2_id.eth.ethertype == cpu_to_be16(ETH_P_IP) ||
1025	    flow_spec->type != IB_FLOW_ETH) {
1026		spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
1027		if (!spec_l3)
1028			return -ENOMEM;
1029
1030		spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
1031		spec_l3->ipv4.src_ip = flow_spec->src_ip;
1032		if (flow_spec->type != IB_FLOW_IB_MC_IPV4 &&
1033		    flow_spec->type != IB_FLOW_IB_MC_IPV6)
1034			spec_l3->ipv4.dst_ip = flow_spec->dst_ip;
1035
1036		if (spec_l3->ipv4.src_ip)
1037			spec_l3->ipv4.src_ip_msk = MLX4_BE_WORD_MASK;
1038		if (spec_l3->ipv4.dst_ip)
1039			spec_l3->ipv4.dst_ip_msk = MLX4_BE_WORD_MASK;
1040
1041		list_add_tail(&spec_l3->list, rule_list_h);
1042	}
1043
1044	if (flow_spec->l4_protocol) {
1045		spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
1046		if (!spec_l4)
1047			return -ENOMEM;
1048
1049		spec_l4->tcp_udp.src_port = flow_spec->src_port;
1050		spec_l4->tcp_udp.dst_port = flow_spec->dst_port;
1051		if (spec_l4->tcp_udp.src_port)
1052			spec_l4->tcp_udp.src_port_msk =
1053						MLX4_BE_SHORT_MASK;
1054		if (spec_l4->tcp_udp.dst_port)
1055			spec_l4->tcp_udp.dst_port_msk =
1056						MLX4_BE_SHORT_MASK;
1057
1058		switch (flow_spec->l4_protocol) {
1059		case IBV_FLOW_L4_UDP:
1060			spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
1061			break;
1062		case IBV_FLOW_L4_TCP:
1063			spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
1064			break;
1065		default:
1066			dev_err(dev->dma_device,
1067				"Unsupported l4 protocol.\n");
1068			kfree(spec_l4);
1069			return -EPROTONOSUPPORT;
1070		}
1071		list_add_tail(&spec_l4->list, rule_list_h);
1072	}
1073	return 0;
1074}
1075
1076static int __mlx4_ib_flow_attach(struct mlx4_ib_dev *mdev,
1077				 struct mlx4_ib_qp *mqp,
1078				 struct ib_flow_spec *flow_spec,
1079				 int priority, int lock_qp)
1080{
1081	u64 reg_id = 0;
1082	int err = 0;
1083	struct mlx4_cm_steering *cm_flow;
1084	struct mlx4_spec_list *spec, *tmp_spec;
1085
1086	struct mlx4_net_trans_rule rule =
1087	{	.queue_mode = MLX4_NET_TRANS_Q_FIFO,
1088		.exclusive = 0,
1089	};
1090
1091	rule.promisc_mode = flow_spec->rule_type;
1092	rule.port = mqp->port;
1093	rule.qpn = mqp->mqp.qpn;
1094	INIT_LIST_HEAD(&rule.list);
1095
1096	cm_flow = kmalloc(sizeof(*cm_flow), GFP_KERNEL);
1097	if (!cm_flow)
1098		return -ENOMEM;
1099
1100	if (rule.promisc_mode == MLX4_FS_REGULAR) {
1101		rule.allow_loopback = !flow_spec->block_mc_loopback;
1102		rule.priority = MLX4_DOMAIN_UVERBS | priority;
1103		err = flow_spec_to_net_rule(&mdev->ib_dev, flow_spec,
1104					    &rule.list);
1105		if (err)
1106			goto free_list;
1107	}
1108
1109	err = mlx4_flow_attach(mdev->dev, &rule, &reg_id);
1110	if (err)
1111		goto free_list;
1112
1113	memcpy(&cm_flow->spec, flow_spec, sizeof(*flow_spec));
1114	cm_flow->reg_id = reg_id;
1115
1116	if (lock_qp)
1117		mutex_lock(&mqp->mutex);
1118	list_add(&cm_flow->list, &mqp->rules_list);
1119	if (lock_qp)
1120		mutex_unlock(&mqp->mutex);
1121
1122free_list:
1123	list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
1124		list_del(&spec->list);
1125		kfree(spec);
1126	}
1127	if (err) {
1128		kfree(cm_flow);
1129		dev_err(mdev->ib_dev.dma_device,
1130			"Failed to attach flow steering rule\n");
1131	}
1132	return err;
1133}
1134
1135static int __mlx4_ib_flow_detach(struct mlx4_ib_dev *mdev,
1136				 struct mlx4_ib_qp *mqp,
1137				 struct ib_flow_spec *spec, int priority,
1138				 int lock_qp)
1139{
1140	struct mlx4_cm_steering *cm_flow;
1141	int ret;
1142
1143	if (lock_qp)
1144		mutex_lock(&mqp->mutex);
1145	list_for_each_entry(cm_flow, &mqp->rules_list, list) {
1146		if (!memcmp(&cm_flow->spec, spec, sizeof(*spec))) {
1147			list_del(&cm_flow->list);
1148			break;
1149		}
1150	}
1151	if (lock_qp)
1152		mutex_unlock(&mqp->mutex);
1153
1154	if (&cm_flow->list == &mqp->rules_list) {
1155		dev_err(mdev->ib_dev.dma_device, "Couldn't find reg_id for flow spec. "
1156			"Steering rule is left attached\n");
1157		return -EINVAL;
1158	}
1159
1160	ret = mlx4_flow_detach(mdev->dev, cm_flow->reg_id);
1161
1162	kfree(cm_flow);
1163	return ret;
1164}
1165
1166static int mlx4_ib_flow_attach(struct ib_qp *qp, struct ib_flow_spec *flow_spec,
1167			       int priority)
1168{
1169	return __mlx4_ib_flow_attach(to_mdev(qp->device), to_mqp(qp),
1170				     flow_spec, priority, 1);
1171}
1172
1173static int mlx4_ib_flow_detach(struct ib_qp *qp, struct ib_flow_spec *spec,
1174			       int priority)
1175{
1176	return __mlx4_ib_flow_detach(to_mdev(qp->device), to_mqp(qp),
1177				     spec, priority, 1);
1178}
1179
1180static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
1181{
1182	struct mlx4_ib_gid_entry *ge;
1183	struct mlx4_ib_gid_entry *tmp;
1184	struct mlx4_ib_gid_entry *ret = NULL;
1185
1186	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1187		if (!memcmp(raw, ge->gid.raw, 16)) {
1188			ret = ge;
1189			break;
1190		}
1191	}
1192
1193	return ret;
1194}
1195
1196static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1197{
1198	int err;
1199	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1200	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1201	u8 mac[6];
1202	struct net_device *ndev;
1203	struct mlx4_ib_gid_entry *ge;
1204	u64 reg_id = 0;
1205
1206	if (mdev->dev->caps.steering_mode ==
1207	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
1208		struct mlx4_ib_steering *ib_steering;
1209
1210		mutex_lock(&mqp->mutex);
1211		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
1212			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
1213				list_del(&ib_steering->list);
1214				break;
1215			}
1216		}
1217		mutex_unlock(&mqp->mutex);
1218		if (&ib_steering->list == &mqp->steering_rules) {
1219			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
1220			return -EINVAL;
1221		}
1222		reg_id = ib_steering->reg_id;
1223		kfree(ib_steering);
1224	}
1225
1226	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1227				    MLX4_PROT_IB_IPV6, reg_id);
1228	if (err)
1229		return err;
1230
1231	mutex_lock(&mqp->mutex);
1232	ge = find_gid_entry(mqp, gid->raw);
1233	if (ge) {
1234		spin_lock(&mdev->iboe.lock);
1235		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
1236		if (ndev)
1237			dev_hold(ndev);
1238		spin_unlock(&mdev->iboe.lock);
1239		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
1240		if (ndev) {
1241			rtnl_lock();
1242			dev_mc_delete(mdev->iboe.netdevs[ge->port - 1], mac, 6, 0);
1243			rtnl_unlock();
1244			dev_put(ndev);
1245		}
1246		list_del(&ge->list);
1247		kfree(ge);
1248	} else
1249		pr_warn("could not find mgid entry\n");
1250
1251	mutex_unlock(&mqp->mutex);
1252
1253	return 0;
1254}
1255
1256static int init_node_data(struct mlx4_ib_dev *dev)
1257{
1258	struct ib_smp *in_mad  = NULL;
1259	struct ib_smp *out_mad = NULL;
1260	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
1261	int err = -ENOMEM;
1262
1263	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
1264	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
1265	if (!in_mad || !out_mad)
1266		goto out;
1267
1268	init_query_mad(in_mad);
1269	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
1270	if (mlx4_is_master(dev->dev))
1271		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
1272
1273	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
1274	if (err)
1275		goto out;
1276
1277	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
1278
1279	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
1280
1281	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
1282	if (err)
1283		goto out;
1284
1285	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
1286	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
1287
1288out:
1289	kfree(in_mad);
1290	kfree(out_mad);
1291	return err;
1292}
1293
1294static ssize_t show_hca(struct device *device, struct device_attribute *attr,
1295			char *buf)
1296{
1297	struct mlx4_ib_dev *dev =
1298		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1299	return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
1300}
1301
1302static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
1303			   char *buf)
1304{
1305	struct mlx4_ib_dev *dev =
1306		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1307	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
1308		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
1309		       (int) dev->dev->caps.fw_ver & 0xffff);
1310}
1311
1312static ssize_t show_rev(struct device *device, struct device_attribute *attr,
1313			char *buf)
1314{
1315	struct mlx4_ib_dev *dev =
1316		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1317	return sprintf(buf, "%x\n", dev->dev->rev_id);
1318}
1319
1320static ssize_t show_board(struct device *device, struct device_attribute *attr,
1321			  char *buf)
1322{
1323	struct mlx4_ib_dev *dev =
1324		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1325	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
1326		       dev->dev->board_id);
1327}
1328
1329static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
1330static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
1331static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
1332static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
1333
1334static struct device_attribute *mlx4_class_attributes[] = {
1335	&dev_attr_hw_rev,
1336	&dev_attr_fw_ver,
1337	&dev_attr_hca_type,
1338	&dev_attr_board_id
1339};
1340
1341static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
1342{
1343#ifdef __linux__
1344	memcpy(eui, dev->dev_addr, 3);
1345	memcpy(eui + 5, dev->dev_addr + 3, 3);
1346#else
1347        memcpy(eui, IF_LLADDR(dev), 3);
1348        memcpy(eui + 5, IF_LLADDR(dev) + 3, 3);
1349#endif
1350	if (vlan_id < 0x1000) {
1351		eui[3] = vlan_id >> 8;
1352		eui[4] = vlan_id & 0xff;
1353	} else {
1354		eui[3] = 0xff;
1355		eui[4] = 0xfe;
1356	}
1357	eui[0] ^= 2;
1358}
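/*
 * Example (hypothetical MAC address): for a device address 00:25:90:aa:bb:cc
 * with no VLAN (vlan_id >= 0x1000), the code above yields the modified
 * EUI-64 interface ID 02:25:90:ff:fe:aa:bb:cc, which becomes bytes 8..15 of
 * the link-local GID built in update_ipv6_gids().
 */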
1359
1360static void update_gids_task(struct work_struct *work)
1361{
1362	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
1363	struct mlx4_cmd_mailbox *mailbox;
1364	union ib_gid *gids;
1365	int err;
1366	struct mlx4_dev	*dev = gw->dev->dev;
1367
1368	mailbox = mlx4_alloc_cmd_mailbox(dev);
1369	if (IS_ERR(mailbox)) {
1370		pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
1371		return;
1372	}
1373
1374	gids = mailbox->buf;
1375	memcpy(gids, gw->gids, sizeof gw->gids);
1376
1377	err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
1378		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1379		       MLX4_CMD_WRAPPED);
1380	if (err)
1381		pr_warn("set port command failed\n");
1382	else {
1383		memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
1384		mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
1385	}
1386
1387	mlx4_free_cmd_mailbox(dev, mailbox);
1388	kfree(gw);
1389}
1390
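/*
 * Rebuild the per-port RoCE GID table from the current net devices: a
 * link-local (fe80::/64) GID is derived for the port's netdev and each of
 * its VLAN devices, stale entries are cleared back to zgid, and any change
 * is pushed to firmware asynchronously via update_gids_task().
 */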
1391static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
1392{
1393	struct net_device *ndev = dev->iboe.netdevs[port - 1];
1394	struct update_gid_work *work;
1395	struct net_device *tmp;
1396	int i;
1397	u8 *hits;
1398	union ib_gid gid;
1399	int index_free;
1400	int found;
1401	int need_update = 0;
1402	int max_gids;
1403	u16 vid;
1404
1405	work = kzalloc(sizeof *work, GFP_ATOMIC);
1406	if (!work)
1407		return -ENOMEM;
1408
1409	hits = kzalloc(128, GFP_ATOMIC);
1410	if (!hits) {
1411		kfree(work);
1412		return -ENOMEM;
1413	}
1414
1415	max_gids = dev->dev->caps.gid_table_len[port];
1416
1417#ifdef __linux__
1418	rcu_read_lock();
1419	for_each_netdev_rcu(&init_net, tmp) {
1420#else
1421        IFNET_RLOCK();
1422        TAILQ_FOREACH(tmp, &V_ifnet, if_link) {
1423#endif
1424		if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
1425			gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
1426			vid = rdma_vlan_dev_vlan_id(tmp);
1427			mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
1428			found = 0;
1429			index_free = -1;
1430			for (i = 0; i < max_gids; ++i) {
1431				if (index_free < 0 &&
1432				    !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
1433					index_free = i;
1434				if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
1435					hits[i] = 1;
1436					found = 1;
1437					break;
1438				}
1439			}
1440
1441			if (!found) {
1442				if (tmp == ndev &&
1443				    (memcmp(&dev->iboe.gid_table[port - 1][0],
1444					    &gid, sizeof gid) ||
1445				     !memcmp(&dev->iboe.gid_table[port - 1][0],
1446					     &zgid, sizeof gid))) {
1447					dev->iboe.gid_table[port - 1][0] = gid;
1448					++need_update;
1449					hits[0] = 1;
1450				} else if (index_free >= 0) {
1451					dev->iboe.gid_table[port - 1][index_free] = gid;
1452					hits[index_free] = 1;
1453					++need_update;
1454				}
1455			}
1456		}
1457#ifdef __linux__
1458        }
1459	rcu_read_unlock();
1460#else
1461        }
1462        IFNET_RUNLOCK();
1463#endif
1464
1465	for (i = 0; i < max_gids; ++i)
1466		if (!hits[i]) {
1467			if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
1468				++need_update;
1469			dev->iboe.gid_table[port - 1][i] = zgid;
1470		}
1471
1472	if (need_update) {
1473		memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
1474		INIT_WORK(&work->work, update_gids_task);
1475		work->port = port;
1476		work->dev = dev;
1477		queue_work(wq, &work->work);
1478	} else
1479		kfree(work);
1480
1481	kfree(hits);
1482	return 0;
1483}
1484
1485static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
1486{
1487	switch (event) {
1488	case NETDEV_UP:
1489#ifdef __linux__
1490	case NETDEV_CHANGEADDR:
1491#endif
1492		update_ipv6_gids(dev, port, 0);
1493		break;
1494
1495	case NETDEV_DOWN:
1496		update_ipv6_gids(dev, port, 1);
1497		dev->iboe.netdevs[port - 1] = NULL;
1498	}
1499}
1500
1501static void netdev_added(struct mlx4_ib_dev *dev, int port)
1502{
1503	update_ipv6_gids(dev, port, 0);
1504}
1505
1506static void netdev_removed(struct mlx4_ib_dev *dev, int port)
1507{
1508	update_ipv6_gids(dev, port, 1);
1509}
1510
1511static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
1512				void *ptr)
1513{
1514	struct net_device *dev = ptr;
1515	struct mlx4_ib_dev *ibdev;
1516	struct net_device *oldnd;
1517	struct mlx4_ib_iboe *iboe;
1518	int port;
1519
1520#ifdef __linux__
1521	if (!net_eq(dev_net(dev), &init_net))
1522		return NOTIFY_DONE;
1523#endif
1524
1525	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
1526	iboe = &ibdev->iboe;
1527
1528	spin_lock(&iboe->lock);
1529	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
1530		oldnd = iboe->netdevs[port - 1];
1531		iboe->netdevs[port - 1] =
1532			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
1533		if (oldnd != iboe->netdevs[port - 1]) {
1534			if (iboe->netdevs[port - 1])
1535				netdev_added(ibdev, port);
1536			else
1537				netdev_removed(ibdev, port);
1538		}
1539	}
1540
1541	if (dev == iboe->netdevs[0] ||
1542	    (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
1543		handle_en_event(ibdev, 1, event);
1544	else if (dev == iboe->netdevs[1]
1545		 || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
1546		handle_en_event(ibdev, 2, event);
1547
1548	spin_unlock(&iboe->lock);
1549
1550	return NOTIFY_DONE;
1551}
1552
1553static void init_pkeys(struct mlx4_ib_dev *ibdev)
1554{
1555	int port;
1556	int slave;
1557	int i;
1558
1559	if (mlx4_is_master(ibdev->dev)) {
1560		for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
1561			for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
1562				for (i = 0;
1563				     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
1564				     ++i) {
1565					ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
1566					/* master has the identity virt2phys pkey mapping */
1567						(slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
1568							ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
1569					mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
1570							     ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
1571				}
1572			}
1573		}
1574		/* initialize pkey cache */
1575		for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
1576			for (i = 0;
1577			     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
1578			     ++i)
1579				ibdev->pkeys.phys_pkey_cache[port-1][i] =
1580					(i) ? 0 : 0xFFFF;
1581		}
1582	}
1583}
1584
1585static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1586{
1587	char name[32];
1588	int eq_per_port = 0;
1589	int added_eqs = 0;
1590	int total_eqs = 0;
1591	int i, j, eq;
1592
1593	/* Legacy mode or comp_pool is not large enough */
1594	if (dev->caps.comp_pool == 0 ||
1595	    dev->caps.num_ports > dev->caps.comp_pool)
1596		return;
1597
1598	eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
1599					dev->caps.num_ports);
1600
1601	/* Init eq table */
1602	added_eqs = 0;
1603	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
1604		added_eqs += eq_per_port;
1605
1606	total_eqs = dev->caps.num_comp_vectors + added_eqs;
1607
1608	ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
1609	if (!ibdev->eq_table)
1610		return;
1611
1612	ibdev->eq_added = added_eqs;
1613
1614	eq = 0;
1615	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
1616		for (j = 0; j < eq_per_port; j++) {
1617			//sprintf(name, "mlx4-ib-%d-%d@%s",
1618			//	i, j, dev->pdev->bus->conf.pd_name);
1619			/* Set IRQ for specific name (per ring) */
1620			if (mlx4_assign_eq(dev, name,
1621					   &ibdev->eq_table[eq])) {
1622				/* Use legacy (same as mlx4_en driver) */
1623				pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
1624				ibdev->eq_table[eq] =
1625					(eq % dev->caps.num_comp_vectors);
1626			}
1627			eq++;
1628		}
1629	}
1630
1631	/* Fill the rest of the vector with legacy EQs */
1632	for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
1633		ibdev->eq_table[eq++] = i;
1634
1635	/* Advertise the new number of EQs to clients */
1636	ibdev->ib_dev.num_comp_vectors = total_eqs;
1637}
1638
1639static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1640{
1641	int i;
1642
1643	/* no additional eqs were added */
1644	if (!ibdev->eq_table)
1645		return;
1646
1647	/* Reset the advertised EQ number */
1648	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
1649
1650	/* Free only the added eqs */
1651	for (i = 0; i < ibdev->eq_added; i++) {
1652		/* Don't free legacy eqs if used */
1653		if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
1654			continue;
1655		mlx4_release_eq(dev, ibdev->eq_table[i]);
1656	}
1657
1658	kfree(ibdev->eq_table);
1659}
1660
1661/*
1662 * create show function and a device_attribute struct pointing to
1663 * the function for _name
1664 */
1665#define DEVICE_DIAG_RPRT_ATTR(_name, _offset, _op_mod)		\
1666static ssize_t show_rprt_##_name(struct device *dev,		\
1667				 struct device_attribute *attr,	\
1668				 char *buf){			\
1669	return show_diag_rprt(dev, buf, _offset, _op_mod);	\
1670}								\
1671static DEVICE_ATTR(_name, S_IRUGO, show_rprt_##_name, NULL);
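/*
 * For example, DEVICE_DIAG_RPRT_ATTR(rq_num_lle, 0x00, 2) below expands
 * roughly to:
 *
 *	static ssize_t show_rprt_rq_num_lle(struct device *dev,
 *					    struct device_attribute *attr,
 *					    char *buf)
 *	{
 *		return show_diag_rprt(dev, buf, 0x00, 2);
 *	}
 *	static DEVICE_ATTR(rq_num_lle, S_IRUGO, show_rprt_rq_num_lle, NULL);
 */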
1672
1673#define MLX4_DIAG_RPRT_CLEAR_DIAGS 3
1674
1675static ssize_t show_diag_rprt(struct device *device, char *buf,
1676			     u32 offset, u8 op_modifier)
1677{
1678	ssize_t ret;
1679	u32 counter_offset = offset;
1680	u32 diag_counter = 0;
1681	struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev,
1682					       ib_dev.dev);
1683
1684	ret = mlx4_query_diag_counters(dev->dev, 1, op_modifier,
1685				       &counter_offset, &diag_counter);
1686	if (ret)
1687		return ret;
1688
1689	return sprintf(buf, "%d\n", diag_counter);
1690}
1691
1692static ssize_t clear_diag_counters(struct device *device,
1693				   struct device_attribute *attr,
1694				   const char *buf, size_t length)
1695{
1696	ssize_t ret;
1697	struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev,
1698					       ib_dev.dev);
1699
1700	ret = mlx4_query_diag_counters(dev->dev, 0, MLX4_DIAG_RPRT_CLEAR_DIAGS,
1701				       NULL, NULL);
1702	if (ret)
1703		return ret;
1704
1705	return length;
1706}
1707
1708DEVICE_DIAG_RPRT_ATTR(rq_num_lle	, 0x00, 2);
1709DEVICE_DIAG_RPRT_ATTR(sq_num_lle	, 0x04, 2);
1710DEVICE_DIAG_RPRT_ATTR(rq_num_lqpoe	, 0x08, 2);
1711DEVICE_DIAG_RPRT_ATTR(sq_num_lqpoe	, 0x0C, 2);
1712DEVICE_DIAG_RPRT_ATTR(rq_num_lpe	, 0x18, 2);
1713DEVICE_DIAG_RPRT_ATTR(sq_num_lpe	, 0x1C, 2);
1714DEVICE_DIAG_RPRT_ATTR(rq_num_wrfe	, 0x20, 2);
1715DEVICE_DIAG_RPRT_ATTR(sq_num_wrfe	, 0x24, 2);
1716DEVICE_DIAG_RPRT_ATTR(sq_num_mwbe	, 0x2C, 2);
1717DEVICE_DIAG_RPRT_ATTR(sq_num_bre	, 0x34, 2);
1718DEVICE_DIAG_RPRT_ATTR(rq_num_lae	, 0x38, 2);
1719DEVICE_DIAG_RPRT_ATTR(sq_num_rire	, 0x44, 2);
1720DEVICE_DIAG_RPRT_ATTR(rq_num_rire	, 0x48, 2);
1721DEVICE_DIAG_RPRT_ATTR(sq_num_rae	, 0x4C, 2);
1722DEVICE_DIAG_RPRT_ATTR(rq_num_rae	, 0x50, 2);
1723DEVICE_DIAG_RPRT_ATTR(sq_num_roe	, 0x54, 2);
1724DEVICE_DIAG_RPRT_ATTR(sq_num_tree	, 0x5C, 2);
1725DEVICE_DIAG_RPRT_ATTR(sq_num_rree	, 0x64, 2);
1726DEVICE_DIAG_RPRT_ATTR(rq_num_rnr	, 0x68, 2);
1727DEVICE_DIAG_RPRT_ATTR(sq_num_rnr	, 0x6C, 2);
1728DEVICE_DIAG_RPRT_ATTR(rq_num_oos	, 0x100, 2);
1729DEVICE_DIAG_RPRT_ATTR(sq_num_oos	, 0x104, 2);
1730DEVICE_DIAG_RPRT_ATTR(rq_num_mce	, 0x108, 2);
1731DEVICE_DIAG_RPRT_ATTR(rq_num_udsdprd	, 0x118, 2);
1732DEVICE_DIAG_RPRT_ATTR(rq_num_ucsdprd	, 0x120, 2);
1733DEVICE_DIAG_RPRT_ATTR(num_cqovf		, 0x1A0, 2);
1734DEVICE_DIAG_RPRT_ATTR(num_eqovf		, 0x1A4, 2);
1735DEVICE_DIAG_RPRT_ATTR(num_baddb		, 0x1A8, 2);
1736
1737static DEVICE_ATTR(clear_diag, S_IWUSR, NULL, clear_diag_counters);
1738
1739static struct attribute *diag_rprt_attrs[] = {
1740	&dev_attr_rq_num_lle.attr,
1741	&dev_attr_sq_num_lle.attr,
1742	&dev_attr_rq_num_lqpoe.attr,
1743	&dev_attr_sq_num_lqpoe.attr,
1744	&dev_attr_rq_num_lpe.attr,
1745	&dev_attr_sq_num_lpe.attr,
1746	&dev_attr_rq_num_wrfe.attr,
1747	&dev_attr_sq_num_wrfe.attr,
1748	&dev_attr_sq_num_mwbe.attr,
1749	&dev_attr_sq_num_bre.attr,
1750	&dev_attr_rq_num_lae.attr,
1751	&dev_attr_sq_num_rire.attr,
1752	&dev_attr_rq_num_rire.attr,
1753	&dev_attr_sq_num_rae.attr,
1754	&dev_attr_rq_num_rae.attr,
1755	&dev_attr_sq_num_roe.attr,
1756	&dev_attr_sq_num_tree.attr,
1757	&dev_attr_sq_num_rree.attr,
1758	&dev_attr_rq_num_rnr.attr,
1759	&dev_attr_sq_num_rnr.attr,
1760	&dev_attr_rq_num_oos.attr,
1761	&dev_attr_sq_num_oos.attr,
1762	&dev_attr_rq_num_mce.attr,
1763	&dev_attr_rq_num_udsdprd.attr,
1764	&dev_attr_rq_num_ucsdprd.attr,
1765	&dev_attr_num_cqovf.attr,
1766	&dev_attr_num_eqovf.attr,
1767	&dev_attr_num_baddb.attr,
1768	&dev_attr_clear_diag.attr,
1769	NULL
1770};
1771
1772static struct attribute_group diag_counters_group = {
1773	.name  = "diag_counters",
1774	.attrs  = diag_rprt_attrs
1775};
1776
1777#ifdef __linux__
1778static int mlx4_ib_proc_init(void)
1779{
1780	/* Create the procfs directories /proc/driver/mlx4_ib/ and
1781	 * /proc/driver/mlx4_ib/mrs for further use by the driver.
1782	 */
1783	int err;
1784
1785	mlx4_ib_driver_dir_entry = proc_mkdir(MLX4_IB_DRIVER_PROC_DIR_NAME,
1786				NULL);
1787	if (!mlx4_ib_driver_dir_entry) {
1788		pr_err("mlx4_ib_proc_init has failed for %s\n",
1789		       MLX4_IB_DRIVER_PROC_DIR_NAME);
1790		err = -ENODEV;
1791		goto error;
1792	}
1793
1794	mlx4_mrs_dir_entry = proc_mkdir(MLX4_IB_MRS_PROC_DIR_NAME,
1795					mlx4_ib_driver_dir_entry);
1796	if (!mlx4_mrs_dir_entry) {
1797		pr_err("mlx4_ib_proc_init has failed for %s\n",
1798		       MLX4_IB_MRS_PROC_DIR_NAME);
1799		err = -ENODEV;
1800		goto remove_entry;
1801	}
1802
1803	return 0;
1804
1805remove_entry:
1806	remove_proc_entry(MLX4_IB_DRIVER_PROC_DIR_NAME,
1807				NULL);
1808error:
1809	return err;
1810}
1811#endif
1812
1813static void init_dev_assign(void)
1814{
1815	int bus, slot, fn, ib_idx;
1816	char *p = dev_assign_str, *t;
1817	char curr_val[32] = {0};
1818	int ret;
1819	int j, i = 0;
1820
1821	memset(dr, 0, sizeof dr);
1822
1823	if (dev_assign_str[0] == 0)
1824		return;
1825
1826	while (strlen(p)) {
1827		ret = sscanf(p, "%02x:%02x.%x-%x", &bus, &slot, &fn, &ib_idx);
1828		if (ret != 4 || ib_idx < 0)
1829			goto err;
1830
1831		for (j = 0; j < i; j++)
1832			if (dr[j].nr == ib_idx)
1833				goto err;
1834
1835		dr[i].bus = bus;
1836		dr[i].dev = slot;
1837		dr[i].func = fn;
1838		dr[i].nr = ib_idx;
1839
1840		t = strchr(p, ',');
1841		sprintf(curr_val, "%02x:%02x.%x-%x", bus, slot, fn, ib_idx);
1842		if ((!t) && strlen(p) == strlen(curr_val))
1843			return;
1844
1845		if (!t || (t + 1) >= dev_assign_str + sizeof dev_assign_str)
1846			goto err;
1847
1848		++i;
1849		if (i >= MAX_DR)
1850			goto err;
1851
1852		p = t + 1;
1853	}
1854
1855	return;
1856err:
1857	memset(dr, 0, sizeof dr);
1858	printk(KERN_WARNING "mlx4_ib: The value of the 'dev_assign_str' parameter "
1859			    "is incorrect. The parameter value is discarded!\n");
1860}
1861
1862static int mlx4_ib_dev_idx(struct mlx4_dev *dev)
1863{
1864	int /*bus,*/ slot, fn;
1865	int i;
1866
1867	if (!dev)
1868		return -1;
1869	else if (!dev->pdev)
1870		return -1;
1871	//else if (!dev->pdev->bus)
1872	//	return -1;
1873
1874	//bus	= dev->pdev->bus->conf.pc_sel.pc_bus;
1875	slot	= PCI_SLOT(dev->pdev->devfn);
1876	fn	= PCI_FUNC(dev->pdev->devfn);
1877
1878	for (i = 0; i < MAX_DR; ++i) {
1879		if (/*dr[i].bus == bus &&*/
1880		    dr[i].dev == slot &&
1881		    dr[i].func == fn) {
1882			return dr[i].nr;
1883		}
1884	}
1885
1886	return -1;
1887}
1888
1889static void *mlx4_ib_add(struct mlx4_dev *dev)
1890{
1891	struct mlx4_ib_dev *ibdev;
1892	int num_ports = 0;
1893	int i, j;
1894	int err;
1895	struct mlx4_ib_iboe *iboe;
1896	int dev_idx;
1897
1898	printk(KERN_INFO "%s", mlx4_ib_version);
1899
1900	mlx4_foreach_ib_transport_port(i, dev)
1901		num_ports++;
1902
1903	/* No point in registering a device with no ports... */
1904	if (num_ports == 0)
1905		return NULL;
1906
1907	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
1908	if (!ibdev) {
1909		dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
1910		return NULL;
1911	}
1912
1913	iboe = &ibdev->iboe;
1914
1915	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
1916		goto err_dealloc;
1917
1918	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
1919		goto err_pd;
1920
1921	ibdev->priv_uar.map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT,
1922		PAGE_SIZE);
1923
1924	if (!ibdev->priv_uar.map)
1925		goto err_uar;
1926
1927	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
1928
1929	ibdev->dev = dev;
1930
1931	dev_idx = mlx4_ib_dev_idx(dev);
1932	if (dev_idx >= 0)
1933		sprintf(ibdev->ib_dev.name, "mlx4_%d", dev_idx);
1934	else
1935		strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
1936
1937	ibdev->ib_dev.owner		= THIS_MODULE;
1938	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
1939	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
1940	ibdev->num_ports		= num_ports;
1941	ibdev->ib_dev.phys_port_cnt     = ibdev->num_ports;
1942	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
1943	ibdev->ib_dev.dma_device	= &dev->pdev->dev;
1944
1945	if (dev->caps.userspace_caps)
1946		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
1947	else
1948		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
1949
1950	ibdev->ib_dev.uverbs_cmd_mask	=
1951		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
1952		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
1953		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
1954		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
1955		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
1956		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
1957		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
1958		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
1959		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
1960		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
1961		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
1962		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
1963		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
1964		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
1965		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
1966		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
1967		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
1968		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
1969		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
1970		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
1971		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
1972		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
1973		(1ull << IB_USER_VERBS_CMD_OPEN_QP)		|
1974		(1ull << IB_USER_VERBS_CMD_ATTACH_FLOW)		|
1975		(1ull << IB_USER_VERBS_CMD_DETACH_FLOW);
1977
1978	ibdev->ib_dev.query_device	= mlx4_ib_query_device;
1979	ibdev->ib_dev.query_port	= mlx4_ib_query_port;
1980	ibdev->ib_dev.get_link_layer	= mlx4_ib_port_link_layer;
1981	ibdev->ib_dev.query_gid		= mlx4_ib_query_gid;
1982	ibdev->ib_dev.query_pkey	= mlx4_ib_query_pkey;
1983	ibdev->ib_dev.modify_device	= mlx4_ib_modify_device;
1984	ibdev->ib_dev.modify_port	= mlx4_ib_modify_port;
1985	ibdev->ib_dev.alloc_ucontext	= mlx4_ib_alloc_ucontext;
1986	ibdev->ib_dev.dealloc_ucontext	= mlx4_ib_dealloc_ucontext;
1987#ifdef __linux__
1988	ibdev->ib_dev.mmap		= mlx4_ib_mmap;
1989	ibdev->ib_dev.get_unmapped_area = mlx4_ib_get_unmapped_area;
1990#endif
1991	ibdev->ib_dev.alloc_pd		= mlx4_ib_alloc_pd;
1992	ibdev->ib_dev.dealloc_pd	= mlx4_ib_dealloc_pd;
1993	ibdev->ib_dev.create_ah		= mlx4_ib_create_ah;
1994	ibdev->ib_dev.query_ah		= mlx4_ib_query_ah;
1995	ibdev->ib_dev.destroy_ah	= mlx4_ib_destroy_ah;
1996	ibdev->ib_dev.create_srq	= mlx4_ib_create_srq;
1997	ibdev->ib_dev.modify_srq	= mlx4_ib_modify_srq;
1998	ibdev->ib_dev.query_srq		= mlx4_ib_query_srq;
1999	ibdev->ib_dev.destroy_srq	= mlx4_ib_destroy_srq;
2000	ibdev->ib_dev.post_srq_recv	= mlx4_ib_post_srq_recv;
2001	ibdev->ib_dev.create_qp		= mlx4_ib_create_qp;
2002	ibdev->ib_dev.modify_qp		= mlx4_ib_modify_qp;
2003	ibdev->ib_dev.query_qp		= mlx4_ib_query_qp;
2004	ibdev->ib_dev.destroy_qp	= mlx4_ib_destroy_qp;
2005	ibdev->ib_dev.post_send		= mlx4_ib_post_send;
2006	ibdev->ib_dev.post_recv		= mlx4_ib_post_recv;
2007	ibdev->ib_dev.create_cq		= mlx4_ib_create_cq;
2008	ibdev->ib_dev.modify_cq		= mlx4_ib_modify_cq;
2009	ibdev->ib_dev.resize_cq		= mlx4_ib_resize_cq;
2010	ibdev->ib_dev.destroy_cq	= mlx4_ib_destroy_cq;
2011	ibdev->ib_dev.poll_cq		= mlx4_ib_poll_cq;
2012	ibdev->ib_dev.req_notify_cq	= mlx4_ib_arm_cq;
2013	ibdev->ib_dev.get_dma_mr	= mlx4_ib_get_dma_mr;
2014	ibdev->ib_dev.reg_user_mr	= mlx4_ib_reg_user_mr;
2015	ibdev->ib_dev.dereg_mr		= mlx4_ib_dereg_mr;
2016	ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
2017	ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
2018	ibdev->ib_dev.free_fast_reg_page_list  = mlx4_ib_free_fast_reg_page_list;
2019	ibdev->ib_dev.attach_mcast	= mlx4_ib_mcg_attach;
2020	ibdev->ib_dev.detach_mcast	= mlx4_ib_mcg_detach;
2021	ibdev->ib_dev.attach_flow	= mlx4_ib_flow_attach;
2022	ibdev->ib_dev.detach_flow	= mlx4_ib_flow_detach;
2023	ibdev->ib_dev.process_mad	= mlx4_ib_process_mad;
2024
2025	if (!mlx4_is_slave(ibdev->dev)) {
2026		ibdev->ib_dev.alloc_fmr		= mlx4_ib_fmr_alloc;
2027		ibdev->ib_dev.map_phys_fmr	= mlx4_ib_map_phys_fmr;
2028		ibdev->ib_dev.unmap_fmr		= mlx4_ib_unmap_fmr;
2029		ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;
2030	}
2031
2032	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2033		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
2034		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
2035		ibdev->ib_dev.uverbs_cmd_mask |=
2036			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
2037			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
2038	}
2039
2040	mlx4_ib_alloc_eqs(dev, ibdev);
2041
2042	spin_lock_init(&iboe->lock);
2043
2044	if (init_node_data(ibdev))
2045		goto err_map;
2046
2047	for (i = 0; i < ibdev->num_ports; ++i) {
2048		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2049						IB_LINK_LAYER_ETHERNET) {
2050			err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
2051			if (err)
2052				ibdev->counters[i] = -1;
2053		} else
2054			ibdev->counters[i] = -1;
2055	}
2056
2057	spin_lock_init(&ibdev->sm_lock);
2058	mutex_init(&ibdev->cap_mask_mutex);
2059
2060	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2061	    !mlx4_is_slave(dev)) {
2062		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2063		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2064					    MLX4_IB_UC_STEER_QPN_ALIGN, &ibdev->steer_qpn_base, 0);
2065		if (err)
2066			goto err_counter;
2067
2068		ibdev->ib_uc_qpns_bitmap =
2069			kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
2070				sizeof(long),
2071				GFP_KERNEL);
2072		if (!ibdev->ib_uc_qpns_bitmap) {
2073			dev_err(&dev->pdev->dev, "bit map alloc failed\n");
2074			goto err_steer_qp_release;
2075		}
2076
2077		bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
2078
2079		err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(dev, ibdev->steer_qpn_base,
2080				ibdev->steer_qpn_base + ibdev->steer_qpn_count - 1);
2081		if (err)
2082			goto err_steer_free_bitmap;
2083	}
2084
2085	if (ib_register_device(&ibdev->ib_dev, NULL))
2086		goto err_steer_free_bitmap;
2087
2088	if (mlx4_ib_mad_init(ibdev))
2089		goto err_reg;
2090
2091	if (mlx4_ib_init_sriov(ibdev))
2092		goto err_mad;
2093
2094	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
2095		iboe->nb.notifier_call = mlx4_ib_netdev_event;
2096		err = register_netdevice_notifier(&iboe->nb);
2097		if (err)
2098			goto err_sriov;
2099	}
2100
2101	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
2102		if (device_create_file(&ibdev->ib_dev.dev,
2103				       mlx4_class_attributes[j]))
2104			goto err_notif;
2105	}
2106	if (sysfs_create_group(&ibdev->ib_dev.dev.kobj, &diag_counters_group))
2107		goto err_notif;
2108
2109	ibdev->ib_active = true;
2110
2111	if (mlx4_is_mfunc(ibdev->dev))
2112		init_pkeys(ibdev);
2113
2114	/* create paravirt contexts for any VFs which are active */
2115	if (mlx4_is_master(ibdev->dev)) {
2116		for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2117			if (j == mlx4_master_func_num(ibdev->dev))
2118				continue;
2119			if (mlx4_is_slave_active(ibdev->dev, j))
2120				do_slave_init(ibdev, j, 1);
2121		}
2122	}
2123	return ibdev;
2124
2125err_notif:
2126	if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2127		pr_warn("failure unregistering notifier\n");
2128	flush_workqueue(wq);
2129
2130err_sriov:
2131	mlx4_ib_close_sriov(ibdev);
2132
2133err_mad:
2134	mlx4_ib_mad_cleanup(ibdev);
2135
2136err_reg:
2137	ib_unregister_device(&ibdev->ib_dev);
2138
2139err_steer_free_bitmap:
2140	kfree(ibdev->ib_uc_qpns_bitmap);
2141
2142err_steer_qp_release:
2143	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED)
2144		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2145				ibdev->steer_qpn_count);
2146err_counter:
2147	for (; i; --i)
2148		if (ibdev->counters[i - 1] != -1)
2149			mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
2150
2151err_map:
2152	iounmap(ibdev->priv_uar.map);
2153	mlx4_ib_free_eqs(dev, ibdev);
2154
2155err_uar:
2156	mlx4_uar_free(dev, &ibdev->priv_uar);
2157
2158err_pd:
2159	mlx4_pd_free(dev, ibdev->priv_pdn);
2160
2161err_dealloc:
2162	ib_dealloc_device(&ibdev->ib_dev);
2163
2164	return NULL;
2165}
2166
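/*
 * Carve a power-of-two aligned block large enough for 'count' QPNs out of
 * the UC steering range reserved in mlx4_ib_add().  On success *qpn is set
 * to the first QPN of the block and 0 is returned; otherwise the negative
 * value from bitmap_find_free_region() is returned.
 */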
2167int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
2168{
2169	int offset;
2170
2171	WARN_ON(!dev->ib_uc_qpns_bitmap);
2172
2173	offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
2174					 dev->steer_qpn_count,
2175					 get_count_order(count));
2176	if (offset < 0)
2177		return offset;
2178
2179	*qpn = dev->steer_qpn_base + offset;
2180	return 0;
2181}
2182
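/*
 * Return a block previously handed out by mlx4_ib_steer_qp_alloc() to the
 * bitmap.  A zero qpn, or a device that is not in device-managed steering
 * mode, is silently ignored.
 */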
2183void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
2184{
2185	if (!qpn ||
2186	    dev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
2187		return;
2188
2189	BUG_ON(qpn < dev->steer_qpn_base);
2190
2191	bitmap_release_region(dev->ib_uc_qpns_bitmap,
2192			qpn - dev->steer_qpn_base, get_count_order(count));
2193}
2194
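/*
 * Attach or detach (according to 'is_attach') the unicast IB steering rule
 * that matches this QP's number, in the NIC domain.
 */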
2195int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
2196			 int is_attach)
2197{
2198	struct ib_flow_spec spec = {
2199		.type = IB_FLOW_IB_UC,
2200		.l2_id.ib_uc.qpn  = mqp->ibqp.qp_num,
2201	};
2202
2203	return is_attach ?
2204		__mlx4_ib_flow_attach(mdev, mqp, &spec, MLX4_DOMAIN_NIC, 0)
2205		: __mlx4_ib_flow_detach(mdev, mqp, &spec, MLX4_DOMAIN_NIC, 0);
2206}
2207
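/*
 * mlx4 core "remove" callback: undo mlx4_ib_add() in roughly reverse
 * order (SR-IOV, sysfs, MAD layer, IB core registration, steering QPN
 * range, netdev notifier, counters, ports, EQs, UAR and PD), then free
 * the ib_device itself.
 */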
2208static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
2209{
2210	struct mlx4_ib_dev *ibdev = ibdev_ptr;
2211	int p, j;
2212
2213	mlx4_ib_close_sriov(ibdev);
2214	sysfs_remove_group(&ibdev->ib_dev.dev.kobj, &diag_counters_group);
2215	mlx4_ib_mad_cleanup(ibdev);
2216
2217	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
2218		device_remove_file(&ibdev->ib_dev.dev, mlx4_class_attributes[j]);
2219	}
2220
2221	ib_unregister_device(&ibdev->ib_dev);
2222
2223	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
2224		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2225				ibdev->steer_qpn_count);
2226		kfree(ibdev->ib_uc_qpns_bitmap);
2227	}
2228
2229	if (ibdev->iboe.nb.notifier_call) {
2230		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2231			pr_warn("failure unregistering notifier\n");
2232		ibdev->iboe.nb.notifier_call = NULL;
2233	}
2234	iounmap(ibdev->priv_uar.map);
2235	for (p = 0; p < ibdev->num_ports; ++p)
2236		if (ibdev->counters[p] != -1)
2237			mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
2238	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
2239		mlx4_CLOSE_PORT(dev, p);
2240
2241	mlx4_ib_free_eqs(dev, ibdev);
2242
2243	mlx4_uar_free(dev, &ibdev->priv_uar);
2244	mlx4_pd_free(dev, ibdev->priv_pdn);
2245	ib_dealloc_device(&ibdev->ib_dev);
2246}
2247
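/*
 * Queue one work item per port to create (do_init != 0) or destroy the
 * tunnel QPs for the given slave.  Only the master function does this,
 * and nothing is queued once SR-IOV teardown (is_going_down) has begun.
 */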
2248static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
2249{
2250	struct mlx4_ib_demux_work **dm = NULL;
2251	struct mlx4_dev *dev = ibdev->dev;
2252	int i;
2253	unsigned long flags;
2254
2255	if (!mlx4_is_master(dev))
2256		return;
2257
2258	dm = kcalloc(dev->caps.num_ports, sizeof *dm, GFP_ATOMIC);
2259	if (!dm) {
2260		pr_err("failed to allocate memory for tunneling qp update\n");
2261		goto out;
2262	}
2263
2264	for (i = 0; i < dev->caps.num_ports; i++) {
2265		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
2266		if (!dm[i]) {
2267			pr_err("failed to allocate memory for tunneling qp update work struct\n");
2268			for (i = 0; i < dev->caps.num_ports; i++)
2269				kfree(dm[i]);
2272			goto out;
2273		}
2274	}
2275	/* initialize or tear down tunnel QPs for the slave */
2276	for (i = 0; i < dev->caps.num_ports; i++) {
2277		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
2278		dm[i]->port = i + 1;
2279		dm[i]->slave = slave;
2280		dm[i]->do_init = do_init;
2281		dm[i]->dev = ibdev;
2282		spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
2283		if (!ibdev->sriov.is_going_down)
2284			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
2285		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
2286	}
2287out:
2288	kfree(dm);
2290	return;
2291}
2292
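/*
 * mlx4 core event callback.  'param' carries a port number, a slave id or
 * an EQE pointer depending on the event.  Port up/down and catastrophic
 * errors are translated into IB events and dispatched; port management
 * change EQEs are handed to handle_port_mgmt_change_event(), via the work
 * queue on the master and synchronously otherwise; slave init/shutdown
 * events trigger do_slave_init().
 */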
2293static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
2294			  enum mlx4_dev_event event, unsigned long param)
2295{
2296	struct ib_event ibev;
2297	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
2298	struct mlx4_eqe *eqe = NULL;
2299	struct ib_event_work *ew;
2300	int p = 0;
2301
2302	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
2303		eqe = (struct mlx4_eqe *)param;
2304	else
2305		p = (int) param;
2306
2307	switch (event) {
2308	case MLX4_DEV_EVENT_PORT_UP:
2309		if (p > ibdev->num_ports)
2310			return;
2311		if (mlx4_is_master(dev) &&
2312		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
2313			IB_LINK_LAYER_INFINIBAND) {
2314			mlx4_ib_invalidate_all_guid_record(ibdev, p);
2315		}
2316		mlx4_ib_info((struct ib_device *) ibdev_ptr,
2317			     "Port %d logical link is up\n", p);
2318		ibev.event = IB_EVENT_PORT_ACTIVE;
2319		break;
2320
2321	case MLX4_DEV_EVENT_PORT_DOWN:
2322		if (p > ibdev->num_ports)
2323			return;
2324		mlx4_ib_info((struct ib_device *) ibdev_ptr,
2325			     "Port %d logical link is down\n", p);
2326		ibev.event = IB_EVENT_PORT_ERR;
2327		break;
2328
2329	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
2330		ibdev->ib_active = false;
2331		ibev.event = IB_EVENT_DEVICE_FATAL;
2332		break;
2333
2334	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
2335		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
2336		if (!ew) {
2337			pr_err("failed to allocate memory for events work\n");
2338			break;
2339		}
2340
2341		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
2342		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
2343		ew->ib_dev = ibdev;
2344		/* need to queue only for port owner, which uses GEN_EQE */
2345		if (mlx4_is_master(dev))
2346			queue_work(wq, &ew->work);
2347		else
2348			handle_port_mgmt_change_event(&ew->work);
2349		return;
2350
2351	case MLX4_DEV_EVENT_SLAVE_INIT:
2352		/* here, p is the slave id */
2353		do_slave_init(ibdev, p, 1);
2354		return;
2355
2356	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
2357		/* here, p is the slave id */
2358		do_slave_init(ibdev, p, 0);
2359		return;
2360
2361	default:
2362		return;
2363	}
2364
2365	ibev.device	      = ibdev_ptr;
2366	ibev.element.port_num = (u8) p;
2367
2368	ib_dispatch_event(&ibev);
2369}
2370
2371static struct mlx4_interface mlx4_ib_interface = {
2372	.add		= mlx4_ib_add,
2373	.remove		= mlx4_ib_remove,
2374	.event		= mlx4_ib_event,
2375	.protocol	= MLX4_PROT_IB_IPV6
2376};
2377
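/*
 * Module init: create the driver work queue, the procfs directories
 * (Linux only) and the multicast group machinery, parse dev_assign_str,
 * and register with the mlx4 core so mlx4_ib_add() runs for every
 * ConnectX device.
 */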
2378static int __init mlx4_ib_init(void)
2379{
2380	int err;
2381
2382	wq = create_singlethread_workqueue("mlx4_ib");
2383	if (!wq)
2384		return -ENOMEM;
2385
2386#ifdef __linux__
2387	err = mlx4_ib_proc_init();
2388	if (err)
2389		goto clean_wq;
2390#endif
2391
2392	err = mlx4_ib_mcg_init();
2393	if (err)
2394		goto clean_proc;
2395
2396	init_dev_assign();
2397
2398	err = mlx4_register_interface(&mlx4_ib_interface);
2399	if (err)
2400		goto clean_mcg;
2401
2402	return 0;
2403
2404clean_mcg:
2405	mlx4_ib_mcg_destroy();
2406
2407clean_proc:
2408#ifdef __linux__
2409	remove_proc_entry(MLX4_IB_MRS_PROC_DIR_NAME,
2410			  mlx4_ib_driver_dir_entry);
2411	remove_proc_entry(MLX4_IB_DRIVER_PROC_DIR_NAME, NULL);
2412
2413clean_wq:
2414#endif
2415	destroy_workqueue(wq);
2416	return err;
2417}
2418
2419static void __exit mlx4_ib_cleanup(void)
2420{
2421	mlx4_unregister_interface(&mlx4_ib_interface);
2422	mlx4_ib_mcg_destroy();
2423	destroy_workqueue(wq);
2424
2425	/* Remove proc entries */
2426#ifdef __linux__
2427	remove_proc_entry(MLX4_IB_MRS_PROC_DIR_NAME,
2428				mlx4_ib_driver_dir_entry);
2429	remove_proc_entry(MLX4_IB_DRIVER_PROC_DIR_NAME, NULL);
2430#endif
2431
2432}
2433
2434module_init(mlx4_ib_init);
2435module_exit(mlx4_ib_cleanup);
2436
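/*
 * FreeBSD kernel module glue: declare the module and its dependencies on
 * the mlx4 core and ibcore modules so the kernel linker loads them in the
 * right order.  The event handler is a stub; the actual init and teardown
 * are done by the module_init()/module_exit() hooks above.
 */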
2437#undef MODULE_VERSION
2438#include <sys/module.h>
2439static int
2440mlx4ib_evhand(module_t mod, int event, void *arg)
2441{
2442	return (0);
2443}
2444
2445static moduledata_t mlx4ib_mod = {
2446	.name = "mlx4ib",
2447	.evhand = mlx4ib_evhand,
2448};
2449
2450DECLARE_MODULE(mlx4ib, mlx4ib_mod, SI_SUB_SMP, SI_ORDER_ANY);
2451MODULE_DEPEND(mlx4ib, mlx4, 1, 1, 1);
2452MODULE_DEPEND(mlx4ib, ibcore, 1, 1, 1);
2453