main.c revision 272407
1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses.  You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 *     Redistribution and use in source and binary forms, with or
12 *     without modification, are permitted provided that the following
13 *     conditions are met:
14 *
15 *      - Redistributions of source code must retain the above
16 *        copyright notice, this list of conditions and the following
17 *        disclaimer.
18 *
19 *      - Redistributions in binary form must reproduce the above
20 *        copyright notice, this list of conditions and the following
21 *        disclaimer in the documentation and/or other materials
22 *        provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/module.h>
35
36#ifdef __linux__
37#include <linux/proc_fs.h>
38#endif
39
40#include <linux/slab.h>
41#include <linux/errno.h>
42#include <linux/netdevice.h>
43#include <linux/inetdevice.h>
44#include <linux/if_vlan.h>
45#include <linux/bitops.h>
46#include <linux/if_ether.h>
47#include <linux/fs.h>
48
49#include <rdma/ib_smi.h>
50#include <rdma/ib_user_verbs.h>
51#include <rdma/ib_addr.h>
52
53#include <linux/mlx4/driver.h>
54#include <linux/mlx4/cmd.h>
55#include <linux/sched.h>
56#include "mlx4_ib.h"
57#include "user.h"
58#include "wc.h"
59
60#define DRV_NAME	MLX4_IB_DRV_NAME
61#define DRV_VERSION	"1.0"
62#define DRV_RELDATE	"April 4, 2008"
63
64#define MLX4_IB_DRIVER_PROC_DIR_NAME "driver/mlx4_ib"
65#define MLX4_IB_MRS_PROC_DIR_NAME "mrs"
66
67MODULE_AUTHOR("Roland Dreier");
68MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
69MODULE_LICENSE("Dual BSD/GPL");
70MODULE_VERSION(DRV_VERSION);
71
72int mlx4_ib_sm_guid_assign = 1;
73
74#ifdef __linux__
75struct proc_dir_entry *mlx4_mrs_dir_entry;
76static struct proc_dir_entry *mlx4_ib_driver_dir_entry;
77#endif
78
79module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
80MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");
81
82static char dev_assign_str[512];
83//module_param_string(dev_assign_str, dev_assign_str, sizeof(dev_assign_str), 0644);
84MODULE_PARM_DESC(dev_assign_str, "Map all device function numbers to "
85		 "IB device numbers following the pattern: "
86		 "bb:dd.f-0,bb:dd.f-1,... (all numbers are hexadecimal)."
87		 " Max supported devices: 32");
88
89static const char mlx4_ib_version[] =
90	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
91	DRV_VERSION " (" DRV_RELDATE ")\n";
92
93struct update_gid_work {
94	struct work_struct	work;
95	union ib_gid		gids[128];
96	struct mlx4_ib_dev     *dev;
97	int			port;
98};
99
100struct dev_rec {
101	int	bus;
102	int	dev;
103	int	func;
104	int	nr;
105};
106
107#define MAX_DR 32
108static struct dev_rec dr[MAX_DR];
109
110static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
111
112static struct workqueue_struct *wq;
113
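/*
 * Fill in the common SMP header fields for a SubnGet() management datagram.
 */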
114static void init_query_mad(struct ib_smp *mad)
115{
116	mad->base_version  = 1;
117	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
118	mad->class_version = 1;
119	mad->method	   = IB_MGMT_METHOD_GET;
120}
121
122static union ib_gid zgid;
123
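/*
 * ib_device query_device handler: read NODE_INFO via a MAD and combine it
 * with the cached firmware capabilities to fill struct ib_device_attr.
 */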
124static int mlx4_ib_query_device(struct ib_device *ibdev,
125				struct ib_device_attr *props)
126{
127	struct mlx4_ib_dev *dev = to_mdev(ibdev);
128	struct ib_smp *in_mad  = NULL;
129	struct ib_smp *out_mad = NULL;
130	int err = -ENOMEM;
131
132	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
133	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
134	if (!in_mad || !out_mad)
135		goto out;
136
137	init_query_mad(in_mad);
138	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
139
140	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
141			   1, NULL, NULL, in_mad, out_mad);
142	if (err)
143		goto out;
144
145	memset(props, 0, sizeof *props);
146
147	props->fw_ver = dev->dev->caps.fw_ver;
148	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
149		IB_DEVICE_PORT_ACTIVE_EVENT		|
150		IB_DEVICE_SYS_IMAGE_GUID		|
151		IB_DEVICE_RC_RNR_NAK_GEN		|
152		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK	|
153		IB_DEVICE_SHARED_MR;
154
155	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
156		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
157	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
158		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
159	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
160		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
161	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
162		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
163	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
164		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
165	if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
166		props->device_cap_flags |= IB_DEVICE_UD_TSO;
167	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
168		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
169	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
170	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
171	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
172		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
173	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
174		props->device_cap_flags |= IB_DEVICE_XRC;
175
176	props->device_cap_flags |= IB_DEVICE_QPG;
177	if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
178		props->device_cap_flags |= IB_DEVICE_UD_RSS;
179		props->max_rss_tbl_sz = dev->dev->caps.max_rss_tbl_sz;
180	}
181	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
182		0xffffff;
183	props->vendor_part_id	   = dev->dev->pdev->device;
184	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
185	memcpy(&props->sys_image_guid, out_mad->data +	4, 8);
186
187	props->max_mr_size	   = ~0ull;
188	props->page_size_cap	   = dev->dev->caps.page_size_cap;
189	props->max_qp		   = dev->dev->quotas.qp;
190	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
191	props->max_sge		   = min(dev->dev->caps.max_sq_sg,
192					 dev->dev->caps.max_rq_sg);
193	props->max_cq		   = dev->dev->quotas.cq;
194	props->max_cqe		   = dev->dev->caps.max_cqes;
195	props->max_mr		   = dev->dev->quotas.mpt;
196	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
197	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
198	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
199	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
200	props->max_srq		   = dev->dev->quotas.srq;
201	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
202	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
203	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
204	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
205	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
206		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
207	props->masked_atomic_cap   = props->atomic_cap;
208	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
209	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
210	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
211	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
212					   props->max_mcast_grp;
213	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
214
215out:
216	kfree(in_mad);
217	kfree(out_mad);
218
219	return err;
220}
221
222static enum rdma_link_layer
223mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
224{
225	struct mlx4_dev *dev = to_mdev(device)->dev;
226
227	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
228		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
229}
230
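/*
 * Query port attributes for an InfiniBand link using PORT_INFO (and, when
 * needed, EXTENDED_PORT_INFO) MADs.  netw_view requests the network view
 * on multi-function (SR-IOV) devices.
 */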
231static int ib_link_query_port(struct ib_device *ibdev, u8 port,
232			      struct ib_port_attr *props, int netw_view)
233{
234	struct ib_smp *in_mad  = NULL;
235	struct ib_smp *out_mad = NULL;
236	int ext_active_speed;
237	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
238	int err = -ENOMEM;
239
240	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
241	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
242	if (!in_mad || !out_mad)
243		goto out;
244
245	init_query_mad(in_mad);
246	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
247	in_mad->attr_mod = cpu_to_be32(port);
248
249	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
250		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
251
252	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
253				in_mad, out_mad);
254	if (err)
255		goto out;
256
257
258	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
259	props->lmc		= out_mad->data[34] & 0x7;
260	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
261	props->sm_sl		= out_mad->data[36] & 0xf;
262	props->state		= out_mad->data[32] & 0xf;
263	props->phys_state	= out_mad->data[33] >> 4;
264	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
265	if (netw_view)
266		props->gid_tbl_len = out_mad->data[50];
267	else
268		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
269	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
270	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
271	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
272	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
273	props->active_width	= out_mad->data[31] & 0xf;
274	props->active_speed	= out_mad->data[35] >> 4;
275	props->max_mtu		= out_mad->data[41] & 0xf;
276	props->active_mtu	= out_mad->data[36] >> 4;
277	props->subnet_timeout	= out_mad->data[51] & 0x1f;
278	props->max_vl_num	= out_mad->data[37] >> 4;
279	props->init_type_reply	= out_mad->data[41] >> 4;
280
281	/* Check if extended speeds (EDR/FDR/...) are supported */
282	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
283		ext_active_speed = out_mad->data[62] >> 4;
284
285		switch (ext_active_speed) {
286		case 1:
287			props->active_speed = IB_SPEED_FDR;
288			break;
289		case 2:
290			props->active_speed = IB_SPEED_EDR;
291			break;
292		}
293	}
294
295	/* If the reported active speed is QDR, check if it is FDR-10 */
296	if (props->active_speed == IB_SPEED_QDR) {
297		init_query_mad(in_mad);
298		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
299		in_mad->attr_mod = cpu_to_be32(port);
300
301		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
302				   NULL, NULL, in_mad, out_mad);
303		if (err)
304			goto out;
305
306		/* Checking LinkSpeedActive for FDR-10 */
307		if (out_mad->data[15] & 0x1)
308			props->active_speed = IB_SPEED_FDR10;
309	}
310
311	/* Avoid wrong speed value returned by FW if the IB link is down. */
312	if (props->state == IB_PORT_DOWN)
313		 props->active_speed = IB_SPEED_SDR;
314
315out:
316	kfree(in_mad);
317	kfree(out_mad);
318	return err;
319}
320
321static u8 state_to_phys_state(enum ib_port_state state)
322{
323	return state == IB_PORT_ACTIVE ? 5 : 3;
324}
325
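/*
 * Query port attributes for an Ethernet (RoCE) link from QUERY_PORT and
 * the state of the associated net_device.
 */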
326static int eth_link_query_port(struct ib_device *ibdev, u8 port,
327			       struct ib_port_attr *props, int netw_view)
328{
329
330	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
331	struct mlx4_ib_iboe *iboe = &mdev->iboe;
332	struct net_device *ndev;
333	enum ib_mtu tmp;
334	struct mlx4_cmd_mailbox *mailbox;
335	int err = 0;
336
337	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
338	if (IS_ERR(mailbox))
339		return PTR_ERR(mailbox);
340
341	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
342			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
343			   MLX4_CMD_WRAPPED);
344	if (err)
345		goto out;
346
347	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ?
348						IB_WIDTH_4X : IB_WIDTH_1X;
349	props->active_speed	= IB_SPEED_QDR;
350	props->port_cap_flags	= IB_PORT_CM_SUP;
351	if (netw_view)
352		props->gid_tbl_len = MLX4_ROCE_MAX_GIDS;
353	else
354		props->gid_tbl_len   = mdev->dev->caps.gid_table_len[port];
355
356	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
357	props->pkey_tbl_len	= 1;
358	props->max_mtu		= IB_MTU_4096;
359	props->max_vl_num	= 2;
360	props->state		= IB_PORT_DOWN;
361	props->phys_state	= state_to_phys_state(props->state);
362	props->active_mtu	= IB_MTU_256;
363	spin_lock(&iboe->lock);
364	ndev = iboe->netdevs[port - 1];
365	if (!ndev)
366		goto out_unlock;
367
368	tmp = iboe_get_mtu(ndev->if_mtu);
369	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
370
371	props->state		= (netif_running(ndev) && netif_carrier_ok(ndev)) ?
372					IB_PORT_ACTIVE : IB_PORT_DOWN;
373	props->phys_state	= state_to_phys_state(props->state);
374out_unlock:
375	spin_unlock(&iboe->lock);
376out:
377	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
378	return err;
379}
380
381int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
382			 struct ib_port_attr *props, int netw_view)
383{
384	int err;
385
386	memset(props, 0, sizeof *props);
387
388	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
389		ib_link_query_port(ibdev, port, props, netw_view) :
390				eth_link_query_port(ibdev, port, props, netw_view);
391
392	return err;
393}
394
395static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
396			      struct ib_port_attr *props)
397{
398	/* returns host view */
399	return __mlx4_ib_query_port(ibdev, port, props, 0);
400}
401
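/*
 * Read one GID table entry using PORT_INFO and GUID_INFO MADs.  On
 * multi-function devices in host view, only index 0 carries a GUID;
 * higher indexes return a zeroed GUID half.
 */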
402int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
403			union ib_gid *gid, int netw_view)
404{
405	struct ib_smp *in_mad  = NULL;
406	struct ib_smp *out_mad = NULL;
407	int err = -ENOMEM;
408	struct mlx4_ib_dev *dev = to_mdev(ibdev);
409	int clear = 0;
410	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
411
412	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
413	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
414	if (!in_mad || !out_mad)
415		goto out;
416
417	init_query_mad(in_mad);
418	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
419	in_mad->attr_mod = cpu_to_be32(port);
420
421	if (mlx4_is_mfunc(dev->dev) && netw_view)
422		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
423
424	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
425	if (err)
426		goto out;
427
428	memcpy(gid->raw, out_mad->data + 8, 8);
429
430	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
431		if (index) {
432			/* For any index > 0, return the null guid */
433			err = 0;
434			clear = 1;
435			goto out;
436		}
437	}
438
439	init_query_mad(in_mad);
440	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
441	in_mad->attr_mod = cpu_to_be32(index / 8);
442
443	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
444			   NULL, NULL, in_mad, out_mad);
445	if (err)
446		goto out;
447
448	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
449
450out:
451	if (clear)
452		memset(gid->raw + 8, 0, 8);
453	kfree(in_mad);
454	kfree(out_mad);
455	return err;
456}
457
458static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
459			  union ib_gid *gid)
460{
461	struct mlx4_ib_dev *dev = to_mdev(ibdev);
462
463	*gid = dev->iboe.gid_table[port - 1][index];
464
465	return 0;
466}
467
468static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
469			     union ib_gid *gid)
470{
471	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
472		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
473	else
474		return iboe_query_gid(ibdev, port, index, gid);
475}
476
477int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
478			 u16 *pkey, int netw_view)
479{
480	struct ib_smp *in_mad  = NULL;
481	struct ib_smp *out_mad = NULL;
482	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
483	int err = -ENOMEM;
484
485	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
486	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
487	if (!in_mad || !out_mad)
488		goto out;
489
490	init_query_mad(in_mad);
491	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
492	in_mad->attr_mod = cpu_to_be32(index / 32);
493
494	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
495		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
496
497	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
498			   in_mad, out_mad);
499	if (err)
500		goto out;
501
502	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
503
504out:
505	kfree(in_mad);
506	kfree(out_mad);
507	return err;
508}
509
510static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
511{
512	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
513}
514
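/*
 * ib_device modify_device handler: only the node description may be
 * changed.  The new description is also passed to firmware via SET_NODE
 * so that it can generate a Trap 144.
 */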
515static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
516				 struct ib_device_modify *props)
517{
518	struct mlx4_cmd_mailbox *mailbox;
519	unsigned long flags;
520
521	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
522		return -EOPNOTSUPP;
523
524	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
525		return 0;
526
527	if (mlx4_is_slave(to_mdev(ibdev)->dev))
528		return -EOPNOTSUPP;
529
530	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
531	memcpy(ibdev->node_desc, props->node_desc, 64);
532	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
533
534	/*
535	 * If possible, pass the node description to FW so it can generate
536	 * a Trap 144.  If the command fails, just ignore it.
537	 */
538	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
539	if (IS_ERR(mailbox))
540		return 0;
541
542	memset(mailbox->buf, 0, 256);
543	memcpy(mailbox->buf, props->node_desc, 64);
544	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
545		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
546
547	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
548
549	return 0;
550}
551
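/*
 * Program the port capability mask (and optionally reset the QKey
 * violation counter) with the SET_PORT command.
 */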
552static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
553			 u32 cap_mask)
554{
555	struct mlx4_cmd_mailbox *mailbox;
556	int err;
557	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
558
559	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
560	if (IS_ERR(mailbox))
561		return PTR_ERR(mailbox);
562
563	memset(mailbox->buf, 0, 256);
564
565	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
566		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
567		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
568	} else {
569		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
570		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
571	}
572
573	err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
574		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
575
576	mlx4_free_cmd_mailbox(dev->dev, mailbox);
577	return err;
578}
579
580static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
581			       struct ib_port_modify *props)
582{
583	struct ib_port_attr attr;
584	u32 cap_mask;
585	int err;
586
587	mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);
588
589	err = mlx4_ib_query_port(ibdev, port, &attr);
590	if (err)
591		goto out;
592
593	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
594		~props->clr_port_cap_mask;
595
596	err = mlx4_SET_PORT(to_mdev(ibdev), port,
597			    !!(mask & IB_PORT_RESET_QKEY_CNTR),
598			    cap_mask);
599
600out:
601	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
602	return err;
603}
604
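/*
 * Allocate a user context: reserve a UAR and return the device limits to
 * userspace in the format matching the negotiated uverbs ABI version.
 */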
605static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
606						  struct ib_udata *udata)
607{
608	struct mlx4_ib_dev *dev = to_mdev(ibdev);
609	struct mlx4_ib_ucontext *context;
610	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
611	struct mlx4_ib_alloc_ucontext_resp resp;
612	int err;
613
614	if (!dev->ib_active)
615		return ERR_PTR(-EAGAIN);
616
617	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
618		resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
619		if (mlx4_wc_enabled()) {
620			resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
621			resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
622		} else {
623			resp_v3.bf_reg_size      = 0;
624			resp_v3.bf_regs_per_page = 0;
625		}
626	} else {
627		resp.dev_caps	      = dev->dev->caps.userspace_caps;
628		resp.qp_tab_size      = dev->dev->caps.num_qps;
629		if (mlx4_wc_enabled()) {
630			resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
631			resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
632		} else {
633			resp.bf_reg_size      = 0;
634			resp.bf_regs_per_page = 0;
635		}
636		resp.cqe_size	      = dev->dev->caps.cqe_size;
637	}
638
639	context = kmalloc(sizeof *context, GFP_KERNEL);
640	if (!context)
641		return ERR_PTR(-ENOMEM);
642
643	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
644	if (err) {
645		kfree(context);
646		return ERR_PTR(err);
647	}
648
649	INIT_LIST_HEAD(&context->db_page_list);
650	mutex_init(&context->db_page_mutex);
651
652	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
653		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
654	else
655		err = ib_copy_to_udata(udata, &resp, sizeof(resp));
656
657	if (err) {
658		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
659		kfree(context);
660		return ERR_PTR(-EFAULT);
661	}
662
663	return &context->ibucontext;
664}
665
666static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
667{
668	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
669
670	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
671	kfree(context);
672
673	return 0;
674}
675#ifdef __linux__
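/*
 * get_unmapped_area handler: for MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES
 * requests, search for a free address range aligned to the requested
 * page-size order; everything else falls back to the mm default.
 */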
676static unsigned long mlx4_ib_get_unmapped_area(struct file *file,
677			unsigned long addr,
678			unsigned long len, unsigned long pgoff,
679			unsigned long flags)
680{
681	struct mm_struct *mm;
682	struct vm_area_struct *vma;
683	unsigned long start_addr;
684	unsigned long page_size_order;
685	unsigned long  command;
686
687	mm = current->mm;
688	if (addr)
689		return current->mm->get_unmapped_area(file, addr, len,
690						pgoff, flags);
691
692	/* The last 8 bits hold the command; the rest are data for that command */
693	command = pgoff & MLX4_IB_MMAP_CMD_MASK;
694	if (command != MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES)
695		return current->mm->get_unmapped_area(file, addr, len,
696						pgoff, flags);
697
698	page_size_order = pgoff >> MLX4_IB_MMAP_CMD_BITS;
699	/* code is based on the huge-pages get_unmapped_area code */
700	start_addr = mm->free_area_cache;
701
702	if (len <= mm->cached_hole_size)
703		start_addr = TASK_UNMAPPED_BASE;
704
705
706full_search:
707	addr = ALIGN(start_addr, 1 << page_size_order);
708
709	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
710		/* At this point:  (!vma || addr < vma->vm_end). */
711		if (TASK_SIZE - len < addr) {
712			/*
713			 * Start a new search - just in case we missed
714			 * some holes.
715			 */
716			if (start_addr != TASK_UNMAPPED_BASE) {
717				start_addr = TASK_UNMAPPED_BASE;
718				goto full_search;
719			}
720			return -ENOMEM;
721		}
722
723		if (!vma || addr + len <= vma->vm_start)
724			return addr;
725		addr = ALIGN(vma->vm_end, 1 << page_size_order);
726	}
727}
728#endif
729
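/*
 * mmap handler: map the UAR page, the blue flame page, or contiguous
 * physical pages into userspace, according to the command encoded in
 * vm_pgoff.
 */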
730static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
731{
732	struct mlx4_ib_dev *dev = to_mdev(context->device);
733	int err;
734
735	/* The last 8 bits hold the command; the rest are data for that command */
736	unsigned long  command = vma->vm_pgoff & MLX4_IB_MMAP_CMD_MASK;
737
738	if (command < MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES) {
739		/* compatibility handling for commands 0 and 1 */
740		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
741			return -EINVAL;
742	}
743	if (command == MLX4_IB_MMAP_UAR_PAGE) {
744		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
745
746		if (io_remap_pfn_range(vma, vma->vm_start,
747				       to_mucontext(context)->uar.pfn,
748				       PAGE_SIZE, vma->vm_page_prot))
749			return -EAGAIN;
750	} else if (command == MLX4_IB_MMAP_BLUE_FLAME_PAGE &&
751			dev->dev->caps.bf_reg_size != 0) {
752		vma->vm_page_prot = pgprot_wc(vma->vm_page_prot);
753
754		if (io_remap_pfn_range(vma, vma->vm_start,
755				       to_mucontext(context)->uar.pfn +
756				       dev->dev->caps.num_uars,
757				       PAGE_SIZE, vma->vm_page_prot))
758			return -EAGAIN;
759	} else if (command == MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES) {
760		/* Getting contiguous physical pages */
761		unsigned long total_size = vma->vm_end - vma->vm_start;
762		unsigned long page_size_order = (vma->vm_pgoff) >>
763						MLX4_IB_MMAP_CMD_BITS;
764		struct ib_cmem *ib_cmem;
765		ib_cmem = ib_cmem_alloc_contiguous_pages(context, total_size,
766							page_size_order);
767		if (IS_ERR(ib_cmem)) {
768			err = PTR_ERR(ib_cmem);
769			return err;
770		}
771
772		err = ib_cmem_map_contiguous_pages_to_vma(ib_cmem, vma);
773		if (err) {
774			ib_cmem_release_contiguous_pages(ib_cmem);
775			return err;
776		}
777		return 0;
778	} else
779		return -EINVAL;
780
781	return 0;
782}
783
784static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
785				      struct ib_ucontext *context,
786				      struct ib_udata *udata)
787{
788	struct mlx4_ib_pd *pd;
789	int err;
790
791	pd = kmalloc(sizeof *pd, GFP_KERNEL);
792	if (!pd)
793		return ERR_PTR(-ENOMEM);
794
795	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
796	if (err) {
797		kfree(pd);
798		return ERR_PTR(err);
799	}
800
801	if (context)
802		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
803			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
804			kfree(pd);
805			return ERR_PTR(-EFAULT);
806		}
807
808	return &pd->ibpd;
809}
810
811static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
812{
813	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
814	kfree(pd);
815
816	return 0;
817}
818
819static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
820					  struct ib_ucontext *context,
821					  struct ib_udata *udata)
822{
823	struct mlx4_ib_xrcd *xrcd;
824	int err;
825
826	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
827		return ERR_PTR(-ENOSYS);
828
829	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
830	if (!xrcd)
831		return ERR_PTR(-ENOMEM);
832
833	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
834	if (err)
835		goto err1;
836
837	xrcd->pd = ib_alloc_pd(ibdev);
838	if (IS_ERR(xrcd->pd)) {
839		err = PTR_ERR(xrcd->pd);
840		goto err2;
841	}
842
843	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0);
844	if (IS_ERR(xrcd->cq)) {
845		err = PTR_ERR(xrcd->cq);
846		goto err3;
847	}
848
849	return &xrcd->ibxrcd;
850
851err3:
852	ib_dealloc_pd(xrcd->pd);
853err2:
854	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
855err1:
856	kfree(xrcd);
857	return ERR_PTR(err);
858}
859
860static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
861{
862	ib_destroy_cq(to_mxrcd(xrcd)->cq);
863	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
864	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
865	kfree(xrcd);
866
867	return 0;
868}
869
870static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
871{
872	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
873	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
874	struct mlx4_ib_gid_entry *ge;
875
876	ge = kzalloc(sizeof *ge, GFP_KERNEL);
877	if (!ge)
878		return -ENOMEM;
879
880	ge->gid = *gid;
881	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
882		ge->port = mqp->port;
883		ge->added = 1;
884	}
885
886	mutex_lock(&mqp->mutex);
887	list_add_tail(&ge->list, &mqp->gid_list);
888	mutex_unlock(&mqp->mutex);
889
890	return 0;
891}
892
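/*
 * For RoCE ports, program the multicast MAC derived from the GID into the
 * underlying net_device.  Returns 1 if the MAC was added, 0 otherwise.
 */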
893int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
894		   union ib_gid *gid)
895{
896	u8 mac[6];
897	struct net_device *ndev;
898	int ret = 0;
899
900	if (!mqp->port)
901		return 0;
902
903	spin_lock(&mdev->iboe.lock);
904	ndev = mdev->iboe.netdevs[mqp->port - 1];
905	if (ndev)
906		dev_hold(ndev);
907	spin_unlock(&mdev->iboe.lock);
908
909	if (ndev) {
910		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
911		rtnl_lock();
912		dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac, 6, 0);
913		ret = 1;
914		rtnl_unlock();
915		dev_put(ndev);
916	}
917
918	return ret;
919}
920
921struct mlx4_ib_steering {
922	struct list_head list;
923	u64 reg_id;
924	union ib_gid gid;
925};
926
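/*
 * ib_qp attach_mcast handler: attach the QP to the multicast group and
 * record the GID (plus the steering rule ID when device-managed flow
 * steering is in use).
 */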
927static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
928{
929	int err;
930	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
931	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
932	u64 reg_id;
933	struct mlx4_ib_steering *ib_steering = NULL;
934
935	if (mdev->dev->caps.steering_mode ==
936	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
937		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
938		if (!ib_steering)
939			return -ENOMEM;
940	}
941
942	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
943				    !!(mqp->flags &
944				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
945				    MLX4_PROT_IB_IPV6, &reg_id);
946	if (err)
947		goto err_malloc;
948
949	err = add_gid_entry(ibqp, gid);
950	if (err)
951		goto err_add;
952
953	if (ib_steering) {
954		memcpy(ib_steering->gid.raw, gid->raw, 16);
955		ib_steering->reg_id = reg_id;
956		mutex_lock(&mqp->mutex);
957		list_add(&ib_steering->list, &mqp->steering_rules);
958		mutex_unlock(&mqp->mutex);
959	}
960	return 0;
961
962err_add:
963	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
964			      MLX4_PROT_IB_IPV6, reg_id);
965err_malloc:
966	kfree(ib_steering);
967
968	return err;
969}
970
971enum {
972	IBV_FLOW_L4_NONE = 0,
973	IBV_FLOW_L4_OTHER = 3,
974	IBV_FLOW_L4_UDP = 5,
975	IBV_FLOW_L4_TCP = 6
976};
977
978struct mlx4_cm_steering {
979	struct list_head list;
980	u64 reg_id;
981	struct ib_flow_spec spec;
982};
983
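/*
 * Translate an ib_flow_spec into a list of mlx4_spec_list entries (L2,
 * plus optional L3/L4 specs) for device-managed flow steering.
 */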
984static int flow_spec_to_net_rule(struct ib_device *dev, struct ib_flow_spec *flow_spec,
985				  struct list_head *rule_list_h)
986{
987	struct mlx4_spec_list *spec_l2, *spec_l3, *spec_l4;
988	u64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
989
990	spec_l2 = kzalloc(sizeof *spec_l2, GFP_KERNEL);
991	if (!spec_l2)
992		return -ENOMEM;
993
994	switch (flow_spec->type) {
995	case IB_FLOW_ETH:
996		spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
997		memcpy(spec_l2->eth.dst_mac, flow_spec->l2_id.eth.mac, ETH_ALEN);
998		memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
999		spec_l2->eth.ether_type = flow_spec->l2_id.eth.ethertype;
1000		if (flow_spec->l2_id.eth.vlan_present) {
1001			spec_l2->eth.vlan_id = flow_spec->l2_id.eth.vlan;
1002			spec_l2->eth.vlan_id_msk = cpu_to_be16(0x0fff);
1003		}
1004		break;
1005	case IB_FLOW_IB_UC:
1006		spec_l2->id = MLX4_NET_TRANS_RULE_ID_IB;
1007		if (flow_spec->l2_id.ib_uc.qpn) {
1008			spec_l2->ib.l3_qpn = cpu_to_be32(flow_spec->l2_id.ib_uc.qpn);
1009			spec_l2->ib.qpn_msk = cpu_to_be32(0xffffff);
1010		}
1011		break;
1012	case IB_FLOW_IB_MC_IPV4:
1013	case IB_FLOW_IB_MC_IPV6:
1014		spec_l2->id = MLX4_NET_TRANS_RULE_ID_IB;
1015		memcpy(spec_l2->ib.dst_gid, flow_spec->l2_id.ib_mc.mgid, 16);
1016		memset(spec_l2->ib.dst_gid_msk, 0xff, 16);
1017		break;
1018	}
1019
1020
1021	list_add_tail(&spec_l2->list, rule_list_h);
1022
1023	if (flow_spec->l2_id.eth.ethertype == cpu_to_be16(ETH_P_IP) ||
1024	    flow_spec->type != IB_FLOW_ETH) {
1025		spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
1026		if (!spec_l3)
1027			return -ENOMEM;
1028
1029		spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
1030		spec_l3->ipv4.src_ip = flow_spec->src_ip;
1031		if (flow_spec->type != IB_FLOW_IB_MC_IPV4 &&
1032		    flow_spec->type != IB_FLOW_IB_MC_IPV6)
1033			spec_l3->ipv4.dst_ip = flow_spec->dst_ip;
1034
1035		if (spec_l3->ipv4.src_ip)
1036			spec_l3->ipv4.src_ip_msk = MLX4_BE_WORD_MASK;
1037		if (spec_l3->ipv4.dst_ip)
1038			spec_l3->ipv4.dst_ip_msk = MLX4_BE_WORD_MASK;
1039
1040		list_add_tail(&spec_l3->list, rule_list_h);
1041	}
1042
1043	if (flow_spec->l4_protocol) {
1044		spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
1045		if (!spec_l4)
1046			return -ENOMEM;
1047
1048		spec_l4->tcp_udp.src_port = flow_spec->src_port;
1049		spec_l4->tcp_udp.dst_port = flow_spec->dst_port;
1050		if (spec_l4->tcp_udp.src_port)
1051			spec_l4->tcp_udp.src_port_msk =
1052						MLX4_BE_SHORT_MASK;
1053		if (spec_l4->tcp_udp.dst_port)
1054			spec_l4->tcp_udp.dst_port_msk =
1055						MLX4_BE_SHORT_MASK;
1056
1057		switch (flow_spec->l4_protocol) {
1058		case IBV_FLOW_L4_UDP:
1059			spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
1060			break;
1061		case IBV_FLOW_L4_TCP:
1062			spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
1063			break;
1064		default:
1065			dev_err(dev->dma_device,
1066				"Unsupported l4 protocol.\n");
1067			kfree(spec_l4);
1068			return -EPROTONOSUPPORT;
1069		}
1070		list_add_tail(&spec_l4->list, rule_list_h);
1071	}
1072	return 0;
1073}
1074
1075static int __mlx4_ib_flow_attach(struct mlx4_ib_dev *mdev,
1076				 struct mlx4_ib_qp *mqp,
1077				 struct ib_flow_spec *flow_spec,
1078				 int priority, int lock_qp)
1079{
1080	u64 reg_id = 0;
1081	int err = 0;
1082	struct mlx4_cm_steering *cm_flow;
1083	struct mlx4_spec_list *spec, *tmp_spec;
1084
1085	struct mlx4_net_trans_rule rule =
1086	{	.queue_mode = MLX4_NET_TRANS_Q_FIFO,
1087		.exclusive = 0,
1088	};
1089
1090	rule.promisc_mode = flow_spec->rule_type;
1091	rule.port = mqp->port;
1092	rule.qpn = mqp->mqp.qpn;
1093	INIT_LIST_HEAD(&rule.list);
1094
1095	cm_flow = kmalloc(sizeof(*cm_flow), GFP_KERNEL);
1096	if (!cm_flow)
1097		return -ENOMEM;
1098
1099	if (rule.promisc_mode == MLX4_FS_REGULAR) {
1100		rule.allow_loopback = !flow_spec->block_mc_loopback;
1101		rule.priority = MLX4_DOMAIN_UVERBS | priority;
1102		err = flow_spec_to_net_rule(&mdev->ib_dev, flow_spec,
1103					    &rule.list);
1104		if (err)
1105			goto free_list;
1106	}
1107
1108	err = mlx4_flow_attach(mdev->dev, &rule, &reg_id);
1109	if (err)
1110		goto free_list;
1111
1112	memcpy(&cm_flow->spec, flow_spec, sizeof(*flow_spec));
1113	cm_flow->reg_id = reg_id;
1114
1115	if (lock_qp)
1116		mutex_lock(&mqp->mutex);
1117	list_add(&cm_flow->list, &mqp->rules_list);
1118	if (lock_qp)
1119		mutex_unlock(&mqp->mutex);
1120
1121free_list:
1122	list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
1123		list_del(&spec->list);
1124		kfree(spec);
1125	}
1126	if (err) {
1127		kfree(cm_flow);
1128		dev_err(mdev->ib_dev.dma_device,
1129			"Failed to attach flow steering rule\n");
1130	}
1131	return err;
1132}
1133
1134static int __mlx4_ib_flow_detach(struct mlx4_ib_dev *mdev,
1135				 struct mlx4_ib_qp *mqp,
1136				 struct ib_flow_spec *spec, int priority,
1137				 int lock_qp)
1138{
1139	struct mlx4_cm_steering *cm_flow;
1140	int ret;
1141
1142	if (lock_qp)
1143		mutex_lock(&mqp->mutex);
1144	list_for_each_entry(cm_flow, &mqp->rules_list, list) {
1145		if (!memcmp(&cm_flow->spec, spec, sizeof(*spec))) {
1146			list_del(&cm_flow->list);
1147			break;
1148		}
1149	}
1150	if (lock_qp)
1151		mutex_unlock(&mqp->mutex);
1152
1153	if (&cm_flow->list == &mqp->rules_list) {
1154		dev_err(mdev->ib_dev.dma_device, "Couldn't find reg_id for flow spec. "
1155			"Steering rule is left attached\n");
1156		return -EINVAL;
1157	}
1158
1159	ret = mlx4_flow_detach(mdev->dev, cm_flow->reg_id);
1160
1161	kfree(cm_flow);
1162	return ret;
1163}
1164
1165static int mlx4_ib_flow_attach(struct ib_qp *qp, struct ib_flow_spec *flow_spec,
1166			       int priority)
1167{
1168	return __mlx4_ib_flow_attach(to_mdev(qp->device), to_mqp(qp),
1169				     flow_spec, priority, 1);
1170}
1171
1172static int mlx4_ib_flow_detach(struct ib_qp *qp, struct ib_flow_spec *spec,
1173			       int priority)
1174{
1175	return __mlx4_ib_flow_detach(to_mdev(qp->device), to_mqp(qp),
1176				     spec, priority, 1);
1177}
1178
1179static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
1180{
1181	struct mlx4_ib_gid_entry *ge;
1182	struct mlx4_ib_gid_entry *tmp;
1183	struct mlx4_ib_gid_entry *ret = NULL;
1184
1185	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1186		if (!memcmp(raw, ge->gid.raw, 16)) {
1187			ret = ge;
1188			break;
1189		}
1190	}
1191
1192	return ret;
1193}
1194
1195static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1196{
1197	int err;
1198	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1199	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1200	u8 mac[6];
1201	struct net_device *ndev;
1202	struct mlx4_ib_gid_entry *ge;
1203	u64 reg_id = 0;
1204
1205	if (mdev->dev->caps.steering_mode ==
1206	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
1207		struct mlx4_ib_steering *ib_steering;
1208
1209		mutex_lock(&mqp->mutex);
1210		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
1211			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
1212				list_del(&ib_steering->list);
1213				break;
1214			}
1215		}
1216		mutex_unlock(&mqp->mutex);
1217		if (&ib_steering->list == &mqp->steering_rules) {
1218			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
1219			return -EINVAL;
1220		}
1221		reg_id = ib_steering->reg_id;
1222		kfree(ib_steering);
1223	}
1224
1225	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1226				    MLX4_PROT_IB_IPV6, reg_id);
1227	if (err)
1228		return err;
1229
1230	mutex_lock(&mqp->mutex);
1231	ge = find_gid_entry(mqp, gid->raw);
1232	if (ge) {
1233		spin_lock(&mdev->iboe.lock);
1234		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
1235		if (ndev)
1236			dev_hold(ndev);
1237		spin_unlock(&mdev->iboe.lock);
1238		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
1239		if (ndev) {
1240			rtnl_lock();
1241			dev_mc_delete(mdev->iboe.netdevs[ge->port - 1], mac, 6, 0);
1242			rtnl_unlock();
1243			dev_put(ndev);
1244		}
1245		list_del(&ge->list);
1246		kfree(ge);
1247	} else
1248		pr_warn("could not find mgid entry\n");
1249
1250	mutex_unlock(&mqp->mutex);
1251
1252	return 0;
1253}
1254
1255static int init_node_data(struct mlx4_ib_dev *dev)
1256{
1257	struct ib_smp *in_mad  = NULL;
1258	struct ib_smp *out_mad = NULL;
1259	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
1260	int err = -ENOMEM;
1261
1262	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
1263	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
1264	if (!in_mad || !out_mad)
1265		goto out;
1266
1267	init_query_mad(in_mad);
1268	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
1269	if (mlx4_is_master(dev->dev))
1270		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
1271
1272	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
1273	if (err)
1274		goto out;
1275
1276	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
1277
1278	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
1279
1280	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
1281	if (err)
1282		goto out;
1283
1284	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
1285	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
1286
1287out:
1288	kfree(in_mad);
1289	kfree(out_mad);
1290	return err;
1291}
1292
1293static ssize_t show_hca(struct device *device, struct device_attribute *attr,
1294			char *buf)
1295{
1296	struct mlx4_ib_dev *dev =
1297		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1298	return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
1299}
1300
1301static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
1302			   char *buf)
1303{
1304	struct mlx4_ib_dev *dev =
1305		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1306	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
1307		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
1308		       (int) dev->dev->caps.fw_ver & 0xffff);
1309}
1310
1311static ssize_t show_rev(struct device *device, struct device_attribute *attr,
1312			char *buf)
1313{
1314	struct mlx4_ib_dev *dev =
1315		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1316	return sprintf(buf, "%x\n", dev->dev->rev_id);
1317}
1318
1319static ssize_t show_board(struct device *device, struct device_attribute *attr,
1320			  char *buf)
1321{
1322	struct mlx4_ib_dev *dev =
1323		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1324	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
1325		       dev->dev->board_id);
1326}
1327
1328static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
1329static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
1330static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
1331static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
1332
1333static struct device_attribute *mlx4_class_attributes[] = {
1334	&dev_attr_hw_rev,
1335	&dev_attr_fw_ver,
1336	&dev_attr_hca_type,
1337	&dev_attr_board_id
1338};
1339
1340static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
1341{
1342#ifdef __linux__
1343	memcpy(eui, dev->dev_addr, 3);
1344	memcpy(eui + 5, dev->dev_addr + 3, 3);
1345#else
1346	memcpy(eui, IF_LLADDR(dev), 3);
1347	memcpy(eui + 5, IF_LLADDR(dev) + 3, 3);
1348#endif
1349	if (vlan_id < 0x1000) {
1350		eui[3] = vlan_id >> 8;
1351		eui[4] = vlan_id & 0xff;
1352	} else {
1353		eui[3] = 0xff;
1354		eui[4] = 0xfe;
1355	}
1356	eui[0] ^= 2;
1357}
1358
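/*
 * Deferred work: push an updated GID table to firmware with SET_PORT and
 * dispatch an IB_EVENT_GID_CHANGE event on success.
 */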
1359static void update_gids_task(struct work_struct *work)
1360{
1361	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
1362	struct mlx4_cmd_mailbox *mailbox;
1363	union ib_gid *gids;
1364	int err;
1365	struct mlx4_dev	*dev = gw->dev->dev;
1366
1367	mailbox = mlx4_alloc_cmd_mailbox(dev);
1368	if (IS_ERR(mailbox)) {
1369		pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
1370		return;
1371	}
1372
1373	gids = mailbox->buf;
1374	memcpy(gids, gw->gids, sizeof gw->gids);
1375
1376	err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
1377		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1378		       MLX4_CMD_WRAPPED);
1379	if (err)
1380		pr_warn("set port command failed\n");
1381	else {
1382		memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
1383		mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
1384	}
1385
1386	mlx4_free_cmd_mailbox(dev, mailbox);
1387	kfree(gw);
1388}
1389
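/*
 * Rebuild the RoCE GID table of a port from the link-local GIDs derived
 * from the associated net_device (and its VLAN devices), and queue a work
 * item to push any changes to firmware.
 */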
1390static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
1391{
1392	struct net_device *ndev = dev->iboe.netdevs[port - 1];
1393	struct update_gid_work *work;
1394	struct net_device *tmp;
1395	int i;
1396	u8 *hits;
1397	union ib_gid gid;
1398	int index_free;
1399	int found;
1400	int need_update = 0;
1401	int max_gids;
1402	u16 vid;
1403
1404	work = kzalloc(sizeof *work, GFP_ATOMIC);
1405	if (!work)
1406		return -ENOMEM;
1407
1408	hits = kzalloc(128, GFP_ATOMIC);
1409	if (!hits) {
1410		kfree(work);
1411		return -ENOMEM;
1412	}
1413
1414	max_gids = dev->dev->caps.gid_table_len[port];
1415
1416#ifdef __linux__
1417	rcu_read_lock();
1418	for_each_netdev_rcu(&init_net, tmp) {
1419#else
1420	IFNET_RLOCK();
1421	TAILQ_FOREACH(tmp, &V_ifnet, if_link) {
1422#endif
1423		if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
1424			gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
1425			vid = rdma_vlan_dev_vlan_id(tmp);
1426			mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
1427			found = 0;
1428			index_free = -1;
1429			for (i = 0; i < max_gids; ++i) {
1430				if (index_free < 0 &&
1431				    !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
1432					index_free = i;
1433				if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
1434					hits[i] = 1;
1435					found = 1;
1436					break;
1437				}
1438			}
1439
1440			if (!found) {
1441				if (tmp == ndev &&
1442				    (memcmp(&dev->iboe.gid_table[port - 1][0],
1443					    &gid, sizeof gid) ||
1444				     !memcmp(&dev->iboe.gid_table[port - 1][0],
1445					     &zgid, sizeof gid))) {
1446					dev->iboe.gid_table[port - 1][0] = gid;
1447					++need_update;
1448					hits[0] = 1;
1449				} else if (index_free >= 0) {
1450					dev->iboe.gid_table[port - 1][index_free] = gid;
1451					hits[index_free] = 1;
1452					++need_update;
1453				}
1454			}
1455		}
1456#ifdef __linux__
1457	}
1458	rcu_read_unlock();
1459#else
1460	}
1461	IFNET_RUNLOCK();
1462#endif
1463
1464	for (i = 0; i < max_gids; ++i)
1465		if (!hits[i]) {
1466			if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
1467				++need_update;
1468			dev->iboe.gid_table[port - 1][i] = zgid;
1469		}
1470
1471	if (need_update) {
1472		memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
1473		INIT_WORK(&work->work, update_gids_task);
1474		work->port = port;
1475		work->dev = dev;
1476		queue_work(wq, &work->work);
1477	} else
1478		kfree(work);
1479
1480	kfree(hits);
1481	return 0;
1482}
1483
1484static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
1485{
1486	switch (event) {
1487	case NETDEV_UP:
1488#ifdef __linux__
1489	case NETDEV_CHANGEADDR:
1490#endif
1491		update_ipv6_gids(dev, port, 0);
1492		break;
1493
1494	case NETDEV_DOWN:
1495		update_ipv6_gids(dev, port, 1);
1496		dev->iboe.netdevs[port - 1] = NULL;
1497	}
1498}
1499
1500static void netdev_added(struct mlx4_ib_dev *dev, int port)
1501{
1502	update_ipv6_gids(dev, port, 0);
1503}
1504
1505static void netdev_removed(struct mlx4_ib_dev *dev, int port)
1506{
1507	update_ipv6_gids(dev, port, 1);
1508}
1509
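/*
 * Netdevice notifier: track the net_device backing each RoCE port and
 * refresh its GID table on UP/DOWN (and, on Linux, address changes).
 */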
1510static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
1511				void *ptr)
1512{
1513	struct net_device *dev = ptr;
1514	struct mlx4_ib_dev *ibdev;
1515	struct net_device *oldnd;
1516	struct mlx4_ib_iboe *iboe;
1517	int port;
1518
1519#ifdef __linux__
1520	if (!net_eq(dev_net(dev), &init_net))
1521		return NOTIFY_DONE;
1522#endif
1523
1524	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
1525	iboe = &ibdev->iboe;
1526
1527	spin_lock(&iboe->lock);
1528	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
1529		oldnd = iboe->netdevs[port - 1];
1530		iboe->netdevs[port - 1] =
1531			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
1532		if (oldnd != iboe->netdevs[port - 1]) {
1533			if (iboe->netdevs[port - 1])
1534				netdev_added(ibdev, port);
1535			else
1536				netdev_removed(ibdev, port);
1537		}
1538	}
1539
1540	if (dev == iboe->netdevs[0] ||
1541	    (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
1542		handle_en_event(ibdev, 1, event);
1543	else if (dev == iboe->netdevs[1]
1544		 || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
1545		handle_en_event(ibdev, 2, event);
1546
1547	spin_unlock(&iboe->lock);
1548
1549	return NOTIFY_DONE;
1550}
1551
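/*
 * On the master function, set up the virtual-to-physical P_Key mappings
 * for all slaves and initialize the physical P_Key cache.
 */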
1552static void init_pkeys(struct mlx4_ib_dev *ibdev)
1553{
1554	int port;
1555	int slave;
1556	int i;
1557
1558	if (mlx4_is_master(ibdev->dev)) {
1559		for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
1560			for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
1561				for (i = 0;
1562				     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
1563				     ++i) {
1564					ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
1565					/* master has the identity virt2phys pkey mapping */
1566						(slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
1567							ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
1568					mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
1569							     ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
1570				}
1571			}
1572		}
1573		/* initialize pkey cache */
1574		for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
1575			for (i = 0;
1576			     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
1577			     ++i)
1578				ibdev->pkeys.phys_pkey_cache[port-1][i] =
1579					(i) ? 0 : 0xFFFF;
1580		}
1581	}
1582}
1583
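/*
 * Allocate additional per-port completion EQs from the EQ pool,
 * falling back to the legacy vectors when assignment fails.
 */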
1584static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1585{
1586	char name[32];
1587	int eq_per_port = 0;
1588	int added_eqs = 0;
1589	int total_eqs = 0;
1590	int i, j, eq;
1591
1592	/* Legacy mode or comp_pool is not large enough */
1593	if (dev->caps.comp_pool == 0 ||
1594	    dev->caps.num_ports > dev->caps.comp_pool)
1595		return;
1596
1597	eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
1598					dev->caps.num_ports);
1599
1600	/* Init eq table */
1601	added_eqs = 0;
1602	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
1603		added_eqs += eq_per_port;
1604
1605	total_eqs = dev->caps.num_comp_vectors + added_eqs;
1606
1607	ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
1608	if (!ibdev->eq_table)
1609		return;
1610
1611	ibdev->eq_added = added_eqs;
1612
1613	eq = 0;
1614	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
1615		for (j = 0; j < eq_per_port; j++) {
1616			//sprintf(name, "mlx4-ib-%d-%d@%s",
1617			//	i, j, dev->pdev->bus->conf.pd_name);
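			/*
			 * NOTE: the per-ring name formatting above is
			 * commented out in this port, so 'name' is passed
			 * to mlx4_assign_eq() uninitialized.
			 */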
1618			/* Set IRQ for specific name (per ring) */
1619			if (mlx4_assign_eq(dev, name,
1620					   &ibdev->eq_table[eq])) {
1621				/* Use legacy (same as mlx4_en driver) */
1622				pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
1623				ibdev->eq_table[eq] =
1624					(eq % dev->caps.num_comp_vectors);
1625			}
1626			eq++;
1627		}
1628	}
1629
1630	/* Fill the rest of the vector with legacy EQs */
1631	for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
1632		ibdev->eq_table[eq++] = i;
1633
1634	/* Advertise the new number of EQs to clients */
1635	ibdev->ib_dev.num_comp_vectors = total_eqs;
1636}
1637
1638static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1639{
1640	int i;
1641
1642	/* no additional eqs were added */
1643	if (!ibdev->eq_table)
1644		return;
1645
1646	/* Reset the advertised EQ number */
1647	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
1648
1649	/* Free only the added eqs */
1650	for (i = 0; i < ibdev->eq_added; i++) {
1651		/* Don't free legacy eqs if used */
1652		if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
1653			continue;
1654		mlx4_release_eq(dev, ibdev->eq_table[i]);
1655	}
1656
1657	kfree(ibdev->eq_table);
1658}
1659
1660/*
1661 * Create a show function and a device_attribute struct pointing to
1662 * that function for _name.
1663 */
1664#define DEVICE_DIAG_RPRT_ATTR(_name, _offset, _op_mod)		\
1665static ssize_t show_rprt_##_name(struct device *dev,		\
1666				 struct device_attribute *attr,	\
1667				 char *buf){			\
1668	return show_diag_rprt(dev, buf, _offset, _op_mod);	\
1669}								\
1670static DEVICE_ATTR(_name, S_IRUGO, show_rprt_##_name, NULL);
1671
1672#define MLX4_DIAG_RPRT_CLEAR_DIAGS 3
1673
1674static ssize_t show_diag_rprt(struct device *device, char *buf,
1675			     u32 offset, u8 op_modifier)
1676{
1677	ssize_t ret;
1678	u32 counter_offset = offset;
1679	u32 diag_counter = 0;
1680	struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev,
1681					       ib_dev.dev);
1682
1683	ret = mlx4_query_diag_counters(dev->dev, 1, op_modifier,
1684				       &counter_offset, &diag_counter);
1685	if (ret)
1686		return ret;
1687
1688	return sprintf(buf, "%d\n", diag_counter);
1689}
1690
1691static ssize_t clear_diag_counters(struct device *device,
1692				   struct device_attribute *attr,
1693				   const char *buf, size_t length)
1694{
1695	ssize_t ret;
1696	struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev,
1697					       ib_dev.dev);
1698
1699	ret = mlx4_query_diag_counters(dev->dev, 0, MLX4_DIAG_RPRT_CLEAR_DIAGS,
1700				       NULL, NULL);
1701	if (ret)
1702		return ret;
1703
1704	return length;
1705}
1706
1707DEVICE_DIAG_RPRT_ATTR(rq_num_lle	, 0x00, 2);
1708DEVICE_DIAG_RPRT_ATTR(sq_num_lle	, 0x04, 2);
1709DEVICE_DIAG_RPRT_ATTR(rq_num_lqpoe	, 0x08, 2);
1710DEVICE_DIAG_RPRT_ATTR(sq_num_lqpoe	, 0x0C, 2);
1711DEVICE_DIAG_RPRT_ATTR(rq_num_lpe	, 0x18, 2);
1712DEVICE_DIAG_RPRT_ATTR(sq_num_lpe	, 0x1C, 2);
1713DEVICE_DIAG_RPRT_ATTR(rq_num_wrfe	, 0x20, 2);
1714DEVICE_DIAG_RPRT_ATTR(sq_num_wrfe	, 0x24, 2);
1715DEVICE_DIAG_RPRT_ATTR(sq_num_mwbe	, 0x2C, 2);
1716DEVICE_DIAG_RPRT_ATTR(sq_num_bre	, 0x34, 2);
1717DEVICE_DIAG_RPRT_ATTR(rq_num_lae	, 0x38, 2);
1718DEVICE_DIAG_RPRT_ATTR(sq_num_rire	, 0x44, 2);
1719DEVICE_DIAG_RPRT_ATTR(rq_num_rire	, 0x48, 2);
1720DEVICE_DIAG_RPRT_ATTR(sq_num_rae	, 0x4C, 2);
1721DEVICE_DIAG_RPRT_ATTR(rq_num_rae	, 0x50, 2);
1722DEVICE_DIAG_RPRT_ATTR(sq_num_roe	, 0x54, 2);
1723DEVICE_DIAG_RPRT_ATTR(sq_num_tree	, 0x5C, 2);
1724DEVICE_DIAG_RPRT_ATTR(sq_num_rree	, 0x64, 2);
1725DEVICE_DIAG_RPRT_ATTR(rq_num_rnr	, 0x68, 2);
1726DEVICE_DIAG_RPRT_ATTR(sq_num_rnr	, 0x6C, 2);
1727DEVICE_DIAG_RPRT_ATTR(rq_num_oos	, 0x100, 2);
1728DEVICE_DIAG_RPRT_ATTR(sq_num_oos	, 0x104, 2);
1729DEVICE_DIAG_RPRT_ATTR(rq_num_mce	, 0x108, 2);
1730DEVICE_DIAG_RPRT_ATTR(rq_num_udsdprd	, 0x118, 2);
1731DEVICE_DIAG_RPRT_ATTR(rq_num_ucsdprd	, 0x120, 2);
1732DEVICE_DIAG_RPRT_ATTR(num_cqovf		, 0x1A0, 2);
1733DEVICE_DIAG_RPRT_ATTR(num_eqovf		, 0x1A4, 2);
1734DEVICE_DIAG_RPRT_ATTR(num_baddb		, 0x1A8, 2);
1735
1736static DEVICE_ATTR(clear_diag, S_IWUSR, NULL, clear_diag_counters);
1737
1738static struct attribute *diag_rprt_attrs[] = {
1739	&dev_attr_rq_num_lle.attr,
1740	&dev_attr_sq_num_lle.attr,
1741	&dev_attr_rq_num_lqpoe.attr,
1742	&dev_attr_sq_num_lqpoe.attr,
1743	&dev_attr_rq_num_lpe.attr,
1744	&dev_attr_sq_num_lpe.attr,
1745	&dev_attr_rq_num_wrfe.attr,
1746	&dev_attr_sq_num_wrfe.attr,
1747	&dev_attr_sq_num_mwbe.attr,
1748	&dev_attr_sq_num_bre.attr,
1749	&dev_attr_rq_num_lae.attr,
1750	&dev_attr_sq_num_rire.attr,
1751	&dev_attr_rq_num_rire.attr,
1752	&dev_attr_sq_num_rae.attr,
1753	&dev_attr_rq_num_rae.attr,
1754	&dev_attr_sq_num_roe.attr,
1755	&dev_attr_sq_num_tree.attr,
1756	&dev_attr_sq_num_rree.attr,
1757	&dev_attr_rq_num_rnr.attr,
1758	&dev_attr_sq_num_rnr.attr,
1759	&dev_attr_rq_num_oos.attr,
1760	&dev_attr_sq_num_oos.attr,
1761	&dev_attr_rq_num_mce.attr,
1762	&dev_attr_rq_num_udsdprd.attr,
1763	&dev_attr_rq_num_ucsdprd.attr,
1764	&dev_attr_num_cqovf.attr,
1765	&dev_attr_num_eqovf.attr,
1766	&dev_attr_num_baddb.attr,
1767	&dev_attr_clear_diag.attr,
1768	NULL
1769};
1770
1771static struct attribute_group diag_counters_group = {
1772	.name  = "diag_counters",
1773	.attrs  = diag_rprt_attrs
1774};
1775
1776#ifdef __linux__
1777static int mlx4_ib_proc_init(void)
1778{
1779	/* Create the procfs directories /proc/driver/mlx4_ib and
1780	 * /proc/driver/mlx4_ib/mrs for further use by the driver.
1781	 */
1782	int err;
1783
1784	mlx4_ib_driver_dir_entry = proc_mkdir(MLX4_IB_DRIVER_PROC_DIR_NAME,
1785				NULL);
1786	if (!mlx4_ib_driver_dir_entry) {
1787		pr_err("mlx4_ib_proc_init has failed for %s\n",
1788		       MLX4_IB_DRIVER_PROC_DIR_NAME);
1789		err = -ENODEV;
1790		goto error;
1791	}
1792
1793	mlx4_mrs_dir_entry = proc_mkdir(MLX4_IB_MRS_PROC_DIR_NAME,
1794					mlx4_ib_driver_dir_entry);
1795	if (!mlx4_mrs_dir_entry) {
1796		pr_err("mlx4_ib_proc_init has failed for %s\n",
1797		       MLX4_IB_MRS_PROC_DIR_NAME);
1798		err = -ENODEV;
1799		goto remove_entry;
1800	}
1801
1802	return 0;
1803
1804remove_entry:
1805	remove_proc_entry(MLX4_IB_DRIVER_PROC_DIR_NAME,
1806				NULL);
1807error:
1808	return err;
1809}
1810#endif
1811
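/*
 * Parse the dev_assign_str module parameter ("bb:dd.f-<idx>,...") into
 * the dr[] table mapping PCI functions to IB device numbers.
 */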
1812static void init_dev_assign(void)
1813{
1814	int bus, slot, fn, ib_idx;
1815	char *p = dev_assign_str, *t;
1816	char curr_val[32] = {0};
1817	int ret;
1818	int j, i = 0;
1819
1820	memset(dr, 0, sizeof dr);
1821
1822	if (dev_assign_str[0] == 0)
1823		return;
1824
1825	while (strlen(p)) {
1826		ret = sscanf(p, "%02x:%02x.%x-%x", &bus, &slot, &fn, &ib_idx);
1827		if (ret != 4 || ib_idx < 0)
1828			goto err;
1829
1830		for (j = 0; j < i; j++)
1831			if (dr[j].nr == ib_idx)
1832				goto err;
1833
1834		dr[i].bus = bus;
1835		dr[i].dev = slot;
1836		dr[i].func = fn;
1837		dr[i].nr = ib_idx;
1838
1839		t = strchr(p, ',');
1840		sprintf(curr_val, "%02x:%02x.%x-%x", bus, slot, fn, ib_idx);
1841		if ((!t) && strlen(p) == strlen(curr_val))
1842			return;
1843
1844		if (!t || (t + 1) >= dev_assign_str + sizeof dev_assign_str)
1845			goto err;
1846
1847		++i;
1848		if (i >= MAX_DR)
1849			goto err;
1850
1851		p = t + 1;
1852	}
1853
1854	return;
1855err:
1856	memset(dr, 0, sizeof dr);
1857	printk(KERN_WARNING "mlx4_ib: The value of the 'dev_assign_str' parameter "
1858			    "is incorrect. The parameter value is discarded!\n");
1859}
1860
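/*
 * mlx4 interface add callback: allocate, initialize and register the
 * ib_device instance for a newly probed ConnectX device.
 */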
1861static void *mlx4_ib_add(struct mlx4_dev *dev)
1862{
1863	struct mlx4_ib_dev *ibdev;
1864	int num_ports = 0;
1865	int i, j;
1866	int err;
1867	struct mlx4_ib_iboe *iboe;
1868
1869	printk(KERN_INFO "%s", mlx4_ib_version);
1870
1871	mlx4_foreach_ib_transport_port(i, dev)
1872		num_ports++;
1873
1874	/* No point in registering a device with no ports... */
1875	if (num_ports == 0)
1876		return NULL;
1877
1878	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
1879	if (!ibdev) {
1880		dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
1881		return NULL;
1882	}
1883
1884	iboe = &ibdev->iboe;
1885
1886	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
1887		goto err_dealloc;
1888
1889	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
1890		goto err_pd;
1891
1892	ibdev->priv_uar.map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT,
1893		PAGE_SIZE);
1894
1895	if (!ibdev->priv_uar.map)
1896		goto err_uar;
1897
1898	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
1899
1900	ibdev->dev = dev;
1901
1902	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
1903	ibdev->ib_dev.owner		= THIS_MODULE;
1904	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
1905	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
1906	ibdev->num_ports		= num_ports;
1907	ibdev->ib_dev.phys_port_cnt     = ibdev->num_ports;
1908	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
1909	ibdev->ib_dev.dma_device	= &dev->pdev->dev;
1910
1911	if (dev->caps.userspace_caps)
1912		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
1913	else
1914		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
1915
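	/*
	 * Advertise which uverbs commands this device supports; the mask is
	 * consulted by the ib_uverbs layer before dispatching user-space
	 * commands to the driver.
	 */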
1916	ibdev->ib_dev.uverbs_cmd_mask	=
1917		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
1918		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
1919		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
1920		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
1921		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
1922		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
1923		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
1924		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
1925		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
1926		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
1927		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
1928		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
1929		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
1930		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
1931		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
1932		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
1933		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
1934		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
1935		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
1936		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
1937		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
1938		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
1939		(1ull << IB_USER_VERBS_CMD_OPEN_QP)		|
1940		(1ull << IB_USER_VERBS_CMD_ATTACH_FLOW)		|
1941		(1ull << IB_USER_VERBS_CMD_DETACH_FLOW);
1943
1944	ibdev->ib_dev.query_device	= mlx4_ib_query_device;
1945	ibdev->ib_dev.query_port	= mlx4_ib_query_port;
1946	ibdev->ib_dev.get_link_layer	= mlx4_ib_port_link_layer;
1947	ibdev->ib_dev.query_gid		= mlx4_ib_query_gid;
1948	ibdev->ib_dev.query_pkey	= mlx4_ib_query_pkey;
1949	ibdev->ib_dev.modify_device	= mlx4_ib_modify_device;
1950	ibdev->ib_dev.modify_port	= mlx4_ib_modify_port;
1951	ibdev->ib_dev.alloc_ucontext	= mlx4_ib_alloc_ucontext;
1952	ibdev->ib_dev.dealloc_ucontext	= mlx4_ib_dealloc_ucontext;
1953	ibdev->ib_dev.mmap		= mlx4_ib_mmap;
1954#ifdef __linux__
1955	ibdev->ib_dev.get_unmapped_area = mlx4_ib_get_unmapped_area;
1956#endif
1957	ibdev->ib_dev.alloc_pd		= mlx4_ib_alloc_pd;
1958	ibdev->ib_dev.dealloc_pd	= mlx4_ib_dealloc_pd;
1959	ibdev->ib_dev.create_ah		= mlx4_ib_create_ah;
1960	ibdev->ib_dev.query_ah		= mlx4_ib_query_ah;
1961	ibdev->ib_dev.destroy_ah	= mlx4_ib_destroy_ah;
1962	ibdev->ib_dev.create_srq	= mlx4_ib_create_srq;
1963	ibdev->ib_dev.modify_srq	= mlx4_ib_modify_srq;
1964	ibdev->ib_dev.query_srq		= mlx4_ib_query_srq;
1965	ibdev->ib_dev.destroy_srq	= mlx4_ib_destroy_srq;
1966	ibdev->ib_dev.post_srq_recv	= mlx4_ib_post_srq_recv;
1967	ibdev->ib_dev.create_qp		= mlx4_ib_create_qp;
1968	ibdev->ib_dev.modify_qp		= mlx4_ib_modify_qp;
1969	ibdev->ib_dev.query_qp		= mlx4_ib_query_qp;
1970	ibdev->ib_dev.destroy_qp	= mlx4_ib_destroy_qp;
1971	ibdev->ib_dev.post_send		= mlx4_ib_post_send;
1972	ibdev->ib_dev.post_recv		= mlx4_ib_post_recv;
1973	ibdev->ib_dev.create_cq		= mlx4_ib_create_cq;
1974	ibdev->ib_dev.modify_cq		= mlx4_ib_modify_cq;
1975	ibdev->ib_dev.resize_cq		= mlx4_ib_resize_cq;
1976	ibdev->ib_dev.destroy_cq	= mlx4_ib_destroy_cq;
1977	ibdev->ib_dev.poll_cq		= mlx4_ib_poll_cq;
1978	ibdev->ib_dev.req_notify_cq	= mlx4_ib_arm_cq;
1979	ibdev->ib_dev.get_dma_mr	= mlx4_ib_get_dma_mr;
1980	ibdev->ib_dev.reg_user_mr	= mlx4_ib_reg_user_mr;
1981	ibdev->ib_dev.dereg_mr		= mlx4_ib_dereg_mr;
1982	ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
1983	ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
1984	ibdev->ib_dev.free_fast_reg_page_list  = mlx4_ib_free_fast_reg_page_list;
1985	ibdev->ib_dev.attach_mcast	= mlx4_ib_mcg_attach;
1986	ibdev->ib_dev.detach_mcast	= mlx4_ib_mcg_detach;
1987	ibdev->ib_dev.attach_flow	= mlx4_ib_flow_attach;
1988	ibdev->ib_dev.detach_flow	= mlx4_ib_flow_detach;
1989	ibdev->ib_dev.process_mad	= mlx4_ib_process_mad;
1990
1991	if (!mlx4_is_slave(ibdev->dev)) {
1992		ibdev->ib_dev.alloc_fmr		= mlx4_ib_fmr_alloc;
1993		ibdev->ib_dev.map_phys_fmr	= mlx4_ib_map_phys_fmr;
1994		ibdev->ib_dev.unmap_fmr		= mlx4_ib_unmap_fmr;
1995		ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;
1996	}
1997
1998	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
1999		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
2000		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
2001		ibdev->ib_dev.uverbs_cmd_mask |=
2002			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
2003			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
2004	}
2005
2006	mlx4_ib_alloc_eqs(dev, ibdev);
2007
2008	spin_lock_init(&iboe->lock);
2009
2010	if (init_node_data(ibdev))
2011		goto err_map;
2012
2013	for (i = 0; i < ibdev->num_ports; ++i) {
2014		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2015						IB_LINK_LAYER_ETHERNET) {
2016			err = mlx4_counter_alloc(ibdev->dev, i + 1, &ibdev->counters[i]);
2017			if (err)
2018				ibdev->counters[i] = -1;
2019		} else
2020			ibdev->counters[i] = -1;
2021	}
2022
2023	spin_lock_init(&ibdev->sm_lock);
2024	mutex_init(&ibdev->cap_mask_mutex);
2025
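	/*
	 * With device-managed flow steering on the PF, reserve a dedicated
	 * QPN range plus a bitmap allocator for it.  mlx4_ib_steer_qp_alloc()
	 * and mlx4_ib_steer_qp_free() below hand out QPNs from this range,
	 * and the range is reported to the firmware via
	 * mlx4_FLOW_STEERING_IB_UC_QP_RANGE() so IB unicast rules can target
	 * these QPs.
	 */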
2026	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2027	    !mlx4_is_slave(dev)) {
2028		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2029		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2030					    MLX4_IB_UC_STEER_QPN_ALIGN, &ibdev->steer_qpn_base, 0);
2031		if (err)
2032			goto err_counter;
2033
2034		ibdev->ib_uc_qpns_bitmap =
2035			kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
2036				sizeof(long),
2037				GFP_KERNEL);
2038		if (!ibdev->ib_uc_qpns_bitmap) {
2039			dev_err(&dev->pdev->dev, "bit map alloc failed\n");
2040			goto err_steer_qp_release;
2041		}
2042
2043		bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
2044
2045		err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(dev, ibdev->steer_qpn_base,
2046				ibdev->steer_qpn_base + ibdev->steer_qpn_count - 1);
2047		if (err)
2048			goto err_steer_free_bitmap;
2049	}
2050
2051	if (ib_register_device(&ibdev->ib_dev, NULL))
2052		goto err_steer_free_bitmap;
2053
2054	if (mlx4_ib_mad_init(ibdev))
2055		goto err_reg;
2056
2057	if (mlx4_ib_init_sriov(ibdev))
2058		goto err_mad;
2059
2060	if ((dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) && !iboe->nb.notifier_call) {
2061		iboe->nb.notifier_call = mlx4_ib_netdev_event;
2062		err = register_netdevice_notifier(&iboe->nb);
2063		if (err)
2064			goto err_sriov;
2065	}
2066
2067	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
2068		if (device_create_file(&ibdev->ib_dev.dev,
2069				       mlx4_class_attributes[j]))
2070			goto err_notif;
2071	}
2072	if (sysfs_create_group(&ibdev->ib_dev.dev.kobj, &diag_counters_group))
2073		goto err_notif;
2074
2075	ibdev->ib_active = true;
2076
2077	if (mlx4_is_mfunc(ibdev->dev))
2078		init_pkeys(ibdev);
2079
2080	/* create paravirt contexts for any VFs which are active */
2081	if (mlx4_is_master(ibdev->dev)) {
2082		for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2083			if (j == mlx4_master_func_num(ibdev->dev))
2084				continue;
2085			if (mlx4_is_slave_active(ibdev->dev, j))
2086				do_slave_init(ibdev, j, 1);
2087		}
2088	}
2089	return ibdev;
2090
2091err_notif:
2092	if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2093		pr_warn("failure unregistering notifier\n");
2094	flush_workqueue(wq);
2095
2096err_sriov:
2097	mlx4_ib_close_sriov(ibdev);
2098
2099err_mad:
2100	mlx4_ib_mad_cleanup(ibdev);
2101
2102err_reg:
2103	ib_unregister_device(&ibdev->ib_dev);
2104
2105err_steer_free_bitmap:
2106	kfree(ibdev->ib_uc_qpns_bitmap);
2107
2108err_steer_qp_release:
2109	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED)
2110		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2111				ibdev->steer_qpn_count);
2112err_counter:
2113	for (; i; --i)
2114		if (ibdev->counters[i - 1] != -1)
2115			mlx4_counter_free(ibdev->dev, i, ibdev->counters[i - 1]);
2116
2117err_map:
2118	iounmap(ibdev->priv_uar.map);
2119	mlx4_ib_free_eqs(dev, ibdev);
2120
2121err_uar:
2122	mlx4_uar_free(dev, &ibdev->priv_uar);
2123
2124err_pd:
2125	mlx4_pd_free(dev, ibdev->priv_pdn);
2126
2127err_dealloc:
2128	ib_dealloc_device(&ibdev->ib_dev);
2129
2130	return NULL;
2131}
2132
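/*
 * Allocate 'count' contiguous QPNs from the steering range reserved in
 * mlx4_ib_add().  The bitmap allocator rounds the request up to the next
 * power of two (get_count_order()), so callers must free with the same
 * count they allocated with.
 */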
2133int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
2134{
2135	int offset;
2136
2137	WARN_ON(!dev->ib_uc_qpns_bitmap);
2138
2139	offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
2140					 dev->steer_qpn_count,
2141					 get_count_order(count));
2142	if (offset < 0)
2143		return offset;
2144
2145	*qpn = dev->steer_qpn_base + offset;
2146	return 0;
2147}
2148
2149void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
2150{
2151	if (!qpn ||
2152	    dev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
2153		return;
2154
2155	BUG_ON(qpn < dev->steer_qpn_base);
2156
2157	bitmap_release_region(dev->ib_uc_qpns_bitmap,
2158			qpn - dev->steer_qpn_base, get_count_order(count));
2159}
2160
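/*
 * Attach or detach an IB unicast steering rule for this QP in the NIC
 * domain, depending on 'is_attach'.  The flow spec matches on the QP
 * number only (IB_FLOW_IB_UC).
 */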
2161int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
2162			 int is_attach)
2163{
2164	struct ib_flow_spec spec = {
2165		.type = IB_FLOW_IB_UC,
2166		.l2_id.ib_uc.qpn  = mqp->ibqp.qp_num,
2167	};
2168
2169	return is_attach ?
2170		__mlx4_ib_flow_attach(mdev, mqp, &spec, MLX4_DOMAIN_NIC, 0)
2171		: __mlx4_ib_flow_detach(mdev, mqp, &spec, MLX4_DOMAIN_NIC, 0);
2172}
2173
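/*
 * mlx4_ib_remove() undoes mlx4_ib_add(): SR-IOV and MAD teardown, sysfs
 * and netdev-notifier removal, unregistering the ib_device, releasing the
 * steering QPN range, counters, UAR and PD, and finally freeing the
 * ib_device itself.
 */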
2174static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
2175{
2176	struct mlx4_ib_dev *ibdev = ibdev_ptr;
2177	int p, j;
2178
2179	mlx4_ib_close_sriov(ibdev);
2180	sysfs_remove_group(&ibdev->ib_dev.dev.kobj, &diag_counters_group);
2181	mlx4_ib_mad_cleanup(ibdev);
2182
2183	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
2184		device_remove_file(&ibdev->ib_dev.dev, mlx4_class_attributes[j]);
2185	}
2186
2187	ib_unregister_device(&ibdev->ib_dev);
2188
2189	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
2190		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2191				ibdev->steer_qpn_count);
2192		kfree(ibdev->ib_uc_qpns_bitmap);
2193	}
2194
2195	if (ibdev->iboe.nb.notifier_call) {
2196		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2197			pr_warn("failure unregistering notifier\n");
2198		ibdev->iboe.nb.notifier_call = NULL;
2199	}
2200	iounmap(ibdev->priv_uar.map);
2201	for (p = 0; p < ibdev->num_ports; ++p)
2202		if (ibdev->counters[p] != -1)
2203			mlx4_counter_free(ibdev->dev, p + 1, ibdev->counters[p]);
2204	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
2205		mlx4_CLOSE_PORT(dev, p);
2206
2207	mlx4_ib_free_eqs(dev, ibdev);
2208
2209	mlx4_uar_free(dev, &ibdev->priv_uar);
2210	mlx4_pd_free(dev, ibdev->priv_pdn);
2211	ib_dealloc_device(&ibdev->ib_dev);
2212}
2213
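/*
 * On the master, queue one mlx4_ib_demux_work item per port to initialize
 * (do_init != 0) or tear down (do_init == 0) the tunnel QPs for the given
 * slave.  Nothing is queued once sriov.is_going_down is set.
 */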
2214static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
2215{
2216	struct mlx4_ib_demux_work **dm = NULL;
2217	struct mlx4_dev *dev = ibdev->dev;
2218	int i;
2219	unsigned long flags;
2220
2221	if (!mlx4_is_master(dev))
2222		return;
2223
2224	dm = kcalloc(dev->caps.num_ports, sizeof *dm, GFP_ATOMIC);
2225	if (!dm) {
2226		pr_err("failed to allocate memory for tunneling qp update\n");
2227		goto out;
2228	}
2229
2230	for (i = 0; i < dev->caps.num_ports; i++) {
2231		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
2232		if (!dm[i]) {
2233			pr_err("failed to allocate memory for tunneling qp update work struct\n");
2234			for (i = 0; i < dev->caps.num_ports; i++)
2235				kfree(dm[i]);
2238			goto out;
2239		}
2240	}
2241	/* initialize or tear down tunnel QPs for the slave */
2242	for (i = 0; i < dev->caps.num_ports; i++) {
2243		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
2244		dm[i]->port = i + 1;
2245		dm[i]->slave = slave;
2246		dm[i]->do_init = do_init;
2247		dm[i]->dev = ibdev;
2248		spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
2249		if (!ibdev->sriov.is_going_down)
2250			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
2251		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
2252	}
2253out:
2254	kfree(dm);
2256	return;
2257}
2258
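/*
 * mlx4 core event callback: translate low-level device events into IB
 * events.  Port management change events carry an EQE and are handled in
 * work context (queued on the master, called synchronously otherwise);
 * slave init/shutdown events only (re)build the slave's tunnel QPs and do
 * not generate an IB event.
 */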
2259static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
2260			  enum mlx4_dev_event event, unsigned long param)
2261{
2262	struct ib_event ibev;
2263	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
2264	struct mlx4_eqe *eqe = NULL;
2265	struct ib_event_work *ew;
2266	int p = 0;
2267
2268	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
2269		eqe = (struct mlx4_eqe *)param;
2270	else
2271		p = (int) param;
2272
2273	switch (event) {
2274	case MLX4_DEV_EVENT_PORT_UP:
2275		if (p > ibdev->num_ports)
2276			return;
2277		if (mlx4_is_master(dev) &&
2278		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
2279			IB_LINK_LAYER_INFINIBAND) {
2280			mlx4_ib_invalidate_all_guid_record(ibdev, p);
2281		}
2282		mlx4_ib_info((struct ib_device *) ibdev_ptr,
2283			     "Port %d logical link is up\n", p);
2284		ibev.event = IB_EVENT_PORT_ACTIVE;
2285		break;
2286
2287	case MLX4_DEV_EVENT_PORT_DOWN:
2288		if (p > ibdev->num_ports)
2289			return;
2290		mlx4_ib_info((struct ib_device *) ibdev_ptr,
2291			     "Port %d logical link is down\n", p);
2292		ibev.event = IB_EVENT_PORT_ERR;
2293		break;
2294
2295	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
2296		ibdev->ib_active = false;
2297		ibev.event = IB_EVENT_DEVICE_FATAL;
2298		break;
2299
2300	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
2301		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
2302		if (!ew) {
2303			pr_err("failed to allocate memory for events work\n");
2304			break;
2305		}
2306
2307		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
2308		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
2309		ew->ib_dev = ibdev;
2310		/* need to queue only for port owner, which uses GEN_EQE */
2311		if (mlx4_is_master(dev))
2312			queue_work(wq, &ew->work);
2313		else
2314			handle_port_mgmt_change_event(&ew->work);
2315		return;
2316
2317	case MLX4_DEV_EVENT_SLAVE_INIT:
2318		/* here, p is the slave id */
2319		do_slave_init(ibdev, p, 1);
2320		return;
2321
2322	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
2323		/* here, p is the slave id */
2324		do_slave_init(ibdev, p, 0);
2325		return;
2326
2327	default:
2328		return;
2329	}
2330
2331	ibev.device	      = ibdev_ptr;
2332	ibev.element.port_num = (u8) p;
2333
2334	ib_dispatch_event(&ibev);
2335}
2336
2337static struct mlx4_interface mlx4_ib_interface = {
2338	.add		= mlx4_ib_add,
2339	.remove		= mlx4_ib_remove,
2340	.event		= mlx4_ib_event,
2341	.protocol	= MLX4_PROT_IB_IPV6
2342};
2343
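/*
 * Module init: create the single-threaded work queue, set up the procfs
 * directories (Linux only), initialize multicast group handling, parse
 * dev_assign_str, and finally register with the mlx4 core, which invokes
 * mlx4_ib_add() for every existing device.
 */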
2344static int __init mlx4_ib_init(void)
2345{
2346	int err;
2347
2348	wq = create_singlethread_workqueue("mlx4_ib");
2349	if (!wq)
2350		return -ENOMEM;
2351
2352#ifdef __linux__
2353	err = mlx4_ib_proc_init();
2354	if (err)
2355		goto clean_wq;
2356#endif
2357
2358	err = mlx4_ib_mcg_init();
2359	if (err)
2360		goto clean_proc;
2361
2362	init_dev_assign();
2363
2364	err = mlx4_register_interface(&mlx4_ib_interface);
2365	if (err)
2366		goto clean_mcg;
2367
2368	return 0;
2369
2370clean_mcg:
2371	mlx4_ib_mcg_destroy();
2372
2373clean_proc:
2374#ifdef __linux__
2375	remove_proc_entry(MLX4_IB_MRS_PROC_DIR_NAME,
2376			  mlx4_ib_driver_dir_entry);
2377	remove_proc_entry(MLX4_IB_DRIVER_PROC_DIR_NAME, NULL);
2378
2379clean_wq:
2380#endif
2381	destroy_workqueue(wq);
2382	return err;
2383}
2384
2385static void __exit mlx4_ib_cleanup(void)
2386{
2387	mlx4_unregister_interface(&mlx4_ib_interface);
2388	mlx4_ib_mcg_destroy();
2389	destroy_workqueue(wq);
2390
2391	/* Remove proc entries */
2392#ifdef __linux__
2393	remove_proc_entry(MLX4_IB_MRS_PROC_DIR_NAME,
2394				mlx4_ib_driver_dir_entry);
2395	remove_proc_entry(MLX4_IB_DRIVER_PROC_DIR_NAME, NULL);
2396#endif
2397
2398}
2399
2400module_init_order(mlx4_ib_init, SI_ORDER_MIDDLE);
2401module_exit(mlx4_ib_cleanup);
2402
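/*
 * FreeBSD module glue: a no-op event handler plus DECLARE_MODULE() and
 * MODULE_DEPEND() declarations so the kernel linker knows mlx4ib depends
 * on the mlx4 and ibcore modules.  The real init/exit work is done by the
 * module_init_order()/module_exit() handlers above.
 */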
2403#undef MODULE_VERSION
2404#include <sys/module.h>
2405static int
2406mlx4ib_evhand(module_t mod, int event, void *arg)
2407{
2408	return (0);
2409}
2410
2411static moduledata_t mlx4ib_mod = {
2412	.name = "mlx4ib",
2413	.evhand = mlx4ib_evhand,
2414};
2415
2416DECLARE_MODULE(mlx4ib, mlx4ib_mod, SI_SUB_OFED_PREINIT, SI_ORDER_ANY);
2417MODULE_DEPEND(mlx4ib, mlx4, 1, 1, 1);
2418MODULE_DEPEND(mlx4ib, ibcore, 1, 1, 1);
2419