mlx4_ib_mad.c revision 296382
1/*
2 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses.  You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 *     Redistribution and use in source and binary forms, with or
11 *     without modification, are permitted provided that the following
12 *     conditions are met:
13 *
14 *      - Redistributions of source code must retain the above
15 *        copyright notice, this list of conditions and the following
16 *        disclaimer.
17 *
18 *      - Redistributions in binary form must reproduce the above
19 *        copyright notice, this list of conditions and the following
20 *        disclaimer in the documentation and/or other materials
21 *        provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <rdma/ib_mad.h>
34#include <rdma/ib_smi.h>
35#include <rdma/ib_sa.h>
36#include <rdma/ib_cache.h>
37
38#include <linux/random.h>
39#include <linux/mlx4/cmd.h>
40#include <linux/gfp.h>
41#include <rdma/ib_pma.h>
42
43#include "mlx4_ib.h"
44
45enum {
46	MLX4_IB_VENDOR_CLASS1 = 0x9,
47	MLX4_IB_VENDOR_CLASS2 = 0xa
48};
49
50#define MLX4_TUN_SEND_WRID_SHIFT 34
51#define MLX4_TUN_QPN_SHIFT 32
52#define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
53#define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)
54
55#define MLX4_TUN_IS_RECV(a)  (((a) >>  MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
56#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)
57
58 /* Port mgmt change event handling */
59
60#define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr)
61#define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask)
62#define NUM_IDX_IN_PKEY_TBL_BLK 32
63#define GUID_TBL_ENTRY_SIZE 8	   /* size in bytes */
64#define GUID_TBL_BLK_NUM_ENTRIES 8
65#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
66
67struct mlx4_mad_rcv_buf {
68	struct ib_grh grh;
69	u8 payload[256];
70} __packed;
71
72struct mlx4_mad_snd_buf {
73	u8 payload[256];
74} __packed;
75
76struct mlx4_tunnel_mad {
77	struct ib_grh grh;
78	struct mlx4_ib_tunnel_header hdr;
79	struct ib_mad mad;
80} __packed;
81
82struct mlx4_rcv_tunnel_mad {
83	struct mlx4_rcv_tunnel_hdr hdr;
84	struct ib_grh grh;
85	struct ib_mad mad;
86} __packed;
87
88static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
89static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
90static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
91				int block, u32 change_bitmap);
92
93__be64 mlx4_ib_gen_node_guid(void)
94{
95#define NODE_GUID_HI	((u64) (((u64)IB_OPENIB_OUI) << 40))
96	return cpu_to_be64(NODE_GUID_HI | random());
97}
98
99__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
100{
101	return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
102		cpu_to_be64(0xff00000000000000LL);
103}
104
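/*
 * Issue the MAD_IFC firmware command: the MAD is copied into a command
 * mailbox, optionally extended with work-completion/GRH info so the FW can
 * generate key-check traps, and the 256-byte response is copied back.
 * The 0x8 op_modifier bit requests network view and the native command path.
 */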
105int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
106		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
107		 void *in_mad, void *response_mad)
108{
109	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
110	void *inbox;
111	int err;
112	u32 in_modifier = port;
113	u8 op_modifier = 0;
114
115	inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
116	if (IS_ERR(inmailbox))
117		return PTR_ERR(inmailbox);
118	inbox = inmailbox->buf;
119
120	outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
121	if (IS_ERR(outmailbox)) {
122		mlx4_free_cmd_mailbox(dev->dev, inmailbox);
123		return PTR_ERR(outmailbox);
124	}
125
126	memcpy(inbox, in_mad, 256);
127
128	/*
129	 * Key check traps can't be generated unless we have in_wc to
130	 * tell us where to send the trap.
131	 */
132	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc)
133		op_modifier |= 0x1;
134	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc)
135		op_modifier |= 0x2;
136	if (mlx4_is_mfunc(dev->dev) &&
137	    (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc))
138		op_modifier |= 0x8;
139
140	if (in_wc) {
141		struct {
142			__be32		my_qpn;
143			u32		reserved1;
144			__be32		rqpn;
145			u8		sl;
146			u8		g_path;
147			u16		reserved2[2];
148			__be16		pkey;
149			u32		reserved3[11];
150			u8		grh[40];
151		} *ext_info;
152
153		memset(inbox + 256, 0, 256);
154		ext_info = inbox + 256;
155
156		ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
157		ext_info->rqpn   = cpu_to_be32(in_wc->src_qp);
158		ext_info->sl     = in_wc->sl << 4;
159		ext_info->g_path = in_wc->dlid_path_bits |
160			(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
161		ext_info->pkey   = cpu_to_be16(in_wc->pkey_index);
162
163		if (in_grh)
164			memcpy(ext_info->grh, in_grh, 40);
165
166		op_modifier |= 0x4;
167
168		in_modifier |= in_wc->slid << 16;
169	}
170
171	err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
172			   mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,
173			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
174			   (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED);
175
176	if (!err)
177		memcpy(response_mad, outmailbox->buf, 256);
178
179	mlx4_free_cmd_mailbox(dev->dev, inmailbox);
180	mlx4_free_cmd_mailbox(dev->dev, outmailbox);
181
182	return err;
183}
184
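/*
 * Cache an address handle for the subnet manager of the given port,
 * replacing any previous one under sm_lock.  Used later (e.g. by
 * forward_trap()) to send MADs to the SM.
 */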
185static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
186{
187	struct ib_ah *new_ah;
188	struct ib_ah_attr ah_attr;
189	unsigned long flags;
190
191	if (!dev->send_agent[port_num - 1][0])
192		return;
193
194	memset(&ah_attr, 0, sizeof ah_attr);
195	ah_attr.dlid     = lid;
196	ah_attr.sl       = sl;
197	ah_attr.port_num = port_num;
198
199	new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
200			      &ah_attr);
201	if (IS_ERR(new_ah))
202		return;
203
204	spin_lock_irqsave(&dev->sm_lock, flags);
205	if (dev->sm_ah[port_num - 1])
206		ib_destroy_ah(dev->sm_ah[port_num - 1]);
207	dev->sm_ah[port_num - 1] = new_ah;
208	spin_unlock_irqrestore(&dev->sm_lock, flags);
209}
210
211/*
212 * Snoop SM MADs for port info, GUID info, and  P_Key table sets, so we can
213 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
214 */
215static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
216		      u16 prev_lid)
217{
218	struct ib_port_info *pinfo;
219	u16 lid;
220	__be16 *base;
221	u32 bn, pkey_change_bitmap;
222	int i;
223
224
225	struct mlx4_ib_dev *dev = to_mdev(ibdev);
226	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
227	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
228	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
229		switch (mad->mad_hdr.attr_id) {
230		case IB_SMP_ATTR_PORT_INFO:
231			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
232			lid = be16_to_cpu(pinfo->lid);
233
234			update_sm_ah(dev, port_num,
235				     be16_to_cpu(pinfo->sm_lid),
236				     pinfo->neighbormtu_mastersmsl & 0xf);
237
238			if (pinfo->clientrereg_resv_subnetto & 0x80)
239				handle_client_rereg_event(dev, port_num);
240
241			if (prev_lid != lid)
242				handle_lid_change_event(dev, port_num);
243			break;
244
245		case IB_SMP_ATTR_PKEY_TABLE:
246			if (!mlx4_is_mfunc(dev->dev)) {
247				mlx4_ib_dispatch_event(dev, port_num,
248						       IB_EVENT_PKEY_CHANGE);
249				break;
250			}
251
252			/* at this point, we are running in the master.
253			 * Slaves do not receive SMPs.
254			 */
255			bn  = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
256			base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
257			pkey_change_bitmap = 0;
258			for (i = 0; i < 32; i++) {
259				pr_debug("PKEY[%d] = x%x\n",
260					 i + bn*32, be16_to_cpu(base[i]));
261				if (be16_to_cpu(base[i]) !=
262				    dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
263					pkey_change_bitmap |= (1 << i);
264					dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
265						be16_to_cpu(base[i]);
266				}
267			}
268			pr_debug("PKEY Change event: port=%d, "
269				 "block=0x%x, change_bitmap=0x%x\n",
270				 port_num, bn, pkey_change_bitmap);
271
272			if (pkey_change_bitmap) {
273				mlx4_ib_dispatch_event(dev, port_num,
274						       IB_EVENT_PKEY_CHANGE);
275				if (!dev->sriov.is_going_down)
276					__propagate_pkey_ev(dev, port_num, bn,
277							    pkey_change_bitmap);
278			}
279			break;
280
281		case IB_SMP_ATTR_GUID_INFO:
282			/* paravirtualized master's guid is guid 0 -- does not change */
283			if (!mlx4_is_master(dev->dev))
284				mlx4_ib_dispatch_event(dev, port_num,
285						       IB_EVENT_GID_CHANGE);
286			/*if master, notify relevant slaves*/
287			if (mlx4_is_master(dev->dev) &&
288			    !dev->sriov.is_going_down) {
289				bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod);
290				mlx4_ib_update_cache_on_guid_change(dev, bn, port_num,
291								    (u8 *)(&((struct ib_smp *)mad)->data));
292				mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num,
293								     (u8 *)(&((struct ib_smp *)mad)->data));
294			}
295			break;
296
297		default:
298			break;
299		}
300}
301
302static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
303				int block, u32 change_bitmap)
304{
305	int i, ix, slave, err;
306	int have_event = 0;
307
308	for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) {
309		if (slave == mlx4_master_func_num(dev->dev))
310			continue;
311		if (!mlx4_is_slave_active(dev->dev, slave))
312			continue;
313
314		have_event = 0;
315		for (i = 0; i < 32; i++) {
316			if (!(change_bitmap & (1 << i)))
317				continue;
318			for (ix = 0;
319			     ix < dev->dev->caps.pkey_table_len[port_num]; ix++) {
320				if (dev->pkeys.virt2phys_pkey[slave][port_num - 1]
321				    [ix] == i + 32 * block) {
322					err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num);
323					pr_debug("propagate_pkey_ev: slave %d,"
324						 " port %d, ix %d (%d)\n",
325						 slave, port_num, ix, err);
326					have_event = 1;
327					break;
328				}
329			}
330			if (have_event)
331				break;
332		}
333	}
334}
335
336static void node_desc_override(struct ib_device *dev,
337			       struct ib_mad *mad)
338{
339	unsigned long flags;
340
341	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
342	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
343	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
344	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
345		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
346		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
347		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
348	}
349}
350
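/*
 * Forward an unsolicited trap MAD to the subnet manager using the cached
 * sm_ah for the port.  QP0 is used for LID-routed SMP traps, QP1 otherwise.
 */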
351static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
352{
353	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
354	struct ib_mad_send_buf *send_buf;
355	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
356	int ret;
357	unsigned long flags;
358
359	if (agent) {
360		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
361					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
362		if (IS_ERR(send_buf))
363			return;
364		/*
365		 * We rely here on the fact that MLX QPs don't use the
366		 * address handle after the send is posted (this is
367		 * wrong following the IB spec strictly, but we know
368		 * it's OK for our devices).
369		 */
370		spin_lock_irqsave(&dev->sm_lock, flags);
371		memcpy(send_buf->mad, mad, sizeof *mad);
372		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
373			ret = ib_post_send_mad(send_buf, NULL);
374		else
375			ret = -EINVAL;
376		spin_unlock_irqrestore(&dev->sm_lock, flags);
377
378		if (ret)
379			ib_free_send_mad(send_buf);
380	}
381}
382
383static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
384							     struct ib_sa_mad *sa_mad)
385{
386	int ret = 0;
387
388	/* dispatch to different sa handlers */
389	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
390	case IB_SA_ATTR_MC_MEMBER_REC:
391		ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad);
392		break;
393	default:
394		break;
395	}
396	return ret;
397}
398
399int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
400{
401	struct mlx4_ib_dev *dev = to_mdev(ibdev);
402	int i;
403
404	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
405		if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
406			return i;
407	}
408	return -1;
409}
410
411
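/*
 * Map a physical P_Key value to the given slave's virtual P_Key index.
 * For the master the cached pkey table is searched directly.  A
 * full-membership match is preferred; otherwise the first limited
 * (partial-membership) match found is returned.
 */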
412static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
413				   u8 port, u16 pkey, u16 *ix)
414{
415	int i, ret;
416	u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
417	u16 slot_pkey;
418
419	if (slave == mlx4_master_func_num(dev->dev))
420		return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);
421
422	unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1;
423
424	for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
425		if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix)
426			continue;
427
428		pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i];
429
430		ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);
431		if (ret)
432			continue;
433		if ((slot_pkey & 0x7FFF) == (pkey & 0x7FFF)) {
434			if (slot_pkey & 0x8000) {
435				*ix = (u16) pkey_ix;
436				return 0;
437			} else {
438				/* take first partial pkey index found */
439				if (partial_ix == 0xFF)
440					partial_ix = pkey_ix;
441			}
442		}
443	}
444
445	if (partial_ix < 0xFF) {
446		*ix = (u16) partial_ix;
447		return 0;
448	}
449
450	return -EINVAL;
451}
452
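/*
 * Tunnel a MAD that arrived from the wire to the given slave: wrap the GRH,
 * a tunnel header (pkey index, source QP, slid/vlan info) and the MAD itself
 * in a mlx4_rcv_tunnel_mad and post it on the slave's tunnel QP.
 */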
453int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
454			  enum ib_qp_type dest_qpt, struct ib_wc *wc,
455			  struct ib_grh *grh, struct ib_mad *mad)
456{
457	struct ib_sge list;
458	struct ib_send_wr wr, *bad_wr;
459	struct mlx4_ib_demux_pv_ctx *tun_ctx;
460	struct mlx4_ib_demux_pv_qp *tun_qp;
461	struct mlx4_rcv_tunnel_mad *tun_mad;
462	struct ib_ah_attr attr;
463	struct ib_ah *ah;
464	struct ib_qp *src_qp = NULL;
465	unsigned tun_tx_ix = 0;
466	int dqpn;
467	int ret = 0;
468	u16 tun_pkey_ix;
469	u16 cached_pkey;
470	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
471
472	if (dest_qpt > IB_QPT_GSI)
473		return -EINVAL;
474
475	tun_ctx = dev->sriov.demux[port-1].tun[slave];
476
477	/* check if proxy qp created */
478	if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
479		return -EAGAIN;
480
481	/* QP0 forwarding only for Dom0 */
482	if (!dest_qpt && (mlx4_master_func_num(dev->dev) != slave))
483		return -EINVAL;
484
485	if (!dest_qpt)
486		tun_qp = &tun_ctx->qp[0];
487	else
488		tun_qp = &tun_ctx->qp[1];
489
490	/* compute P_Key index to put in tunnel header for slave */
491	if (dest_qpt) {
492		u16 pkey_ix;
493		ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
494		if (ret)
495			return -EINVAL;
496
497		ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
498		if (ret)
499			return -EINVAL;
500		tun_pkey_ix = pkey_ix;
501	} else
502		tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
503
504	dqpn = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave + port + (dest_qpt * 2) - 1;
505
506	/* get tunnel tx data buf for slave */
507	src_qp = tun_qp->qp;
508
509	/* Create an ah. An empty one with just the port num is enough for the
510	 * post send; the driver will set the force-loopback bit in post_send */
511	memset(&attr, 0, sizeof attr);
512	attr.port_num = port;
513	if (is_eth) {
514		memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16);
515		attr.ah_flags = IB_AH_GRH;
516	}
517	ah = ib_create_ah(tun_ctx->pd, &attr);
518	if (IS_ERR(ah))
519		return -ENOMEM;
520
521	/* allocate a tunnel tx buffer slot only after passing the failure-return checks above */
522	spin_lock(&tun_qp->tx_lock);
523	if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
524	    (MLX4_NUM_TUNNEL_BUFS - 1))
525		ret = -EAGAIN;
526	else
527		tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
528	spin_unlock(&tun_qp->tx_lock);
529	if (ret)
530		goto out;
531
532	tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
533	if (tun_qp->tx_ring[tun_tx_ix].ah)
534		ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
535	tun_qp->tx_ring[tun_tx_ix].ah = ah;
536	ib_dma_sync_single_for_cpu(&dev->ib_dev,
537				   tun_qp->tx_ring[tun_tx_ix].buf.map,
538				   sizeof (struct mlx4_rcv_tunnel_mad),
539				   DMA_TO_DEVICE);
540
541	/* copy over to tunnel buffer */
542	if (grh)
543		memcpy(&tun_mad->grh, grh, sizeof *grh);
544	memcpy(&tun_mad->mad, mad, sizeof *mad);
545
546	/* adjust tunnel data */
547	tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
548	tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
549	tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;
550
551	if (is_eth) {
552		u16 vlan = 0;
553		if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan,
554		    NULL)) {
555			if (vlan != wc->vlan_id)
556				/* VST mode and the default vlan does not match the
557				 * packet vlan: drop the packet */
558				goto out;
559			else
560				/* VST mode: hide the vlan from the VF */
561				vlan = 0;
562		} else {
563			vlan = wc->vlan_id;
564		}
565
566		tun_mad->hdr.sl_vid = cpu_to_be16(vlan);
567		memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4);
568		memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2);
569	} else {
570		tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
571		tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
572	}
573
574	ib_dma_sync_single_for_device(&dev->ib_dev,
575				      tun_qp->tx_ring[tun_tx_ix].buf.map,
576				      sizeof (struct mlx4_rcv_tunnel_mad),
577				      DMA_TO_DEVICE);
578
579	list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
580	list.length = sizeof (struct mlx4_rcv_tunnel_mad);
581	list.lkey = tun_ctx->mr->lkey;
582
583	wr.wr.ud.ah = ah;
584	wr.wr.ud.port_num = port;
585	wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
586	wr.wr.ud.remote_qpn = dqpn;
587	wr.next = NULL;
588	wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
589	wr.sg_list = &list;
590	wr.num_sge = 1;
591	wr.opcode = IB_WR_SEND;
592	wr.send_flags = IB_SEND_SIGNALED;
593
594	ret = ib_post_send(src_qp, &wr, &bad_wr);
595out:
596	if (ret)
597		ib_destroy_ah(ah);
598	return ret;
599}
600
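/*
 * Demultiplex a MAD received from the wire: determine the destination
 * function (master or slave) -- by GID for RoCE, or by the TID-encoded
 * slave id, the GRH and the management class for IB -- and tunnel the MAD
 * to that function with mlx4_ib_send_to_slave().
 */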
601static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
602			struct ib_wc *wc, struct ib_grh *grh,
603			struct ib_mad *mad)
604{
605	struct mlx4_ib_dev *dev = to_mdev(ibdev);
606	int err;
607	int slave;
608	u8 *slave_id;
609	int is_eth = 0;
610
611	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
612		is_eth = 0;
613	else
614		is_eth = 1;
615
616	if (is_eth) {
617		if (!(wc->wc_flags & IB_WC_GRH)) {
618			mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
619			return -EINVAL;
620		}
621		if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) {
622			mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
623			return -EINVAL;
624		}
625		if (mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave)) {
626			mlx4_ib_warn(ibdev, "failed matching grh\n");
627			return -ENOENT;
628		}
629		if (slave >= dev->dev->caps.sqp_demux) {
630			mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
631				     slave, dev->dev->caps.sqp_demux);
632			return -ENOENT;
633		}
634
635		if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad, is_eth))
636			return 0;
637
638		err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
639		if (err)
640			pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
641				 slave, err);
642		return 0;
643	}
644
645	/* Initially assume that this mad is for us */
646	slave = mlx4_master_func_num(dev->dev);
647
648	/* See if the slave id is encoded in a response mad */
649	if (mad->mad_hdr.method & 0x80) {
650		slave_id = (u8 *) &mad->mad_hdr.tid;
651		slave = *slave_id;
652		if (slave != 255) /*255 indicates the dom0*/
653			*slave_id = 0; /* remap tid */
654	}
655
656	/* If a grh is present, we demux according to it */
657	if (wc->wc_flags & IB_WC_GRH) {
658		slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
659		if (slave < 0) {
660			mlx4_ib_warn(ibdev, "failed matching grh\n");
661			return -ENOENT;
662		}
663	}
664	/* Class-specific handling */
665	switch (mad->mad_hdr.mgmt_class) {
666	case IB_MGMT_CLASS_SUBN_ADM:
667		if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
668					     (struct ib_sa_mad *) mad))
669			return 0;
670		break;
671	case IB_MGMT_CLASS_CM:
672		if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad, is_eth))
673			return 0;
674		break;
675	case IB_MGMT_CLASS_DEVICE_MGMT:
676		if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
677			return 0;
678		break;
679	default:
680		/* Drop unsupported classes for slaves in tunnel mode */
681		if (slave != mlx4_master_func_num(dev->dev)) {
682			pr_debug("dropping unsupported ingress mad from class:%d "
683				 "for slave:%d\n", mad->mad_hdr.mgmt_class, slave);
684			return 0;
685		}
686	}
687	/* make sure a slave value of 255 that was not remapped above does not slip through */
688	if (slave >= dev->dev->caps.sqp_demux) {
689		mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
690			     slave, dev->dev->caps.sqp_demux);
691		return -ENOENT;
692	}
693
694	err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
695	if (err)
696		pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
697			 slave, err);
698	return 0;
699}
700
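/*
 * Process a MAD on an IB link-layer port: forward locally generated traps
 * (slid 0) to the SM, filter unsupported classes and methods, pass the MAD
 * to the firmware through MAD_IFC, snoop SM Set MADs to synthesize port
 * events, and override the node description in responses.
 */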
701static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
702			struct ib_wc *in_wc, struct ib_grh *in_grh,
703			struct ib_mad *in_mad, struct ib_mad *out_mad)
704{
705	u16 slid, prev_lid = 0;
706	int err;
707	struct ib_port_attr pattr;
708
709	if (in_wc && in_wc->qp->qp_num) {
710		pr_debug("received MAD: slid:%d sqpn:%d "
711			"dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
712			in_wc->slid, in_wc->src_qp,
713			in_wc->dlid_path_bits,
714			in_wc->qp->qp_num,
715			in_wc->wc_flags,
716			in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
717			be16_to_cpu(in_mad->mad_hdr.attr_id));
718		if (in_wc->wc_flags & IB_WC_GRH) {
719			pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
720				 (unsigned long long)be64_to_cpu(in_grh->sgid.global.subnet_prefix),
721				 (unsigned long long)be64_to_cpu(in_grh->sgid.global.interface_id));
722			pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
723				 (unsigned long long)be64_to_cpu(in_grh->dgid.global.subnet_prefix),
724				 (unsigned long long)be64_to_cpu(in_grh->dgid.global.interface_id));
725		}
726	}
727
728	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
729
730	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
731		forward_trap(to_mdev(ibdev), port_num, in_mad);
732		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
733	}
734
735	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
736	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
737		if (in_mad->mad_hdr.method   != IB_MGMT_METHOD_GET &&
738		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_SET &&
739		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_TRAP_REPRESS)
740			return IB_MAD_RESULT_SUCCESS;
741
742		/*
743		 * Don't process SMInfo queries -- the SMA can't handle them.
744		 */
745		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
746			return IB_MAD_RESULT_SUCCESS;
747	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
748		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1   ||
749		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2   ||
750		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
751		if (in_mad->mad_hdr.method  != IB_MGMT_METHOD_GET &&
752		    in_mad->mad_hdr.method  != IB_MGMT_METHOD_SET)
753			return IB_MAD_RESULT_SUCCESS;
754	} else
755		return IB_MAD_RESULT_SUCCESS;
756
757	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
758	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
759	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
760	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
761	    !ib_query_port(ibdev, port_num, &pattr))
762		prev_lid = pattr.lid;
763
764	err = mlx4_MAD_IFC(to_mdev(ibdev),
765			   (mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
766			   (mad_flags & IB_MAD_IGNORE_BKEY ? MLX4_MAD_IFC_IGNORE_BKEY : 0) |
767			   MLX4_MAD_IFC_NET_VIEW,
768			   port_num, in_wc, in_grh, in_mad, out_mad);
769	if (err)
770		return IB_MAD_RESULT_FAILURE;
771
772	if (!out_mad->mad_hdr.status) {
773		if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
774			smp_snoop(ibdev, port_num, in_mad, prev_lid);
775		/* slaves get node desc from FW */
776		if (!mlx4_is_slave(to_mdev(ibdev)->dev))
777			node_desc_override(ibdev, out_mad);
778	}
779
780	/* set return bit in status of directed route responses */
781	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
782		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
783
784	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
785		/* no response for trap repress */
786		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
787
788	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
789}
790
791static void edit_counter_ext(struct mlx4_if_stat_extended *cnt, void *counters,
792			     __be16 attr_id)
793{
794	switch (attr_id) {
795	case IB_PMA_PORT_COUNTERS:
796	{
797		struct ib_pma_portcounters *pma_cnt =
798				(struct ib_pma_portcounters *)counters;
799		pma_cnt->port_xmit_data =
800			cpu_to_be32((be64_to_cpu(cnt->counters[0].
801						 IfTxUnicastOctets) +
802				     be64_to_cpu(cnt->counters[0].
803						 IfTxMulticastOctets) +
804				     be64_to_cpu(cnt->counters[0].
805						 IfTxBroadcastOctets) +
806				     be64_to_cpu(cnt->counters[0].
807						 IfTxDroppedOctets)) >> 2);
808		pma_cnt->port_rcv_data  =
809			cpu_to_be32((be64_to_cpu(cnt->counters[0].
810						 IfRxUnicastOctets) +
811				     be64_to_cpu(cnt->counters[0].
812						 IfRxMulticastOctets) +
813				     be64_to_cpu(cnt->counters[0].
814						 IfRxBroadcastOctets) +
815				     be64_to_cpu(cnt->counters[0].
816						 IfRxNoBufferOctets) +
817				     be64_to_cpu(cnt->counters[0].
818						 IfRxErrorOctets)) >> 2);
819		pma_cnt->port_xmit_packets =
820			cpu_to_be32(be64_to_cpu(cnt->counters[0].
821						IfTxUnicastFrames) +
822				    be64_to_cpu(cnt->counters[0].
823						IfTxMulticastFrames) +
824				    be64_to_cpu(cnt->counters[0].
825						IfTxBroadcastFrames) +
826				    be64_to_cpu(cnt->counters[0].
827						IfTxDroppedFrames));
828		pma_cnt->port_rcv_packets  =
829			cpu_to_be32(be64_to_cpu(cnt->counters[0].
830						IfRxUnicastFrames) +
831				    be64_to_cpu(cnt->counters[0].
832						IfRxMulticastFrames) +
833				    be64_to_cpu(cnt->counters[0].
834						IfRxBroadcastFrames) +
835				    be64_to_cpu(cnt->counters[0].
836						IfRxNoBufferFrames) +
837				    be64_to_cpu(cnt->counters[0].
838						IfRxErrorFrames));
839		pma_cnt->port_rcv_errors = cpu_to_be32(be64_to_cpu(cnt->
840						       counters[0].
841						       IfRxErrorFrames));
842		break;
843	}
844
845	case IB_PMA_PORT_COUNTERS_EXT:
846	{
847		struct ib_pma_portcounters_ext *pma_cnt_ext =
848				(struct ib_pma_portcounters_ext *)counters;
849
850		pma_cnt_ext->port_xmit_data =
851			cpu_to_be64((be64_to_cpu(cnt->counters[0].
852						 IfTxUnicastOctets) +
853				     be64_to_cpu(cnt->counters[0].
854						 IfTxMulticastOctets) +
855				     be64_to_cpu(cnt->counters[0].
856						 IfTxBroadcastOctets) +
857				     be64_to_cpu(cnt->counters[0].
858						 IfTxDroppedOctets)) >> 2);
859		pma_cnt_ext->port_rcv_data  =
860			cpu_to_be64((be64_to_cpu(cnt->counters[0].
861						 IfRxUnicastOctets) +
862				     be64_to_cpu(cnt->counters[0].
863						 IfRxMulticastOctets) +
864				     be64_to_cpu(cnt->counters[0].
865						 IfRxBroadcastOctets) +
866				     be64_to_cpu(cnt->counters[0].
867						 IfRxNoBufferOctets) +
868				     be64_to_cpu(cnt->counters[0].
869						 IfRxErrorOctets)) >> 2);
870		pma_cnt_ext->port_xmit_packets =
871			cpu_to_be64(be64_to_cpu(cnt->counters[0].
872						IfTxUnicastFrames) +
873				    be64_to_cpu(cnt->counters[0].
874						IfTxMulticastFrames) +
875				    be64_to_cpu(cnt->counters[0].
876						IfTxBroadcastFrames) +
877				    be64_to_cpu(cnt->counters[0].
878						IfTxDroppedFrames));
879		pma_cnt_ext->port_rcv_packets  =
880			cpu_to_be64(be64_to_cpu(cnt->counters[0].
881						IfRxUnicastFrames) +
882				    be64_to_cpu(cnt->counters[0].
883						IfRxMulticastFrames) +
884				    be64_to_cpu(cnt->counters[0].
885						IfRxBroadcastFrames) +
886				    be64_to_cpu(cnt->counters[0].
887						IfRxNoBufferFrames) +
888				    be64_to_cpu(cnt->counters[0].
889						IfRxErrorFrames));
890		pma_cnt_ext->port_unicast_xmit_packets = cnt->counters[0].
891						IfTxUnicastFrames;
892		pma_cnt_ext->port_unicast_rcv_packets = cnt->counters[0].
893						IfRxUnicastFrames;
894		pma_cnt_ext->port_multicast_xmit_packets =
895			cpu_to_be64(be64_to_cpu(cnt->counters[0].
896						IfTxMulticastFrames) +
897				    be64_to_cpu(cnt->counters[0].
898						IfTxBroadcastFrames));
899		pma_cnt_ext->port_multicast_rcv_packets =
900			cpu_to_be64(be64_to_cpu(cnt->counters[0].
901						IfRxMulticastFrames) +
902				    be64_to_cpu(cnt->counters[0].
903						IfRxBroadcastFrames));
904
905		break;
906	}
907
908	default:
909		pr_warn("Unsupported attr_id 0x%x\n", attr_id);
910		break;
911	}
912
913}
914
915static void edit_counter(struct mlx4_if_stat_basic *cnt, void *counters,
916			 __be16	attr_id)
917{
918	switch (attr_id) {
919	case IB_PMA_PORT_COUNTERS:
920	{
921		struct ib_pma_portcounters *pma_cnt =
922				(struct ib_pma_portcounters *) counters;
923		pma_cnt->port_xmit_data =
924			cpu_to_be32(be64_to_cpu(
925				    cnt->counters[0].IfTxOctets) >> 2);
926		pma_cnt->port_rcv_data  =
927			cpu_to_be32(be64_to_cpu(
928				    cnt->counters[0].IfRxOctets) >> 2);
929		pma_cnt->port_xmit_packets =
930			cpu_to_be32(be64_to_cpu(cnt->counters[0].IfTxFrames));
931		pma_cnt->port_rcv_packets  =
932			cpu_to_be32(be64_to_cpu(cnt->counters[0].IfRxFrames));
933		break;
934	}
935	case IB_PMA_PORT_COUNTERS_EXT:
936	{
937		struct ib_pma_portcounters_ext *pma_cnt_ext =
938				(struct ib_pma_portcounters_ext *) counters;
939
940		pma_cnt_ext->port_xmit_data =
941			cpu_to_be64((be64_to_cpu(cnt->counters[0].
942						 IfTxOctets) >> 2));
943		pma_cnt_ext->port_rcv_data  =
944			cpu_to_be64((be64_to_cpu(cnt->counters[0].
945						 IfRxOctets) >> 2));
946		pma_cnt_ext->port_xmit_packets = cnt->counters[0].IfTxFrames;
947		pma_cnt_ext->port_rcv_packets  = cnt->counters[0].IfRxFrames;
948		break;
949	}
950	default:
951		pr_warn("Unsupported attr_id 0x%x\n", attr_id);
952		break;
953	}
954}
955
956int mlx4_ib_query_if_stat(struct mlx4_ib_dev *dev, u32 counter_index,
957		       union mlx4_counter *counter, u8 clear)
958{
959	struct mlx4_cmd_mailbox *mailbox;
960	int err;
961	u32 inmod = counter_index | ((clear & 1) << 31);
962
963	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
964	if (IS_ERR(mailbox))
965		return IB_MAD_RESULT_FAILURE;
966
967	err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
968			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
969			   MLX4_CMD_NATIVE);
970	if (!err)
971		memcpy(counter, mailbox->buf, MLX4_IF_STAT_SZ(1));
972
973	mlx4_free_cmd_mailbox(dev->dev, mailbox);
974
975	return err;
976}
977
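/*
 * Process a PMA MAD on an Ethernet (RoCE) port: read the hardware IF_STAT
 * counters and translate them into the PortCounters / PortCountersExt
 * layout expected by the MAD, according to the basic or extended counter mode.
 */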
978static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
979			struct ib_wc *in_wc, struct ib_grh *in_grh,
980			struct ib_mad *in_mad, struct ib_mad *out_mad)
981{
982	struct mlx4_ib_dev *dev = to_mdev(ibdev);
983	int err;
984	u32 counter_index = dev->counters[port_num - 1].counter_index & 0xffff;
985	u8 mode;
986	char				counter_buf[MLX4_IF_STAT_SZ(1)];
987	union  mlx4_counter		*counter = (union mlx4_counter *)
988						   counter_buf;
989
990	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
991		return -EINVAL;
992
993	/* In case of the default counter, IB shares the counter with ETH;
994	 * the status could be -EEXIST or -ENOSPC */
995	if (dev->counters[port_num - 1].status) {
996		memset(out_mad->data, 0, sizeof out_mad->data);
997		err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
998	} else {
999		if (mlx4_ib_query_if_stat(dev, counter_index, counter, 0))
1000			return IB_MAD_RESULT_FAILURE;
1001
1002		memset(out_mad->data, 0, sizeof(out_mad->data));
1003		mode = counter->control.cnt_mode & 0xFF;
1004		err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
1005		switch (mode & 0xf) {
1006		case 0:
1007			edit_counter((void *)counter,
1008				     (void *)(out_mad->data + 40),
1009				     in_mad->mad_hdr.attr_id);
1010			break;
1011		case 1:
1012			edit_counter_ext((void *)counter,
1013					 (void *)(out_mad->data + 40),
1014					 in_mad->mad_hdr.attr_id);
1015			break;
1016		default:
1017			err = IB_MAD_RESULT_FAILURE;
1018		}
1019	}
1020
1021	return err;
1022}
1023
1024int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
1025			struct ib_wc *in_wc, struct ib_grh *in_grh,
1026			struct ib_mad *in_mad, struct ib_mad *out_mad)
1027{
1028	switch (rdma_port_get_link_layer(ibdev, port_num)) {
1029	case IB_LINK_LAYER_INFINIBAND:
1030		return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
1031				      in_grh, in_mad, out_mad);
1032	case IB_LINK_LAYER_ETHERNET:
1033		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
1034					  in_grh, in_mad, out_mad);
1035	default:
1036		return -EINVAL;
1037	}
1038}
1039
1040static void send_handler(struct ib_mad_agent *agent,
1041			 struct ib_mad_send_wc *mad_send_wc)
1042{
1043	if (mad_send_wc->send_buf->context[0])
1044		ib_destroy_ah(mad_send_wc->send_buf->context[0]);
1045	ib_free_send_mad(mad_send_wc->send_buf);
1046}
1047
1048int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
1049{
1050	struct ib_mad_agent *agent;
1051	int p, q;
1052	int ret;
1053	enum rdma_link_layer ll;
1054
1055	for (p = 0; p < dev->num_ports; ++p) {
1056		ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
1057		for (q = 0; q <= 1; ++q) {
1058			if (ll == IB_LINK_LAYER_INFINIBAND) {
1059				agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
1060							      q ? IB_QPT_GSI : IB_QPT_SMI,
1061							      NULL, 0, send_handler,
1062							      NULL, NULL);
1063				if (IS_ERR(agent)) {
1064					ret = PTR_ERR(agent);
1065					goto err;
1066				}
1067				dev->send_agent[p][q] = agent;
1068			} else
1069				dev->send_agent[p][q] = NULL;
1070		}
1071	}
1072
1073	return 0;
1074
1075err:
1076	for (p = 0; p < dev->num_ports; ++p)
1077		for (q = 0; q <= 1; ++q)
1078			if (dev->send_agent[p][q])
1079				ib_unregister_mad_agent(dev->send_agent[p][q]);
1080
1081	return ret;
1082}
1083
1084void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
1085{
1086	struct ib_mad_agent *agent;
1087	int p, q;
1088
1089	for (p = 0; p < dev->num_ports; ++p) {
1090		for (q = 0; q <= 1; ++q) {
1091			agent = dev->send_agent[p][q];
1092			if (agent) {
1093				dev->send_agent[p][q] = NULL;
1094				ib_unregister_mad_agent(agent);
1095			}
1096		}
1097
1098		if (dev->sm_ah[p])
1099			ib_destroy_ah(dev->sm_ah[p]);
1100	}
1101}
1102
1103static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
1104{
1105	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);
1106
1107	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
1108		mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
1109					    MLX4_EQ_PORT_INFO_LID_CHANGE_MASK, 0, 0);
1110}
1111
1112static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
1113{
1114	/* re-configure the alias-guid and mcg's */
1115	if (mlx4_is_master(dev->dev)) {
1116		mlx4_ib_invalidate_all_guid_record(dev, port_num);
1117
1118		if (!dev->sriov.is_going_down) {
1119			mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
1120			mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
1121						    MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK, 0, 0);
1122		}
1123	}
1124	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
1125}
1126
1127static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
1128			      struct mlx4_eqe *eqe)
1129{
1130	__propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe),
1131			    GET_MASK_FROM_EQE(eqe));
1132}
1133
1134static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
1135				      u32 guid_tbl_blk_num, u32 change_bitmap)
1136{
1137	struct ib_smp *in_mad  = NULL;
1138	struct ib_smp *out_mad  = NULL;
1139	u16 i;
1140
1141	if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev))
1142		return;
1143
1144	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
1145	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
1146	if (!in_mad || !out_mad) {
1147		mlx4_ib_warn(&dev->ib_dev, "failed to allocate memory for guid info mads\n");
1148		goto out;
1149	}
1150
1151	guid_tbl_blk_num  *= 4;
1152
1153	for (i = 0; i < 4; i++) {
1154		if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff)))
1155			continue;
1156		memset(in_mad, 0, sizeof *in_mad);
1157		memset(out_mad, 0, sizeof *out_mad);
1158
1159		in_mad->base_version  = 1;
1160		in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
1161		in_mad->class_version = 1;
1162		in_mad->method        = IB_MGMT_METHOD_GET;
1163		in_mad->attr_id       = IB_SMP_ATTR_GUID_INFO;
1164		in_mad->attr_mod      = cpu_to_be32(guid_tbl_blk_num + i);
1165
1166		if (mlx4_MAD_IFC(dev,
1167				 MLX4_MAD_IFC_IGNORE_KEYS | MLX4_MAD_IFC_NET_VIEW,
1168				 port_num, NULL, NULL, in_mad, out_mad)) {
1169			mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n");
1170			goto out;
1171		}
1172
1173		mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i,
1174						    port_num,
1175						    (u8 *)(&((struct ib_smp *)out_mad)->data));
1176		mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i,
1177						     port_num,
1178						     (u8 *)(&((struct ib_smp *)out_mad)->data));
1179	}
1180
1181out:
1182	kfree(in_mad);
1183	kfree(out_mad);
1184	return;
1185}
1186
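/*
 * Work-queue handler for PORT_MGMT_CHANGE EQEs: dispatch on the event
 * subtype (port info, P_Key table, GUID info), update the cached SM AH,
 * raise the matching IB events and propagate them to active slaves.
 */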
1187void handle_port_mgmt_change_event(struct work_struct *work)
1188{
1189	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
1190	struct mlx4_ib_dev *dev = ew->ib_dev;
1191	struct mlx4_eqe *eqe = &(ew->ib_eqe);
1192	u8 port = eqe->event.port_mgmt_change.port;
1193	u32 changed_attr;
1194	u32 tbl_block;
1195	u32 change_bitmap;
1196
1197	switch (eqe->subtype) {
1198	case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
1199		changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);
1200
1201		/* Update the SM ah - This should be done before handling
1202		   the other changed attributes so that MADs can be sent to the SM */
1203		if (changed_attr & MSTR_SM_CHANGE_MASK) {
1204			u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
1205			u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
1206			update_sm_ah(dev, port, lid, sl);
1207			mlx4_ib_dispatch_event(dev, port, IB_EVENT_SM_CHANGE);
1208			if (mlx4_is_master(dev->dev))
1209				mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
1210							    changed_attr & MSTR_SM_CHANGE_MASK,
1211							    lid, sl);
1212		}
1213
1214		/* Check if it is a lid change event */
1215		if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
1216			handle_lid_change_event(dev, port);
1217
1218		/* Generate GUID changed event */
1219		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
1220			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
1221			/*if master, notify all slaves*/
1222			if (mlx4_is_master(dev->dev))
1223				mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
1224							    MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK, 0, 0);
1225		}
1226
1227		if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
1228			handle_client_rereg_event(dev, port);
1229		break;
1230
1231	case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
1232		mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
1233		if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
1234			propagate_pkey_ev(dev, port, eqe);
1235		break;
1236	case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
1237		/* paravirtualized master's guid is guid 0 -- does not change */
1238		if (!mlx4_is_master(dev->dev))
1239			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
1240		/*if master, notify relevant slaves*/
1241		else if (!dev->sriov.is_going_down) {
1242			tbl_block = GET_BLK_PTR_FROM_EQE(eqe);
1243			change_bitmap = GET_MASK_FROM_EQE(eqe);
1244			handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
1245		}
1246		break;
1247	default:
1248		pr_warn("Unsupported subtype 0x%x for "
1249			"Port Management Change event\n", eqe->subtype);
1250	}
1251
1252	kfree(ew);
1253}
1254
1255void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
1256			    enum ib_event_type type)
1257{
1258	struct ib_event event;
1259
1260	event.device		= &dev->ib_dev;
1261	event.element.port_num	= port_num;
1262	event.event		= type;
1263
1264	ib_dispatch_event(&event);
1265}
1266
1267static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
1268{
1269	unsigned long flags;
1270	struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
1271	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
1272	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
1273	if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
1274		queue_work(ctx->wq, &ctx->work);
1275	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
1276}
1277
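/*
 * Repost a receive buffer on a tunnel/proxy QP.  The ring index, a "recv"
 * flag and the proxy QP type are encoded into the work request id so the
 * completion handler can locate the buffer.
 */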
1278static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
1279				  struct mlx4_ib_demux_pv_qp *tun_qp,
1280				  int index)
1281{
1282	struct ib_sge sg_list;
1283	struct ib_recv_wr recv_wr, *bad_recv_wr;
1284	int size;
1285
1286	size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
1287		sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);
1288
1289	sg_list.addr = tun_qp->ring[index].map;
1290	sg_list.length = size;
1291	sg_list.lkey = ctx->mr->lkey;
1292
1293	recv_wr.next = NULL;
1294	recv_wr.sg_list = &sg_list;
1295	recv_wr.num_sge = 1;
1296	recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
1297		MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
1298	ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
1299				      size, DMA_FROM_DEVICE);
1300	return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
1301}
1302
1303static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
1304		int slave, struct ib_sa_mad *sa_mad)
1305{
1306	int ret = 0;
1307
1308	/* dispatch to different sa handlers */
1309	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
1310	case IB_SA_ATTR_MC_MEMBER_REC:
1311		ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad);
1312		break;
1313	default:
1314		break;
1315	}
1316	return ret;
1317}
1318
1319static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
1320{
1321	int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;
1322
1323	return (qpn >= proxy_start && qpn <= proxy_start + 1);
1324}
1325
1326
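/*
 * Send a MAD on behalf of a slave out to the wire through the proxy
 * special QP (SMI or GSI): build an AH from the supplied attributes, copy
 * the MAD into a tx ring slot and post it with the slave's wire P_Key index.
 */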
1327int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
1328			 enum ib_qp_type dest_qpt, u16 pkey_index,
1329			 u32 remote_qpn, u32 qkey, struct ib_ah_attr *attr,
1330			 u8 *s_mac, struct ib_mad *mad)
1331{
1332	struct ib_sge list;
1333	struct ib_send_wr wr, *bad_wr;
1334	struct mlx4_ib_demux_pv_ctx *sqp_ctx;
1335	struct mlx4_ib_demux_pv_qp *sqp;
1336	struct mlx4_mad_snd_buf *sqp_mad;
1337	struct ib_ah *ah;
1338	struct ib_qp *send_qp = NULL;
1339	unsigned wire_tx_ix = 0;
1340	int ret = 0;
1341	u16 wire_pkey_ix;
1342	int src_qpnum;
1343	u8 sgid_index;
1344
1345
1346	sqp_ctx = dev->sriov.sqps[port-1];
1347
1348	/* check if proxy qp created */
1349	if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
1350		return -EAGAIN;
1351
1352	/* QP0 forwarding only for Dom0 */
1353	if (dest_qpt == IB_QPT_SMI && (mlx4_master_func_num(dev->dev) != slave))
1354		return -EINVAL;
1355
1356	if (dest_qpt == IB_QPT_SMI) {
1357		src_qpnum = 0;
1358		sqp = &sqp_ctx->qp[0];
1359		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
1360	} else {
1361		src_qpnum = 1;
1362		sqp = &sqp_ctx->qp[1];
1363		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];
1364	}
1365
1366	send_qp = sqp->qp;
1367
1368	/* create ah */
1369	sgid_index = attr->grh.sgid_index;
1370	attr->grh.sgid_index = 0;
1371	ah = ib_create_ah(sqp_ctx->pd, attr);
1372	if (IS_ERR(ah))
1373		return -ENOMEM;
1374	attr->grh.sgid_index = sgid_index;
1375	to_mah(ah)->av.ib.gid_index = sgid_index;
1376	/* get rid of force-loopback bit */
1377	to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
1378	spin_lock(&sqp->tx_lock);
1379	if (sqp->tx_ix_head - sqp->tx_ix_tail >=
1380	    (MLX4_NUM_TUNNEL_BUFS - 1))
1381		ret = -EAGAIN;
1382	else
1383		wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
1384	spin_unlock(&sqp->tx_lock);
1385	if (ret)
1386		goto out;
1387
1388	sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
1389	if (sqp->tx_ring[wire_tx_ix].ah)
1390		ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
1391	sqp->tx_ring[wire_tx_ix].ah = ah;
1392	ib_dma_sync_single_for_cpu(&dev->ib_dev,
1393				   sqp->tx_ring[wire_tx_ix].buf.map,
1394				   sizeof (struct mlx4_mad_snd_buf),
1395				   DMA_TO_DEVICE);
1396
1397	memcpy(&sqp_mad->payload, mad, sizeof *mad);
1398
1399	ib_dma_sync_single_for_device(&dev->ib_dev,
1400				      sqp->tx_ring[wire_tx_ix].buf.map,
1401				      sizeof (struct mlx4_mad_snd_buf),
1402				      DMA_TO_DEVICE);
1403
1404	list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
1405	list.length = sizeof (struct mlx4_mad_snd_buf);
1406	list.lkey = sqp_ctx->mr->lkey;
1407
1408	wr.wr.ud.ah = ah;
1409	wr.wr.ud.port_num = port;
1410	wr.wr.ud.pkey_index = wire_pkey_ix;
1411	wr.wr.ud.remote_qkey = qkey;
1412	wr.wr.ud.remote_qpn = remote_qpn;
1413	wr.next = NULL;
1414	wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
1415	wr.sg_list = &list;
1416	wr.num_sge = 1;
1417	wr.opcode = IB_WR_SEND;
1418	wr.send_flags = IB_SEND_SIGNALED;
1419	if (s_mac)
1420		memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);
1421
1422
1423	ret = ib_post_send(send_qp, &wr, &bad_wr);
1424out:
1425	if (ret)
1426		ib_destroy_ah(ah);
1427	return ret;
1428}
1429
1430static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
1431{
1432	int gids;
1433	int vfs;
1434
1435	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
1436		return slave;
1437
1438	gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
1439	vfs = dev->dev->num_vfs;
1440
1441	if (slave == 0)
1442		return 0;
1443	if (slave <= gids % vfs)
1444		return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave - 1);
1445
1446	return MLX4_ROCE_PF_GIDS + (gids % vfs) + ((gids / vfs) * (slave - 1));
1447}
1448
1449static int get_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
1450			       struct ib_ah_attr *ah_attr)
1451{
1452	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND) {
1453		ah_attr->grh.sgid_index = slave;
1454		return 0;
1455	}
1456	ah_attr->grh.sgid_index += get_slave_base_gid_ix(dev, slave, port);
1457	return 0;
1458}
1459
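/*
 * Multiplex a tunneled MAD received from a slave: validate that the source
 * proxy QP really belongs to that slave, stamp the slave id into the TID,
 * apply class-specific checks, rebuild the address handle from the tunnel
 * header and send the MAD to the wire.
 */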
1460static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
1461{
1462	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
1463	struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
1464	int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
1465	struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
1466	struct mlx4_ib_ah ah;
1467	struct ib_ah_attr ah_attr;
1468	u8 *slave_id;
1469	int slave;
1470
1471	/* Get slave that sent this packet */
1472	if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
1473	    wc->src_qp >= dev->dev->phys_caps.base_proxy_sqpn + 8 * MLX4_MFUNC_MAX ||
1474	    (wc->src_qp & 0x1) != ctx->port - 1 ||
1475	    wc->src_qp & 0x4) {
1476		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);
1477		return;
1478	}
1479	slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8;
1480	if (slave != ctx->slave) {
1481		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
1482			     "belongs to another slave\n", wc->src_qp);
1483		return;
1484	}
1485	if (slave != mlx4_master_func_num(dev->dev) && !(wc->src_qp & 0x2)) {
1486		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
1487			     "non-master trying to send QP0 packets\n", wc->src_qp);
1488		return;
1489	}
1490
1491	/* Map transaction ID */
1492	ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
1493				   sizeof (struct mlx4_tunnel_mad),
1494				   DMA_FROM_DEVICE);
1495	switch (tunnel->mad.mad_hdr.method) {
1496	case IB_MGMT_METHOD_SET:
1497	case IB_MGMT_METHOD_GET:
1498	case IB_MGMT_METHOD_REPORT:
1499	case IB_SA_METHOD_GET_TABLE:
1500	case IB_SA_METHOD_DELETE:
1501	case IB_SA_METHOD_GET_MULTI:
1502	case IB_SA_METHOD_GET_TRACE_TBL:
1503		slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;
1504		if (*slave_id) {
1505			mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d "
1506				     "class:%d slave:%d\n", *slave_id,
1507				     tunnel->mad.mad_hdr.mgmt_class, slave);
1508			return;
1509		} else
1510			*slave_id = slave;
1511	default:
1512		/* nothing */;
1513	}
1514
1515	/* Class-specific handling */
1516	switch (tunnel->mad.mad_hdr.mgmt_class) {
1517	case IB_MGMT_CLASS_SUBN_ADM:
1518		if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
1519			      (struct ib_sa_mad *) &tunnel->mad))
1520			return;
1521		break;
1522	case IB_MGMT_CLASS_CM:
1523		if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave,
1524			      (struct ib_mad *) &tunnel->mad))
1525			return;
1526		break;
1527	case IB_MGMT_CLASS_DEVICE_MGMT:
1528		if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
1529		    tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
1530			return;
1531		break;
1532	default:
1533		/* Drop unsupported classes for slaves in tunnel mode */
1534		if (slave != mlx4_master_func_num(dev->dev)) {
1535			mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d "
1536				     "for slave:%d\n", tunnel->mad.mad_hdr.mgmt_class, slave);
1537			return;
1538		}
1539	}
1540
1541	/* We are using standard ib_core services to send the mad, so generate a
1542	 * standard address handle by decoding the tunnelled mlx4_ah fields */
1543	memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
1544	ah.ibah.device = ctx->ib_dev;
1545	mlx4_ib_query_ah(&ah.ibah, &ah_attr);
1546	if (ah_attr.ah_flags & IB_AH_GRH)
1547		if (get_real_sgid_index(dev, slave, ctx->port, &ah_attr))
1548			return;
1549	memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
1550	ah_attr.vlan_id = tunnel->hdr.vlan;
1551	/* if the slave has a default vlan, use it */
1552	mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
1553				    &ah_attr.vlan_id, &ah_attr.sl);
1554
1555	mlx4_ib_send_to_wire(dev, slave, ctx->port,
1556			     is_proxy_qp0(dev, wc->src_qp, slave) ?
1557			     IB_QPT_SMI : IB_QPT_GSI,
1558			     be16_to_cpu(tunnel->hdr.pkey_index),
1559			     be32_to_cpu(tunnel->hdr.remote_qpn),
1560			     be32_to_cpu(tunnel->hdr.qkey),
1561			     &ah_attr, wc->smac, &tunnel->mad);
1562}
1563
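/*
 * Allocate and DMA-map the receive and transmit buffer rings for one
 * paravirtualized QP (tunnel QP when is_tun is set, proxy SQP otherwise);
 * the two cases use different rx/tx buffer sizes.
 */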
1564static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
1565				 enum ib_qp_type qp_type, int is_tun)
1566{
1567	int i;
1568	struct mlx4_ib_demux_pv_qp *tun_qp;
1569	int rx_buf_size, tx_buf_size;
1570
1571	if (qp_type > IB_QPT_GSI)
1572		return -EINVAL;
1573
1574	tun_qp = &ctx->qp[qp_type];
1575
1576	tun_qp->ring = kzalloc(sizeof (struct mlx4_ib_buf) * MLX4_NUM_TUNNEL_BUFS,
1577			       GFP_KERNEL);
1578	if (!tun_qp->ring)
1579		return -ENOMEM;
1580
1581	tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
1582				  sizeof (struct mlx4_ib_tun_tx_buf),
1583				  GFP_KERNEL);
1584	if (!tun_qp->tx_ring) {
1585		kfree(tun_qp->ring);
1586		tun_qp->ring = NULL;
1587		return -ENOMEM;
1588	}
1589
1590	if (is_tun) {
1591		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
1592		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
1593	} else {
1594		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
1595		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
1596	}
1597
1598	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1599		tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
1600		if (!tun_qp->ring[i].addr)
1601			goto err;
1602		tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
1603							tun_qp->ring[i].addr,
1604							rx_buf_size,
1605							DMA_FROM_DEVICE);
1606		if (unlikely(ib_dma_mapping_error(ctx->ib_dev,
1607						  tun_qp->ring[i].map))) {
1608			mlx4_ib_warn(ctx->ib_dev, "ib_dma_map_single failed\n");
1609			kfree(tun_qp->ring[i].addr);
1610			goto err;
1611		}
1612	}
1613
1614	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1615		tun_qp->tx_ring[i].buf.addr =
1616			kmalloc(tx_buf_size, GFP_KERNEL);
1617		if (!tun_qp->tx_ring[i].buf.addr)
1618			goto tx_err;
1619		tun_qp->tx_ring[i].buf.map =
1620			ib_dma_map_single(ctx->ib_dev,
1621					  tun_qp->tx_ring[i].buf.addr,
1622					  tx_buf_size,
1623					  DMA_TO_DEVICE);
1624		if (unlikely(ib_dma_mapping_error(ctx->ib_dev,
1625						  tun_qp->tx_ring[i].buf.map))) {
1626			mlx4_ib_warn(ctx->ib_dev, "ib_dma_map_single failed\n");
1627			kfree(tun_qp->tx_ring[i].buf.addr);
1628			goto tx_err;
1629		}
1630		tun_qp->tx_ring[i].ah = NULL;
1631	}
1632	spin_lock_init(&tun_qp->tx_lock);
1633	tun_qp->tx_ix_head = 0;
1634	tun_qp->tx_ix_tail = 0;
1635	tun_qp->proxy_qpt = qp_type;
1636
1637	return 0;
1638
1639tx_err:
1640	while (i > 0) {
1641		--i;
1642		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
1643				    tx_buf_size, DMA_TO_DEVICE);
1644		kfree(tun_qp->tx_ring[i].buf.addr);
1645	}
1646	kfree(tun_qp->tx_ring);
1647	tun_qp->tx_ring = NULL;
1648	i = MLX4_NUM_TUNNEL_BUFS;
1649err:
1650	while (i > 0) {
1651		--i;
1652		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
1653				    rx_buf_size, DMA_FROM_DEVICE);
1654		kfree(tun_qp->ring[i].addr);
1655	}
1656	kfree(tun_qp->ring);
1657	tun_qp->ring = NULL;
1658	return -ENOMEM;
1659}
1660
1661static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
1662				     enum ib_qp_type qp_type, int is_tun)
1663{
1664	int i;
1665	struct mlx4_ib_demux_pv_qp *tun_qp;
1666	int rx_buf_size, tx_buf_size;
1667
1668	if (qp_type > IB_QPT_GSI)
1669		return;
1670
1671	tun_qp = &ctx->qp[qp_type];
1672	if (is_tun) {
1673		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
1674		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
1675	} else {
1676		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
1677		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
1678	}
1679
1680
1681	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1682		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
1683				    rx_buf_size, DMA_FROM_DEVICE);
1684		kfree(tun_qp->ring[i].addr);
1685	}
1686
1687	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1688		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
1689				    tx_buf_size, DMA_TO_DEVICE);
1690		kfree(tun_qp->tx_ring[i].buf.addr);
1691		if (tun_qp->tx_ring[i].ah)
1692			ib_destroy_ah(tun_qp->tx_ring[i].ah);
1693	}
1694	kfree(tun_qp->tx_ring);
1695	kfree(tun_qp->ring);
1696}
1697
1698static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
1699{
1700	struct mlx4_ib_demux_pv_ctx *ctx;
1701	struct mlx4_ib_demux_pv_qp *tun_qp;
1702	struct ib_wc wc;
1703	int ret;
1704	ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
1705	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1706
1707	while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
1708		tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
1709		if (wc.status == IB_WC_SUCCESS) {
1710			switch (wc.opcode) {
1711			case IB_WC_RECV:
1712				mlx4_ib_multiplex_mad(ctx, &wc);
1713				ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
1714							     wc.wr_id &
1715							     (MLX4_NUM_TUNNEL_BUFS - 1));
1716				if (ret)
1717					pr_err("Failed reposting tunnel "
1718					       "buf:%lld\n", (unsigned long long)wc.wr_id);
1719				break;
1720			case IB_WC_SEND:
1721				pr_debug("received tunnel send completion:"
1722					 "wrid=0x%llx, status=0x%x\n",
1723					 (unsigned long long)wc.wr_id, wc.status);
1724				ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
1725					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1726				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1727					= NULL;
1728				spin_lock(&tun_qp->tx_lock);
1729				tun_qp->tx_ix_tail++;
1730				spin_unlock(&tun_qp->tx_lock);
1731
1732				break;
1733			default:
1734				break;
1735			}
1736		} else  {
1737			pr_debug("mlx4_ib: completion error in tunnel: %d."
1738				 " status = %d, wrid = 0x%llx\n",
1739				 ctx->slave, wc.status, (unsigned long long)wc.wr_id);
1740			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
1741				ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
1742					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1743				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1744					= NULL;
1745				spin_lock(&tun_qp->tx_lock);
1746				tun_qp->tx_ix_tail++;
1747				spin_unlock(&tun_qp->tx_lock);
1748			}
1749		}
1750	}
1751}
1752
1753static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
1754{
1755	struct mlx4_ib_demux_pv_ctx *sqp = qp_context;
1756
1757	/* It's worse than that! He's dead, Jim! */
1758	pr_err("Fatal error (%d) on a MAD QP on port %d\n",
1759	       event->event, sqp->port);
1760}
1761
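/*
 * Create a proxy QP (a tunnel QP, or the real special QP for the port
 * owner), transition it through INIT/RTR/RTS and post its initial set of
 * receive buffers.
 */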
1762static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
1763			    enum ib_qp_type qp_type, int create_tun)
1764{
1765	int i, ret;
1766	struct mlx4_ib_demux_pv_qp *tun_qp;
1767	struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
1768	struct ib_qp_attr attr;
1769	int qp_attr_mask_INIT;
1770
1771	if (qp_type > IB_QPT_GSI)
1772		return -EINVAL;
1773
1774	tun_qp = &ctx->qp[qp_type];
1775
1776	memset(&qp_init_attr, 0, sizeof qp_init_attr);
1777	qp_init_attr.init_attr.send_cq = ctx->cq;
1778	qp_init_attr.init_attr.recv_cq = ctx->cq;
1779	qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
1780	qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS;
1781	qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS;
1782	qp_init_attr.init_attr.cap.max_send_sge = 1;
1783	qp_init_attr.init_attr.cap.max_recv_sge = 1;
1784	if (create_tun) {
1785		qp_init_attr.init_attr.qp_type = IB_QPT_UD;
1786		qp_init_attr.init_attr.create_flags = (enum ib_qp_create_flags)MLX4_IB_SRIOV_TUNNEL_QP;
1787		qp_init_attr.port = ctx->port;
1788		qp_init_attr.slave = ctx->slave;
1789		qp_init_attr.proxy_qp_type = qp_type;
1790		qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX |
1791			   IB_QP_QKEY | IB_QP_PORT;
1792	} else {
1793		qp_init_attr.init_attr.qp_type = qp_type;
1794		qp_init_attr.init_attr.create_flags = (enum ib_qp_create_flags)MLX4_IB_SRIOV_SQP;
1795		qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY;
1796	}
1797	qp_init_attr.init_attr.port_num = ctx->port;
1798	qp_init_attr.init_attr.qp_context = ctx;
1799	qp_init_attr.init_attr.event_handler = pv_qp_event_handler;
1800	tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
1801	if (IS_ERR(tun_qp->qp)) {
1802		ret = PTR_ERR(tun_qp->qp);
1803		tun_qp->qp = NULL;
1804		pr_err("Couldn't create %s QP (%d)\n",
1805		       create_tun ? "tunnel" : "special", ret);
1806		return ret;
1807	}
1808
1809	memset(&attr, 0, sizeof attr);
1810	attr.qp_state = IB_QPS_INIT;
1811	ret = 0;
1812	if (create_tun)
1813		ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
1814					      ctx->port, 0xFFFF, &attr.pkey_index);
1815	if (ret || !create_tun)
1816		attr.pkey_index =
1817			to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
1818	attr.qkey = IB_QP1_QKEY;
1819	attr.port_num = ctx->port;
1820	ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
1821	if (ret) {
1822		pr_err("Couldn't change %s qp state to INIT (%d)\n",
1823		       create_tun ? "tunnel" : "special", ret);
1824		goto err_qp;
1825	}
1826	attr.qp_state = IB_QPS_RTR;
1827	ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
1828	if (ret) {
1829		pr_err("Couldn't change %s qp state to RTR (%d)\n",
1830		       create_tun ? "tunnel" : "special", ret);
1831		goto err_qp;
1832	}
1833	attr.qp_state = IB_QPS_RTS;
1834	attr.sq_psn = 0;
1835	ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
1836	if (ret) {
1837		pr_err("Couldn't change %s qp state to RTS (%d)\n",
1838		       create_tun ? "tunnel" : "special", ret);
1839		goto err_qp;
1840	}
1841
1842	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1843		ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
1844		if (ret) {
1845			pr_err("mlx4_ib_post_pv_qp_buf error"
1846			       " (err = %d, i = %d)\n", ret, i);
1847			goto err_qp;
1848		}
1849	}
1850	return 0;
1851
1852err_qp:
1853	ib_destroy_qp(tun_qp->qp);
1854	tun_qp->qp = NULL;
1855	return ret;
1856}
1857
1858/*
1859 * IB MAD completion callback for real SQPs
1860 */
1861static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
1862{
1863	struct mlx4_ib_demux_pv_ctx *ctx;
1864	struct mlx4_ib_demux_pv_qp *sqp;
1865	struct ib_wc wc;
1866	struct ib_grh *grh;
1867	struct ib_mad *mad;
1868
1869	ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
1870	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1871
1872	while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) {
1873		sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
1874		if (wc.status == IB_WC_SUCCESS) {
1875			switch (wc.opcode) {
1876			case IB_WC_SEND:
1877				ib_destroy_ah(sqp->tx_ring[wc.wr_id &
1878					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1879				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1880					= NULL;
1881				spin_lock(&sqp->tx_lock);
1882				sqp->tx_ix_tail++;
1883				spin_unlock(&sqp->tx_lock);
1884				break;
1885			case IB_WC_RECV:
1886				mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
1887						(sqp->ring[wc.wr_id &
1888						(MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload);
1889				grh = &(((struct mlx4_mad_rcv_buf *)
1890						(sqp->ring[wc.wr_id &
1891						(MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh);
1892				mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
1893				if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
1894							   (MLX4_NUM_TUNNEL_BUFS - 1)))
1895					pr_err("Failed reposting SQP "
1896					       "buf:%lld\n", (unsigned long long)wc.wr_id);
1897				break;
1898			default:
1899				BUG_ON(1);
1900				break;
1901			}
1902		} else  {
1903			pr_debug("mlx4_ib: completion error on SQP, slave %d,"
1904				 " status = %d, wrid = 0x%llx\n",
1905				 ctx->slave, wc.status, (unsigned long long)wc.wr_id);
1906			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
1907				ib_destroy_ah(sqp->tx_ring[wc.wr_id &
1908					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1909				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1910					= NULL;
1911				spin_lock(&sqp->tx_lock);
1912				sqp->tx_ix_tail++;
1913				spin_unlock(&sqp->tx_lock);
1914			}
1915		}
1916	}
1917}
1918
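/* Allocate a per-slave, per-port para-virtualization context. */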
1919static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
1920			       struct mlx4_ib_demux_pv_ctx **ret_ctx)
1921{
1922	struct mlx4_ib_demux_pv_ctx *ctx;
1923
1924	*ret_ctx = NULL;
1925	ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
1926	if (!ctx) {
1927		pr_err("failed allocating pv resource context "
1928		       "for port %d, slave %d\n", port, slave);
1929		return -ENOMEM;
1930	}
1931
1932	ctx->ib_dev = &dev->ib_dev;
1933	ctx->port = port;
1934	ctx->slave = slave;
1935	*ret_ctx = ctx;
1936	return 0;
1937}
1938
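/* Free the tunnel context of the given slave/port, if one was allocated. */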
1939static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
1940{
1941	if (dev->sriov.demux[port - 1].tun[slave]) {
1942		kfree(dev->sriov.demux[port - 1].tun[slave]);
1943		dev->sriov.demux[port - 1].tun[slave] = NULL;
1944	}
1945}
1946
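/*
 * Set up everything needed to proxy MADs for one slave on one port: buffer
 * rings, CQ, PD, DMA MR, the QP0/QP1 proxies (QP0 only on the port owner
 * and only when the link layer is IB) and the completion worker.
 */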
1947static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
1948			       int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
1949{
1950	int ret, cq_size;
1951
1952	if (ctx->state != DEMUX_PV_STATE_DOWN)
1953		return -EEXIST;
1954
1955	ctx->state = DEMUX_PV_STATE_STARTING;
1956	/* have QP0 only on port owner, and only if link layer is IB */
1957	if (ctx->slave == mlx4_master_func_num(to_mdev(ctx->ib_dev)->dev) &&
1958	    rdma_port_get_link_layer(ibdev, ctx->port) == IB_LINK_LAYER_INFINIBAND)
1959		ctx->has_smi = 1;
1960
1961	if (ctx->has_smi) {
1962		ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun);
1963		if (ret) {
1964			pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret);
1965			goto err_out;
1966		}
1967	}
1968
1969	ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun);
1970	if (ret) {
1971		pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret);
1972		goto err_out_qp0;
1973	}
1974
1975	cq_size = 2 * MLX4_NUM_TUNNEL_BUFS;
1976	if (ctx->has_smi)
1977		cq_size *= 2;
1978
1979	ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
1980			       NULL, ctx, cq_size, 0);
1981	if (IS_ERR(ctx->cq)) {
1982		ret = PTR_ERR(ctx->cq);
1983		pr_err("Couldn't create tunnel CQ (%d)\n", ret);
1984		goto err_buf;
1985	}
1986
1987	ctx->pd = ib_alloc_pd(ctx->ib_dev);
1988	if (IS_ERR(ctx->pd)) {
1989		ret = PTR_ERR(ctx->pd);
1990		pr_err("Couldn't create tunnel PD (%d)\n", ret);
1991		goto err_cq;
1992	}
1993
1994	ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE);
1995	if (IS_ERR(ctx->mr)) {
1996		ret = PTR_ERR(ctx->mr);
1997		pr_err("Couldn't get tunnel DMA MR (%d)\n", ret);
1998		goto err_pd;
1999	}
2000
2001	if (ctx->has_smi) {
2002		ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
2003		if (ret) {
2004			pr_err("Couldn't create %s QP0 (%d)\n",
2005			       create_tun ? "tunnel for" : "",  ret);
2006			goto err_mr;
2007		}
2008	}
2009
2010	ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun);
2011	if (ret) {
2012		pr_err("Couldn't create %s QP1 (%d)\n",
2013		       create_tun ? "tunnel for" : "",  ret);
2014		goto err_qp0;
2015	}
2016
2017	if (create_tun)
2018		INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
2019	else
2020		INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
2021
2022	ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
2023
2024	ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
2025	if (ret) {
2026		pr_err("Couldn't arm tunnel cq (%d)\n", ret);
2027		goto err_wq;
2028	}
2029	ctx->state = DEMUX_PV_STATE_ACTIVE;
2030	return 0;
2031
2032err_wq:
2033	ctx->wq = NULL;
2034	ib_destroy_qp(ctx->qp[1].qp);
2035	ctx->qp[1].qp = NULL;
2036
2038err_qp0:
2039	if (ctx->has_smi)
2040		ib_destroy_qp(ctx->qp[0].qp);
2041	ctx->qp[0].qp = NULL;
2042
2043err_mr:
2044	ib_dereg_mr(ctx->mr);
2045	ctx->mr = NULL;
2046
2047err_pd:
2048	ib_dealloc_pd(ctx->pd);
2049	ctx->pd = NULL;
2050
2051err_cq:
2052	ib_destroy_cq(ctx->cq);
2053	ctx->cq = NULL;
2054
2055err_buf:
2056	mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun);
2057
2058err_out_qp0:
2059	if (ctx->has_smi)
2060		mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun);
2061err_out:
2062	ctx->state = DEMUX_PV_STATE_DOWN;
2063	return ret;
2064}
2065
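/*
 * Tear down the resources created by create_pv_resources(): QPs, buffer
 * rings, MR, PD and CQ.  If 'flush' is set, pending work on the context's
 * workqueue is flushed first.
 */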
2066static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
2067				 struct mlx4_ib_demux_pv_ctx *ctx, int flush)
2068{
2069	if (!ctx)
2070		return;
2071	if (ctx->state > DEMUX_PV_STATE_DOWN) {
2072		ctx->state = DEMUX_PV_STATE_DOWNING;
2073		if (flush)
2074			flush_workqueue(ctx->wq);
2075		if (ctx->has_smi) {
2076			ib_destroy_qp(ctx->qp[0].qp);
2077			ctx->qp[0].qp = NULL;
2078			mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1);
2079		}
2080		ib_destroy_qp(ctx->qp[1].qp);
2081		ctx->qp[1].qp = NULL;
2082		mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
2083		ib_dereg_mr(ctx->mr);
2084		ctx->mr = NULL;
2085		ib_dealloc_pd(ctx->pd);
2086		ctx->pd = NULL;
2087		ib_destroy_cq(ctx->cq);
2088		ctx->cq = NULL;
2089		ctx->state = DEMUX_PV_STATE_DOWN;
2090	}
2091}
2092
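/*
 * Bring the tunnel QP resources of one slave/port up or down according to
 * 'do_init'; for the master, the real special QP resources are handled as
 * well.
 */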
2093static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
2094				  int port, int do_init)
2095{
2096	int ret = 0;
2097
2098	if (!do_init) {
2099		clean_vf_mcast(&dev->sriov.demux[port - 1], slave);
2100		/* for master, destroy real sqp resources */
2101		if (slave == mlx4_master_func_num(dev->dev))
2102			destroy_pv_resources(dev, slave, port,
2103					     dev->sriov.sqps[port - 1], 1);
2104		/* destroy the tunnel qp resources */
2105		destroy_pv_resources(dev, slave, port,
2106				     dev->sriov.demux[port - 1].tun[slave], 1);
2107		return 0;
2108	}
2109
2110	/* create the tunnel qp resources */
2111	ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
2112				  dev->sriov.demux[port - 1].tun[slave]);
2113
2114	/* for master, create the real sqp resources */
2115	if (!ret && slave == mlx4_master_func_num(dev->dev))
2116		ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
2117					  dev->sriov.sqps[port - 1]);
2118	return ret;
2119}
2120
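/* Deferred-work wrapper around mlx4_ib_tunnels_update(). */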
2121void mlx4_ib_tunnels_update_work(struct work_struct *work)
2122{
2123	struct mlx4_ib_demux_work *dmxw;
2124
2125	dmxw = container_of(work, struct mlx4_ib_demux_work, work);
2126	mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port,
2127			       dmxw->do_init);
2128	kfree(dmxw);
2129	return;
2130}
2131
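/*
 * Initialize the per-port demux context: the per-slave tunnel array, the
 * multicast para-virtualization state and the tunnelling and up/down
 * workqueues.
 */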
2132static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
2133				       struct mlx4_ib_demux_ctx *ctx,
2134				       int port)
2135{
2136	char name[12];
2137	int ret = 0;
2138	int i;
2139
2140	ctx->tun = kcalloc(dev->dev->caps.sqp_demux,
2141			   sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL);
2142	if (!ctx->tun)
2143		return -ENOMEM;
2144
2145	ctx->dev = dev;
2146	ctx->port = port;
2147	ctx->ib_dev = &dev->ib_dev;
2148
2149	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2150		ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
2151		if (ret) {
2152			ret = -ENOMEM;
2153			goto err_mcg;
2154		}
2155	}
2156
2157	ret = mlx4_ib_mcg_port_init(ctx);
2158	if (ret) {
2159		pr_err("Failed initializing mcg para-virt (%d)\n", ret);
2160		goto err_mcg;
2161	}
2162
2163	snprintf(name, sizeof name, "mlx4_ibt%d", port);
2164	ctx->wq = create_singlethread_workqueue(name);
2165	if (!ctx->wq) {
2166		pr_err("Failed to create tunnelling WQ for port %d\n", port);
2167		ret = -ENOMEM;
2168		goto err_wq;
2169	}
2170
2171	snprintf(name, sizeof name, "mlx4_ibud%d", port);
2172	ctx->ud_wq = create_singlethread_workqueue(name);
2173	if (!ctx->ud_wq) {
2174		pr_err("Failed to create up/down WQ for port %d\n", port);
2175		ret = -ENOMEM;
2176		goto err_udwq;
2177	}
2178
2179	return 0;
2180
2181err_udwq:
2182	destroy_workqueue(ctx->wq);
2183	ctx->wq = NULL;
2184
2185err_wq:
2186	mlx4_ib_mcg_port_cleanup(ctx, 1);
2187err_mcg:
2188	for (i = 0; i < dev->dev->caps.sqp_demux; i++)
2189		free_pv_object(dev, i, port);
2190	kfree(ctx->tun);
2191	ctx->tun = NULL;
2192	return ret;
2193}
2194
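/* Destroy the real special QP (QP0/QP1) resources held for a port. */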
2195static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
2196{
2197	if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) {
2198		sqp_ctx->state = DEMUX_PV_STATE_DOWNING;
2199		flush_workqueue(sqp_ctx->wq);
2200		if (sqp_ctx->has_smi) {
2201			ib_destroy_qp(sqp_ctx->qp[0].qp);
2202			sqp_ctx->qp[0].qp = NULL;
2203			mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);
2204		}
2205		ib_destroy_qp(sqp_ctx->qp[1].qp);
2206		sqp_ctx->qp[1].qp = NULL;
2207		mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
2208		ib_dereg_mr(sqp_ctx->mr);
2209		sqp_ctx->mr = NULL;
2210		ib_dealloc_pd(sqp_ctx->pd);
2211		sqp_ctx->pd = NULL;
2212		ib_destroy_cq(sqp_ctx->cq);
2213		sqp_ctx->cq = NULL;
2214		sqp_ctx->state = DEMUX_PV_STATE_DOWN;
2215	}
2216}
2217
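/*
 * Destroy a per-port demux context: multicast state, every slave's tunnel
 * resources and pv objects, and the port's workqueues.
 */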
2218static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
2219{
2220	int i;
2221	if (ctx) {
2222		struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
2223		mlx4_ib_mcg_port_cleanup(ctx, 1);
2224		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2225			if (!ctx->tun[i])
2226				continue;
2227			if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
2228				ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
2229		}
2230		flush_workqueue(ctx->wq);
2231		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2232			destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
2233			free_pv_object(dev, i, ctx->port);
2234		}
2235		kfree(ctx->tun);
2236		destroy_workqueue(ctx->ud_wq);
2237		destroy_workqueue(ctx->wq);
2238	}
2239}
2240
2241static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)
2242{
2243	int i;
2244
2245	if (!mlx4_is_master(dev->dev))
2246		return;
2247	/* initialize or tear down tunnel QPs for the master */
2248	for (i = 0; i < dev->dev->caps.num_ports; i++)
2249		mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init);
2250	return;
2251}
2252
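/*
 * Initialize SR-IOV IB support.  Every function sets up CM
 * para-virtualization; the master additionally assigns node GUIDs to the
 * slaves, starts the alias-GUID service, registers sysfs entries and
 * creates a demux context and a special-QP context for each port.
 */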
2253int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
2254{
2255	int i = 0;
2256	int err;
2257
2258	if (!mlx4_is_mfunc(dev->dev))
2259		return 0;
2260
2261	dev->sriov.is_going_down = 0;
2262	spin_lock_init(&dev->sriov.going_down_lock);
2263	mlx4_ib_cm_paravirt_init(dev);
2264
2265	mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");
2266
2267	if (mlx4_is_slave(dev->dev)) {
2268		mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");
2269		return 0;
2270	}
2271
2272	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2273		if (i == mlx4_master_func_num(dev->dev))
2274			mlx4_put_slave_node_guid(dev->dev, i, dev->ib_dev.node_guid);
2275		else
2276			mlx4_put_slave_node_guid(dev->dev, i, mlx4_ib_gen_node_guid());
2277	}
2278
2279	err = mlx4_ib_init_alias_guid_service(dev);
2280	if (err) {
2281		mlx4_ib_warn(&dev->ib_dev, "Failed init alias guid process.\n");
2282		goto paravirt_err;
2283	}
2284	err = mlx4_ib_device_register_sysfs(dev);
2285	if (err) {
2286		mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n");
2287		goto sysfs_err;
2288	}
2289
2290	mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
2291		     dev->dev->caps.sqp_demux);
2292	for (i = 0; i < dev->num_ports; i++) {
2293		union ib_gid gid;
2294		err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
2295		if (err)
2296			goto demux_err;
2297		dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
2298		err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
2299				      &dev->sriov.sqps[i]);
2300		if (err)
2301			goto demux_err;
2302		err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
2303		if (err)
2304			goto demux_err;
2305	}
2306	mlx4_ib_master_tunnels(dev, 1);
2307	return 0;
2308
2309demux_err:
2310	while (--i >= 0) {
2311		free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
2312		mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
2313	}
2315	mlx4_ib_device_unregister_sysfs(dev);
2316
2317sysfs_err:
2318	mlx4_ib_destroy_alias_guid_service(dev);
2319
2320paravirt_err:
2321	mlx4_ib_cm_paravirt_clean(dev, -1);
2322
2323	return err;
2324}
2325
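/*
 * Tear down SR-IOV IB support: mark the device as going down and, on the
 * master, destroy the per-port special-QP and demux contexts, the CM
 * para-virtualization state, the alias-GUID service and the sysfs entries.
 */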
2326void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
2327{
2328	int i;
2329	unsigned long flags;
2330
2331	if (!mlx4_is_mfunc(dev->dev))
2332		return;
2333
2334	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
2335	dev->sriov.is_going_down = 1;
2336	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
2337	if (mlx4_is_master(dev->dev)) {
2338		for (i = 0; i < dev->num_ports; i++) {
2339			flush_workqueue(dev->sriov.demux[i].ud_wq);
2340			mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
2341			kfree(dev->sriov.sqps[i]);
2342			dev->sriov.sqps[i] = NULL;
2343			mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
2344		}
2345
2346		mlx4_ib_cm_paravirt_clean(dev, -1);
2347		mlx4_ib_destroy_alias_guid_service(dev);
2348		mlx4_ib_device_unregister_sysfs(dev);
2349	}
2350}
2351