mlx5_ib_main.c revision 337098
/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c 337098 2018-08-02 08:36:51Z hselasky $
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/fs.h>
#undef inode
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <dev/mlx5/port.h>
#include <dev/mlx5/vport.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <dev/mlx5/fs.h>
#include "mlx5_ib.h"

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "3.4.1-BETA"
#define DRIVER_RELDATE	"October 2017"

MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEPEND(mlx5ib, linuxkpi, 1, 1, 1);
MODULE_DEPEND(mlx5ib, mlx5, 1, 1, 1);
MODULE_DEPEND(mlx5ib, ibcore, 1, 1, 1);
MODULE_VERSION(mlx5ib, 1);

static int deprecated_prof_sel = 2;
module_param_named(prof_sel, deprecated_prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Deprecated here. Moved to module mlx5_core");

static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION " (" DRIVER_RELDATE ")\n";

enum {
	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
	switch (port_type_cap) {
	case MLX5_CAP_PORT_TYPE_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case MLX5_CAP_PORT_TYPE_ETH:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

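/*
 * Check whether a network interface is an Ethernet interface created by
 * the driver named "dname" whose software context points back at the
 * given mlx5 core device.
 */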
static bool mlx5_netdev_match(struct net_device *ndev,
			      struct mlx5_core_dev *mdev,
			      const char *dname)
{
	return ndev->if_type == IFT_ETHER &&
	  ndev->if_dname != NULL &&
	  strcmp(ndev->if_dname, dname) == 0 &&
	  ndev->if_softc != NULL &&
	  *(struct mlx5_core_dev **)ndev->if_softc == mdev;
}

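/*
 * Netdev notifier callback: track registration of the matching mlx5en
 * interface and translate its link up/down events into IB port events.
 */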
static int mlx5_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
						 roce.nb);

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
		write_lock(&ibdev->roce.netdev_lock);
		/* check if network interface belongs to mlx5en */
		if (mlx5_netdev_match(ndev, ibdev->mdev, "mce"))
			ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
					     NULL : ndev;
		write_unlock(&ibdev->roce.netdev_lock);
		break;

	case NETDEV_UP:
	case NETDEV_DOWN: {
		struct net_device *upper = NULL;

		if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
		    && ibdev->ib_active) {
			struct ib_event ibev = {0};

			ibev.device = &ibdev->ib_dev;
			ibev.event = (event == NETDEV_UP) ?
				     IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
			ibev.element.port_num = 1;
			ib_dispatch_event(&ibev);
		}
		break;
	}

	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
					     u8 port_num)
{
	struct mlx5_ib_dev *ibdev = to_mdev(device);
	struct net_device *ndev;

	/* Ensure ndev does not disappear before we invoke dev_hold()
	 */
	read_lock(&ibdev->roce.netdev_lock);
	ndev = ibdev->roce.netdev;
	if (ndev)
		dev_hold(ndev);
	read_unlock(&ibdev->roce.netdev_lock);

	return ndev;
}

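/*
 * Translate the operational Ethernet protocol (PTYS eth_proto_oper) into
 * the closest equivalent IB active speed and width reported to consumers.
 */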
static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
				    u8 *active_width)
{
	switch (eth_proto_oper) {
	case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
	case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

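/*
 * Fill out ib_port_attr for a RoCE (Ethernet) port: speed and width are
 * derived from the operational protocol, state and MTU from the attached
 * net device, and the remaining limits from the device capabilities.
 */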
static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
				struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct net_device *ndev;
	enum ib_mtu ndev_ib_mtu;
	u16 qkey_viol_cntr;
	u32 eth_prot_oper;
	int err;

	memset(props, 0, sizeof(*props));

	/* Possible bad flows are checked before filling out props so in case
	 * of an error it will still be zeroed out.
	 */
	err = mlx5_query_port_eth_proto_oper(dev->mdev, &eth_prot_oper, port_num);
	if (err)
		return err;

	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
				 &props->active_width);

	props->port_cap_flags  |= IB_PORT_CM_SUP;
	props->port_cap_flags  |= IB_PORT_IP_BASED_GIDS;

	props->gid_tbl_len      = MLX5_CAP_ROCE(dev->mdev,
						roce_address_table_size);
	props->max_mtu          = IB_MTU_4096;
	props->max_msg_sz       = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
	props->pkey_tbl_len     = 1;
	props->state            = IB_PORT_DOWN;
	props->phys_state       = 3;

	mlx5_query_nic_vport_qkey_viol_cntr(dev->mdev, &qkey_viol_cntr);
	props->qkey_viol_cntr = qkey_viol_cntr;

	ndev = mlx5_ib_get_netdev(device, port_num);
	if (!ndev)
		return 0;

	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
		props->state      = IB_PORT_ACTIVE;
		props->phys_state = 5;
	}

	ndev_ib_mtu = iboe_get_mtu(ndev->if_mtu);

	dev_put(ndev);

	props->active_mtu	= min(props->max_mtu, ndev_ib_mtu);
	return 0;
}

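/*
 * Convert an IB GID and its attributes (source MAC, VLAN, RoCE version and
 * L3 type) into the firmware roce_addr_layout used by SET_ROCE_ADDRESS.
 */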
static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid,
				     const struct ib_gid_attr *attr,
				     void *mlx5_addr)
{
#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
	char *mlx5_addr_l3_addr	= MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
					       source_l3_address);
	void *mlx5_addr_mac	= MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
					       source_mac_47_32);
	u16 vlan_id;

	if (!gid)
		return;
	ether_addr_copy(mlx5_addr_mac, IF_LLADDR(attr->ndev));

	vlan_id = rdma_vlan_dev_vlan_id(attr->ndev);
	if (vlan_id != 0xffff) {
		MLX5_SET_RA(mlx5_addr, vlan_valid, 1);
		MLX5_SET_RA(mlx5_addr, vlan_id, vlan_id);
	}

	switch (attr->gid_type) {
	case IB_GID_TYPE_IB:
		MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1);
		break;
	case IB_GID_TYPE_ROCE_UDP_ENCAP:
		MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2);
		break;

	default:
		WARN_ON(true);
	}

	if (attr->gid_type != IB_GID_TYPE_IB) {
		if (ipv6_addr_v4mapped((void *)gid))
			MLX5_SET_RA(mlx5_addr, roce_l3_type,
				    MLX5_ROCE_L3_TYPE_IPV4);
		else
			MLX5_SET_RA(mlx5_addr, roce_l3_type,
				    MLX5_ROCE_L3_TYPE_IPV6);
	}

	if ((attr->gid_type == IB_GID_TYPE_IB) ||
	    !ipv6_addr_v4mapped((void *)gid))
		memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
	else
		memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
}

static int set_roce_addr(struct ib_device *device, u8 port_num,
			 unsigned int index,
			 const union ib_gid *gid,
			 const struct ib_gid_attr *attr)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	u32  in[MLX5_ST_SZ_DW(set_roce_address_in)]  = {0};
	u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
	void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);

	if (ll != IB_LINK_LAYER_ETHERNET)
		return -EINVAL;

	ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);

	MLX5_SET(set_roce_address_in, in, roce_address_index, index);
	MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
			   unsigned int index, const union ib_gid *gid,
			   const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(device, port_num, index, gid, attr);
}

static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num,
			   unsigned int index, __always_unused void **context)
{
	return set_roce_addr(device, port_num, index, NULL, NULL);
}

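/*
 * For RoCE v2 GIDs, return (in network byte order) the device's minimum
 * RoCE source UDP port; return 0 for other GID types or on lookup failure.
 */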
__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
			       int index)
{
	struct ib_gid_attr attr;
	union ib_gid gid;

	if (ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr))
		return 0;

	if (!attr.ndev)
		return 0;

	dev_put(attr.ndev);

	if (attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		return 0;

	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}

int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
			   int index, enum ib_gid_type *gid_type)
{
	struct ib_gid_attr attr;
	union ib_gid gid;
	int ret;

	ret = ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr);
	if (ret)
		return ret;

	if (!attr.ndev)
		return -ENODEV;

	dev_put(attr.ndev);

	*gid_type = attr.gid_type;

	return 0;
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
	return 0;
}

enum {
	MLX5_VPORT_ACCESS_METHOD_MAD,
	MLX5_VPORT_ACCESS_METHOD_HCA,
	MLX5_VPORT_ACCESS_METHOD_NIC,
};

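/*
 * Select how vport attributes are queried: through MADs on IB ports
 * without IB virtualization support, through the NIC vport context on
 * Ethernet ports, and through the HCA vport context otherwise.
 */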
static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
		return MLX5_VPORT_ACCESS_METHOD_MAD;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET)
		return MLX5_VPORT_ACCESS_METHOD_NIC;

	return MLX5_VPORT_ACCESS_METHOD_HCA;
}

static void get_atomic_caps(struct mlx5_ib_dev *dev,
			    struct ib_device_attr *props)
{
	u8 tmp;
	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
	u8 atomic_req_8B_endianness_mode =
		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianess_mode);

	/* Check if HW supports the standard 8-byte atomic operations and is
	 * capable of responding in host endianness.
	 */
	tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
	if (((atomic_operations & tmp) == tmp) &&
	    (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
	    (atomic_req_8B_endianness_mode)) {
		props->atomic_cap = IB_ATOMIC_HCA;
	} else {
		props->atomic_cap = IB_ATOMIC_NONE;
	}
}

static int mlx5_query_system_image_guid(struct ib_device *ibdev,
					__be64 *sys_image_guid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_system_image_guid(ibdev,
							    sys_image_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*sys_image_guid = cpu_to_be64(tmp);

	return err;

}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
				u16 *max_pkeys)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
						pkey_table_size));
		return 0;

	default:
		return -EINVAL;
	}
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
				u32 *vendor_id)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

	default:
		return -EINVAL;
	}
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
				__be64 *node_guid)
{
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_node_guid(dev, node_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*node_guid = cpu_to_be64(tmp);

	return err;
}

struct mlx5_reg_node_desc {
	u8	desc[IB_DEVICE_NODE_DESC_MAX];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct mlx5_reg_node_desc in;

	if (mlx5_use_mad_ifc(dev))
		return mlx5_query_mad_ifc_node_desc(dev, node_desc);

	memset(&in, 0, sizeof(in));

	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
				    sizeof(struct mlx5_reg_node_desc),
				    MLX5_REG_NODE_DESC, 0, 0);
}

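/*
 * ib_device query_device() entry point: gather device attributes from the
 * firmware capability pages and, for user calls with a large enough output
 * buffer, append extended TSO/RSS capabilities to the uverbs response.
 */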
static int mlx5_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	int err = -ENOMEM;
	int max_rq_sg;
	int max_sq_sg;
	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
	struct mlx5_ib_query_device_resp resp = {};
	size_t resp_len;
	u64 max_tso;

	resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
	if (uhw->outlen && uhw->outlen < resp_len)
		return -EINVAL;
	else
		resp.response_length = resp_len;

	if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	err = mlx5_query_system_image_guid(ibdev,
					   &props->sys_image_guid);
	if (err)
		return err;

	err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
	if (err)
		return err;

	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
	if (err)
		return err;

	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
		(fw_rev_min(dev->mdev) << 16) |
		fw_rev_sub(dev->mdev);
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT		|
		IB_DEVICE_SYS_IMAGE_GUID		|
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (MLX5_CAP_GEN(mdev, pkv))
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, qkv))
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, apm))
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (MLX5_CAP_GEN(mdev, xrc))
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (MLX5_CAP_GEN(mdev, imaicl)) {
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
					   IB_DEVICE_MEM_WINDOW_TYPE_2B;
		props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
		/* We support 'Gappy' memory registration too */
		props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
	}
	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (MLX5_CAP_GEN(mdev, sho)) {
		props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
		/* At this stage no support for signature handover */
		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
				      IB_PROT_T10DIF_TYPE_2 |
				      IB_PROT_T10DIF_TYPE_3;
		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
				       IB_GUARD_T10DIF_CSUM;
	}
	if (MLX5_CAP_GEN(mdev, block_lb_mc))
		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads)) {
		if (MLX5_CAP_ETH(mdev, csum_cap))
			props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

		if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
			if (max_tso) {
				resp.tso_caps.max_tso = 1 << max_tso;
				resp.tso_caps.supported_qpts |=
					1 << IB_QPT_RAW_PACKET;
				resp.response_length += sizeof(resp.tso_caps);
			}
		}

		if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
			resp.rss_caps.rx_hash_function =
						MLX5_RX_HASH_FUNC_TOEPLITZ;
			resp.rss_caps.rx_hash_fields_mask =
						MLX5_RX_HASH_SRC_IPV4 |
						MLX5_RX_HASH_DST_IPV4 |
						MLX5_RX_HASH_SRC_IPV6 |
						MLX5_RX_HASH_DST_IPV6 |
						MLX5_RX_HASH_SRC_PORT_TCP |
						MLX5_RX_HASH_DST_PORT_TCP |
						MLX5_RX_HASH_SRC_PORT_UDP |
						MLX5_RX_HASH_DST_PORT_UDP;
			resp.response_length += sizeof(resp.rss_caps);
		}
	} else {
		if (field_avail(typeof(resp), tso_caps, uhw->outlen))
			resp.response_length += sizeof(resp.tso_caps);
		if (field_avail(typeof(resp), rss_caps, uhw->outlen))
			resp.response_length += sizeof(resp.rss_caps);
	}

	if (MLX5_CAP_GEN(mdev, ipoib_ipoib_offloads)) {
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	}

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
	    MLX5_CAP_ETH(dev->mdev, scatter_fcs))
		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;

	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	props->vendor_part_id	   = mdev->pdev->device;
	props->hw_ver		   = mdev->pdev->revision;

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = ~(min_page_size - 1);
	props->max_qp		   = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
	props->max_qp_wr	   = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
	max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
		     sizeof(struct mlx5_wqe_data_seg);
	max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
		     sizeof(struct mlx5_wqe_ctrl_seg)) /
		     sizeof(struct mlx5_wqe_data_seg);
	props->max_sge = min(max_rq_sg, max_sq_sg);
	props->max_sge_rd	   = MLX5_MAX_SGE_RD;
	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
	props->max_mr		   = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
	props->max_pd		   = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
	props->max_qp_rd_atom	   = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
	props->max_srq		   = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
	props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq_sge	   = max_rq_sg - 1;
	props->max_fast_reg_page_list_len =
		1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
	get_atomic_caps(dev, props);
	props->masked_atomic_cap   = IB_ATOMIC_NONE;
	props->max_mcast_grp	   = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(mdev, pg))
		props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
	props->odp_caps = dev->odp_caps;
#endif

	if (MLX5_CAP_GEN(mdev, cd))
		props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;

	if (!mlx5_core_is_pf(mdev))
		props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET) {
		props->rss_caps.max_rwq_indirection_tables =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
		props->rss_caps.max_rwq_indirection_table_size =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
		props->max_wq_type_rq =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);

		if (err)
			return err;
	}

	return 0;
}

enum mlx5_ib_width {
	MLX5_IB_WIDTH_1X	= 1 << 0,
	MLX5_IB_WIDTH_2X	= 1 << 1,
	MLX5_IB_WIDTH_4X	= 1 << 2,
	MLX5_IB_WIDTH_8X	= 1 << 3,
	MLX5_IB_WIDTH_12X	= 1 << 4
};

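/*
 * Map the hardware link width bitmask (MLX5_IB_WIDTH_*) onto the verbs
 * IB_WIDTH_* encoding; 2X has no IB spec equivalent and is rejected.
 */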
static int translate_active_width(struct ib_device *ibdev, u8 active_width,
				  u8 *ib_width)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	int err = 0;

	if (active_width & MLX5_IB_WIDTH_1X) {
		*ib_width = IB_WIDTH_1X;
	} else if (active_width & MLX5_IB_WIDTH_2X) {
		mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
			    (int)active_width);
		err = -EINVAL;
	} else if (active_width & MLX5_IB_WIDTH_4X) {
		*ib_width = IB_WIDTH_4X;
	} else if (active_width & MLX5_IB_WIDTH_8X) {
		*ib_width = IB_WIDTH_8X;
	} else if (active_width & MLX5_IB_WIDTH_12X) {
		*ib_width = IB_WIDTH_12X;
	} else {
		mlx5_ib_dbg(dev, "Invalid active_width %d\n",
			    (int)active_width);
		err = -EINVAL;
	}

	return err;
}

enum ib_max_vl_num {
	__IB_MAX_VL_0		= 1,
	__IB_MAX_VL_0_1		= 2,
	__IB_MAX_VL_0_3		= 3,
	__IB_MAX_VL_0_7		= 4,
	__IB_MAX_VL_0_14	= 5,
};

enum mlx5_vl_hw_cap {
	MLX5_VL_HW_0	= 1,
	MLX5_VL_HW_0_1	= 2,
	MLX5_VL_HW_0_2	= 3,
	MLX5_VL_HW_0_3	= 4,
	MLX5_VL_HW_0_4	= 5,
	MLX5_VL_HW_0_5	= 6,
	MLX5_VL_HW_0_6	= 7,
	MLX5_VL_HW_0_7	= 8,
	MLX5_VL_HW_0_14	= 15
};

static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
				u8 *max_vl_num)
{
	switch (vl_hw_cap) {
	case MLX5_VL_HW_0:
		*max_vl_num = __IB_MAX_VL_0;
		break;
	case MLX5_VL_HW_0_1:
		*max_vl_num = __IB_MAX_VL_0_1;
		break;
	case MLX5_VL_HW_0_3:
		*max_vl_num = __IB_MAX_VL_0_3;
		break;
	case MLX5_VL_HW_0_7:
		*max_vl_num = __IB_MAX_VL_0_7;
		break;
	case MLX5_VL_HW_0_14:
		*max_vl_num = __IB_MAX_VL_0_14;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

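/*
 * Query an InfiniBand port through the HCA vport context plus the PTYS,
 * PMTU and PVLC access registers, filling out the verbs ib_port_attr.
 */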
static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	u32 *rep;
	int replen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	struct mlx5_ptys_reg *ptys;
	struct mlx5_pmtu_reg *pmtu;
	struct mlx5_pvlc_reg pvlc;
	void *ctx;
	int err;

	rep = mlx5_vzalloc(replen);
	ptys = kzalloc(sizeof(*ptys), GFP_KERNEL);
	pmtu = kzalloc(sizeof(*pmtu), GFP_KERNEL);
	if (!rep || !ptys || !pmtu) {
		err = -ENOMEM;
		goto out;
	}

	memset(props, 0, sizeof(*props));

	err = mlx5_query_hca_vport_context(mdev, port, 0, rep, replen);
	if (err)
		goto out;

	ctx = MLX5_ADDR_OF(query_hca_vport_context_out, rep, hca_vport_context);

	props->lid		= MLX5_GET(hca_vport_context, ctx, lid);
	props->lmc		= MLX5_GET(hca_vport_context, ctx, lmc);
	props->sm_lid		= MLX5_GET(hca_vport_context, ctx, sm_lid);
	props->sm_sl		= MLX5_GET(hca_vport_context, ctx, sm_sl);
	props->state		= MLX5_GET(hca_vport_context, ctx, vport_state);
	props->phys_state	= MLX5_GET(hca_vport_context, ctx,
					port_physical_state);
	props->port_cap_flags	= MLX5_GET(hca_vport_context, ctx, cap_mask1);
	props->gid_tbl_len	= mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len	= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
	props->bad_pkey_cntr	= MLX5_GET(hca_vport_context, ctx,
					pkey_violation_counter);
	props->qkey_viol_cntr	= MLX5_GET(hca_vport_context, ctx,
					qkey_violation_counter);
	props->subnet_timeout	= MLX5_GET(hca_vport_context, ctx,
					subnet_timeout);
	props->init_type_reply	= MLX5_GET(hca_vport_context, ctx,
					init_type_reply);
	props->grh_required	= MLX5_GET(hca_vport_context, ctx, grh_required);

	ptys->proto_mask |= MLX5_PTYS_IB;
	ptys->local_port = port;
	err = mlx5_core_access_ptys(mdev, ptys, 0);
	if (err)
		goto out;

	err = translate_active_width(ibdev, ptys->ib_link_width_oper,
				     &props->active_width);
	if (err)
		goto out;

	props->active_speed	= (u8)ptys->ib_proto_oper;

	pmtu->local_port = port;
	err = mlx5_core_access_pmtu(mdev, pmtu, 0);
	if (err)
		goto out;

	props->max_mtu		= pmtu->max_mtu;
	props->active_mtu	= pmtu->oper_mtu;

	memset(&pvlc, 0, sizeof(pvlc));
	pvlc.local_port = port;
	err = mlx5_core_access_pvlc(mdev, &pvlc, 0);
	if (err)
		goto out;

	err = translate_max_vl_num(ibdev, pvlc.vl_hw_cap,
				   &props->max_vl_num);
out:
	kvfree(rep);
	kfree(ptys);
	kfree(pmtu);
	return err;
}

int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props)
{
	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_port(ibdev, port, props);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_port(ibdev, port, props);

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_query_port_roce(ibdev, port, props);

	default:
		return -EINVAL;
	}
}

static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_vport_gid(mdev, port, 0, index, gid);

	default:
		return -EINVAL;
	}

}

static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_query_hca_vport_pkey(mdev, 0, port,  0, index,
						 pkey);
	default:
		return -EINVAL;
	}
}

static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_reg_node_desc in;
	struct mlx5_reg_node_desc out;
	int err;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
	if (err)
		return err;

	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);

	return err;
}

static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_port_attr attr;
	u32 tmp;
	int err;

	mutex_lock(&dev->cap_mask_mutex);

	err = mlx5_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx5_set_port_caps(dev->mdev, port, tmp);

out:
	mutex_unlock(&dev->cap_mask_mutex);
	return err;
}

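/*
 * Allocate a verbs user context: validate the (v0/v2) request, allocate
 * UAR pages and blue-flame register bookkeeping, optionally allocate a
 * transport domain, and return sizes and limits to user space.
 */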
1035322810Shselaskystatic struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
1036322810Shselasky						  struct ib_udata *udata)
1037322810Shselasky{
1038322810Shselasky	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1039331769Shselasky	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
1040331769Shselasky	struct mlx5_ib_alloc_ucontext_resp resp = {};
1041322810Shselasky	struct mlx5_ib_ucontext *context;
1042322810Shselasky	struct mlx5_uuar_info *uuari;
1043322810Shselasky	struct mlx5_uar *uars;
1044322810Shselasky	int gross_uuars;
1045322810Shselasky	int num_uars;
1046322810Shselasky	int ver;
1047322810Shselasky	int uuarn;
1048322810Shselasky	int err;
1049322810Shselasky	int i;
1050322810Shselasky	size_t reqlen;
1051331769Shselasky	size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
1052331769Shselasky				     max_cqe_version);
1053322810Shselasky
1054322810Shselasky	if (!dev->ib_active)
1055322810Shselasky		return ERR_PTR(-EAGAIN);
1056322810Shselasky
1057331769Shselasky	if (udata->inlen < sizeof(struct ib_uverbs_cmd_hdr))
1058331769Shselasky		return ERR_PTR(-EINVAL);
1059322810Shselasky
1060322810Shselasky	reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
1061322810Shselasky	if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
1062322810Shselasky		ver = 0;
1063331769Shselasky	else if (reqlen >= min_req_v2)
1064322810Shselasky		ver = 2;
1065331769Shselasky	else
1066322810Shselasky		return ERR_PTR(-EINVAL);
1067322810Shselasky
1068331769Shselasky	err = ib_copy_from_udata(&req, udata, min(reqlen, sizeof(req)));
1069331769Shselasky	if (err)
1070322810Shselasky		return ERR_PTR(err);
1071322810Shselasky
1072331769Shselasky	if (req.flags)
1073322810Shselasky		return ERR_PTR(-EINVAL);
1074322810Shselasky
1075331769Shselasky	if (req.total_num_uuars > MLX5_MAX_UUARS)
1076322810Shselasky		return ERR_PTR(-ENOMEM);
1077322810Shselasky
1078331769Shselasky	if (req.total_num_uuars == 0)
1079331769Shselasky		return ERR_PTR(-EINVAL);
1080331769Shselasky
1081331769Shselasky	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
1082331769Shselasky		return ERR_PTR(-EOPNOTSUPP);
1083331769Shselasky
1084331769Shselasky	if (reqlen > sizeof(req) &&
1085331769Shselasky	    !ib_is_udata_cleared(udata, sizeof(req),
1086331769Shselasky				 reqlen - sizeof(req)))
1087331769Shselasky		return ERR_PTR(-EOPNOTSUPP);
1088331769Shselasky
1089322810Shselasky	req.total_num_uuars = ALIGN(req.total_num_uuars,
1090322810Shselasky				    MLX5_NON_FP_BF_REGS_PER_PAGE);
1091331769Shselasky	if (req.num_low_latency_uuars > req.total_num_uuars - 1)
1092322810Shselasky		return ERR_PTR(-EINVAL);
1093322810Shselasky
1094322810Shselasky	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
1095322810Shselasky	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
1096322810Shselasky	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
1097322810Shselasky	if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
1098322810Shselasky		resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
1099331769Shselasky	resp.cache_line_size = cache_line_size();
1100322810Shselasky	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
1101322810Shselasky	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
1102322810Shselasky	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1103322810Shselasky	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1104322810Shselasky	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
1105331769Shselasky	resp.cqe_version = min_t(__u8,
1106331769Shselasky				 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
1107331769Shselasky				 req.max_cqe_version);
1108331769Shselasky	resp.response_length = min(offsetof(typeof(resp), response_length) +
1109331769Shselasky				   sizeof(resp.response_length), udata->outlen);
1110322810Shselasky
1111322810Shselasky	context = kzalloc(sizeof(*context), GFP_KERNEL);
1112322810Shselasky	if (!context)
1113322810Shselasky		return ERR_PTR(-ENOMEM);
1114322810Shselasky
1115322810Shselasky	uuari = &context->uuari;
1116322810Shselasky	mutex_init(&uuari->lock);
1117322810Shselasky	uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
1118322810Shselasky	if (!uars) {
1119322810Shselasky		err = -ENOMEM;
1120322810Shselasky		goto out_ctx;
1121322810Shselasky	}
1122322810Shselasky
1123322810Shselasky	uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
1124322810Shselasky				sizeof(*uuari->bitmap),
1125322810Shselasky				GFP_KERNEL);
1126322810Shselasky	if (!uuari->bitmap) {
1127322810Shselasky		err = -ENOMEM;
1128322810Shselasky		goto out_uar_ctx;
1129322810Shselasky	}
1130322810Shselasky	/*
1131322810Shselasky	 * clear all fast path uuars
1132322810Shselasky	 */
1133322810Shselasky	for (i = 0; i < gross_uuars; i++) {
1134322810Shselasky		uuarn = i & 3;
1135322810Shselasky		if (uuarn == 2 || uuarn == 3)
1136322810Shselasky			set_bit(i, uuari->bitmap);
1137322810Shselasky	}
1138322810Shselasky
1139322810Shselasky	uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
1140322810Shselasky	if (!uuari->count) {
1141322810Shselasky		err = -ENOMEM;
1142322810Shselasky		goto out_bitmap;
1143322810Shselasky	}
1144322810Shselasky
1145322810Shselasky	for (i = 0; i < num_uars; i++) {
1146322810Shselasky		err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index);
1147331769Shselasky		if (err)
1148331769Shselasky			goto out_count;
1149331769Shselasky	}
1150331769Shselasky
1151331769Shselasky#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1152331769Shselasky	context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
1153331769Shselasky#endif
1154331769Shselasky
1155331769Shselasky	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
1156331769Shselasky		err = mlx5_alloc_transport_domain(dev->mdev,
1157331769Shselasky						       &context->tdn);
1158331769Shselasky		if (err)
1159322810Shselasky			goto out_uars;
1160322810Shselasky	}
1161322810Shselasky
1162331769Shselasky	INIT_LIST_HEAD(&context->vma_private_list);
1163322810Shselasky	INIT_LIST_HEAD(&context->db_page_list);
1164322810Shselasky	mutex_init(&context->db_page_mutex);
1165322810Shselasky
1166322810Shselasky	resp.tot_uuars = req.total_num_uuars;
1167322810Shselasky	resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
1168331769Shselasky
1169331769Shselasky	if (field_avail(typeof(resp), cqe_version, udata->outlen))
1170331769Shselasky		resp.response_length += sizeof(resp.cqe_version);
1171331769Shselasky
1172331769Shselasky	if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
1173331784Shselasky		resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
1174331784Shselasky				      MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
1175331769Shselasky		resp.response_length += sizeof(resp.cmds_supp_uhw);
1176331769Shselasky	}
1177331769Shselasky
1178331769Shselasky	/*
1179331769Shselasky	 * We don't want to expose information from the PCI bar that is located
1180331769Shselasky	 * after 4096 bytes, so if the arch only supports larger pages, let's
1181331769Shselasky	 * pretend we don't support reading the HCA's core clock. This is also
1182331769Shselasky	 * forced by mmap function.
1183331769Shselasky	 */
1184331769Shselasky	if (PAGE_SIZE <= 4096 &&
1185331769Shselasky	    field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
1186331769Shselasky		resp.comp_mask |=
1187331769Shselasky			MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
1188331769Shselasky		resp.hca_core_clock_offset =
1189331769Shselasky			offsetof(struct mlx5_init_seg, internal_timer_h) %
1190331769Shselasky			PAGE_SIZE;
1191331769Shselasky		resp.response_length += sizeof(resp.hca_core_clock_offset) +
1192331769Shselasky					sizeof(resp.reserved2);
1193331769Shselasky	}
1194331769Shselasky
1195331769Shselasky	err = ib_copy_to_udata(udata, &resp, resp.response_length);
1196322810Shselasky	if (err)
1197331769Shselasky		goto out_td;
1198322810Shselasky
1199322810Shselasky	uuari->ver = ver;
1200322810Shselasky	uuari->num_low_latency_uuars = req.num_low_latency_uuars;
1201322810Shselasky	uuari->uars = uars;
1202322810Shselasky	uuari->num_uars = num_uars;
1203331769Shselasky	context->cqe_version = resp.cqe_version;
1204322810Shselasky
1205322810Shselasky	return &context->ibucontext;
1206322810Shselasky
1207331769Shselaskyout_td:
1208331769Shselasky	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1209331769Shselasky		mlx5_dealloc_transport_domain(dev->mdev, context->tdn);
1210331769Shselasky
1211322810Shselaskyout_uars:
1212322810Shselasky	for (i--; i >= 0; i--)
1213322810Shselasky		mlx5_cmd_free_uar(dev->mdev, uars[i].index);
1214331769Shselaskyout_count:
1215322810Shselasky	kfree(uuari->count);
1216322810Shselasky
1217322810Shselaskyout_bitmap:
1218322810Shselasky	kfree(uuari->bitmap);
1219322810Shselasky
1220322810Shselaskyout_uar_ctx:
1221322810Shselasky	kfree(uars);
1222322810Shselasky
1223322810Shselaskyout_ctx:
1224322810Shselasky	kfree(context);
1225322810Shselasky	return ERR_PTR(err);
1226322810Shselasky}
1227322810Shselasky
1228322810Shselaskystatic int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1229322810Shselasky{
1230322810Shselasky	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1231322810Shselasky	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
1232322810Shselasky	struct mlx5_uuar_info *uuari = &context->uuari;
1233322810Shselasky	int i;
1234322810Shselasky
1235331769Shselasky	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1236322810Shselasky		mlx5_dealloc_transport_domain(dev->mdev, context->tdn);
1237322810Shselasky
1238322810Shselasky	for (i = 0; i < uuari->num_uars; i++) {
1239322810Shselasky		if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
1240322810Shselasky			mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
1241322810Shselasky	}
1242322810Shselasky
1243322810Shselasky	kfree(uuari->count);
1244322810Shselasky	kfree(uuari->bitmap);
1245322810Shselasky	kfree(uuari->uars);
1246322810Shselasky	kfree(context);
1247322810Shselasky
1248322810Shselasky	return 0;
1249322810Shselasky}
1250322810Shselasky
1251322810Shselaskystatic phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
1252322810Shselasky{
1253322810Shselasky	return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index;
1254322810Shselasky}
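
/*
 * UAR pages sit consecutively at the start of BAR 0, so the PFN of UAR
 * 'index' is simply the BAR's starting PFN plus the index; uar_mmap() below
 * remaps exactly one such page per call.
 */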
1255322810Shselasky
1256322810Shselaskystatic int get_command(unsigned long offset)
1257322810Shselasky{
1258322810Shselasky	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
1259322810Shselasky}
1260322810Shselasky
1261322810Shselaskystatic int get_arg(unsigned long offset)
1262322810Shselasky{
1263322810Shselasky	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
1264322810Shselasky}
1265322810Shselasky
1266322810Shselaskystatic int get_index(unsigned long offset)
1267322810Shselasky{
1268322810Shselasky	return get_arg(offset);
1269322810Shselasky}
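
/*
 * Illustration (assuming the driver's usual 8-bit MLX5_IB_MMAP_CMD_SHIFT from
 * mlx5_ib.h): a vm_pgoff of ((MLX5_IB_MMAP_REGULAR_PAGE << 8) | 3) decodes to
 * the MLX5_IB_MMAP_REGULAR_PAGE command via get_command() and to UAR index 3
 * via get_arg()/get_index().
 */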
1270322810Shselasky
1271331769Shselaskystatic void  mlx5_ib_vma_open(struct vm_area_struct *area)
1272331769Shselasky{
1273331769Shselasky	/* vma_open is called when a new VMA is created on top of our VMA.  This
1274331769Shselasky	 * happens through either the mremap flow or split_vma (usually due to
1275331769Shselasky	 * mlock, madvise, munmap, etc.).  We do not support a clone of the VMA,
1276331769Shselasky	 * as this VMA is strongly hardware related.  Therefore we set the
1277331769Shselasky	 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
1278331769Shselasky	 * calling us again and trying to do incorrect actions.  We assume that
1279331769Shselasky	 * the original VMA size is exactly a single page, and therefore no
1280331769Shselasky	 * "splitting" operations will happen to it.
1281331769Shselasky	 */
1282331769Shselasky	area->vm_ops = NULL;
1283331769Shselasky}
1284331769Shselasky
1285331769Shselaskystatic void  mlx5_ib_vma_close(struct vm_area_struct *area)
1286331769Shselasky{
1287331769Shselasky	struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data;
1288331769Shselasky
1289331769Shselasky	/* It's guaranteed that all VMAs opened on a FD are closed before the
1290331769Shselasky	 * file itself is closed, therefore no sync is needed with the regular
1291331769Shselasky	 * closing flow (e.g. mlx5_ib_dealloc_ucontext).
1292331769Shselasky	 * However, a sync is needed with accesses to the vma as part of
1293331769Shselasky	 * mlx5_ib_disassociate_ucontext.
1294331769Shselasky	 * The close operation is usually called under mm->mmap_sem, except
1295331769Shselasky	 * when the process is exiting.
1296331769Shselasky	 * The exiting case is handled explicitly as part of
1297331769Shselasky	 * mlx5_ib_disassociate_ucontext.
1298331769Shselasky	 */
1299331769Shselasky	mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data;
1300331769Shselasky
1301331769Shselasky	/* Set the vma context pointer to NULL in the mlx5_ib driver's
1302331769Shselasky	 * private data to protect against a race condition in
1303331769Shselasky	 * mlx5_ib_disassociate_ucontext().
1304331769Shselasky	 */
1305331769Shselasky	mlx5_ib_vma_priv_data->vma = NULL;
1306331769Shselasky	list_del(&mlx5_ib_vma_priv_data->list);
1307331769Shselasky	kfree(mlx5_ib_vma_priv_data);
1308331769Shselasky}
1309331769Shselasky
1310331769Shselaskystatic const struct vm_operations_struct mlx5_ib_vm_ops = {
1311331769Shselasky	.open = mlx5_ib_vma_open,
1312331769Shselasky	.close = mlx5_ib_vma_close
1313331769Shselasky};
1314331769Shselasky
1315331769Shselaskystatic int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
1316331769Shselasky				struct mlx5_ib_ucontext *ctx)
1317331769Shselasky{
1318331769Shselasky	struct mlx5_ib_vma_private_data *vma_prv;
1319331769Shselasky	struct list_head *vma_head = &ctx->vma_private_list;
1320331769Shselasky
1321331769Shselasky	vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL);
1322331769Shselasky	if (!vma_prv)
1323331769Shselasky		return -ENOMEM;
1324331769Shselasky
1325331769Shselasky	vma_prv->vma = vma;
1326331769Shselasky	vma->vm_private_data = vma_prv;
1327331769Shselasky	vma->vm_ops =  &mlx5_ib_vm_ops;
1328331769Shselasky
1329331769Shselasky	list_add(&vma_prv->list, vma_head);
1330331769Shselasky
1331331769Shselasky	return 0;
1332331769Shselasky}
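
/*
 * Each UAR mapping handed out to userspace is tracked on the ucontext's
 * vma_private_list through one of these nodes.  mlx5_ib_vma_close() unlinks
 * and frees the node when the mapping disappears, and the disassociate flow
 * mentioned in the comments above walks the list to find mappings that are
 * still alive while the context is being torn down.
 */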
1333331769Shselasky
1334331769Shselaskystatic inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
1335331769Shselasky{
1336331769Shselasky	switch (cmd) {
1337331769Shselasky	case MLX5_IB_MMAP_WC_PAGE:
1338331769Shselasky		return "WC";
1339331769Shselasky	case MLX5_IB_MMAP_REGULAR_PAGE:
1340331769Shselasky		return "best effort WC";
1341331769Shselasky	case MLX5_IB_MMAP_NC_PAGE:
1342331769Shselasky		return "NC";
1343331769Shselasky	default:
1344331769Shselasky		return NULL;
1345331769Shselasky	}
1346331769Shselasky}
1347331769Shselasky
1348331769Shselaskystatic int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
1349331769Shselasky		    struct vm_area_struct *vma,
1350322810Shselasky		    struct mlx5_ib_ucontext *context)
1351322810Shselasky{
1352331769Shselasky	struct mlx5_uuar_info *uuari = &context->uuari;
1353331769Shselasky	int err;
1354322810Shselasky	unsigned long idx;
1355331769Shselasky	phys_addr_t pfn, pa;
1356331769Shselasky	pgprot_t prot;
1357322810Shselasky
1358331769Shselasky	switch (cmd) {
1359331769Shselasky	case MLX5_IB_MMAP_WC_PAGE:
1360331769Shselasky/* Some architectures don't support WC memory */
1361331769Shselasky#if defined(CONFIG_X86)
1362331769Shselasky		if (!pat_enabled())
1363331769Shselasky			return -EPERM;
1364331769Shselasky#elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
1365331769Shselasky			return -EPERM;
1366331769Shselasky#endif
1367331769Shselasky	/* fall through */
1368331769Shselasky	case MLX5_IB_MMAP_REGULAR_PAGE:
1369331769Shselasky		/* For MLX5_IB_MMAP_REGULAR_PAGE, make a best-effort attempt to get WC */
1370331769Shselasky		prot = pgprot_writecombine(vma->vm_page_prot);
1371331769Shselasky		break;
1372331769Shselasky	case MLX5_IB_MMAP_NC_PAGE:
1373331769Shselasky		prot = pgprot_noncached(vma->vm_page_prot);
1374331769Shselasky		break;
1375331769Shselasky	default:
1376322810Shselasky		return -EINVAL;
1377322810Shselasky	}
1378322810Shselasky
1379331769Shselasky	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
1380331769Shselasky		return -EINVAL;
1381331769Shselasky
1382322810Shselasky	idx = get_index(vma->vm_pgoff);
1383331769Shselasky	if (idx >= uuari->num_uars)
1384322810Shselasky		return -EINVAL;
1385322810Shselasky
1386322810Shselasky	pfn = uar_index2pfn(dev, uuari->uars[idx].index);
1387331769Shselasky	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
1388322810Shselasky
1389322810Shselasky	vma->vm_page_prot = prot;
1390331769Shselasky	err = io_remap_pfn_range(vma, vma->vm_start, pfn,
1391331769Shselasky				 PAGE_SIZE, vma->vm_page_prot);
1392331769Shselasky	if (err) {
1393331769Shselasky		mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%llx, pfn=%pa, mmap_cmd=%s\n",
1394331769Shselasky			    err, (unsigned long long)vma->vm_start, &pfn, mmap_cmd2str(cmd));
1395322810Shselasky		return -EAGAIN;
1396322810Shselasky	}
1397322810Shselasky
1398331769Shselasky	pa = pfn << PAGE_SHIFT;
1399331769Shselasky	mlx5_ib_dbg(dev, "mapped %s at 0x%llx, PA %pa\n", mmap_cmd2str(cmd),
1400331769Shselasky		    (unsigned long long)vma->vm_start, &pa);
1401322810Shselasky
1402331769Shselasky	return mlx5_ib_set_vma_data(vma, context);
1403322810Shselasky}
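
/*
 * Userspace sketch (illustrative only; the real consumer is the libmlx5
 * provider library and 'cmd_fd'/'idx' are placeholder names):
 *
 *	off_t pgoff = ((off_t)MLX5_IB_MMAP_WC_PAGE << MLX5_IB_MMAP_CMD_SHIFT) | idx;
 *	void *uar = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
 *			 cmd_fd, pgoff * page_size);
 *
 * The byte offset is a whole number of pages, so the vm_pgoff seen above
 * carries the command in its high bits and the UAR index in its low bits.
 */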
1404322810Shselasky
1405322810Shselaskystatic int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
1406322810Shselasky{
1407322810Shselasky	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1408322810Shselasky	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
1409322810Shselasky	unsigned long command;
1410331769Shselasky	phys_addr_t pfn;
1411322810Shselasky
1412322810Shselasky	command = get_command(vma->vm_pgoff);
1413322810Shselasky	switch (command) {
1414331769Shselasky	case MLX5_IB_MMAP_WC_PAGE:
1415331769Shselasky	case MLX5_IB_MMAP_NC_PAGE:
1416322810Shselasky	case MLX5_IB_MMAP_REGULAR_PAGE:
1417331769Shselasky		return uar_mmap(dev, command, vma, context);
1418322810Shselasky
1419331769Shselasky	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
1420331769Shselasky		return -ENOSYS;
1421322810Shselasky
1422331769Shselasky	case MLX5_IB_MMAP_CORE_CLOCK:
1423331769Shselasky		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
1424331769Shselasky			return -EINVAL;
1425322810Shselasky
1426331769Shselasky		if (vma->vm_flags & VM_WRITE)
1427331769Shselasky			return -EPERM;
1428331769Shselasky
1429331769Shselasky		/* Don't expose to user-space information it shouldn't have */
1430331769Shselasky		if (PAGE_SIZE > 4096)
1431331769Shselasky			return -EOPNOTSUPP;
1432331769Shselasky
1433331769Shselasky		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1434331769Shselasky		pfn = (dev->mdev->iseg_base +
1435331769Shselasky		       offsetof(struct mlx5_init_seg, internal_timer_h)) >>
1436331769Shselasky			PAGE_SHIFT;
1437331769Shselasky		if (io_remap_pfn_range(vma, vma->vm_start, pfn,
1438331769Shselasky				       PAGE_SIZE, vma->vm_page_prot))
1439331769Shselasky			return -EAGAIN;
1440331769Shselasky
1441331769Shselasky		mlx5_ib_dbg(dev, "mapped internal timer at 0x%llx, PA 0x%llx\n",
1442331769Shselasky			    (unsigned long long)vma->vm_start,
1443331769Shselasky			    (unsigned long long)pfn << PAGE_SHIFT);
1444322810Shselasky		break;
1445322810Shselasky
1446322810Shselasky	default:
1447322810Shselasky		return -EINVAL;
1448322810Shselasky	}
1449322810Shselasky
1450322810Shselasky	return 0;
1451322810Shselasky}
1452322810Shselasky
1453322810Shselaskystatic struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
1454322810Shselasky				      struct ib_ucontext *context,
1455322810Shselasky				      struct ib_udata *udata)
1456322810Shselasky{
1457322810Shselasky	struct mlx5_ib_alloc_pd_resp resp;
1458322810Shselasky	struct mlx5_ib_pd *pd;
1459322810Shselasky	int err;
1460322810Shselasky
1461322810Shselasky	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
1462322810Shselasky	if (!pd)
1463322810Shselasky		return ERR_PTR(-ENOMEM);
1464322810Shselasky
1465322810Shselasky	err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
1466322810Shselasky	if (err) {
1467322810Shselasky		kfree(pd);
1468322810Shselasky		return ERR_PTR(err);
1469322810Shselasky	}
1470322810Shselasky
1471322810Shselasky	if (context) {
1472322810Shselasky		resp.pdn = pd->pdn;
1473322810Shselasky		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
1474322810Shselasky			mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
1475322810Shselasky			kfree(pd);
1476322810Shselasky			return ERR_PTR(-EFAULT);
1477322810Shselasky		}
1478322810Shselasky	}
1479322810Shselasky
1480322810Shselasky	return &pd->ibpd;
1481322810Shselasky}
1482322810Shselasky
1483322810Shselaskystatic int mlx5_ib_dealloc_pd(struct ib_pd *pd)
1484322810Shselasky{
1485322810Shselasky	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
1486322810Shselasky	struct mlx5_ib_pd *mpd = to_mpd(pd);
1487322810Shselasky
1488322810Shselasky	mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
1489322810Shselasky	kfree(mpd);
1490322810Shselasky
1491322810Shselasky	return 0;
1492322810Shselasky}
1493322810Shselasky
1494331769Shselaskyenum {
1495331769Shselasky	MATCH_CRITERIA_ENABLE_OUTER_BIT,
1496331769Shselasky	MATCH_CRITERIA_ENABLE_MISC_BIT,
1497331769Shselasky	MATCH_CRITERIA_ENABLE_INNER_BIT
1498331769Shselasky};
1499331769Shselasky
1500331769Shselasky#define HEADER_IS_ZERO(match_criteria, headers)			           \
1501331769Shselasky	!(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
1502331769Shselasky		    0, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))       \
1503331769Shselasky
1504331769Shselaskystatic u8 get_match_criteria_enable(u32 *match_criteria)
1505331769Shselasky{
1506331769Shselasky	u8 match_criteria_enable;
1507331769Shselasky
1508331769Shselasky	match_criteria_enable =
1509331769Shselasky		(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
1510331769Shselasky		MATCH_CRITERIA_ENABLE_OUTER_BIT;
1511331769Shselasky	match_criteria_enable |=
1512331769Shselasky		(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
1513331769Shselasky		MATCH_CRITERIA_ENABLE_MISC_BIT;
1514331769Shselasky	match_criteria_enable |=
1515331769Shselasky		(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
1516331769Shselasky		MATCH_CRITERIA_ENABLE_INNER_BIT;
1517331769Shselasky
1518331769Shselasky	return match_criteria_enable;
1519331769Shselasky}
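
/*
 * The returned byte is what create_flow_rule() feeds to mlx5_add_flow_rule():
 * bit 0 marks a non-zero outer-headers section, bit 1 a non-zero
 * misc-parameters section and bit 2 a non-zero inner-headers section of the
 * match criteria, per the MATCH_CRITERIA_ENABLE_*_BIT enum above.
 */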
1520331769Shselasky
1521331769Shselaskystatic void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
1522331769Shselasky{
1523331769Shselasky	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
1524331769Shselasky	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
1525331769Shselasky}
1526331769Shselasky
1527331769Shselaskystatic void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
1528331769Shselasky{
1529331769Shselasky	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
1530331769Shselasky	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
1531331769Shselasky	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
1532331769Shselasky	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
1533331769Shselasky}
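
/*
 * The verbs 'tos'/'traffic_class' byte carries DSCP in bits 7:2 and ECN in
 * bits 1:0, which is why the DSCP match is programmed from the byte shifted
 * right by two while the ECN field takes the unshifted value.
 */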
1534331769Shselasky
1535331769Shselasky#define LAST_ETH_FIELD vlan_tag
1536331769Shselasky#define LAST_IB_FIELD sl
1537331769Shselasky#define LAST_IPV4_FIELD tos
1538331769Shselasky#define LAST_IPV6_FIELD traffic_class
1539331769Shselasky#define LAST_TCP_UDP_FIELD src_port
1540331769Shselasky
1541331769Shselasky/* 'field' names the last supported field; everything after it must be zero */
1542331769Shselasky#define FIELDS_NOT_SUPPORTED(filter, field)\
1543331769Shselasky	memchr_inv((void *)&filter.field  +\
1544331769Shselasky		   sizeof(filter.field), 0,\
1545331769Shselasky		   sizeof(filter) -\
1546331769Shselasky		   offsetof(typeof(filter), field) -\
1547331769Shselasky		   sizeof(filter.field))
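
/*
 * Example: FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD) is
 * non-zero whenever any byte beyond 'tos' in the IPv4 mask is set, i.e. when
 * userspace asks to match on a field this parser does not handle; the parser
 * then fails with -ENOTSUPP instead of silently ignoring the field.
 */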
1548331769Shselasky
1549331769Shselaskystatic int parse_flow_attr(u32 *match_c, u32 *match_v,
1550331769Shselasky			   const union ib_flow_spec *ib_spec)
1551331769Shselasky{
1552331769Shselasky	void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
1553331769Shselasky					     outer_headers);
1554331769Shselasky	void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
1555331769Shselasky					     outer_headers);
1556331769Shselasky	void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
1557331769Shselasky					   misc_parameters);
1558331769Shselasky	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
1559331769Shselasky					   misc_parameters);
1560331769Shselasky
1561331769Shselasky	switch (ib_spec->type) {
1562331769Shselasky	case IB_FLOW_SPEC_ETH:
1563331769Shselasky		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
1564331769Shselasky			return -ENOTSUPP;
1565331769Shselasky
1566331769Shselasky		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
1567331769Shselasky					     dmac_47_16),
1568331769Shselasky				ib_spec->eth.mask.dst_mac);
1569331769Shselasky		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
1570331769Shselasky					     dmac_47_16),
1571331769Shselasky				ib_spec->eth.val.dst_mac);
1572331769Shselasky
1573331769Shselasky		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
1574331769Shselasky					     smac_47_16),
1575331769Shselasky				ib_spec->eth.mask.src_mac);
1576331769Shselasky		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
1577331769Shselasky					     smac_47_16),
1578331769Shselasky				ib_spec->eth.val.src_mac);
1579331769Shselasky
1580331769Shselasky		if (ib_spec->eth.mask.vlan_tag) {
1581331769Shselasky			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
1582331769Shselasky				 cvlan_tag, 1);
1583331769Shselasky			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
1584331769Shselasky				 cvlan_tag, 1);
1585331769Shselasky
1586331769Shselasky			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
1587331769Shselasky				 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
1588331769Shselasky			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
1589331769Shselasky				 first_vid, ntohs(ib_spec->eth.val.vlan_tag));
1590331769Shselasky
1591331769Shselasky			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
1592331769Shselasky				 first_cfi,
1593331769Shselasky				 ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
1594331769Shselasky			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
1595331769Shselasky				 first_cfi,
1596331769Shselasky				 ntohs(ib_spec->eth.val.vlan_tag) >> 12);
1597331769Shselasky
1598331769Shselasky			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
1599331769Shselasky				 first_prio,
1600331769Shselasky				 ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
1601331769Shselasky			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
1602331769Shselasky				 first_prio,
1603331769Shselasky				 ntohs(ib_spec->eth.val.vlan_tag) >> 13);
1604331769Shselasky		}
1605331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
1606331769Shselasky			 ethertype, ntohs(ib_spec->eth.mask.ether_type));
1607331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
1608331769Shselasky			 ethertype, ntohs(ib_spec->eth.val.ether_type));
1609331769Shselasky		break;
1610331769Shselasky	case IB_FLOW_SPEC_IPV4:
1611331769Shselasky		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
1612331769Shselasky			return -ENOTSUPP;
1613331769Shselasky
1614331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
1615331769Shselasky			 ethertype, 0xffff);
1616331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
1617331769Shselasky			 ethertype, ETH_P_IP);
1618331769Shselasky
1619331769Shselasky		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
1620331769Shselasky				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
1621331769Shselasky		       &ib_spec->ipv4.mask.src_ip,
1622331769Shselasky		       sizeof(ib_spec->ipv4.mask.src_ip));
1623331769Shselasky		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
1624331769Shselasky				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
1625331769Shselasky		       &ib_spec->ipv4.val.src_ip,
1626331769Shselasky		       sizeof(ib_spec->ipv4.val.src_ip));
1627331769Shselasky		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
1628331769Shselasky				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
1629331769Shselasky		       &ib_spec->ipv4.mask.dst_ip,
1630331769Shselasky		       sizeof(ib_spec->ipv4.mask.dst_ip));
1631331769Shselasky		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
1632331769Shselasky				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
1633331769Shselasky		       &ib_spec->ipv4.val.dst_ip,
1634331769Shselasky		       sizeof(ib_spec->ipv4.val.dst_ip));
1635331769Shselasky
1636331769Shselasky		set_tos(outer_headers_c, outer_headers_v,
1637331769Shselasky			ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
1638331769Shselasky
1639331769Shselasky		set_proto(outer_headers_c, outer_headers_v,
1640331769Shselasky			  ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
1641331769Shselasky		break;
1642331769Shselasky	case IB_FLOW_SPEC_IPV6:
1643331769Shselasky		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
1644331769Shselasky			return -ENOTSUPP;
1645331769Shselasky
1646331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
1647331769Shselasky			 ethertype, 0xffff);
1648331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
1649331769Shselasky			 ethertype, IPPROTO_IPV6);
1650331769Shselasky
1651331769Shselasky		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
1652331769Shselasky				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
1653331769Shselasky		       &ib_spec->ipv6.mask.src_ip,
1654331769Shselasky		       sizeof(ib_spec->ipv6.mask.src_ip));
1655331769Shselasky		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
1656331769Shselasky				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
1657331769Shselasky		       &ib_spec->ipv6.val.src_ip,
1658331769Shselasky		       sizeof(ib_spec->ipv6.val.src_ip));
1659331769Shselasky		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
1660331769Shselasky				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1661331769Shselasky		       &ib_spec->ipv6.mask.dst_ip,
1662331769Shselasky		       sizeof(ib_spec->ipv6.mask.dst_ip));
1663331769Shselasky		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
1664331769Shselasky				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1665331769Shselasky		       &ib_spec->ipv6.val.dst_ip,
1666331769Shselasky		       sizeof(ib_spec->ipv6.val.dst_ip));
1667331769Shselasky
1668331769Shselasky		set_tos(outer_headers_c, outer_headers_v,
1669331769Shselasky			ib_spec->ipv6.mask.traffic_class,
1670331769Shselasky			ib_spec->ipv6.val.traffic_class);
1671331769Shselasky
1672331769Shselasky		set_proto(outer_headers_c, outer_headers_v,
1673331769Shselasky			  ib_spec->ipv6.mask.next_hdr,
1674331769Shselasky			  ib_spec->ipv6.val.next_hdr);
1675331769Shselasky
1676331769Shselasky		MLX5_SET(fte_match_set_misc, misc_params_c,
1677331769Shselasky			 outer_ipv6_flow_label,
1678331769Shselasky			 ntohl(ib_spec->ipv6.mask.flow_label));
1679331769Shselasky		MLX5_SET(fte_match_set_misc, misc_params_v,
1680331769Shselasky			 outer_ipv6_flow_label,
1681331769Shselasky			 ntohl(ib_spec->ipv6.val.flow_label));
1682331769Shselasky		break;
1683331769Shselasky	case IB_FLOW_SPEC_TCP:
1684331769Shselasky		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
1685331769Shselasky					 LAST_TCP_UDP_FIELD))
1686331769Shselasky			return -ENOTSUPP;
1687331769Shselasky
1688331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
1689331769Shselasky			 0xff);
1690331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
1691331769Shselasky			 IPPROTO_TCP);
1692331769Shselasky
1693331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
1694331769Shselasky			 ntohs(ib_spec->tcp_udp.mask.src_port));
1695331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
1696331769Shselasky			 ntohs(ib_spec->tcp_udp.val.src_port));
1697331769Shselasky
1698331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
1699331769Shselasky			 ntohs(ib_spec->tcp_udp.mask.dst_port));
1700331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
1701331769Shselasky			 ntohs(ib_spec->tcp_udp.val.dst_port));
1702331769Shselasky		break;
1703331769Shselasky	case IB_FLOW_SPEC_UDP:
1704331769Shselasky		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
1705331769Shselasky					 LAST_TCP_UDP_FIELD))
1706331769Shselasky			return -ENOTSUPP;
1707331769Shselasky
1708331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
1709331769Shselasky			 0xff);
1710331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
1711331769Shselasky			 IPPROTO_UDP);
1712331769Shselasky
1713331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
1714331769Shselasky			 ntohs(ib_spec->tcp_udp.mask.src_port));
1715331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
1716331769Shselasky			 ntohs(ib_spec->tcp_udp.val.src_port));
1717331769Shselasky
1718331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
1719331769Shselasky			 ntohs(ib_spec->tcp_udp.mask.dst_port));
1720331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
1721331769Shselasky			 ntohs(ib_spec->tcp_udp.val.dst_port));
1722331769Shselasky		break;
1723331769Shselasky	default:
1724331769Shselasky		return -EINVAL;
1725331769Shselasky	}
1726331769Shselasky
1727331769Shselasky	return 0;
1728331769Shselasky}
1729331769Shselasky
1730331769Shselasky/* If a flow could catch both multicast and unicast packets, it must not be
1731331769Shselasky * placed in the multicast flow steering table, since such a rule could steal
1732331769Shselasky * multicast packets belonging to other flows.
1733331769Shselasky */
1734331769Shselaskystatic bool flow_is_multicast_only(struct ib_flow_attr *ib_attr)
1735331769Shselasky{
1736331769Shselasky	struct ib_flow_spec_eth *eth_spec;
1737331769Shselasky
1738331769Shselasky	if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
1739331769Shselasky	    ib_attr->size < sizeof(struct ib_flow_attr) +
1740331769Shselasky	    sizeof(struct ib_flow_spec_eth) ||
1741331769Shselasky	    ib_attr->num_of_specs < 1)
1742331769Shselasky		return false;
1743331769Shselasky
1744331769Shselasky	eth_spec = (struct ib_flow_spec_eth *)(ib_attr + 1);
1745331769Shselasky	if (eth_spec->type != IB_FLOW_SPEC_ETH ||
1746331769Shselasky	    eth_spec->size != sizeof(*eth_spec))
1747331769Shselasky		return false;
1748331769Shselasky
1749331769Shselasky	return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
1750331769Shselasky	       is_multicast_ether_addr(eth_spec->val.dst_mac);
1751331769Shselasky}
1752331769Shselasky
1753331769Shselaskystatic bool is_valid_attr(const struct ib_flow_attr *flow_attr)
1754331769Shselasky{
1755331769Shselasky	union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
1756331769Shselasky	bool has_ipv4_spec = false;
1757331769Shselasky	bool eth_type_ipv4 = true;
1758331769Shselasky	unsigned int spec_index;
1759331769Shselasky
1760331769Shselasky	/* Validate that ethertype is correct */
1761331769Shselasky	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
1762331769Shselasky		if (ib_spec->type == IB_FLOW_SPEC_ETH &&
1763331769Shselasky		    ib_spec->eth.mask.ether_type) {
1764331769Shselasky			if (!((ib_spec->eth.mask.ether_type == htons(0xffff)) &&
1765331769Shselasky			      ib_spec->eth.val.ether_type == htons(ETH_P_IP)))
1766331769Shselasky				eth_type_ipv4 = false;
1767331769Shselasky		} else if (ib_spec->type == IB_FLOW_SPEC_IPV4) {
1768331769Shselasky			has_ipv4_spec = true;
1769331769Shselasky		}
1770331769Shselasky		ib_spec = (void *)ib_spec + ib_spec->size;
1771331769Shselasky	}
1772331769Shselasky	return !has_ipv4_spec || eth_type_ipv4;
1773331769Shselasky}
1774331769Shselasky
1775331769Shselaskystatic void put_flow_table(struct mlx5_ib_dev *dev,
1776331769Shselasky			   struct mlx5_ib_flow_prio *prio, bool ft_added)
1777331769Shselasky{
1778331769Shselasky	prio->refcount -= !!ft_added;
1779331769Shselasky	if (!prio->refcount) {
1780331769Shselasky		mlx5_destroy_flow_table(prio->flow_table);
1781331769Shselasky		prio->flow_table = NULL;
1782331769Shselasky	}
1783331769Shselasky}
1784331769Shselasky
1785331769Shselaskystatic int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
1786331769Shselasky{
1787331769Shselasky	struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
1788331769Shselasky	struct mlx5_ib_flow_handler *handler = container_of(flow_id,
1789331769Shselasky							  struct mlx5_ib_flow_handler,
1790331769Shselasky							  ibflow);
1791331769Shselasky	struct mlx5_ib_flow_handler *iter, *tmp;
1792331769Shselasky
1793331769Shselasky	mutex_lock(&dev->flow_db.lock);
1794331769Shselasky
1795331769Shselasky	list_for_each_entry_safe(iter, tmp, &handler->list, list) {
1796331769Shselasky		mlx5_del_flow_rule(iter->rule);
1797331769Shselasky		put_flow_table(dev, iter->prio, true);
1798331769Shselasky		list_del(&iter->list);
1799331769Shselasky		kfree(iter);
1800331769Shselasky	}
1801331769Shselasky
1802331769Shselasky	mlx5_del_flow_rule(handler->rule);
1803331769Shselasky	put_flow_table(dev, handler->prio, true);
1804331769Shselasky	mutex_unlock(&dev->flow_db.lock);
1805331769Shselasky
1806331769Shselasky	kfree(handler);
1807331769Shselasky
1808331769Shselasky	return 0;
1809331769Shselasky}
1810331769Shselasky
1811331769Shselaskystatic int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
1812331769Shselasky{
1813331769Shselasky	priority *= 2;
1814331769Shselasky	if (!dont_trap)
1815331769Shselasky		priority++;
1816331769Shselasky	return priority;
1817331769Shselasky}
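
/*
 * Each IB priority thus occupies a pair of core priorities: 2 * prio for
 * don't-trap rules and 2 * prio + 1 for normal rules, so the two kinds of
 * rules never share a slot.
 */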
1818331769Shselasky
1819331769Shselaskyenum flow_table_type {
1820331769Shselasky	MLX5_IB_FT_RX,
1821331769Shselasky	MLX5_IB_FT_TX
1822331769Shselasky};
1823331769Shselasky
1824331769Shselasky#define MLX5_FS_MAX_TYPES	 10
1825331769Shselasky#define MLX5_FS_MAX_ENTRIES	 32000UL
1826331769Shselaskystatic struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
1827331769Shselasky						struct ib_flow_attr *flow_attr,
1828331769Shselasky						enum flow_table_type ft_type)
1829331769Shselasky{
1830331769Shselasky	bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
1831331769Shselasky	struct mlx5_flow_namespace *ns = NULL;
1832331769Shselasky	struct mlx5_ib_flow_prio *prio;
1833331769Shselasky	struct mlx5_flow_table *ft;
1834331769Shselasky	int num_entries;
1835331769Shselasky	int num_groups;
1836331769Shselasky	int priority;
1837331769Shselasky	int err = 0;
1838331769Shselasky
1839331769Shselasky	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
1840331769Shselasky		if (flow_is_multicast_only(flow_attr) &&
1841331769Shselasky		    !dont_trap)
1842331769Shselasky			priority = MLX5_IB_FLOW_MCAST_PRIO;
1843331769Shselasky		else
1844331769Shselasky			priority = ib_prio_to_core_prio(flow_attr->priority,
1845331769Shselasky							dont_trap);
1846331769Shselasky		ns = mlx5_get_flow_namespace(dev->mdev,
1847331769Shselasky					     MLX5_FLOW_NAMESPACE_BYPASS);
1848331769Shselasky		num_entries = MLX5_FS_MAX_ENTRIES;
1849331769Shselasky		num_groups = MLX5_FS_MAX_TYPES;
1850331769Shselasky		prio = &dev->flow_db.prios[priority];
1851331769Shselasky	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
1852331769Shselasky		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
1853331769Shselasky		ns = mlx5_get_flow_namespace(dev->mdev,
1854331769Shselasky					     MLX5_FLOW_NAMESPACE_LEFTOVERS);
1855331769Shselasky		build_leftovers_ft_param("bypass", &priority,
1856331769Shselasky					 &num_entries,
1857331769Shselasky					 &num_groups);
1858331769Shselasky		prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
1859331769Shselasky	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
1860331769Shselasky		if (!MLX5_CAP_FLOWTABLE(dev->mdev,
1861331769Shselasky					allow_sniffer_and_nic_rx_shared_tir))
1862331769Shselasky			return ERR_PTR(-ENOTSUPP);
1863331769Shselasky
1864331769Shselasky		ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
1865331769Shselasky					     MLX5_FLOW_NAMESPACE_SNIFFER_RX :
1866331769Shselasky					     MLX5_FLOW_NAMESPACE_SNIFFER_TX);
1867331769Shselasky
1868331769Shselasky		prio = &dev->flow_db.sniffer[ft_type];
1869331769Shselasky		priority = 0;
1870331769Shselasky		num_entries = 1;
1871331769Shselasky		num_groups = 1;
1872331769Shselasky	}
1873331769Shselasky
1874331769Shselasky	if (!ns)
1875331769Shselasky		return ERR_PTR(-ENOTSUPP);
1876331769Shselasky
1877331769Shselasky	ft = prio->flow_table;
1878331769Shselasky	if (!ft) {
1879331769Shselasky		ft = mlx5_create_auto_grouped_flow_table(ns, priority, "bypass",
1880331769Shselasky							 num_entries,
1881331769Shselasky							 num_groups);
1882331769Shselasky
1883331769Shselasky		if (!IS_ERR(ft)) {
1884331769Shselasky			prio->refcount = 0;
1885331769Shselasky			prio->flow_table = ft;
1886331769Shselasky		} else {
1887331769Shselasky			err = PTR_ERR(ft);
1888331769Shselasky		}
1889331769Shselasky	}
1890331769Shselasky
1891331769Shselasky	return err ? ERR_PTR(err) : prio;
1892331769Shselasky}
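
/*
 * Summary of the selection above: NORMAL rules share auto-grouped tables in
 * the BYPASS namespace (up to MLX5_FS_MAX_ENTRIES entries split across
 * MLX5_FS_MAX_TYPES groups), ALL/MC_DEFAULT rules use the LEFTOVERS
 * namespace, and SNIFFER rules use single-entry RX/TX tables and additionally
 * require the allow_sniffer_and_nic_rx_shared_tir capability.
 */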
1893331769Shselasky
1894331769Shselaskystatic struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
1895331769Shselasky						     struct mlx5_ib_flow_prio *ft_prio,
1896331769Shselasky						     const struct ib_flow_attr *flow_attr,
1897331769Shselasky						     struct mlx5_flow_destination *dst)
1898331769Shselasky{
1899331769Shselasky	struct mlx5_flow_table	*ft = ft_prio->flow_table;
1900331769Shselasky	struct mlx5_ib_flow_handler *handler;
1901331769Shselasky	struct mlx5_flow_spec *spec;
1902331769Shselasky	const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
1903331769Shselasky	unsigned int spec_index;
1904331769Shselasky	u32 action;
1905331769Shselasky	int err = 0;
1906331769Shselasky
1907331769Shselasky	if (!is_valid_attr(flow_attr))
1908331769Shselasky		return ERR_PTR(-EINVAL);
1909331769Shselasky
1910331769Shselasky	spec = mlx5_vzalloc(sizeof(*spec));
1911331769Shselasky	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
1912331769Shselasky	if (!handler || !spec) {
1913331769Shselasky		err = -ENOMEM;
1914331769Shselasky		goto free;
1915331769Shselasky	}
1916331769Shselasky
1917331769Shselasky	INIT_LIST_HEAD(&handler->list);
1918331769Shselasky
1919331769Shselasky	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
1920331769Shselasky		err = parse_flow_attr(spec->match_criteria,
1921331769Shselasky				      spec->match_value, ib_flow);
1922331769Shselasky		if (err < 0)
1923331769Shselasky			goto free;
1924331769Shselasky
1925331769Shselasky		ib_flow += ((union ib_flow_spec *)ib_flow)->size;
1926331769Shselasky	}
1927331769Shselasky
1928331769Shselasky	spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
1929331769Shselasky	action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
1930331769Shselasky		MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
1931331769Shselasky	handler->rule = mlx5_add_flow_rule(ft, spec->match_criteria_enable,
1932331769Shselasky					   spec->match_criteria,
1933331769Shselasky					   spec->match_value,
1934331769Shselasky					   action,
1935331769Shselasky					   MLX5_FS_DEFAULT_FLOW_TAG,
1936331769Shselasky					   dst);
1937331769Shselasky
1938331769Shselasky	if (IS_ERR(handler->rule)) {
1939331769Shselasky		err = PTR_ERR(handler->rule);
1940331769Shselasky		goto free;
1941331769Shselasky	}
1942331769Shselasky
1943331769Shselasky	ft_prio->refcount++;
1944331769Shselasky	handler->prio = ft_prio;
1945331769Shselasky
1946331769Shselasky	ft_prio->flow_table = ft;
1947331769Shselaskyfree:
1948331769Shselasky	if (err)
1949331769Shselasky		kfree(handler);
1950331769Shselasky	kvfree(spec);
1951331769Shselasky	return err ? ERR_PTR(err) : handler;
1952331769Shselasky}
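
/*
 * On success the returned handler holds a reference on ft_prio's flow table
 * (ft_prio->refcount was bumped above); mlx5_ib_destroy_flow() releases it
 * through put_flow_table(), which destroys the table once the last rule using
 * it is gone.
 */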
1953331769Shselasky
1954331769Shselaskystatic struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
1955331769Shselasky							  struct mlx5_ib_flow_prio *ft_prio,
1956331769Shselasky							  struct ib_flow_attr *flow_attr,
1957331769Shselasky							  struct mlx5_flow_destination *dst)
1958331769Shselasky{
1959331769Shselasky	struct mlx5_ib_flow_handler *handler_dst = NULL;
1960331769Shselasky	struct mlx5_ib_flow_handler *handler = NULL;
1961331769Shselasky
1962331769Shselasky	handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
1963331769Shselasky	if (!IS_ERR(handler)) {
1964331769Shselasky		handler_dst = create_flow_rule(dev, ft_prio,
1965331769Shselasky					       flow_attr, dst);
1966331769Shselasky		if (IS_ERR(handler_dst)) {
1967331769Shselasky			mlx5_del_flow_rule(handler->rule);
1968331769Shselasky			ft_prio->refcount--;
1969331769Shselasky			kfree(handler);
1970331769Shselasky			handler = handler_dst;
1971331769Shselasky		} else {
1972331769Shselasky			list_add(&handler_dst->list, &handler->list);
1973331769Shselasky		}
1974331769Shselasky	}
1975331769Shselasky
1976331769Shselasky	return handler;
1977331769Shselasky}
1978331769Shselaskyenum {
1979331769Shselasky	LEFTOVERS_MC,
1980331769Shselasky	LEFTOVERS_UC,
1981331769Shselasky};
1982331769Shselasky
1983331769Shselaskystatic struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
1984331769Shselasky							  struct mlx5_ib_flow_prio *ft_prio,
1985331769Shselasky							  struct ib_flow_attr *flow_attr,
1986331769Shselasky							  struct mlx5_flow_destination *dst)
1987331769Shselasky{
1988331769Shselasky	struct mlx5_ib_flow_handler *handler_ucast = NULL;
1989331769Shselasky	struct mlx5_ib_flow_handler *handler = NULL;
1990331769Shselasky
1991331769Shselasky	static struct {
1992331769Shselasky		struct ib_flow_attr	flow_attr;
1993331769Shselasky		struct ib_flow_spec_eth eth_flow;
1994331769Shselasky	} leftovers_specs[] = {
1995331769Shselasky		[LEFTOVERS_MC] = {
1996331769Shselasky			.flow_attr = {
1997331769Shselasky				.num_of_specs = 1,
1998331769Shselasky				.size = sizeof(leftovers_specs[0])
1999331769Shselasky			},
2000331769Shselasky			.eth_flow = {
2001331769Shselasky				.type = IB_FLOW_SPEC_ETH,
2002331769Shselasky				.size = sizeof(struct ib_flow_spec_eth),
2003331769Shselasky				.mask = {.dst_mac = {0x1} },
2004331769Shselasky				.val =  {.dst_mac = {0x1} }
2005331769Shselasky			}
2006331769Shselasky		},
2007331769Shselasky		[LEFTOVERS_UC] = {
2008331769Shselasky			.flow_attr = {
2009331769Shselasky				.num_of_specs = 1,
2010331769Shselasky				.size = sizeof(leftovers_specs[0])
2011331769Shselasky			},
2012331769Shselasky			.eth_flow = {
2013331769Shselasky				.type = IB_FLOW_SPEC_ETH,
2014331769Shselasky				.size = sizeof(struct ib_flow_spec_eth),
2015331769Shselasky				.mask = {.dst_mac = {0x1} },
2016331769Shselasky				.val = {.dst_mac = {} }
2017331769Shselasky			}
2018331769Shselasky		}
2019331769Shselasky	};
2020331769Shselasky
2021331769Shselasky	handler = create_flow_rule(dev, ft_prio,
2022331769Shselasky				   &leftovers_specs[LEFTOVERS_MC].flow_attr,
2023331769Shselasky				   dst);
2024331769Shselasky	if (!IS_ERR(handler) &&
2025331769Shselasky	    flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
2026331769Shselasky		handler_ucast = create_flow_rule(dev, ft_prio,
2027331769Shselasky						 &leftovers_specs[LEFTOVERS_UC].flow_attr,
2028331769Shselasky						 dst);
2029331769Shselasky		if (IS_ERR(handler_ucast)) {
2030331769Shselasky			mlx5_del_flow_rule(handler->rule);
2031331769Shselasky			ft_prio->refcount--;
2032331769Shselasky			kfree(handler);
2033331769Shselasky			handler = handler_ucast;
2034331769Shselasky		} else {
2035331769Shselasky			list_add(&handler_ucast->list, &handler->list);
2036331769Shselasky		}
2037331769Shselasky	}
2038331769Shselasky
2039331769Shselasky	return handler;
2040331769Shselasky}
2041331769Shselasky
2042331769Shselaskystatic struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
2043331769Shselasky							struct mlx5_ib_flow_prio *ft_rx,
2044331769Shselasky							struct mlx5_ib_flow_prio *ft_tx,
2045331769Shselasky							struct mlx5_flow_destination *dst)
2046331769Shselasky{
2047331769Shselasky	struct mlx5_ib_flow_handler *handler_rx;
2048331769Shselasky	struct mlx5_ib_flow_handler *handler_tx;
2049331769Shselasky	int err;
2050331769Shselasky	static const struct ib_flow_attr flow_attr  = {
2051331769Shselasky		.num_of_specs = 0,
2052331769Shselasky		.size = sizeof(flow_attr)
2053331769Shselasky	};
2054331769Shselasky
2055331769Shselasky	handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
2056331769Shselasky	if (IS_ERR(handler_rx)) {
2057331769Shselasky		err = PTR_ERR(handler_rx);
2058331769Shselasky		goto err;
2059331769Shselasky	}
2060331769Shselasky
2061331769Shselasky	handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
2062331769Shselasky	if (IS_ERR(handler_tx)) {
2063331769Shselasky		err = PTR_ERR(handler_tx);
2064331769Shselasky		goto err_tx;
2065331769Shselasky	}
2066331769Shselasky
2067331769Shselasky	list_add(&handler_tx->list, &handler_rx->list);
2068331769Shselasky
2069331769Shselasky	return handler_rx;
2070331769Shselasky
2071331769Shselaskyerr_tx:
2072331769Shselasky	mlx5_del_flow_rule(handler_rx->rule);
2073331769Shselasky	ft_rx->refcount--;
2074331769Shselasky	kfree(handler_rx);
2075331769Shselaskyerr:
2076331769Shselasky	return ERR_PTR(err);
2077331769Shselasky}
2078331769Shselasky
2079331769Shselaskystatic struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
2080331769Shselasky					   struct ib_flow_attr *flow_attr,
2081331769Shselasky					   int domain)
2082331769Shselasky{
2083331769Shselasky	struct mlx5_ib_dev *dev = to_mdev(qp->device);
2084331769Shselasky	struct mlx5_ib_qp *mqp = to_mqp(qp);
2085331769Shselasky	struct mlx5_ib_flow_handler *handler = NULL;
2086331769Shselasky	struct mlx5_flow_destination *dst = NULL;
2087331769Shselasky	struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
2088331769Shselasky	struct mlx5_ib_flow_prio *ft_prio;
2089331769Shselasky	int err;
2090331769Shselasky
2091331769Shselasky	if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
2092331769Shselasky		return ERR_PTR(-ENOSPC);
2093331769Shselasky
2094331769Shselasky	if (domain != IB_FLOW_DOMAIN_USER ||
2095331769Shselasky	    flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) ||
2096331769Shselasky	    (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP))
2097331769Shselasky		return ERR_PTR(-EINVAL);
2098331769Shselasky
2099331769Shselasky	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
2100331769Shselasky	if (!dst)
2101331769Shselasky		return ERR_PTR(-ENOMEM);
2102331769Shselasky
2103331769Shselasky	mutex_lock(&dev->flow_db.lock);
2104331769Shselasky
2105331769Shselasky	ft_prio = get_flow_table(dev, flow_attr, MLX5_IB_FT_RX);
2106331769Shselasky	if (IS_ERR(ft_prio)) {
2107331769Shselasky		err = PTR_ERR(ft_prio);
2108331769Shselasky		goto unlock;
2109331769Shselasky	}
2110331769Shselasky	if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
2111331769Shselasky		ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
2112331769Shselasky		if (IS_ERR(ft_prio_tx)) {
2113331769Shselasky			err = PTR_ERR(ft_prio_tx);
2114331769Shselasky			ft_prio_tx = NULL;
2115331769Shselasky			goto destroy_ft;
2116331769Shselasky		}
2117331769Shselasky	}
2118331769Shselasky
2119331769Shselasky	dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
2120331769Shselasky	if (mqp->flags & MLX5_IB_QP_RSS)
2121331769Shselasky		dst->tir_num = mqp->rss_qp.tirn;
2122331769Shselasky	else
2123331769Shselasky		dst->tir_num = mqp->raw_packet_qp.rq.tirn;
2124331769Shselasky
2125331769Shselasky	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
2126331769Shselasky		if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)  {
2127331769Shselasky			handler = create_dont_trap_rule(dev, ft_prio,
2128331769Shselasky							flow_attr, dst);
2129331769Shselasky		} else {
2130331769Shselasky			handler = create_flow_rule(dev, ft_prio, flow_attr,
2131331769Shselasky						   dst);
2132331769Shselasky		}
2133331769Shselasky	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
2134331769Shselasky		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
2135331769Shselasky		handler = create_leftovers_rule(dev, ft_prio, flow_attr,
2136331769Shselasky						dst);
2137331769Shselasky	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
2138331769Shselasky		handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
2139331769Shselasky	} else {
2140331769Shselasky		err = -EINVAL;
2141331769Shselasky		goto destroy_ft;
2142331769Shselasky	}
2143331769Shselasky
2144331769Shselasky	if (IS_ERR(handler)) {
2145331769Shselasky		err = PTR_ERR(handler);
2146331769Shselasky		handler = NULL;
2147331769Shselasky		goto destroy_ft;
2148331769Shselasky	}
2149331769Shselasky
2150331769Shselasky	mutex_unlock(&dev->flow_db.lock);
2151331769Shselasky	kfree(dst);
2152331769Shselasky
2153331769Shselasky	return &handler->ibflow;
2154331769Shselasky
2155331769Shselaskydestroy_ft:
2156331769Shselasky	put_flow_table(dev, ft_prio, false);
2157331769Shselasky	if (ft_prio_tx)
2158331769Shselasky		put_flow_table(dev, ft_prio_tx, false);
2159331769Shselaskyunlock:
2160331769Shselasky	mutex_unlock(&dev->flow_db.lock);
2161331769Shselasky	kfree(dst);
2162331769Shselasky	kfree(handler);
2163331769Shselasky	return ERR_PTR(err);
2164331769Shselasky}
2165331769Shselasky
2166322810Shselaskystatic int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2167322810Shselasky{
2168322810Shselasky	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2169322810Shselasky	int err;
2170322810Shselasky
2171331769Shselasky	err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
2172322810Shselasky	if (err)
2173322810Shselasky		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
2174322810Shselasky			     ibqp->qp_num, gid->raw);
2175322810Shselasky
2176322810Shselasky	return err;
2177322810Shselasky}
2178322810Shselasky
2179322810Shselaskystatic int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2180322810Shselasky{
2181322810Shselasky	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2182322810Shselasky	int err;
2183322810Shselasky
2184331769Shselasky	err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
2185322810Shselasky	if (err)
2186322810Shselasky		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
2187322810Shselasky			     ibqp->qp_num, gid->raw);
2188322810Shselasky
2189322810Shselasky	return err;
2190322810Shselasky}
2191322810Shselasky
2192322810Shselaskystatic int init_node_data(struct mlx5_ib_dev *dev)
2193322810Shselasky{
2194322810Shselasky	int err;
2195322810Shselasky
2196322810Shselasky	err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
2197322810Shselasky	if (err)
2198322810Shselasky		return err;
2199322810Shselasky
2200322810Shselasky	return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
2201322810Shselasky}
2202322810Shselasky
2203322810Shselaskystatic ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
2204322810Shselasky			     char *buf)
2205322810Shselasky{
2206322810Shselasky	struct mlx5_ib_dev *dev =
2207322810Shselasky		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
2208322810Shselasky
2209322810Shselasky	return sprintf(buf, "%lld\n", (long long)dev->mdev->priv.fw_pages);
2210322810Shselasky}
2211322810Shselasky
2212322810Shselaskystatic ssize_t show_reg_pages(struct device *device,
2213322810Shselasky			      struct device_attribute *attr, char *buf)
2214322810Shselasky{
2215322810Shselasky	struct mlx5_ib_dev *dev =
2216322810Shselasky		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
2217322810Shselasky
2218322810Shselasky	return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
2219322810Shselasky}
2220322810Shselasky
2221322810Shselaskystatic ssize_t show_hca(struct device *device, struct device_attribute *attr,
2222322810Shselasky			char *buf)
2223322810Shselasky{
2224322810Shselasky	struct mlx5_ib_dev *dev =
2225322810Shselasky		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
2226322810Shselasky	return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
2227322810Shselasky}
2228322810Shselasky
2229322810Shselaskystatic ssize_t show_rev(struct device *device, struct device_attribute *attr,
2230322810Shselasky			char *buf)
2231322810Shselasky{
2232322810Shselasky	struct mlx5_ib_dev *dev =
2233322810Shselasky		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
2234331769Shselasky	return sprintf(buf, "%x\n", dev->mdev->pdev->revision);
2235322810Shselasky}
2236322810Shselasky
2237322810Shselaskystatic ssize_t show_board(struct device *device, struct device_attribute *attr,
2238322810Shselasky			  char *buf)
2239322810Shselasky{
2240322810Shselasky	struct mlx5_ib_dev *dev =
2241322810Shselasky		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
2242322810Shselasky	return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
2243322810Shselasky		       dev->mdev->board_id);
2244322810Shselasky}
2245322810Shselasky
2246322810Shselaskystatic DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
2247322810Shselaskystatic DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
2248322810Shselaskystatic DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
2249322810Shselaskystatic DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
2250322810Shselaskystatic DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);
2251322810Shselasky
2252322810Shselaskystatic struct device_attribute *mlx5_class_attributes[] = {
2253322810Shselasky	&dev_attr_hw_rev,
2254322810Shselasky	&dev_attr_hca_type,
2255322810Shselasky	&dev_attr_board_id,
2256322810Shselasky	&dev_attr_fw_pages,
2257322810Shselasky	&dev_attr_reg_pages,
2258322810Shselasky};
2259322810Shselasky
2260331769Shselaskystatic void pkey_change_handler(struct work_struct *work)
2261331769Shselasky{
2262331769Shselasky	struct mlx5_ib_port_resources *ports =
2263331769Shselasky		container_of(work, struct mlx5_ib_port_resources,
2264331769Shselasky			     pkey_change_work);
2265331769Shselasky
2266331769Shselasky	mutex_lock(&ports->devr->mutex);
2267331769Shselasky	mlx5_ib_gsi_pkey_change(ports->gsi);
2268331769Shselasky	mutex_unlock(&ports->devr->mutex);
2269331769Shselasky}
2270331769Shselasky
2271322810Shselaskystatic void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
2272322810Shselasky{
2273322810Shselasky	struct mlx5_ib_qp *mqp;
2274322810Shselasky	struct mlx5_ib_cq *send_mcq, *recv_mcq;
2275322810Shselasky	struct mlx5_core_cq *mcq;
2276322810Shselasky	struct list_head cq_armed_list;
2277322810Shselasky	unsigned long flags_qp;
2278322810Shselasky	unsigned long flags_cq;
2279322810Shselasky	unsigned long flags;
2280322810Shselasky
2281322810Shselasky	INIT_LIST_HEAD(&cq_armed_list);
2282322810Shselasky
2283322810Shselasky	/* Go over the QP list residing on this ibdev; sync with QP create/destroy. */
2284322810Shselasky	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
2285322810Shselasky	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
2286322810Shselasky		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
2287322810Shselasky		if (mqp->sq.tail != mqp->sq.head) {
2288322810Shselasky			send_mcq = to_mcq(mqp->ibqp.send_cq);
2289322810Shselasky			spin_lock_irqsave(&send_mcq->lock, flags_cq);
2290322810Shselasky			if (send_mcq->mcq.comp &&
2291322810Shselasky			    mqp->ibqp.send_cq->comp_handler) {
2292322810Shselasky				if (!send_mcq->mcq.reset_notify_added) {
2293322810Shselasky					send_mcq->mcq.reset_notify_added = 1;
2294322810Shselasky					list_add_tail(&send_mcq->mcq.reset_notify,
2295322810Shselasky						      &cq_armed_list);
2296322810Shselasky				}
2297322810Shselasky			}
2298322810Shselasky			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
2299322810Shselasky		}
2300322810Shselasky		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
2301322810Shselasky		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
2302322810Shselasky		/* no handling is needed for SRQ */
2303322810Shselasky		if (!mqp->ibqp.srq) {
2304322810Shselasky			if (mqp->rq.tail != mqp->rq.head) {
2305322810Shselasky				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
2306322810Shselasky				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
2307322810Shselasky				if (recv_mcq->mcq.comp &&
2308322810Shselasky				    mqp->ibqp.recv_cq->comp_handler) {
2309322810Shselasky					if (!recv_mcq->mcq.reset_notify_added) {
2310322810Shselasky						recv_mcq->mcq.reset_notify_added = 1;
2311322810Shselasky						list_add_tail(&recv_mcq->mcq.reset_notify,
2312322810Shselasky							      &cq_armed_list);
2313322810Shselasky					}
2314322810Shselasky				}
2315322810Shselasky				spin_unlock_irqrestore(&recv_mcq->lock,
2316322810Shselasky						       flags_cq);
2317322810Shselasky			}
2318322810Shselasky		}
2319322810Shselasky		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
2320322810Shselasky	}
2321322810Shselasky	/* At this point all in-flight post-send work has been observed, thanks
2322322810Shselasky	 * to the lock/unlock pairs above.  Now arm all involved CQs.
2323322810Shselasky	 */
2324322810Shselasky	list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
2325322810Shselasky		mcq->comp(mcq);
2326322810Shselasky	}
2327322810Shselasky	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
2328322810Shselasky}
2329322810Shselasky
2330322810Shselaskystatic void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
2331322810Shselasky			  enum mlx5_dev_event event, unsigned long param)
2332322810Shselasky{
2333322810Shselasky	struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
2334322810Shselasky	struct ib_event ibev;
2335331769Shselasky	bool fatal = false;
2336322810Shselasky	u8 port = 0;
2337322810Shselasky
2338322810Shselasky	switch (event) {
2339322810Shselasky	case MLX5_DEV_EVENT_SYS_ERROR:
2340322810Shselasky		ibev.event = IB_EVENT_DEVICE_FATAL;
2341322810Shselasky		mlx5_ib_handle_internal_error(ibdev);
2342331769Shselasky		fatal = true;
2343322810Shselasky		break;
2344322810Shselasky
2345322810Shselasky	case MLX5_DEV_EVENT_PORT_UP:
2346322810Shselasky	case MLX5_DEV_EVENT_PORT_DOWN:
2347322810Shselasky	case MLX5_DEV_EVENT_PORT_INITIALIZED:
2348322810Shselasky		port = (u8)param;
2349331769Shselasky
2350331769Shselasky		/* In RoCE, port up/down events are handled in
2351331769Shselasky		 * mlx5_netdev_event().
2352331769Shselasky		 */
2353331769Shselasky		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
2354331769Shselasky			IB_LINK_LAYER_ETHERNET)
2355331769Shselasky			return;
2356331769Shselasky
2357331769Shselasky		ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ?
2358331769Shselasky			     IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
2359322810Shselasky		break;
2360322810Shselasky
2361322810Shselasky	case MLX5_DEV_EVENT_LID_CHANGE:
2362322810Shselasky		ibev.event = IB_EVENT_LID_CHANGE;
2363322810Shselasky		port = (u8)param;
2364322810Shselasky		break;
2365322810Shselasky
2366322810Shselasky	case MLX5_DEV_EVENT_PKEY_CHANGE:
2367322810Shselasky		ibev.event = IB_EVENT_PKEY_CHANGE;
2368322810Shselasky		port = (u8)param;
2369331769Shselasky
2370331769Shselasky		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
2371322810Shselasky		break;
2372322810Shselasky
2373322810Shselasky	case MLX5_DEV_EVENT_GUID_CHANGE:
2374322810Shselasky		ibev.event = IB_EVENT_GID_CHANGE;
2375322810Shselasky		port = (u8)param;
2376322810Shselasky		break;
2377322810Shselasky
2378322810Shselasky	case MLX5_DEV_EVENT_CLIENT_REREG:
2379322810Shselasky		ibev.event = IB_EVENT_CLIENT_REREGISTER;
2380322810Shselasky		port = (u8)param;
2381322810Shselasky		break;
2382322810Shselasky
2383322810Shselasky	default:
2384322810Shselasky		break;
2385322810Shselasky	}
2386322810Shselasky
2387322810Shselasky	ibev.device	      = &ibdev->ib_dev;
2388322810Shselasky	ibev.element.port_num = port;
2389322810Shselasky
2390331769Shselasky	if (port < 1 || port > ibdev->num_ports) {
2391322810Shselasky		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
2392322810Shselasky		return;
2393322810Shselasky	}
2394322810Shselasky
2395322810Shselasky	if (ibdev->ib_active)
2396322810Shselasky		ib_dispatch_event(&ibev);
2397331769Shselasky
2398331769Shselasky	if (fatal)
2399331769Shselasky		ibdev->ib_active = false;
2400322810Shselasky}
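
/*
 * Note that on a fatal event IB_EVENT_DEVICE_FATAL is dispatched before
 * ib_active is cleared, so consumers still see the fatal event itself before
 * further event delivery is suppressed.
 */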
2401322810Shselasky
2402322810Shselaskystatic void get_ext_port_caps(struct mlx5_ib_dev *dev)
2403322810Shselasky{
2404322810Shselasky	int port;
2405322810Shselasky
2406322810Shselasky	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
2407322810Shselasky		mlx5_query_ext_port_caps(dev, port);
2408322810Shselasky}
2409322810Shselasky
static int get_port_caps(struct mlx5_ib_dev *dev)
{
	struct ib_device_attr *dprops = NULL;
	struct ib_port_attr *pprops = NULL;
	int err = -ENOMEM;
	int port;
	struct ib_udata uhw = {.inlen = 0, .outlen = 0};

	pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
	if (!pprops)
		goto out;

	dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
	if (!dprops)
		goto out;

	err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
	if (err) {
		mlx5_ib_warn(dev, "query_device failed %d\n", err);
		goto out;
	}

	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
		err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
		if (err) {
			mlx5_ib_warn(dev, "query_port %d failed %d\n",
				     port, err);
			break;
		}
		dev->mdev->port_caps[port - 1].pkey_table_len =
						dprops->max_pkeys;
		dev->mdev->port_caps[port - 1].gid_table_len =
						pprops->gid_tbl_len;
		mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
			    dprops->max_pkeys, pprops->gid_tbl_len);
	}

out:
	kfree(pprops);
	kfree(dprops);

	return err;
}

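/*
 * Tear down the UMR resources in reverse order of creation: drain the
 * MR cache first, since its entries are torn down through the UMR QP,
 * then destroy the QP, CQ and PD.
 */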
static void destroy_umrc_res(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_mr_cache_cleanup(dev);
	if (err)
		mlx5_ib_warn(dev, "mr cache cleanup failed\n");

	mlx5_ib_destroy_qp(dev->umrc.qp);
	ib_free_cq(dev->umrc.cq);
	ib_dealloc_pd(dev->umrc.pd);
}

enum {
	MAX_UMR_WR = 128,
};

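/*
 * Create the resources used for user memory registration (UMR): a PD,
 * a CQ and a QP of the internal MLX5_IB_QPT_REG_UMR type. Since this
 * QP type is not exposed through the regular verbs path, it is moved
 * through the INIT, RTR and RTS states by hand. The semaphore limits
 * the number of outstanding UMR work requests to MAX_UMR_WR.
 */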
static int create_umr_res(struct mlx5_ib_dev *dev)
{
	struct ib_qp_init_attr *init_attr = NULL;
	struct ib_qp_attr *attr = NULL;
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	int ret;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto error_0;
	}

	pd = ib_alloc_pd(&dev->ib_dev, 0);
	if (IS_ERR(pd)) {
		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
		ret = PTR_ERR(pd);
		goto error_0;
	}

	cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq)) {
		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
		ret = PTR_ERR(cq);
		goto error_2;
	}

	init_attr->send_cq = cq;
	init_attr->recv_cq = cq;
	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->cap.max_send_wr = MAX_UMR_WR;
	init_attr->cap.max_send_sge = 1;
	init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
	init_attr->port_num = 1;
	qp = mlx5_ib_create_qp(pd, init_attr, NULL);
	if (IS_ERR(qp)) {
		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
		ret = PTR_ERR(qp);
		goto error_3;
	}
	qp->device     = &dev->ib_dev;
	qp->real_qp    = qp;
	qp->uobject    = NULL;
	qp->qp_type    = MLX5_IB_QPT_REG_UMR;

	attr->qp_state = IB_QPS_INIT;
	attr->port_num = 1;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
				IB_QP_PORT, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTR;
	attr->path_mtu = IB_MTU_256;

	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTS;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
		goto error_4;
	}

	dev->umrc.qp = qp;
	dev->umrc.cq = cq;
	dev->umrc.pd = pd;

	sema_init(&dev->umrc.sem, MAX_UMR_WR);
	ret = mlx5_mr_cache_init(dev);
	if (ret) {
		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
		goto error_4;
	}

	kfree(attr);
	kfree(init_attr);

	return 0;

error_4:
	mlx5_ib_destroy_qp(qp);

error_3:
	ib_free_cq(cq);

error_2:
	ib_dealloc_pd(pd);

error_0:
	kfree(attr);
	kfree(init_attr);
	return ret;
}

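/*
 * Create the device-wide verbs resources shared inside the driver: PD
 * p0, CQ c0, XRC domains x0 and x1, XRC SRQ s0 and basic SRQ s1. The
 * objects are created through the driver's own entry points rather
 * than the IB core, so the fields the core would normally fill in are
 * initialized explicitly here.
 */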
static int create_dev_resources(struct mlx5_ib_resources *devr)
{
	struct ib_srq_init_attr attr;
	struct mlx5_ib_dev *dev;
	struct ib_cq_init_attr cq_attr = {.cqe = 1};
	int port;
	int ret = 0;

	dev = container_of(devr, struct mlx5_ib_dev, devr);

	mutex_init(&devr->mutex);

	devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->p0)) {
		ret = PTR_ERR(devr->p0);
		goto error0;
	}
	devr->p0->device  = &dev->ib_dev;
	devr->p0->uobject = NULL;
	atomic_set(&devr->p0->usecnt, 0);

	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
	if (IS_ERR(devr->c0)) {
		ret = PTR_ERR(devr->c0);
		goto error1;
	}
	devr->c0->device        = &dev->ib_dev;
	devr->c0->uobject       = NULL;
	devr->c0->comp_handler  = NULL;
	devr->c0->event_handler = NULL;
	devr->c0->cq_context    = NULL;
	atomic_set(&devr->c0->usecnt, 0);

	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x0)) {
		ret = PTR_ERR(devr->x0);
		goto error2;
	}
	devr->x0->device = &dev->ib_dev;
	devr->x0->inode = NULL;
	atomic_set(&devr->x0->usecnt, 0);
	mutex_init(&devr->x0->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x0->tgt_qp_list);

	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x1)) {
		ret = PTR_ERR(devr->x1);
		goto error3;
	}
	devr->x1->device = &dev->ib_dev;
	devr->x1->inode = NULL;
	atomic_set(&devr->x1->usecnt, 0);
	mutex_init(&devr->x1->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x1->tgt_qp_list);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_XRC;
	attr.ext.xrc.cq = devr->c0;
	attr.ext.xrc.xrcd = devr->x0;

	devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s0)) {
		ret = PTR_ERR(devr->s0);
		goto error4;
	}
	devr->s0->device	= &dev->ib_dev;
	devr->s0->pd		= devr->p0;
	devr->s0->uobject       = NULL;
	devr->s0->event_handler = NULL;
	devr->s0->srq_context   = NULL;
	devr->s0->srq_type      = IB_SRQT_XRC;
	devr->s0->ext.xrc.xrcd	= devr->x0;
	devr->s0->ext.xrc.cq	= devr->c0;
	atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
	atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s0->usecnt, 0);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_BASIC;
	devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s1)) {
		ret = PTR_ERR(devr->s1);
		goto error5;
	}
	devr->s1->device	= &dev->ib_dev;
	devr->s1->pd		= devr->p0;
	devr->s1->uobject       = NULL;
	devr->s1->event_handler = NULL;
	devr->s1->srq_context   = NULL;
	devr->s1->srq_type      = IB_SRQT_BASIC;
	devr->s1->ext.xrc.cq	= devr->c0;
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s1->usecnt, 0);

	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
		INIT_WORK(&devr->ports[port].pkey_change_work,
			  pkey_change_handler);
		devr->ports[port].devr = devr;
	}

	return 0;

error5:
	mlx5_ib_destroy_srq(devr->s0);
error4:
	mlx5_ib_dealloc_xrcd(devr->x1);
error3:
	mlx5_ib_dealloc_xrcd(devr->x0);
error2:
	mlx5_ib_destroy_cq(devr->c0);
error1:
	mlx5_ib_dealloc_pd(devr->p0);
error0:
	return ret;
}

static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
	struct mlx5_ib_dev *dev =
		container_of(devr, struct mlx5_ib_dev, devr);
	int port;

	mlx5_ib_destroy_srq(devr->s1);
	mlx5_ib_destroy_srq(devr->s0);
	mlx5_ib_dealloc_xrcd(devr->x0);
	mlx5_ib_dealloc_xrcd(devr->x1);
	mlx5_ib_destroy_cq(devr->c0);
	mlx5_ib_dealloc_pd(devr->p0);

	/* Make sure no change P_Key work items are still executing */
	for (port = 0; port < dev->num_ports; ++port)
		cancel_work_sync(&devr->ports[port].pkey_change_work);
}

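/*
 * Derive the RDMA core capability flags from the link layer of port 1.
 * InfiniBand links report plain IBA support; Ethernet links report
 * RoCE v1 and/or RoCE v2 according to the firmware's roce_version
 * capability, and only if both IPv4 and IPv6 L3 types are supported.
 */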
static u32 get_core_cap_flags(struct ib_device *ibdev)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
	u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
	u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
	u32 ret = 0;

	if (ll == IB_LINK_LAYER_INFINIBAND)
		return RDMA_CORE_PORT_IBA_IB;

	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
		return 0;

	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
		return 0;

	if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
		ret |= RDMA_CORE_PORT_IBA_ROCE;

	if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
		ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	return ret;
}

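/*
 * Fill in the immutable port attributes cached by the IB core at
 * registration time: P_Key and GID table sizes, the core capability
 * flags derived above, and the maximum MAD size.
 */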
static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = mlx5_ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = get_core_cap_flags(ibdev);
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static void get_dev_fw_str(struct ib_device *ibdev, char *str,
			   size_t str_len)
{
	struct mlx5_ib_dev *dev =
		container_of(ibdev, struct mlx5_ib_dev, ib_dev);
	snprintf(str, str_len, "%d.%d.%04d", fw_rev_maj(dev->mdev),
		 fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}

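/*
 * RoCE LAG (bonding) is not implemented in this driver; the stubs
 * below keep the RoCE enable/disable paths uniform.
 */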
static int mlx5_roce_lag_init(struct mlx5_ib_dev *dev)
{
	return 0;
}

static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev)
{
}

static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev)
{
	if (dev->roce.nb.notifier_call) {
		unregister_netdevice_notifier(&dev->roce.nb);
		dev->roce.nb.notifier_call = NULL;
	}
}

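/*
 * Enable RoCE: scan all VNETs for the mlx5en network interface that
 * belongs to this core device and cache it in dev->roce.netdev,
 * register a netdevice notifier to track later changes, and enable
 * RoCE in the NIC vport context.
 */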
static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct net_device *idev;
	int err;

	/* Check if mlx5en net device already exists */
	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		IFNET_RLOCK();
		CURVNET_SET_QUIET(vnet_iter);
		TAILQ_FOREACH(idev, &V_ifnet, if_link) {
			/* check if network interface belongs to mlx5en */
			if (!mlx5_netdev_match(idev, dev->mdev, "mce"))
				continue;
			write_lock(&dev->roce.netdev_lock);
			dev->roce.netdev = idev;
			write_unlock(&dev->roce.netdev_lock);
		}
		CURVNET_RESTORE();
		IFNET_RUNLOCK();
	}
	VNET_LIST_RUNLOCK();

	dev->roce.nb.notifier_call = mlx5_netdev_event;
	err = register_netdevice_notifier(&dev->roce.nb);
	if (err) {
		dev->roce.nb.notifier_call = NULL;
		return err;
	}

	err = mlx5_nic_vport_enable_roce(dev->mdev);
	if (err)
		goto err_unregister_netdevice_notifier;

	err = mlx5_roce_lag_init(dev);
	if (err)
		goto err_disable_roce;

	return 0;

err_disable_roce:
	mlx5_nic_vport_disable_roce(dev->mdev);

err_unregister_netdevice_notifier:
	mlx5_remove_roce_notifier(dev);
	return err;
}

static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
{
	mlx5_roce_lag_cleanup(dev);
	mlx5_nic_vport_disable_roce(dev->mdev);
}

static void mlx5_ib_dealloc_q_port_counter(struct mlx5_ib_dev *dev, u8 port_num)
{
	mlx5_vport_dealloc_q_counter(dev->mdev,
				     MLX5_INTERFACE_PROTOCOL_IB,
				     dev->port[port_num].q_cnt_id);
	dev->port[port_num].q_cnt_id = 0;
}

static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_ports; i++)
		mlx5_ib_dealloc_q_port_counter(dev, i);
}

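/*
 * Allocate one firmware queue counter per port. On failure, release
 * the counters that were already allocated before returning the error.
 */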
static int mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev)
{
	int i;
	int ret;

	for (i = 0; i < dev->num_ports; i++) {
		ret = mlx5_vport_alloc_q_counter(dev->mdev,
						 MLX5_INTERFACE_PROTOCOL_IB,
						 &dev->port[i].q_cnt_id);
		if (ret) {
			mlx5_ib_warn(dev,
				     "couldn't allocate queue counter for port %d, err %d\n",
				     i + 1, ret);
			goto dealloc_counters;
		}
	}

	return 0;

dealloc_counters:
	while (--i >= 0)
		mlx5_ib_dealloc_q_port_counter(dev, i);

	return ret;
}

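/*
 * Counter names exported through the rdma_hw_stats interface. The two
 * arrays below must stay in the same order: names[i] is read from the
 * query_q_counter_out field at stats_offsets[i], and the BUILD_BUG_ON
 * in mlx5_ib_alloc_hw_stats() enforces that their sizes match.
 */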
static const char * const names[] = {
	"rx_write_requests",
	"rx_read_requests",
	"rx_atomic_requests",
	"out_of_buffer",
	"out_of_sequence",
	"duplicate_request",
	"rnr_nak_retry_err",
	"packet_seq_err",
	"implied_nak_seq_err",
	"local_ack_timeout_err",
};

static const size_t stats_offsets[] = {
	MLX5_BYTE_OFF(query_q_counter_out, rx_write_requests),
	MLX5_BYTE_OFF(query_q_counter_out, rx_read_requests),
	MLX5_BYTE_OFF(query_q_counter_out, rx_atomic_requests),
	MLX5_BYTE_OFF(query_q_counter_out, out_of_buffer),
	MLX5_BYTE_OFF(query_q_counter_out, out_of_sequence),
	MLX5_BYTE_OFF(query_q_counter_out, duplicate_request),
	MLX5_BYTE_OFF(query_q_counter_out, rnr_nak_retry_err),
	MLX5_BYTE_OFF(query_q_counter_out, packet_seq_err),
	MLX5_BYTE_OFF(query_q_counter_out, implied_nak_seq_err),
	MLX5_BYTE_OFF(query_q_counter_out, local_ack_timeout_err),
};

static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
						    u8 port_num)
{
	BUILD_BUG_ON(ARRAY_SIZE(names) != ARRAY_SIZE(stats_offsets));

	/* We support only per port stats */
	if (port_num == 0)
		return NULL;

	return rdma_alloc_hw_stats_struct(names, ARRAY_SIZE(names),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

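/*
 * Query the per-port queue counter from firmware and convert each
 * big-endian 32-bit field into the 64-bit host-order values expected
 * by the rdma_hw_stats core. Returns the number of counters filled
 * in, or a negative error.
 */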
static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
				struct rdma_hw_stats *stats,
				u8 port, int index)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
	void *out;
	__be32 val;
	int ret;
	int i;

	if (!port || !stats)
		return -ENOSYS;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	ret = mlx5_vport_query_q_counter(dev->mdev,
					dev->port[port - 1].q_cnt_id, 0,
					out, outlen);
	if (ret)
		goto free;

	for (i = 0; i < ARRAY_SIZE(names); i++) {
		val = *(__be32 *)(out + stats_offsets[i]);
		stats->value[i] = (u64)be32_to_cpu(val);
	}
free:
	kvfree(out);
	return ret ? ret : ARRAY_SIZE(names);
}

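/*
 * Main attach entry point, called by the mlx5 core for each probed
 * device. Builds the ib_device, wires up the verbs entry points
 * according to the device capabilities, enables RoCE on Ethernet
 * links, creates the shared device resources, registers with the IB
 * core and finally sets up the UMR machinery and sysfs attributes.
 */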
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_dev *dev;
	enum rdma_link_layer ll;
	int port_type_cap;
	const char *name;
	int err;
	int i;

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if ((ll == IB_LINK_LAYER_ETHERNET) && !MLX5_CAP_GEN(mdev, roce))
		return NULL;

	printk_once(KERN_INFO "%s", mlx5_version);

	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev)
		return NULL;

	dev->mdev = mdev;

	dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
			    GFP_KERNEL);
	if (!dev->port)
		goto err_dealloc;

	rwlock_init(&dev->roce.netdev_lock);
	err = get_port_caps(dev);
	if (err)
		goto err_free_port;

	if (mlx5_use_mad_ifc(dev))
		get_ext_port_caps(dev);

	MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);

	name = "mlx5_%d";

	strlcpy(dev->ib_dev.name, name, IB_DEVICE_NAME_MAX);
	dev->ib_dev.owner		= THIS_MODULE;
	dev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	dev->ib_dev.local_dma_lkey	= 0 /* not supported for now */;
	dev->num_ports		= MLX5_CAP_GEN(mdev, num_ports);
	dev->ib_dev.phys_port_cnt     = dev->num_ports;
	dev->ib_dev.num_comp_vectors    =
		dev->mdev->priv.eq_table.num_comp_vectors;
	dev->ib_dev.dma_device	= &mdev->pdev->dev;

	dev->ib_dev.uverbs_abi_ver	= MLX5_IB_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask	=
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
	dev->ib_dev.uverbs_ex_cmd_mask =
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ)	|
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);

	dev->ib_dev.query_device	= mlx5_ib_query_device;
	dev->ib_dev.query_port		= mlx5_ib_query_port;
	dev->ib_dev.get_link_layer	= mlx5_ib_port_link_layer;
	if (ll == IB_LINK_LAYER_ETHERNET)
		dev->ib_dev.get_netdev	= mlx5_ib_get_netdev;
	dev->ib_dev.query_gid		= mlx5_ib_query_gid;
	dev->ib_dev.add_gid		= mlx5_ib_add_gid;
	dev->ib_dev.del_gid		= mlx5_ib_del_gid;
	dev->ib_dev.query_pkey		= mlx5_ib_query_pkey;
	dev->ib_dev.modify_device	= mlx5_ib_modify_device;
	dev->ib_dev.modify_port		= mlx5_ib_modify_port;
	dev->ib_dev.alloc_ucontext	= mlx5_ib_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext	= mlx5_ib_dealloc_ucontext;
	dev->ib_dev.mmap		= mlx5_ib_mmap;
	dev->ib_dev.alloc_pd		= mlx5_ib_alloc_pd;
	dev->ib_dev.dealloc_pd		= mlx5_ib_dealloc_pd;
	dev->ib_dev.create_ah		= mlx5_ib_create_ah;
	dev->ib_dev.query_ah		= mlx5_ib_query_ah;
	dev->ib_dev.destroy_ah		= mlx5_ib_destroy_ah;
	dev->ib_dev.create_srq		= mlx5_ib_create_srq;
	dev->ib_dev.modify_srq		= mlx5_ib_modify_srq;
	dev->ib_dev.query_srq		= mlx5_ib_query_srq;
	dev->ib_dev.destroy_srq		= mlx5_ib_destroy_srq;
	dev->ib_dev.post_srq_recv	= mlx5_ib_post_srq_recv;
	dev->ib_dev.create_qp		= mlx5_ib_create_qp;
	dev->ib_dev.modify_qp		= mlx5_ib_modify_qp;
	dev->ib_dev.query_qp		= mlx5_ib_query_qp;
	dev->ib_dev.destroy_qp		= mlx5_ib_destroy_qp;
	dev->ib_dev.post_send		= mlx5_ib_post_send;
	dev->ib_dev.post_recv		= mlx5_ib_post_recv;
	dev->ib_dev.create_cq		= mlx5_ib_create_cq;
	dev->ib_dev.modify_cq		= mlx5_ib_modify_cq;
	dev->ib_dev.resize_cq		= mlx5_ib_resize_cq;
	dev->ib_dev.destroy_cq		= mlx5_ib_destroy_cq;
	dev->ib_dev.poll_cq		= mlx5_ib_poll_cq;
	dev->ib_dev.req_notify_cq	= mlx5_ib_arm_cq;
	dev->ib_dev.get_dma_mr		= mlx5_ib_get_dma_mr;
	dev->ib_dev.reg_user_mr		= mlx5_ib_reg_user_mr;
	dev->ib_dev.rereg_user_mr	= mlx5_ib_rereg_user_mr;
	dev->ib_dev.reg_phys_mr		= mlx5_ib_reg_phys_mr;
	dev->ib_dev.dereg_mr		= mlx5_ib_dereg_mr;
	dev->ib_dev.attach_mcast	= mlx5_ib_mcg_attach;
	dev->ib_dev.detach_mcast	= mlx5_ib_mcg_detach;
	dev->ib_dev.process_mad		= mlx5_ib_process_mad;
	dev->ib_dev.alloc_mr		= mlx5_ib_alloc_mr;
	dev->ib_dev.map_mr_sg		= mlx5_ib_map_mr_sg;
	dev->ib_dev.check_mr_status	= mlx5_ib_check_mr_status;
	dev->ib_dev.get_port_immutable  = mlx5_port_immutable;
	dev->ib_dev.get_dev_fw_str      = get_dev_fw_str;
	if (mlx5_core_is_pf(mdev)) {
		dev->ib_dev.get_vf_config	= mlx5_ib_get_vf_config;
		dev->ib_dev.set_vf_link_state	= mlx5_ib_set_vf_link_state;
		dev->ib_dev.get_vf_stats	= mlx5_ib_get_vf_stats;
		dev->ib_dev.set_vf_guid		= mlx5_ib_set_vf_guid;
	}

	mlx5_ib_internal_fill_odp_caps(dev);

	if (MLX5_CAP_GEN(mdev, imaicl)) {
		dev->ib_dev.alloc_mw		= mlx5_ib_alloc_mw;
		dev->ib_dev.dealloc_mw		= mlx5_ib_dealloc_mw;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_ALLOC_MW)	|
			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
	}

	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt) &&
	    MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
		dev->ib_dev.get_hw_stats	= mlx5_ib_get_hw_stats;
		dev->ib_dev.alloc_hw_stats	= mlx5_ib_alloc_hw_stats;
	}

	if (MLX5_CAP_GEN(mdev, xrc)) {
		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
	    IB_LINK_LAYER_ETHERNET) {
		dev->ib_dev.create_flow	= mlx5_ib_create_flow;
		dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
		dev->ib_dev.create_wq	 = mlx5_ib_create_wq;
		dev->ib_dev.modify_wq	 = mlx5_ib_modify_wq;
		dev->ib_dev.destroy_wq	 = mlx5_ib_destroy_wq;
		dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
		dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
		dev->ib_dev.uverbs_ex_cmd_mask |=
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
	}
	err = init_node_data(dev);
	if (err)
		goto err_free_port;

	mutex_init(&dev->flow_db.lock);
	mutex_init(&dev->cap_mask_mutex);
	INIT_LIST_HEAD(&dev->qp_list);
	spin_lock_init(&dev->reset_flow_resource_lock);

	if (ll == IB_LINK_LAYER_ETHERNET) {
		err = mlx5_enable_roce(dev);
		if (err)
			goto err_free_port;
	}

	err = create_dev_resources(&dev->devr);
	if (err)
		goto err_disable_roce;

	err = mlx5_ib_odp_init_one(dev);
	if (err)
		goto err_rsrc;

	err = mlx5_ib_alloc_q_counters(dev);
	if (err)
		goto err_odp;

	err = ib_register_device(&dev->ib_dev, NULL);
	if (err)
		goto err_q_cnt;

	err = create_umr_res(dev);
	if (err)
		goto err_dev;

	for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
		err = device_create_file(&dev->ib_dev.dev,
					 mlx5_class_attributes[i]);
		if (err)
			goto err_umrc;
	}

	err = mlx5_ib_init_congestion(dev);
	if (err)
		goto err_umrc;

	dev->ib_active = true;

	return dev;

err_umrc:
	destroy_umrc_res(dev);

err_dev:
	ib_unregister_device(&dev->ib_dev);

err_q_cnt:
	mlx5_ib_dealloc_q_counters(dev);

err_odp:
	mlx5_ib_odp_remove_one(dev);

err_rsrc:
	destroy_dev_resources(&dev->devr);

err_disable_roce:
	if (ll == IB_LINK_LAYER_ETHERNET) {
		mlx5_disable_roce(dev);
		mlx5_remove_roce_notifier(dev);
	}

err_free_port:
	kfree(dev->port);

err_dealloc:
	ib_dealloc_device((struct ib_device *)dev);

	return NULL;
}

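/*
 * Detach entry point: undo everything mlx5_ib_add() set up, roughly in
 * reverse order, and release the ib_device itself.
 */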
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
	struct mlx5_ib_dev *dev = context;
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);

	mlx5_ib_cleanup_congestion(dev);
	mlx5_remove_roce_notifier(dev);
	ib_unregister_device(&dev->ib_dev);
	mlx5_ib_dealloc_q_counters(dev);
	destroy_umrc_res(dev);
	mlx5_ib_odp_remove_one(dev);
	destroy_dev_resources(&dev->devr);
	if (ll == IB_LINK_LAYER_ETHERNET)
		mlx5_disable_roce(dev);
	kfree(dev->port);
	ib_dealloc_device(&dev->ib_dev);
}

static struct mlx5_interface mlx5_ib_interface = {
	.add            = mlx5_ib_add,
	.remove         = mlx5_ib_remove,
	.event          = mlx5_ib_event,
	.protocol	= MLX5_INTERFACE_PROTOCOL_IB,
};

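/*
 * Module init/teardown: initialize ODP support first, then register
 * with the mlx5 core so that mlx5_ib_add() is invoked for every
 * existing and future core device.
 */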
static int __init mlx5_ib_init(void)
{
	int err;

	if (deprecated_prof_sel != 2)
		pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");

	err = mlx5_ib_odp_init();
	if (err)
		return err;

	err = mlx5_register_interface(&mlx5_ib_interface);
	if (err)
		goto clean_odp;

	return err;

clean_odp:
	mlx5_ib_odp_cleanup();
	return err;
}

static void __exit mlx5_ib_cleanup(void)
{
	mlx5_unregister_interface(&mlx5_ib_interface);
	mlx5_ib_odp_cleanup();
}

module_init_order(mlx5_ib_init, SI_ORDER_THIRD);
module_exit_order(mlx5_ib_cleanup, SI_ORDER_THIRD);