/*-
 * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_en/mlx5_en_main.c 347813 2019-05-16 17:23:36Z hselasky $
 */

#include "en.h"

#include <sys/sockio.h>
#include <machine/atomic.h>

#ifndef ETH_DRIVER_VERSION
#define	ETH_DRIVER_VERSION	"3.5.0"
#endif
#define	DRIVER_RELDATE	"November 2018"

static const char mlx5e_version[] = "mlx5en: Mellanox Ethernet driver "
	ETH_DRIVER_VERSION " (" DRIVER_RELDATE ")\n";

static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs);

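/*
 * Parameters used when creating the queues that make up a channel:
 * the receive queue, the send queue and the corresponding receive
 * and transmit completion queues.
 */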
struct mlx5e_channel_param {
	struct mlx5e_rq_param rq;
	struct mlx5e_sq_param sq;
	struct mlx5e_cq_param rx_cq;
	struct mlx5e_cq_param tx_cq;
};

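/*
 * Table mapping firmware link modes to ifmedia subtypes and
 * baudrates. The table index doubles as the bit position tested via
 * MLX5E_PROT_MASK() against the PTYS "eth_proto" bitmasks, and
 * entries with a zero baudrate are treated as unsupported by the
 * lookup loops below.
 */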
static const struct {
	u32	subtype;
	u64	baudrate;
}	mlx5e_mode_table[MLX5E_LINK_MODES_NUMBER] = {

	[MLX5E_1000BASE_CX_SGMII] = {
		.subtype = IFM_1000_CX_SGMII,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_1000BASE_KX] = {
		.subtype = IFM_1000_KX,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_10GBASE_CX4] = {
		.subtype = IFM_10G_CX4,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_KX4] = {
		.subtype = IFM_10G_KX4,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_KR] = {
		.subtype = IFM_10G_KR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_20GBASE_KR2] = {
		.subtype = IFM_20G_KR2,
		.baudrate = IF_Gbps(20ULL),
	},
	[MLX5E_40GBASE_CR4] = {
		.subtype = IFM_40G_CR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_KR4] = {
		.subtype = IFM_40G_KR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_56GBASE_R4] = {
		.subtype = IFM_56G_R4,
		.baudrate = IF_Gbps(56ULL),
	},
	[MLX5E_10GBASE_CR] = {
		.subtype = IFM_10G_CR1,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_SR] = {
		.subtype = IFM_10G_SR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_ER] = {
		.subtype = IFM_10G_ER,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_40GBASE_SR4] = {
		.subtype = IFM_40G_SR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_LR4] = {
		.subtype = IFM_40G_LR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_100GBASE_CR4] = {
		.subtype = IFM_100G_CR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_SR4] = {
		.subtype = IFM_100G_SR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_KR4] = {
		.subtype = IFM_100G_KR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_LR4] = {
		.subtype = IFM_100G_LR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100BASE_TX] = {
		.subtype = IFM_100_TX,
		.baudrate = IF_Mbps(100ULL),
	},
	[MLX5E_1000BASE_T] = {
		.subtype = IFM_1000_T,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_10GBASE_T] = {
		.subtype = IFM_10G_T,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_25GBASE_CR] = {
		.subtype = IFM_25G_CR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GBASE_KR] = {
		.subtype = IFM_25G_KR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GBASE_SR] = {
		.subtype = IFM_25G_SR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_50GBASE_CR2] = {
		.subtype = IFM_50G_CR2,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GBASE_KR2] = {
		.subtype = IFM_50G_KR2,
		.baudrate = IF_Gbps(50ULL),
	},
};

MALLOC_DEFINE(M_MLX5EN, "MLX5EN", "MLX5 Ethernet");

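/*
 * Query the virtual port state and the operational link mode from
 * firmware, update the cached media status/active words and the
 * interface baudrate accordingly, and report the resulting link
 * state to the network stack.
 */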
static void
mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
	u32 eth_proto_oper;
	int error;
	u8 port_state;
	u8 is_er_type;
	u8 i;

	port_state = mlx5_query_vport_state(mdev,
	    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

	if (port_state == VPORT_STATE_UP) {
		priv->media_status_last |= IFM_ACTIVE;
	} else {
		priv->media_status_last &= ~IFM_ACTIVE;
		priv->media_active_last = IFM_ETHER;
		if_link_state_change(priv->ifp, LINK_STATE_DOWN);
		return;
	}

	error = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
	if (error) {
		priv->media_active_last = IFM_ETHER;
		priv->ifp->if_baudrate = 1;
		if_printf(priv->ifp, "%s: query port ptys failed: 0x%x\n",
		    __func__, error);
		return;
	}
	eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);

	for (i = 0; i != MLX5E_LINK_MODES_NUMBER; i++) {
		if (mlx5e_mode_table[i].baudrate == 0)
			continue;
		if (MLX5E_PROT_MASK(i) & eth_proto_oper) {
			u32 subtype = mlx5e_mode_table[i].subtype;

			priv->ifp->if_baudrate =
			    mlx5e_mode_table[i].baudrate;

			switch (subtype) {
			case IFM_10G_ER:
				error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type);
				if (error != 0) {
					if_printf(priv->ifp, "%s: query port pddr failed: %d\n",
					    __func__, error);
				}
				if (error != 0 || is_er_type == 0)
					subtype = IFM_10G_LR;
				break;
			case IFM_40G_LR4:
				error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type);
				if (error != 0) {
					if_printf(priv->ifp, "%s: query port pddr failed: %d\n",
					    __func__, error);
				}
				if (error == 0 && is_er_type != 0)
					subtype = IFM_40G_ER4;
				break;
			}
			priv->media_active_last = subtype | IFM_ETHER | IFM_FDX;
			break;
		}
	}
	if_link_state_change(priv->ifp, LINK_STATE_UP);
}

static void
mlx5e_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
{
	struct mlx5e_priv *priv = dev->if_softc;

	ifmr->ifm_status = priv->media_status_last;
	ifmr->ifm_active = priv->media_active_last |
	    (priv->params.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) |
	    (priv->params.tx_pauseframe_control ? IFM_ETH_TXPAUSE : 0);
}

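/*
 * Translate an ifmedia subtype back into a firmware link mode
 * bitmask, i.e. the reverse of the mlx5e_mode_table[] lookup done by
 * mlx5e_update_carrier(). The LR/ER subtypes are aliased onto the
 * link modes the table actually stores.
 */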
static u32
mlx5e_find_link_mode(u32 subtype)
{
	u32 i;
	u32 link_mode = 0;

	switch (subtype) {
	case IFM_10G_LR:
		subtype = IFM_10G_ER;
		break;
	case IFM_40G_ER4:
		subtype = IFM_40G_LR4;
		break;
	}

	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
		if (mlx5e_mode_table[i].baudrate == 0)
			continue;
		if (mlx5e_mode_table[i].subtype == subtype)
			link_mode |= MLX5E_PROT_MASK(i);
	}

	return (link_mode);
}

static int
mlx5e_set_port_pause_and_pfc(struct mlx5e_priv *priv)
{
	return (mlx5_set_port_pause_and_pfc(priv->mdev, 1,
	    priv->params.rx_pauseframe_control,
	    priv->params.tx_pauseframe_control,
	    priv->params.rx_priority_flow_control,
	    priv->params.tx_priority_flow_control));
}

static int
mlx5e_set_port_pfc(struct mlx5e_priv *priv)
{
	int error;

	if (priv->gone != 0) {
		error = -ENXIO;
	} else if (priv->params.rx_pauseframe_control ||
	    priv->params.tx_pauseframe_control) {
		if_printf(priv->ifp,
		    "Global pauseframes must be disabled before enabling PFC.\n");
		error = -EINVAL;
	} else {
		error = mlx5e_set_port_pause_and_pfc(priv);
	}
	return (error);
}

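/*
 * Apply a media change request: validate the requested media word
 * against the port's PTYS capabilities, refuse global pauseframes
 * while priority flow control is active, and then reprogram the
 * protocol and pause settings with the port briefly taken down.
 */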
static int
mlx5e_media_change(struct ifnet *dev)
{
	struct mlx5e_priv *priv = dev->if_softc;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 eth_proto_cap;
	u32 link_mode;
	int was_opened;
	int locked;
	int error;

	locked = PRIV_LOCKED(priv);
	if (!locked)
		PRIV_LOCK(priv);

	if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) {
		error = EINVAL;
		goto done;
	}
	link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media));

	/* query supported capabilities */
	error = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
	if (error != 0) {
		if_printf(dev, "Query port media capability failed\n");
		goto done;
	}
	/* check for autoselect */
	if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO) {
		link_mode = eth_proto_cap;
		if (link_mode == 0) {
			if_printf(dev, "Port media capability is zero\n");
			error = EINVAL;
			goto done;
		}
	} else {
		link_mode = link_mode & eth_proto_cap;
		if (link_mode == 0) {
			if_printf(dev, "Not supported link mode requested\n");
			error = EINVAL;
			goto done;
		}
	}
	if (priv->media.ifm_media & (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
		/* check if PFC is enabled */
		if (priv->params.rx_priority_flow_control ||
		    priv->params.tx_priority_flow_control) {
			if_printf(dev, "PFC must be disabled before enabling global pauseframes.\n");
			error = EINVAL;
			goto done;
		}
	}
	/* update pauseframe control bits */
	priv->params.rx_pauseframe_control =
	    (priv->media.ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0;
	priv->params.tx_pauseframe_control =
	    (priv->media.ifm_media & IFM_ETH_TXPAUSE) ? 1 : 0;

	/* check if device is opened */
	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);

	/* reconfigure the hardware */
	mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
	mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN);
	error = -mlx5e_set_port_pause_and_pfc(priv);
	if (was_opened)
		mlx5_set_port_status(mdev, MLX5_PORT_UP);

done:
	if (!locked)
		PRIV_UNLOCK(priv);
	return (error);
}

static void
mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
	    update_carrier_work);

	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_update_carrier(priv);
	PRIV_UNLOCK(priv);
}

/*
 * This function reads the physical port counters from the firmware
 * using a pre-defined layout defined by various MLX5E_PPORT_XXX()
 * macros. The output is converted from big-endian 64-bit values into
 * host endian ones and stored in the "priv->stats.pport" structure.
 */
static void
mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_pport_stats *s = &priv->stats.pport;
	struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
	u32 *in;
	u32 *out;
	const u64 *ptr;
	unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	unsigned x;
	unsigned y;
	unsigned z;

	/* allocate firmware request structures */
	in = mlx5_vzalloc(sz);
	out = mlx5_vzalloc(sz);
	if (in == NULL || out == NULL)
		goto free_out;

	/*
	 * Get pointer to the 64-bit counter set which is located at a
	 * fixed offset in the output firmware request structure:
	 */
	ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set);

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	/* read IEEE802_3 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0, y = MLX5E_PPORT_PER_PRIO_STATS_NUM;
	     x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++)
		s->arg[y] = be64toh(ptr[x]);

	/* read RFC2819 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++)
		s->arg[y] = be64toh(ptr[x]);
	for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM +
	    MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read RFC2863 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read physical layer stats counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read Extended Ethernet counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_ETHERNET_EXTENDED_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read per-priority counters */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);

	/* iterate all the priorities */
	for (y = z = 0; z != MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO; z++) {
		MLX5_SET(ppcnt_reg, in, prio_tc, z);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

		/* read per priority stats counter group using predefined counter layout */
		for (x = 0; x != (MLX5E_PPORT_PER_PRIO_STATS_NUM /
		    MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO); x++, y++)
			s->arg[y] = be64toh(ptr[x]);
	}

free_out:
	/* free firmware request structures */
	kvfree(in);
	kvfree(out);
}

/*
 * This function is called regularly to collect all statistics
 * counters from the firmware. The values can be viewed through the
 * sysctl interface. Execution is serialized using the priv's global
 * configuration lock.
 */
static void
mlx5e_update_stats_locked(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_vport_stats *s = &priv->stats.vport;
	struct mlx5e_sq_stats *sq_stats;
	struct buf_ring *sq_br;
#if (__FreeBSD_version < 1100000)
	struct ifnet *ifp = priv->ifp;
#endif

	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u64 tso_packets = 0;
	u64 tso_bytes = 0;
	u64 tx_queue_dropped = 0;
	u64 tx_defragged = 0;
	u64 tx_offload_none = 0;
	u64 lro_packets = 0;
	u64 lro_bytes = 0;
	u64 sw_lro_queued = 0;
	u64 sw_lro_flushed = 0;
	u64 rx_csum_none = 0;
	u64 rx_wqe_err = 0;
	u32 rx_out_of_buffer = 0;
	int i;
	int j;

	out = mlx5_vzalloc(outlen);
	if (out == NULL)
		goto free_out;

	/* Collect the SW counters first and then the HW counters for consistency */
	for (i = 0; i < priv->params.num_channels; i++) {
		struct mlx5e_channel *pch = priv->channel + i;
		struct mlx5e_rq *rq = &pch->rq;
		struct mlx5e_rq_stats *rq_stats = &pch->rq.stats;

		/* collect stats from LRO */
		rq_stats->sw_lro_queued = rq->lro.lro_queued;
		rq_stats->sw_lro_flushed = rq->lro.lro_flushed;
		sw_lro_queued += rq_stats->sw_lro_queued;
		sw_lro_flushed += rq_stats->sw_lro_flushed;
		lro_packets += rq_stats->lro_packets;
		lro_bytes += rq_stats->lro_bytes;
		rx_csum_none += rq_stats->csum_none;
		rx_wqe_err += rq_stats->wqe_err;

		for (j = 0; j < priv->num_tc; j++) {
			sq_stats = &pch->sq[j].stats;
			sq_br = pch->sq[j].br;

			tso_packets += sq_stats->tso_packets;
			tso_bytes += sq_stats->tso_bytes;
			tx_queue_dropped += sq_stats->dropped;
			if (sq_br != NULL)
				tx_queue_dropped += sq_br->br_drops;
			tx_defragged += sq_stats->defragged;
			tx_offload_none += sq_stats->csum_offload_none;
		}
	}

	/* update counters */
	s->tso_packets = tso_packets;
	s->tso_bytes = tso_bytes;
	s->tx_queue_dropped = tx_queue_dropped;
	s->tx_defragged = tx_defragged;
	s->lro_packets = lro_packets;
	s->lro_bytes = lro_bytes;
	s->sw_lro_queued = sw_lro_queued;
	s->sw_lro_flushed = sw_lro_flushed;
	s->rx_csum_none = rx_csum_none;
	s->rx_wqe_err = rx_wqe_err;

	/* HW counters */
	memset(in, 0, sizeof(in));

	MLX5_SET(query_vport_counter_in, in, opcode,
	    MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	memset(out, 0, outlen);

	/* get number of out-of-buffer drops first */
	if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0 &&
	    mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id,
	    &rx_out_of_buffer) == 0) {
		/* accumulate difference into a 64-bit counter */
		s->rx_out_of_buffer += (u64)(u32)(rx_out_of_buffer -
		    s->rx_out_of_buffer_prev);
		s->rx_out_of_buffer_prev = rx_out_of_buffer;
	}

	/* get port statistics */
	if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen) == 0) {
#define	MLX5_GET_CTR(out, x) \
	MLX5_GET64(query_vport_counter_out, out, x)

		s->rx_error_packets =
		    MLX5_GET_CTR(out, received_errors.packets);
		s->rx_error_bytes =
		    MLX5_GET_CTR(out, received_errors.octets);
		s->tx_error_packets =
		    MLX5_GET_CTR(out, transmit_errors.packets);
		s->tx_error_bytes =
		    MLX5_GET_CTR(out, transmit_errors.octets);

		s->rx_unicast_packets =
		    MLX5_GET_CTR(out, received_eth_unicast.packets);
		s->rx_unicast_bytes =
		    MLX5_GET_CTR(out, received_eth_unicast.octets);
		s->tx_unicast_packets =
		    MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
		s->tx_unicast_bytes =
		    MLX5_GET_CTR(out, transmitted_eth_unicast.octets);

		s->rx_multicast_packets =
		    MLX5_GET_CTR(out, received_eth_multicast.packets);
		s->rx_multicast_bytes =
		    MLX5_GET_CTR(out, received_eth_multicast.octets);
		s->tx_multicast_packets =
		    MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
		s->tx_multicast_bytes =
		    MLX5_GET_CTR(out, transmitted_eth_multicast.octets);

		s->rx_broadcast_packets =
		    MLX5_GET_CTR(out, received_eth_broadcast.packets);
		s->rx_broadcast_bytes =
		    MLX5_GET_CTR(out, received_eth_broadcast.octets);
		s->tx_broadcast_packets =
		    MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
		s->tx_broadcast_bytes =
		    MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

		s->rx_packets = s->rx_unicast_packets +
		    s->rx_multicast_packets + s->rx_broadcast_packets -
		    s->rx_out_of_buffer;
		s->rx_bytes = s->rx_unicast_bytes + s->rx_multicast_bytes +
		    s->rx_broadcast_bytes;
		s->tx_packets = s->tx_unicast_packets +
		    s->tx_multicast_packets + s->tx_broadcast_packets;
		s->tx_bytes = s->tx_unicast_bytes + s->tx_multicast_bytes +
		    s->tx_broadcast_bytes;

		/* Update calculated offload counters */
		s->tx_csum_offload = s->tx_packets - tx_offload_none;
		s->rx_csum_good = s->rx_packets - s->rx_csum_none;
	}

	/* Get physical port counters */
	mlx5e_update_pport_counters(priv);

	s->tx_jumbo_packets =
	    priv->stats.port_stats_debug.tx_stat_p1519to2047octets +
	    priv->stats.port_stats_debug.tx_stat_p2048to4095octets +
	    priv->stats.port_stats_debug.tx_stat_p4096to8191octets +
	    priv->stats.port_stats_debug.tx_stat_p8192to10239octets;

#if (__FreeBSD_version < 1100000)
	/* no get_counters interface in fbsd 10 */
	ifp->if_ipackets = s->rx_packets;
	ifp->if_ierrors = s->rx_error_packets +
	    priv->stats.pport.alignment_err +
	    priv->stats.pport.check_seq_err +
	    priv->stats.pport.crc_align_errors +
	    priv->stats.pport.in_range_len_errors +
	    priv->stats.pport.jabbers +
	    priv->stats.pport.out_of_range_len +
	    priv->stats.pport.oversize_pkts +
	    priv->stats.pport.symbol_err +
	    priv->stats.pport.too_long_errors +
	    priv->stats.pport.undersize_pkts +
	    priv->stats.pport.unsupported_op_rx;
	ifp->if_iqdrops = s->rx_out_of_buffer +
	    priv->stats.pport.drop_events;
	ifp->if_opackets = s->tx_packets;
	ifp->if_oerrors = s->tx_error_packets;
	ifp->if_snd.ifq_drops = s->tx_queue_dropped;
	ifp->if_ibytes = s->rx_bytes;
	ifp->if_obytes = s->tx_bytes;
	ifp->if_collisions =
	    priv->stats.pport.collisions;
#endif

free_out:
	kvfree(out);

	/* Update diagnostics, if any */
	if (priv->params_ethtool.diag_pci_enable ||
	    priv->params_ethtool.diag_general_enable) {
		int error = mlx5_core_get_diagnostics_full(mdev,
		    priv->params_ethtool.diag_pci_enable ? &priv->params_pci : NULL,
		    priv->params_ethtool.diag_general_enable ? &priv->params_general : NULL);
		if (error != 0)
			if_printf(priv->ifp, "Failed reading diagnostics: %d\n", error);
	}
}

static void
mlx5e_update_stats_work(struct work_struct *work)
{
	struct mlx5e_priv *priv;

	priv = container_of(work, struct mlx5e_priv, update_stats_work);
	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
		mlx5e_update_stats_locked(priv);
	PRIV_UNLOCK(priv);
}

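/*
 * Watchdog callout; defers the actual statistics collection to the
 * process-context workqueue and re-arms itself once per second.
 */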
static void
mlx5e_update_stats(void *arg)
{
	struct mlx5e_priv *priv = arg;

	queue_work(priv->wq, &priv->update_stats_work);

	callout_reset(&priv->watchdog, hz, &mlx5e_update_stats, priv);
}

static void
mlx5e_async_event_sub(struct mlx5e_priv *priv,
    enum mlx5_dev_event event)
{
	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;

	default:
		break;
	}
}

static void
mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
    enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	mtx_lock(&priv->async_events_mtx);
	if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
		mlx5e_async_event_sub(priv, event);
	mtx_unlock(&priv->async_events_mtx);
}

static void
mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}

static void
mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	mtx_lock(&priv->async_events_mtx);
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
	mtx_unlock(&priv->async_events_mtx);
}

static const char *mlx5e_rq_stats_desc[] = {
	MLX5E_RQ_STATS(MLX5E_STATS_DESC)
};

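/*
 * Allocate the software state of a receive queue: a DMA tag sized
 * for the configured WQE fragment count, the firmware work queue,
 * LRO state, a DMA map per ring entry and the per-queue sysctl
 * statistics node. Errors unwind in reverse order through the
 * labels at the end of the function.
 */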
static int
mlx5e_create_rq(struct mlx5e_channel *c,
    struct mlx5e_rq_param *param,
    struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	char buffer[16];
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int wq_sz;
	int err;
	int i;
	u32 nsegs, wqe_sz;

	err = mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
	if (err != 0)
		goto done;

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsize */
	    nsegs,			/* nsegments */
	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &rq->dma_tag)))
		goto done;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
	    &rq->wq_ctrl);
	if (err)
		goto err_free_dma_tag;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	err = mlx5e_get_wqe_sz(priv, &rq->wqe_sz, &rq->nsegs);
	if (err != 0)
		goto err_rq_wq_destroy;

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);

	err = -tcp_lro_init_args(&rq->lro, c->ifp, TCP_LRO_ENTRIES, wq_sz);
	if (err)
		goto err_rq_wq_destroy;

	rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
	for (i = 0; i != wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
#if (MLX5E_MAX_RX_SEGS == 1)
		uint32_t byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
#else
		int j;
#endif

		err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map);
		if (err != 0) {
			while (i--)
				bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
			goto err_rq_mbuf_free;
		}

		/* set value for constant fields */
#if (MLX5E_MAX_RX_SEGS == 1)
		wqe->data[0].lkey = c->mkey_be;
		wqe->data[0].byte_count = cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
#else
		for (j = 0; j < rq->nsegs; j++)
			wqe->data[j].lkey = c->mkey_be;
#endif
	}

	INIT_WORK(&rq->dim.work, mlx5e_dim_work);
	if (priv->params.rx_cq_moderation_mode < 2) {
		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
	} else {
		void *cqc = container_of(param,
		    struct mlx5e_channel_param, rq)->rx_cq.cqc;

		switch (MLX5_GET(cqc, cqc, cq_period_mode)) {
		case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
			rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
			break;
		case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
			rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
			break;
		default:
			rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
			break;
		}
	}

	rq->ifp = c->ifp;
	rq->channel = c;
	rq->ix = c->ix;

	snprintf(buffer, sizeof(buffer), "rxstat%d", c->ix);
	mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM,
	    rq->stats.arg);
	return (0);

err_rq_mbuf_free:
	free(rq->mbuf, M_MLX5EN);
	tcp_lro_free(&rq->lro);
err_rq_wq_destroy:
	mlx5_wq_destroy(&rq->wq_ctrl);
err_free_dma_tag:
	bus_dma_tag_destroy(rq->dma_tag);
done:
	return (err);
}

static void
mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	int wq_sz;
	int i;

	/* destroy all sysctl nodes */
	sysctl_ctx_free(&rq->stats.ctx);

	/* free leftover LRO packets, if any */
	tcp_lro_free(&rq->lro);

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	for (i = 0; i != wq_sz; i++) {
		if (rq->mbuf[i].mbuf != NULL) {
			bus_dmamap_unload(rq->dma_tag, rq->mbuf[i].dma_map);
			m_freem(rq->mbuf[i].mbuf);
		}
		bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
	}
	free(rq->mbuf, M_MLX5EN);
	mlx5_wq_destroy(&rq->wq_ctrl);
}

static int
mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
	    sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
	if (priv->counter_set_id >= 0)
		MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id);
	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
	    PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return (err);
}

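/*
 * Move a receive queue between firmware states (reset, ready,
 * error) using the MODIFY_RQ command.
 */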
static int
mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rqn, rq->rqn);
	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, in, inlen);

	kvfree(in);

	return (err);
}

static void
mlx5e_disable_rq(struct mlx5e_rq *rq)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_rq(mdev, rq->rqn);
}

static int
mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_wq_ll *wq = &rq->wq;
	int i;

	for (i = 0; i < 1000; i++) {
		if (wq->cur_sz >= priv->params.min_rx_wqes)
			return (0);

		msleep(4);
	}
	return (-ETIMEDOUT);
}

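/*
 * Bring up a receive queue: create the software state, create the
 * firmware RQ object and move it from reset to ready, unwinding the
 * completed steps on failure.
 */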
static int
mlx5e_open_rq(struct mlx5e_channel *c,
    struct mlx5e_rq_param *param,
    struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_create_rq(c, param, rq);
	if (err)
		return (err);

	err = mlx5e_enable_rq(rq, param);
	if (err)
		goto err_destroy_rq;

	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_disable_rq;

	c->rq.enabled = 1;

	return (0);

err_disable_rq:
	mlx5e_disable_rq(rq);
err_destroy_rq:
	mlx5e_destroy_rq(rq);

	return (err);
}

static void
mlx5e_close_rq(struct mlx5e_rq *rq)
{
	mtx_lock(&rq->mtx);
	rq->enabled = 0;
	callout_stop(&rq->watchdog);
	mtx_unlock(&rq->mtx);

	callout_drain(&rq->watchdog);

	mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
}

static void
mlx5e_close_rq_wait(struct mlx5e_rq *rq)
{

	mlx5e_disable_rq(rq);
	mlx5e_close_cq(&rq->cq);
	cancel_work_sync(&rq->dim.work);
	mlx5e_destroy_rq(rq);
}

void
mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int x;

	for (x = 0; x != wq_sz; x++)
		bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
	free(sq->mbuf, M_MLX5EN);
}

int
mlx5e_alloc_sq_db(struct mlx5e_sq *sq)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int err;
	int x;

	sq->mbuf = malloc(wq_sz * sizeof(sq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);

	/* Create DMA descriptor MAPs */
	for (x = 0; x != wq_sz; x++) {
		err = -bus_dmamap_create(sq->dma_tag, 0, &sq->mbuf[x].dma_map);
		if (err != 0) {
			while (x--)
				bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
			free(sq->mbuf, M_MLX5EN);
			return (err);
		}
	}
	return (0);
}

static const char *mlx5e_sq_stats_desc[] = {
	MLX5E_SQ_STATS(MLX5E_STATS_DESC)
};

void
mlx5e_update_sq_inline(struct mlx5e_sq *sq)
{
	sq->max_inline = sq->priv->params.tx_max_inline;
	sq->min_inline_mode = sq->priv->params.tx_min_inline_mode;

	/*
	 * Check if trust state is DSCP or if inline mode is NONE which
	 * indicates CX-5 or newer hardware.
	 */
	if (sq->priv->params_ethtool.trust_state != MLX5_QPTS_TRUST_PCP ||
	    sq->min_inline_mode == MLX5_INLINE_MODE_NONE) {
		if (MLX5_CAP_ETH(sq->priv->mdev, wqe_vlan_insert))
			sq->min_insert_caps = MLX5E_INSERT_VLAN | MLX5E_INSERT_NON_VLAN;
		else
			sq->min_insert_caps = MLX5E_INSERT_NON_VLAN;
	} else {
		sq->min_insert_caps = 0;
	}
}

static void
mlx5e_refresh_sq_inline_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
{
	int i;

	for (i = 0; i != c->num_tc; i++) {
		mtx_lock(&c->sq[i].lock);
		mlx5e_update_sq_inline(&c->sq[i]);
		mtx_unlock(&c->sq[i].lock);
	}
}

void
mlx5e_refresh_sq_inline(struct mlx5e_priv *priv)
{
	int i;

	/* check if channels are closed */
	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
		return;

	for (i = 0; i < priv->params.num_channels; i++)
		mlx5e_refresh_sq_inline_sub(priv, &priv->channel[i]);
}

static int
mlx5e_create_sq(struct mlx5e_channel *c,
    int tc,
    struct mlx5e_sq_param *param,
    struct mlx5e_sq *sq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	char buffer[16];
	void *sqc = param->sqc;
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
	int err;

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MLX5E_MAX_TX_PAYLOAD_SIZE,	/* maxsize */
	    MLX5E_MAX_TX_MBUF_FRAGS,	/* nsegments */
	    MLX5E_MAX_TX_MBUF_SIZE,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sq->dma_tag)))
		goto done;

	err = mlx5_alloc_map_uar(mdev, &sq->uar);
	if (err)
		goto err_free_dma_tag;

	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
	    &sq->wq_ctrl);
	if (err)
		goto err_unmap_free_uar;

	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	err = mlx5e_alloc_sq_db(sq);
	if (err)
		goto err_sq_wq_destroy;

	sq->mkey_be = c->mkey_be;
	sq->ifp = priv->ifp;
	sq->priv = priv;
	sq->tc = tc;

	mlx5e_update_sq_inline(sq);

	snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc);
	mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM,
	    sq->stats.arg);

	return (0);

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &sq->uar);

err_free_dma_tag:
	bus_dma_tag_destroy(sq->dma_tag);
done:
	return (err);
}

static void
mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
	/* destroy all sysctl nodes */
	sysctl_ctx_free(&sq->stats.ctx);

	mlx5e_free_sq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
	mlx5_unmap_free_uar(sq->priv->mdev, &sq->uar);
}

int
mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param,
    int tis_num)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
	    sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));

	MLX5_SET(sqc, sqc, tis_num_0, tis_num);
	MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, sq->uar.index);
	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
	    PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(sq->priv->mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return (err);
}

int
mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
{
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sqn, sq->sqn);
	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
	MLX5_SET(sqc, sqc, state, next_state);

	err = mlx5_core_modify_sq(sq->priv->mdev, in, inlen);

	kvfree(in);

	return (err);
}

void
mlx5e_disable_sq(struct mlx5e_sq *sq)
{

	mlx5_core_destroy_sq(sq->priv->mdev, sq->sqn);
}

static int
mlx5e_open_sq(struct mlx5e_channel *c,
    int tc,
    struct mlx5e_sq_param *param,
    struct mlx5e_sq *sq)
{
	int err;

	err = mlx5e_create_sq(c, tc, param, sq);
	if (err)
		return (err);

	err = mlx5e_enable_sq(sq, param, c->priv->tisn[tc]);
	if (err)
		goto err_destroy_sq;

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
	if (err)
		goto err_disable_sq;

	WRITE_ONCE(sq->running, 1);

	return (0);

err_disable_sq:
	mlx5e_disable_sq(sq);
err_destroy_sq:
	mlx5e_destroy_sq(sq);

	return (err);
}

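/*
 * Pad the send queue with NOP WQEs until the completion event
 * counter has drained, so that all outstanding completion events are
 * generated, and ring the doorbell for anything still pending. With
 * "can_sleep" set the routine waits for ring space instead of giving
 * up early.
 */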
static void
mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep)
{
	/* fill up remainder with NOPs */
	while (sq->cev_counter != 0) {
		while (!mlx5e_sq_has_room_for(sq, 1)) {
			if (can_sleep != 0) {
				mtx_unlock(&sq->lock);
				msleep(4);
				mtx_lock(&sq->lock);
			} else {
				goto done;
			}
		}
		/* send a single NOP */
		mlx5e_send_nop(sq, 1);
		atomic_thread_fence_rel();
	}
done:
	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}
}

void
mlx5e_sq_cev_timeout(void *arg)
{
	struct mlx5e_sq *sq = arg;

	mtx_assert(&sq->lock, MA_OWNED);

	/* check next state */
	switch (sq->cev_next_state) {
	case MLX5E_CEV_STATE_SEND_NOPS:
		/* fill TX ring with NOPs, if any */
		mlx5e_sq_send_nops_locked(sq, 0);

		/* check if completed */
		if (sq->cev_counter == 0) {
			sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
			return;
		}
		break;
	default:
		/* send NOPs on next timeout */
		sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS;
		break;
	}

	/* restart timer */
	callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq);
}

void
mlx5e_drain_sq(struct mlx5e_sq *sq)
{
	int error;
	struct mlx5_core_dev *mdev = sq->priv->mdev;

	/*
	 * Check if already stopped.
	 *
	 * NOTE: Serialization of this function is managed by the
	 * caller ensuring the priv's state lock is locked or in case
	 * of rate limit support, a single thread manages drain and
	 * resume of SQs. The "running" variable can therefore safely
	 * be read without any locks.
	 */
	if (READ_ONCE(sq->running) == 0)
		return;

	/* don't put more packets into the SQ */
	WRITE_ONCE(sq->running, 0);

	/* serialize access to DMA rings */
	mtx_lock(&sq->lock);

	/* teardown event factor timer, if any */
	sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	callout_stop(&sq->cev_callout);

	/* send dummy NOPs in order to flush the transmit ring */
	mlx5e_sq_send_nops_locked(sq, 1);
	mtx_unlock(&sq->lock);

	/* make sure it is safe to free the callout */
	callout_drain(&sq->cev_callout);

	/* wait till SQ is empty or link is down */
	mtx_lock(&sq->lock);
	while (sq->cc != sq->pc &&
	    (sq->priv->media_status_last & IFM_ACTIVE) != 0 &&
	    mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mtx_unlock(&sq->lock);
		msleep(1);
		sq->cq.mcq.comp(&sq->cq.mcq);
		mtx_lock(&sq->lock);
	}
	mtx_unlock(&sq->lock);

	/* error out remaining requests */
	error = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
	if (error != 0) {
		if_printf(sq->ifp,
		    "mlx5e_modify_sq() from RDY to ERR failed: %d\n", error);
	}

	/* wait till SQ is empty */
	mtx_lock(&sq->lock);
	while (sq->cc != sq->pc &&
	       mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mtx_unlock(&sq->lock);
		msleep(1);
		sq->cq.mcq.comp(&sq->cq.mcq);
		mtx_lock(&sq->lock);
	}
	mtx_unlock(&sq->lock);
}

static void
mlx5e_close_sq_wait(struct mlx5e_sq *sq)
{

	mlx5e_drain_sq(sq);
	mlx5e_disable_sq(sq);
	mlx5e_destroy_sq(sq);
}

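/*
 * Create the software state of a completion queue. Every CQE is
 * initialized with op_own 0xf1, which presumably marks it invalid so
 * that the driver will not consume it before hardware has written a
 * real completion.
 */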
1444238423Sjhbstatic int
1445238423Sjhbmlx5e_create_cq(struct mlx5e_priv *priv,
1446238423Sjhb    struct mlx5e_cq_param *param,
1447238423Sjhb    struct mlx5e_cq *cq,
1448238423Sjhb    mlx5e_cq_comp_t *comp,
1449259960Sjhb    int eq_ix)
1450259960Sjhb{
1451259960Sjhb	struct mlx5_core_dev *mdev = priv->mdev;
1452259960Sjhb	struct mlx5_core_cq *mcq = &cq->mcq;
1453259960Sjhb	int eqn_not_used;
1454259960Sjhb	int irqn;
1455238423Sjhb	int err;
1456238423Sjhb	u32 i;
1457259960Sjhb
1458259960Sjhb	param->wq.buf_numa_node = 0;
1459259960Sjhb	param->wq.db_numa_node = 0;
1460259960Sjhb
1461259960Sjhb	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
1462259960Sjhb	    &cq->wq_ctrl);
1463259960Sjhb	if (err)
1464259960Sjhb		return (err);
1465238423Sjhb
1466238423Sjhb	mlx5_vector2eqn(mdev, eq_ix, &eqn_not_used, &irqn);
1467238423Sjhb
1468238423Sjhb	mcq->cqe_sz = 64;
1469238423Sjhb	mcq->set_ci_db = cq->wq_ctrl.db.db;
1470238423Sjhb	mcq->arm_db = cq->wq_ctrl.db.db + 1;
1471238423Sjhb	*mcq->set_ci_db = 0;
1472238423Sjhb	*mcq->arm_db = 0;
1473238423Sjhb	mcq->vector = eq_ix;
1474238423Sjhb	mcq->comp = comp;
1475238423Sjhb	mcq->event = mlx5e_cq_error_event;
1476238423Sjhb	mcq->irqn = irqn;
1477238423Sjhb	mcq->uar = &priv->cq_uar;
1478238423Sjhb
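	/*
	 * Initialize all CQEs to an invalid opcode with the ownership
	 * bit set, so that they are ignored until hardware writes a
	 * real completion over them.
	 */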
1479238423Sjhb	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
1480238423Sjhb		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
1481238423Sjhb
1482238423Sjhb		cqe->op_own = 0xf1;
1483238423Sjhb	}
1484238423Sjhb
1485238423Sjhb	cq->priv = priv;
1486238423Sjhb
1487238423Sjhb	return (0);
1488238423Sjhb}
1489238423Sjhb
1490238423Sjhbstatic void
1491238423Sjhbmlx5e_destroy_cq(struct mlx5e_cq *cq)
1492238423Sjhb{
1493238423Sjhb	mlx5_wq_destroy(&cq->wq_ctrl);
1494238423Sjhb}
1495238423Sjhb
1496238423Sjhbstatic int
1497238423Sjhbmlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param, int eq_ix)
1498238423Sjhb{
1499238423Sjhb	struct mlx5_core_cq *mcq = &cq->mcq;
1500238423Sjhb	void *in;
1501238423Sjhb	void *cqc;
1502238423Sjhb	int inlen;
1503238423Sjhb	int irqn_not_used;
1504238423Sjhb	int eqn;
1505238423Sjhb	int err;
1506238423Sjhb
1507259960Sjhb	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
1508259960Sjhb	    sizeof(u64) * cq->wq_ctrl.buf.npages;
1509259960Sjhb	in = mlx5_vzalloc(inlen);
1510259960Sjhb	if (in == NULL)
1511259960Sjhb		return (-ENOMEM);
1512259960Sjhb
1513238423Sjhb	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
1514238423Sjhb
1515238423Sjhb	memcpy(cqc, param->cqc, sizeof(param->cqc));
1516238423Sjhb
1517238423Sjhb	mlx5_fill_page_array(&cq->wq_ctrl.buf,
1518238423Sjhb	    (__be64 *) MLX5_ADDR_OF(create_cq_in, in, pas));
1519238423Sjhb
1520238423Sjhb	mlx5_vector2eqn(cq->priv->mdev, eq_ix, &eqn, &irqn_not_used);
1521238423Sjhb
1522238423Sjhb	MLX5_SET(cqc, cqc, c_eqn, eqn);
1523238423Sjhb	MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
1524238423Sjhb	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
1525238423Sjhb	    PAGE_SHIFT);
1526238423Sjhb	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
1527238423Sjhb
1528238423Sjhb	err = mlx5_core_create_cq(cq->priv->mdev, mcq, in, inlen);
1529238423Sjhb
1530238423Sjhb	kvfree(in);
1531238423Sjhb
1532238423Sjhb	if (err)
1533238423Sjhb		return (err);
1534238423Sjhb
1535238423Sjhb	mlx5e_cq_arm(cq, MLX5_GET_DOORBELL_LOCK(&cq->priv->doorbell_lock));
1536238423Sjhb
1537238423Sjhb	return (0);
1538238423Sjhb}
1539238423Sjhb
1540238423Sjhbstatic void
1541238423Sjhbmlx5e_disable_cq(struct mlx5e_cq *cq)
1542238423Sjhb{
1543238423Sjhb
1544238423Sjhb	mlx5_core_destroy_cq(cq->priv->mdev, &cq->mcq);
1545238423Sjhb}
1546238423Sjhb
1547238423Sjhbint
1548238423Sjhbmlx5e_open_cq(struct mlx5e_priv *priv,
1549238423Sjhb    struct mlx5e_cq_param *param,
1550259960Sjhb    struct mlx5e_cq *cq,
1551259960Sjhb    mlx5e_cq_comp_t *comp,
1552259960Sjhb    int eq_ix)
1553238423Sjhb{
1554238423Sjhb	int err;
1555238423Sjhb
1556238423Sjhb	err = mlx5e_create_cq(priv, param, cq, comp, eq_ix);
1557238423Sjhb	if (err)
1558238423Sjhb		return (err);
1559238423Sjhb
1560238423Sjhb	err = mlx5e_enable_cq(cq, param, eq_ix);
1561238423Sjhb	if (err)
1562238423Sjhb		goto err_destroy_cq;
1563238423Sjhb
1564238423Sjhb	return (0);
1565238423Sjhb
1566238423Sjhberr_destroy_cq:
1567238423Sjhb	mlx5e_destroy_cq(cq);
1568238423Sjhb
1569238423Sjhb	return (err);
1570238423Sjhb}
1571238423Sjhb
1572238423Sjhbvoid
1573238423Sjhbmlx5e_close_cq(struct mlx5e_cq *cq)
1574238423Sjhb{
1575238423Sjhb	mlx5e_disable_cq(cq);
1576238423Sjhb	mlx5e_destroy_cq(cq);
1577238423Sjhb}
1578238423Sjhb
1579238423Sjhbstatic int
1580238423Sjhbmlx5e_open_tx_cqs(struct mlx5e_channel *c,
1581238423Sjhb    struct mlx5e_channel_param *cparam)
1582238423Sjhb{
1583238423Sjhb	int err;
1584238423Sjhb	int tc;
1585238423Sjhb
1586238423Sjhb	for (tc = 0; tc < c->num_tc; tc++) {
1587238423Sjhb		/* open completion queue */
1588238423Sjhb		err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[tc].cq,
1589238423Sjhb		    &mlx5e_tx_cq_comp, c->ix);
1590238423Sjhb		if (err)
1591238423Sjhb			goto err_close_tx_cqs;
1592238423Sjhb	}
1593238423Sjhb	return (0);
1594238423Sjhb
1595238423Sjhberr_close_tx_cqs:
1596238423Sjhb	for (tc--; tc >= 0; tc--)
1597238423Sjhb		mlx5e_close_cq(&c->sq[tc].cq);
1598238423Sjhb
1599238423Sjhb	return (err);
1600238423Sjhb}
1601238423Sjhb
1602238423Sjhbstatic void
1603238423Sjhbmlx5e_close_tx_cqs(struct mlx5e_channel *c)
1604238423Sjhb{
1605238423Sjhb	int tc;
1606238423Sjhb
1607238423Sjhb	for (tc = 0; tc < c->num_tc; tc++)
1608238423Sjhb		mlx5e_close_cq(&c->sq[tc].cq);
1609238423Sjhb}
1610238423Sjhb
1611238423Sjhbstatic int
1612238423Sjhbmlx5e_open_sqs(struct mlx5e_channel *c,
1613238423Sjhb    struct mlx5e_channel_param *cparam)
1614238423Sjhb{
1615238423Sjhb	int err;
1616238423Sjhb	int tc;
1617238423Sjhb
1618238423Sjhb	for (tc = 0; tc < c->num_tc; tc++) {
1619238423Sjhb		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
1620238423Sjhb		if (err)
1621238423Sjhb			goto err_close_sqs;
1622238423Sjhb	}
1623238423Sjhb
1624238423Sjhb	return (0);
1625238423Sjhb
1626238423Sjhberr_close_sqs:
1627238423Sjhb	for (tc--; tc >= 0; tc--)
1628238423Sjhb		mlx5e_close_sq_wait(&c->sq[tc]);
1629238423Sjhb
1630259960Sjhb	return (err);
1631259960Sjhb}
1632238423Sjhb
1633238423Sjhbstatic void
1634238423Sjhbmlx5e_close_sqs_wait(struct mlx5e_channel *c)
1635238423Sjhb{
1636238423Sjhb	int tc;
1637238423Sjhb
1638238423Sjhb	for (tc = 0; tc < c->num_tc; tc++)
1639259960Sjhb		mlx5e_close_sq_wait(&c->sq[tc]);
1640259960Sjhb}
1641259960Sjhb
1642238423Sjhbstatic void
1643238423Sjhbmlx5e_chan_mtx_init(struct mlx5e_channel *c)
1644238423Sjhb{
1645238423Sjhb	int tc;
1646238423Sjhb
1647238423Sjhb	mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF);
1648238423Sjhb
1649238423Sjhb	callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0);
1650238423Sjhb
1651238423Sjhb	for (tc = 0; tc < c->num_tc; tc++) {
1652238423Sjhb		struct mlx5e_sq *sq = c->sq + tc;
1653238423Sjhb
1654238423Sjhb		mtx_init(&sq->lock, "mlx5tx",
1655238423Sjhb		    MTX_NETWORK_LOCK " TX", MTX_DEF);
1656238423Sjhb		mtx_init(&sq->comp_lock, "mlx5comp",
1657238423Sjhb		    MTX_NETWORK_LOCK " TX", MTX_DEF);
1658238423Sjhb
1659238423Sjhb		callout_init_mtx(&sq->cev_callout, &sq->lock, 0);
1660238423Sjhb
1661238423Sjhb		sq->cev_factor = c->priv->params_ethtool.tx_completion_fact;
1662238423Sjhb
1663238423Sjhb		/* ensure the TX completion event factor is not zero */
1664238423Sjhb		if (sq->cev_factor == 0)
1665238423Sjhb			sq->cev_factor = 1;
1666238423Sjhb	}
1667238423Sjhb}
1668238423Sjhb
1669238423Sjhbstatic void
1670238423Sjhbmlx5e_chan_mtx_destroy(struct mlx5e_channel *c)
1671238423Sjhb{
1672238423Sjhb	int tc;
1673238423Sjhb
1674238423Sjhb	mtx_destroy(&c->rq.mtx);
1675238423Sjhb
1676238423Sjhb	for (tc = 0; tc < c->num_tc; tc++) {
1677238423Sjhb		mtx_destroy(&c->sq[tc].lock);
1678238423Sjhb		mtx_destroy(&c->sq[tc].comp_lock);
1679238423Sjhb	}
1680238423Sjhb}
1681238423Sjhb
1682238423Sjhbstatic int
1683238423Sjhbmlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1684238423Sjhb    struct mlx5e_channel_param *cparam,
1685238423Sjhb    struct mlx5e_channel *c)
1686238423Sjhb{
1687238423Sjhb	int err;
1688238423Sjhb
1689238423Sjhb	memset(c, 0, sizeof(*c));
1690238423Sjhb
1691238423Sjhb	c->priv = priv;
1692238423Sjhb	c->ix = ix;
1693238423Sjhb	c->ifp = priv->ifp;
1694238423Sjhb	c->mkey_be = cpu_to_be32(priv->mr.key);
1695238423Sjhb	c->num_tc = priv->num_tc;
1696238423Sjhb
1697238423Sjhb	/* init mutexes */
1698238423Sjhb	mlx5e_chan_mtx_init(c);
1699238423Sjhb
1700238423Sjhb	/* open transmit completion queue */
1701238423Sjhb	err = mlx5e_open_tx_cqs(c, cparam);
1702238423Sjhb	if (err)
1703238423Sjhb		goto err_free;
1704238423Sjhb
1705238423Sjhb	/* open receive completion queue */
1706238423Sjhb	err = mlx5e_open_cq(c->priv, &cparam->rx_cq, &c->rq.cq,
1707238423Sjhb	    &mlx5e_rx_cq_comp, c->ix);
1708238423Sjhb	if (err)
1709238423Sjhb		goto err_close_tx_cqs;
1710238423Sjhb
1711238423Sjhb	err = mlx5e_open_sqs(c, cparam);
1712238423Sjhb	if (err)
1713238423Sjhb		goto err_close_rx_cq;
1714238423Sjhb
1715238423Sjhb	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
1716238423Sjhb	if (err)
1717238423Sjhb		goto err_close_sqs;
1718238423Sjhb
1719238423Sjhb	/* poll receive queue initially */
1720238423Sjhb	c->rq.cq.mcq.comp(&c->rq.cq.mcq);
1721238423Sjhb
1722238423Sjhb	return (0);
1723238423Sjhb
1724238423Sjhberr_close_sqs:
1725238423Sjhb	mlx5e_close_sqs_wait(c);
1726238423Sjhb
1727238423Sjhberr_close_rx_cq:
1728238423Sjhb	mlx5e_close_cq(&c->rq.cq);
1729238423Sjhb
1730238423Sjhberr_close_tx_cqs:
1731238423Sjhb	mlx5e_close_tx_cqs(c);
1732259960Sjhb
1733259960Sjhberr_free:
1734259960Sjhb	/* destroy mutexes */
1735238423Sjhb	mlx5e_chan_mtx_destroy(c);
1736238423Sjhb	return (err);
1737238423Sjhb}
1738238423Sjhb
1739238423Sjhbstatic void
1740238423Sjhbmlx5e_close_channel(struct mlx5e_channel *c)
1741238423Sjhb{
1742238423Sjhb	mlx5e_close_rq(&c->rq);
1743238423Sjhb}
1744238423Sjhb
1745238423Sjhbstatic void
1746259960Sjhbmlx5e_close_channel_wait(struct mlx5e_channel *c)
1747259960Sjhb{
1748259960Sjhb	mlx5e_close_rq_wait(&c->rq);
1749259960Sjhb	mlx5e_close_sqs_wait(c);
1750259960Sjhb	mlx5e_close_tx_cqs(c);
1751238423Sjhb	/* destroy mutexes */
1752259960Sjhb	mlx5e_chan_mtx_destroy(c);
1753259960Sjhb}
1754259960Sjhb
1755259960Sjhbstatic int
1756259960Sjhbmlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs)
1757259960Sjhb{
1758259960Sjhb	u32 r, n;
1759238423Sjhb
1760238423Sjhb	r = priv->params.hw_lro_en ? priv->params.lro_wqe_sz :
1761238423Sjhb	    MLX5E_SW2MB_MTU(priv->ifp->if_mtu);
1762238423Sjhb	if (r > MJUM16BYTES)
1763238423Sjhb		return (-ENOMEM);
1764259960Sjhb
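	/* round up to the next larger mbuf cluster size supported by the kernel */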
1765238423Sjhb	if (r > MJUM9BYTES)
1766238423Sjhb		r = MJUM16BYTES;
1767238423Sjhb	else if (r > MJUMPAGESIZE)
1768238423Sjhb		r = MJUM9BYTES;
1769238423Sjhb	else if (r > MCLBYTES)
1770259960Sjhb		r = MJUMPAGESIZE;
1771259960Sjhb	else
1772259960Sjhb		r = MCLBYTES;
1773259960Sjhb
1774259960Sjhb	/*
1775259960Sjhb	 * n + 1 must be a power of two, because stride size must be.
1776259960Sjhb	 * Stride size is 16 * (n + 1), as the first segment is
1777259960Sjhb	 * control.
1778259960Sjhb	 */
1779238423Sjhb	for (n = howmany(r, MLX5E_MAX_RX_BYTES); !powerof2(n + 1); n++)
1780238423Sjhb		;
1781238423Sjhb
1782238423Sjhb	*wqe_sz = r;
1783238423Sjhb	*nsegs = n;
1784238423Sjhb	return (0);
1785238423Sjhb}
1786238423Sjhb
1787238423Sjhbstatic void
1788238423Sjhbmlx5e_build_rq_param(struct mlx5e_priv *priv,
1789238423Sjhb    struct mlx5e_rq_param *param)
1790238423Sjhb{
1791238423Sjhb	void *rqc = param->rqc;
1792238423Sjhb	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
1793238423Sjhb	u32 wqe_sz, nsegs;
1794238423Sjhb
1795	mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
1796	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
1797	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
1798	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe) +
1799	    nsegs * sizeof(struct mlx5_wqe_data_seg)));
1800	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
1801	MLX5_SET(wq, wq, pd, priv->pdn);
1802
1803	param->wq.buf_numa_node = 0;
1804	param->wq.db_numa_node = 0;
1805	param->wq.linear = 1;
1806}
1807
1808static void
1809mlx5e_build_sq_param(struct mlx5e_priv *priv,
1810    struct mlx5e_sq_param *param)
1811{
1812	void *sqc = param->sqc;
1813	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1814
1815	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
1816	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
1817	MLX5_SET(wq, wq, pd, priv->pdn);
1818
1819	param->wq.buf_numa_node = 0;
1820	param->wq.db_numa_node = 0;
1821	param->wq.linear = 1;
1822}
1823
1824static void
1825mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
1826    struct mlx5e_cq_param *param)
1827{
1828	void *cqc = param->cqc;
1829
1830	MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
1831}
1832
1833static void
1834mlx5e_get_default_profile(struct mlx5e_priv *priv, int mode, struct net_dim_cq_moder *ptr)
1835{
1836
1837	*ptr = net_dim_get_profile(mode, MLX5E_DIM_DEFAULT_PROFILE);
1838
1839	/* apply LRO restrictions */
1840	if (priv->params.hw_lro_en &&
1841	    ptr->pkts > MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO) {
1842		ptr->pkts = MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO;
1843	}
1844}
1845
1846static void
1847mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
1848    struct mlx5e_cq_param *param)
1849{
1850	struct net_dim_cq_moder curr;
1851	void *cqc = param->cqc;
1852
1854	/*
1855	 * TODO: The sysctl to control this on/off is a boolean for now, meaning we
1856	 * only support CSUM; once HASH is implemented we'll need to address that.
1857	 */
1858	if (priv->params.cqe_zipping_en) {
1859		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
1860		MLX5_SET(cqc, cqc, cqe_compression_en, 1);
1861	}
1862
1863	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
1864
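	/*
	 * Receive moderation modes, as inferred from the cases below:
	 * 0 = static moderation, period restarts on EQE; 1 = static,
	 * period restarts on CQE when supported; 2 and 3 are the
	 * dynamic (net_dim) counterparts of 0 and 1.
	 */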
1865	switch (priv->params.rx_cq_moderation_mode) {
1866	case 0:
1867		MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
1868		MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);
1869		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1870		break;
1871	case 1:
1872		MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
1873		MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);
1874		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
1875			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
1876		else
1877			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1878		break;
1879	case 2:
1880		mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE, &curr);
1881		MLX5_SET(cqc, cqc, cq_period, curr.usec);
1882		MLX5_SET(cqc, cqc, cq_max_count, curr.pkts);
1883		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1884		break;
1885	case 3:
1886		mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE, &curr);
1887		MLX5_SET(cqc, cqc, cq_period, curr.usec);
1888		MLX5_SET(cqc, cqc, cq_max_count, curr.pkts);
1889		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
1890			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
1891		else
1892			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1893		break;
1894	default:
1895		break;
1896	}
1897
1898	mlx5e_dim_build_cq_param(priv, param);
1899
1900	mlx5e_build_common_cq_param(priv, param);
1901}
1902
1903static void
1904mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
1905    struct mlx5e_cq_param *param)
1906{
1907	void *cqc = param->cqc;
1908
1909	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
1910	MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec);
1911	MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts);
1912
1913	switch (priv->params.tx_cq_moderation_mode) {
1914	case 0:
1915		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1916		break;
1917	default:
1918		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
1919			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
1920		else
1921			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1922		break;
1923	}
1924
1925	mlx5e_build_common_cq_param(priv, param);
1926}
1927
1928static void
1929mlx5e_build_channel_param(struct mlx5e_priv *priv,
1930    struct mlx5e_channel_param *cparam)
1931{
1932	memset(cparam, 0, sizeof(*cparam));
1933
1934	mlx5e_build_rq_param(priv, &cparam->rq);
1935	mlx5e_build_sq_param(priv, &cparam->sq);
1936	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
1937	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
1938}
1939
1940static int
1941mlx5e_open_channels(struct mlx5e_priv *priv)
1942{
1943	struct mlx5e_channel_param cparam;
1944	int err;
1945	int i;
1946	int j;
1947
1948	mlx5e_build_channel_param(priv, &cparam);
1949	for (i = 0; i < priv->params.num_channels; i++) {
1950		err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
1951		if (err)
1952			goto err_close_channels;
1953	}
1954
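	/*
	 * Wait until each receive queue has at least the configured
	 * minimum number of receive WQEs posted.
	 */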
1955	for (j = 0; j < priv->params.num_channels; j++) {
1956		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j].rq);
1957		if (err)
1958			goto err_close_channels;
1959	}
1960
1961	return (0);
1962
1963err_close_channels:
1964	while (i--) {
1965		mlx5e_close_channel(&priv->channel[i]);
1966		mlx5e_close_channel_wait(&priv->channel[i]);
1967	}
1968	return (err);
1969}
1970
1971static void
1972mlx5e_close_channels(struct mlx5e_priv *priv)
1973{
1974	int i;
1975
1976	for (i = 0; i < priv->params.num_channels; i++)
1977		mlx5e_close_channel(&priv->channel[i]);
1978	for (i = 0; i < priv->params.num_channels; i++)
1979		mlx5e_close_channel_wait(&priv->channel[i]);
1980}
1981
1982static int
1983mlx5e_refresh_sq_params(struct mlx5e_priv *priv, struct mlx5e_sq *sq)
1984{
1985
1986	if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
1987		uint8_t cq_mode;
1988
1989		switch (priv->params.tx_cq_moderation_mode) {
1990		case 0:
1991		case 2:
1992			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
1993			break;
1994		default:
1995			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
1996			break;
1997		}
1998
1999		return (mlx5_core_modify_cq_moderation_mode(priv->mdev, &sq->cq.mcq,
2000		    priv->params.tx_cq_moderation_usec,
2001		    priv->params.tx_cq_moderation_pkts,
2002		    cq_mode));
2003	}
2004
2005	return (mlx5_core_modify_cq_moderation(priv->mdev, &sq->cq.mcq,
2006	    priv->params.tx_cq_moderation_usec,
2007	    priv->params.tx_cq_moderation_pkts));
2008}
2009
2010static int
2011mlx5e_refresh_rq_params(struct mlx5e_priv *priv, struct mlx5e_rq *rq)
2012{
2013
2014	if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
2015		uint8_t cq_mode;
2016		uint8_t dim_mode;
2017		int retval;
2018
2019		switch (priv->params.rx_cq_moderation_mode) {
2020		case 0:
2021		case 2:
2022			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
2023			dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
2024			break;
2025		default:
2026			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
2027			dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
2028			break;
2029		}
2030
2031		/* tear down dynamic interrupt moderation */
2032		mtx_lock(&rq->mtx);
2033		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
2034		mtx_unlock(&rq->mtx);
2035
2036		/* wait for dynamic interrupt moderation work task, if any */
2037		cancel_work_sync(&rq->dim.work);
2038
2039		if (priv->params.rx_cq_moderation_mode >= 2) {
2040			struct net_dim_cq_moder curr;
2041
2042			mlx5e_get_default_profile(priv, dim_mode, &curr);
2043
2044			retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq,
2045			    curr.usec, curr.pkts, cq_mode);
2046
2047			/* set dynamic interrupt moderation mode and zero defaults */
2048			mtx_lock(&rq->mtx);
2049			rq->dim.mode = dim_mode;
2050			rq->dim.state = 0;
2051			rq->dim.profile_ix = MLX5E_DIM_DEFAULT_PROFILE;
2052			mtx_unlock(&rq->mtx);
2053		} else {
2054			retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq,
2055			    priv->params.rx_cq_moderation_usec,
2056			    priv->params.rx_cq_moderation_pkts,
2057			    cq_mode);
2058		}
2059		return (retval);
2060	}
2061
2062	return (mlx5_core_modify_cq_moderation(priv->mdev, &rq->cq.mcq,
2063	    priv->params.rx_cq_moderation_usec,
2064	    priv->params.rx_cq_moderation_pkts));
2065}
2066
2067static int
2068mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
2069{
2070	int err;
2071	int i;
2072
2073	err = mlx5e_refresh_rq_params(priv, &c->rq);
2074	if (err)
2075		goto done;
2076
2077	for (i = 0; i != c->num_tc; i++) {
2078		err = mlx5e_refresh_sq_params(priv, &c->sq[i]);
2079		if (err)
2080			goto done;
2081	}
2082done:
2083	return (err);
2084}
2085
2086int
2087mlx5e_refresh_channel_params(struct mlx5e_priv *priv)
2088{
2089	int i;
2090
2091	/* check if channels are closed */
2092	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
2093		return (EINVAL);
2094
2095	for (i = 0; i < priv->params.num_channels; i++) {
2096		int err;
2097
2098		err = mlx5e_refresh_channel_params_sub(priv, &priv->channel[i]);
2099		if (err)
2100			return (err);
2101	}
2102	return (0);
2103}
2104
2105static int
2106mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
2107{
2108	struct mlx5_core_dev *mdev = priv->mdev;
2109	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
2110	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
2111
2112	memset(in, 0, sizeof(in));
2113
2114	MLX5_SET(tisc, tisc, prio, tc);
2115	MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
2116
2117	return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]));
2118}
2119
2120static void
2121mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
2122{
2123	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
2124}
2125
2126static int
2127mlx5e_open_tises(struct mlx5e_priv *priv)
2128{
2129	int num_tc = priv->num_tc;
2130	int err;
2131	int tc;
2132
2133	for (tc = 0; tc < num_tc; tc++) {
2134		err = mlx5e_open_tis(priv, tc);
2135		if (err)
2136			goto err_close_tises;
2137	}
2138
2139	return (0);
2140
2141err_close_tises:
2142	for (tc--; tc >= 0; tc--)
2143		mlx5e_close_tis(priv, tc);
2144
2145	return (err);
2146}
2147
2148static void
2149mlx5e_close_tises(struct mlx5e_priv *priv)
2150{
2151	int num_tc = priv->num_tc;
2152	int tc;
2153
2154	for (tc = 0; tc < num_tc; tc++)
2155		mlx5e_close_tis(priv, tc);
2156}
2157
2158static int
2159mlx5e_open_rqt(struct mlx5e_priv *priv)
2160{
2161	struct mlx5_core_dev *mdev = priv->mdev;
2162	u32 *in;
2163	u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
2164	void *rqtc;
2165	int inlen;
2166	int err;
2167	int sz;
2168	int i;
2169
2170	sz = 1 << priv->params.rx_hash_log_tbl_sz;
2171
2172	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
2173	in = mlx5_vzalloc(inlen);
2174	if (in == NULL)
2175		return (-ENOMEM);
2176	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
2177
2178	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2179	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
2180
2181	for (i = 0; i < sz; i++) {
2182		int ix = i;
2183#ifdef RSS
2184		ix = rss_get_indirection_to_bucket(ix);
2185#endif
2186		/* ensure we don't overflow */
2187		ix %= priv->params.num_channels;
2188
2189		/* apply receive side scaling stride, if any */
2190		ix -= ix % (int)priv->params.channels_rsss;
2191
2192		MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix].rq.rqn);
2193	}
2194
2195	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
2196
2197	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
2198	if (!err)
2199		priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
2200
2201	kvfree(in);
2202
2203	return (err);
2204}
2205
2206static void
2207mlx5e_close_rqt(struct mlx5e_priv *priv)
2208{
2209	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0};
2210	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0};
2211
2212	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
2213	MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
2214
2215	mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
2216}
2217
2218static void
2219mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
2220{
2221	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2222	__be32 *hkey;
2223
2224	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
2225
2226#define	ROUGH_MAX_L2_L3_HDR_SZ 256
2227
2228#define	MLX5_HASH_IP     (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2229			  MLX5_HASH_FIELD_SEL_DST_IP)
2230
2231#define	MLX5_HASH_ALL    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2232			  MLX5_HASH_FIELD_SEL_DST_IP   |\
2233			  MLX5_HASH_FIELD_SEL_L4_SPORT |\
2234			  MLX5_HASH_FIELD_SEL_L4_DPORT)
2235
2236#define	MLX5_HASH_IP_IPSEC_SPI	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
2237				 MLX5_HASH_FIELD_SEL_DST_IP   |\
2238				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2239
2240	if (priv->params.hw_lro_en) {
2241		MLX5_SET(tirc, tirc, lro_enable_mask,
2242		    MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2243		    MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
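		/*
		 * The maximum LRO message size field is given in units
		 * of 256 bytes, hence the shift by eight below.
		 */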
2244		MLX5_SET(tirc, tirc, lro_max_msg_sz,
2245		    (priv->params.lro_wqe_sz -
2246		    ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
2247		/* TODO: add the option to choose timer value dynamically */
2248		MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
2249		    MLX5_CAP_ETH(priv->mdev,
2250		    lro_timer_supported_periods[2]));
2251	}
2252
2253	/* setup parameters for hashing TIR type, if any */
2254	switch (tt) {
2255	case MLX5E_TT_ANY:
2256		MLX5_SET(tirc, tirc, disp_type,
2257		    MLX5_TIRC_DISP_TYPE_DIRECT);
2258		MLX5_SET(tirc, tirc, inline_rqn,
2259		    priv->channel[0].rq.rqn);
2260		break;
2261	default:
2262		MLX5_SET(tirc, tirc, disp_type,
2263		    MLX5_TIRC_DISP_TYPE_INDIRECT);
2264		MLX5_SET(tirc, tirc, indirect_table,
2265		    priv->rqtn);
2266		MLX5_SET(tirc, tirc, rx_hash_fn,
2267		    MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
2268		hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
2269#ifdef RSS
2270		/*
2271		 * The FreeBSD RSS implementation does currently not
2272		 * support symmetric Toeplitz hashes:
2273		 */
2274		MLX5_SET(tirc, tirc, rx_hash_symmetric, 0);
2275		rss_getkey((uint8_t *)hkey);
2276#else
2277		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
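		/*
		 * Use a built-in default Toeplitz key when the kernel
		 * RSS option is not compiled in.
		 */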
2278		hkey[0] = cpu_to_be32(0xD181C62C);
2279		hkey[1] = cpu_to_be32(0xF7F4DB5B);
2280		hkey[2] = cpu_to_be32(0x1983A2FC);
2281		hkey[3] = cpu_to_be32(0x943E1ADB);
2282		hkey[4] = cpu_to_be32(0xD9389E6B);
2283		hkey[5] = cpu_to_be32(0xD1039C2C);
2284		hkey[6] = cpu_to_be32(0xA74499AD);
2285		hkey[7] = cpu_to_be32(0x593D56D9);
2286		hkey[8] = cpu_to_be32(0xF3253C06);
2287		hkey[9] = cpu_to_be32(0x2ADC1FFC);
2288#endif
2289		break;
2290	}
2291
2292	switch (tt) {
2293	case MLX5E_TT_IPV4_TCP:
2294		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2295		    MLX5_L3_PROT_TYPE_IPV4);
2296		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2297		    MLX5_L4_PROT_TYPE_TCP);
2298#ifdef RSS
2299		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) {
2300			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2301			    MLX5_HASH_IP);
2302		} else
2303#endif
2304		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2305		    MLX5_HASH_ALL);
2306		break;
2307
2308	case MLX5E_TT_IPV6_TCP:
2309		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2310		    MLX5_L3_PROT_TYPE_IPV6);
2311		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2312		    MLX5_L4_PROT_TYPE_TCP);
2313#ifdef RSS
2314		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) {
2315			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2316			    MLX5_HASH_IP);
2317		} else
2318#endif
2319		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2320		    MLX5_HASH_ALL);
2321		break;
2322
2323	case MLX5E_TT_IPV4_UDP:
2324		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2325		    MLX5_L3_PROT_TYPE_IPV4);
2326		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2327		    MLX5_L4_PROT_TYPE_UDP);
2328#ifdef RSS
2329		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) {
2330			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2331			    MLX5_HASH_IP);
2332		} else
2333#endif
2334		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2335		    MLX5_HASH_ALL);
2336		break;
2337
2338	case MLX5E_TT_IPV6_UDP:
2339		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2340		    MLX5_L3_PROT_TYPE_IPV6);
2341		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2342		    MLX5_L4_PROT_TYPE_UDP);
2343#ifdef RSS
2344		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) {
2345			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2346			    MLX5_HASH_IP);
2347		} else
2348#endif
2349		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2350		    MLX5_HASH_ALL);
2351		break;
2352
2353	case MLX5E_TT_IPV4_IPSEC_AH:
2354		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2355		    MLX5_L3_PROT_TYPE_IPV4);
2356		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2357		    MLX5_HASH_IP_IPSEC_SPI);
2358		break;
2359
2360	case MLX5E_TT_IPV6_IPSEC_AH:
2361		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2362		    MLX5_L3_PROT_TYPE_IPV6);
2363		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2364		    MLX5_HASH_IP_IPSEC_SPI);
2365		break;
2366
2367	case MLX5E_TT_IPV4_IPSEC_ESP:
2368		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2369		    MLX5_L3_PROT_TYPE_IPV4);
2370		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2371		    MLX5_HASH_IP_IPSEC_SPI);
2372		break;
2373
2374	case MLX5E_TT_IPV6_IPSEC_ESP:
2375		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2376		    MLX5_L3_PROT_TYPE_IPV6);
2377		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2378		    MLX5_HASH_IP_IPSEC_SPI);
2379		break;
2380
2381	case MLX5E_TT_IPV4:
2382		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2383		    MLX5_L3_PROT_TYPE_IPV4);
2384		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2385		    MLX5_HASH_IP);
2386		break;
2387
2388	case MLX5E_TT_IPV6:
2389		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2390		    MLX5_L3_PROT_TYPE_IPV6);
2391		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2392		    MLX5_HASH_IP);
2393		break;
2394
2395	default:
2396		break;
2397	}
2398}
2399
2400static int
2401mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
2402{
2403	struct mlx5_core_dev *mdev = priv->mdev;
2404	u32 *in;
2405	void *tirc;
2406	int inlen;
2407	int err;
2408
2409	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2410	in = mlx5_vzalloc(inlen);
2411	if (in == NULL)
2412		return (-ENOMEM);
2413	tirc = MLX5_ADDR_OF(create_tir_in, in, tir_context);
2414
2415	mlx5e_build_tir_ctx(priv, tirc, tt);
2416
2417	err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
2418
2419	kvfree(in);
2420
2421	return (err);
2422}
2423
2424static void
2425mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
2426{
2427	mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
2428}
2429
2430static int
2431mlx5e_open_tirs(struct mlx5e_priv *priv)
2432{
2433	int err;
2434	int i;
2435
2436	for (i = 0; i < MLX5E_NUM_TT; i++) {
2437		err = mlx5e_open_tir(priv, i);
2438		if (err)
2439			goto err_close_tirs;
2440	}
2441
2442	return (0);
2443
2444err_close_tirs:
2445	for (i--; i >= 0; i--)
2446		mlx5e_close_tir(priv, i);
2447
2448	return (err);
2449}
2450
2451static void
2452mlx5e_close_tirs(struct mlx5e_priv *priv)
2453{
2454	int i;
2455
2456	for (i = 0; i < MLX5E_NUM_TT; i++)
2457		mlx5e_close_tir(priv, i);
2458}
2459
2460/*
2461 * SW MTU does not include headers,
2462 * HW MTU includes all headers and checksums.
2463 */
2464static int
2465mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu)
2466{
2467	struct mlx5e_priv *priv = ifp->if_softc;
2468	struct mlx5_core_dev *mdev = priv->mdev;
2469	int hw_mtu;
2470	int err;
2471
2472	hw_mtu = MLX5E_SW2HW_MTU(sw_mtu);
2473
2474	err = mlx5_set_port_mtu(mdev, hw_mtu);
2475	if (err) {
2476		if_printf(ifp, "%s: mlx5_set_port_mtu failed setting %d, err=%d\n",
2477		    __func__, sw_mtu, err);
2478		return (err);
2479	}
2480
2481	/* Update vport context MTU */
2482	err = mlx5_set_vport_mtu(mdev, hw_mtu);
2483	if (err) {
2484		if_printf(ifp, "%s: Failed updating vport context with MTU size, err=%d\n",
2485		    __func__, err);
2486	}
2487
2488	ifp->if_mtu = sw_mtu;
2489
2490	err = mlx5_query_vport_mtu(mdev, &hw_mtu);
2491	if (err || !hw_mtu) {
2492		/* fallback to port oper mtu */
2493		err = mlx5_query_port_oper_mtu(mdev, &hw_mtu);
2494	}
2495	if (err) {
2496		if_printf(ifp, "Querying port MTU after setting new "
2497		    "MTU value failed\n");
2498		return (err);
2499	} else if (MLX5E_HW2SW_MTU(hw_mtu) < sw_mtu) {
2500		err = -E2BIG;
2501		if_printf(ifp, "Port MTU %d is smaller than "
2502		    "ifp mtu %d\n", hw_mtu, sw_mtu);
2503	} else if (MLX5E_HW2SW_MTU(hw_mtu) > sw_mtu) {
2504		err = -EINVAL;
2505		if_printf(ifp, "Port MTU %d is bigger than "
2506		    "ifp mtu %d\n", hw_mtu, sw_mtu);
2507	}
2508	priv->params_ethtool.hw_mtu = hw_mtu;
2509
2510	return (err);
2511}
2512
2513int
2514mlx5e_open_locked(struct ifnet *ifp)
2515{
2516	struct mlx5e_priv *priv = ifp->if_softc;
2517	int err;
2518	u16 set_id;
2519
2520	/* check if already opened */
2521	if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
2522		return (0);
2523
2524#ifdef RSS
2525	if (rss_getnumbuckets() > priv->params.num_channels) {
2526		if_printf(ifp, "NOTE: There are more RSS buckets(%u) than "
2527		    "channels(%u) available\n", rss_getnumbuckets(),
2528		    priv->params.num_channels);
2529	}
2530#endif
2531	err = mlx5e_open_tises(priv);
2532	if (err) {
2533		if_printf(ifp, "%s: mlx5e_open_tises failed, %d\n",
2534		    __func__, err);
2535		return (err);
2536	}
2537	err = mlx5_vport_alloc_q_counter(priv->mdev,
2538	    MLX5_INTERFACE_PROTOCOL_ETH, &set_id);
2539	if (err) {
2540		if_printf(priv->ifp,
2541		    "%s: mlx5_vport_alloc_q_counter failed: %d\n",
2542		    __func__, err);
2543		goto err_close_tises;
2544	}
2545	/* store counter set ID */
2546	priv->counter_set_id = set_id;
2547
2548	err = mlx5e_open_channels(priv);
2549	if (err) {
2550		if_printf(ifp, "%s: mlx5e_open_channels failed, %d\n",
2551		    __func__, err);
2552		goto err_dalloc_q_counter;
2553	}
2554	err = mlx5e_open_rqt(priv);
2555	if (err) {
2556		if_printf(ifp, "%s: mlx5e_open_rqt failed, %d\n",
2557		    __func__, err);
2558		goto err_close_channels;
2559	}
2560	err = mlx5e_open_tirs(priv);
2561	if (err) {
2562		if_printf(ifp, "%s: mlx5e_open_tirs failed, %d\n",
2563		    __func__, err);
2564		goto err_close_rqls;
2565	}
2566	err = mlx5e_open_flow_table(priv);
2567	if (err) {
2568		if_printf(ifp, "%s: mlx5e_open_flow_table failed, %d\n",
2569		    __func__, err);
2570		goto err_close_tirs;
2571	}
2572	err = mlx5e_add_all_vlan_rules(priv);
2573	if (err) {
2574		if_printf(ifp, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
2575		    __func__, err);
2576		goto err_close_flow_table;
2577	}
2578	set_bit(MLX5E_STATE_OPENED, &priv->state);
2579
2580	mlx5e_update_carrier(priv);
2581	mlx5e_set_rx_mode_core(priv);
2582
2583	return (0);
2584
2585err_close_flow_table:
2586	mlx5e_close_flow_table(priv);
2587
2588err_close_tirs:
2589	mlx5e_close_tirs(priv);
2590
2591err_close_rqls:
2592	mlx5e_close_rqt(priv);
2593
2594err_close_channels:
2595	mlx5e_close_channels(priv);
2596
2597err_dalloc_q_counter:
2598	mlx5_vport_dealloc_q_counter(priv->mdev,
2599	    MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
2600
2601err_close_tises:
2602	mlx5e_close_tises(priv);
2603
2604	return (err);
2605}
2606
2607static void
2608mlx5e_open(void *arg)
2609{
2610	struct mlx5e_priv *priv = arg;
2611
2612	PRIV_LOCK(priv);
2613	if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP))
2614		if_printf(priv->ifp,
2615		    "%s: Setting port status to up failed\n",
2616		    __func__);
2617
2618	mlx5e_open_locked(priv->ifp);
2619	priv->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2620	PRIV_UNLOCK(priv);
2621}
2622
2623int
2624mlx5e_close_locked(struct ifnet *ifp)
2625{
2626	struct mlx5e_priv *priv = ifp->if_softc;
2627
2628	/* check if already closed */
2629	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
2630		return (0);
2631
2632	clear_bit(MLX5E_STATE_OPENED, &priv->state);
2633
2634	mlx5e_set_rx_mode_core(priv);
2635	mlx5e_del_all_vlan_rules(priv);
2636	if_link_state_change(priv->ifp, LINK_STATE_DOWN);
2637	mlx5e_close_flow_table(priv);
2638	mlx5e_close_tirs(priv);
2639	mlx5e_close_rqt(priv);
2640	mlx5e_close_channels(priv);
2641	mlx5_vport_dealloc_q_counter(priv->mdev,
2642	    MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
2643	mlx5e_close_tises(priv);
2644
2645	return (0);
2646}
2647
2648#if (__FreeBSD_version >= 1100000)
2649static uint64_t
2650mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt)
2651{
2652	struct mlx5e_priv *priv = ifp->if_softc;
2653	u64 retval;
2654
2655	/* PRIV_LOCK(priv); XXX not allowed */
2656	switch (cnt) {
2657	case IFCOUNTER_IPACKETS:
2658		retval = priv->stats.vport.rx_packets;
2659		break;
2660	case IFCOUNTER_IERRORS:
2661		retval = priv->stats.vport.rx_error_packets +
2662		    priv->stats.pport.alignment_err +
2663		    priv->stats.pport.check_seq_err +
2664		    priv->stats.pport.crc_align_errors +
2665		    priv->stats.pport.in_range_len_errors +
2666		    priv->stats.pport.jabbers +
2667		    priv->stats.pport.out_of_range_len +
2668		    priv->stats.pport.oversize_pkts +
2669		    priv->stats.pport.symbol_err +
2670		    priv->stats.pport.too_long_errors +
2671		    priv->stats.pport.undersize_pkts +
2672		    priv->stats.pport.unsupported_op_rx;
2673		break;
2674	case IFCOUNTER_IQDROPS:
2675		retval = priv->stats.vport.rx_out_of_buffer +
2676		    priv->stats.pport.drop_events;
2677		break;
2678	case IFCOUNTER_OPACKETS:
2679		retval = priv->stats.vport.tx_packets;
2680		break;
2681	case IFCOUNTER_OERRORS:
2682		retval = priv->stats.vport.tx_error_packets;
2683		break;
2684	case IFCOUNTER_IBYTES:
2685		retval = priv->stats.vport.rx_bytes;
2686		break;
2687	case IFCOUNTER_OBYTES:
2688		retval = priv->stats.vport.tx_bytes;
2689		break;
2690	case IFCOUNTER_IMCASTS:
2691		retval = priv->stats.vport.rx_multicast_packets;
2692		break;
2693	case IFCOUNTER_OMCASTS:
2694		retval = priv->stats.vport.tx_multicast_packets;
2695		break;
2696	case IFCOUNTER_OQDROPS:
2697		retval = priv->stats.vport.tx_queue_dropped;
2698		break;
2699	case IFCOUNTER_COLLISIONS:
2700		retval = priv->stats.pport.collisions;
2701		break;
2702	default:
2703		retval = if_get_counter_default(ifp, cnt);
2704		break;
2705	}
2706	/* PRIV_UNLOCK(priv); XXX not allowed */
2707	return (retval);
2708}
2709#endif
2710
2711static void
2712mlx5e_set_rx_mode(struct ifnet *ifp)
2713{
2714	struct mlx5e_priv *priv = ifp->if_softc;
2715
2716	queue_work(priv->wq, &priv->set_rx_mode_work);
2717}
2718
2719static int
2720mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2721{
2722	struct mlx5e_priv *priv;
2723	struct ifreq *ifr;
2724	struct ifi2creq i2c;
2725	int error = 0;
2726	int mask = 0;
2727	int size_read = 0;
2728	int module_status;
2729	int module_num;
2730	int max_mtu;
2731	uint8_t read_addr;
2732
2733	priv = ifp->if_softc;
2734
2735	/* check if detaching */
2736	if (priv == NULL || priv->gone != 0)
2737		return (ENXIO);
2738
2739	switch (command) {
2740	case SIOCSIFMTU:
2741		ifr = (struct ifreq *)data;
2742
2743		PRIV_LOCK(priv);
2744		mlx5_query_port_max_mtu(priv->mdev, &max_mtu);
2745
2746		if (ifr->ifr_mtu >= MLX5E_MTU_MIN &&
2747		    ifr->ifr_mtu <= MIN(MLX5E_MTU_MAX, max_mtu)) {
2748			int was_opened;
2749
2750			was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2751			if (was_opened)
2752				mlx5e_close_locked(ifp);
2753
2754			/* set new MTU */
2755			mlx5e_set_dev_port_mtu(ifp, ifr->ifr_mtu);
2756
2757			if (was_opened)
2758				mlx5e_open_locked(ifp);
2759		} else {
2760			error = EINVAL;
2761			if_printf(ifp, "Invalid MTU value. Min val: %d, Max val: %d\n",
2762			    MLX5E_MTU_MIN, MIN(MLX5E_MTU_MAX, max_mtu));
2763		}
2764		PRIV_UNLOCK(priv);
2765		break;
2766	case SIOCSIFFLAGS:
2767		if ((ifp->if_flags & IFF_UP) &&
2768		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2769			mlx5e_set_rx_mode(ifp);
2770			break;
2771		}
2772		PRIV_LOCK(priv);
2773		if (ifp->if_flags & IFF_UP) {
2774			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2775				if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
2776					mlx5e_open_locked(ifp);
2777				ifp->if_drv_flags |= IFF_DRV_RUNNING;
2778				mlx5_set_port_status(priv->mdev, MLX5_PORT_UP);
2779			}
2780		} else {
2781			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2782				mlx5_set_port_status(priv->mdev,
2783				    MLX5_PORT_DOWN);
2784				if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
2785					mlx5e_close_locked(ifp);
2786				mlx5e_update_carrier(priv);
2787				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2788			}
2789		}
2790		PRIV_UNLOCK(priv);
2791		break;
2792	case SIOCADDMULTI:
2793	case SIOCDELMULTI:
2794		mlx5e_set_rx_mode(ifp);
2795		break;
2796	case SIOCSIFMEDIA:
2797	case SIOCGIFMEDIA:
2798	case SIOCGIFXMEDIA:
2799		ifr = (struct ifreq *)data;
2800		error = ifmedia_ioctl(ifp, ifr, &priv->media, command);
2801		break;
2802	case SIOCSIFCAP:
2803		ifr = (struct ifreq *)data;
2804		PRIV_LOCK(priv);
2805		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2806
2807		if (mask & IFCAP_TXCSUM) {
2808			ifp->if_capenable ^= IFCAP_TXCSUM;
2809			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2810
2811			if (IFCAP_TSO4 & ifp->if_capenable &&
2812			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
2813				ifp->if_capenable &= ~IFCAP_TSO4;
2814				ifp->if_hwassist &= ~CSUM_IP_TSO;
2815				if_printf(ifp,
2816				    "tso4 disabled due to -txcsum.\n");
2817			}
2818		}
2819		if (mask & IFCAP_TXCSUM_IPV6) {
2820			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
2821			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2822
2823			if (IFCAP_TSO6 & ifp->if_capenable &&
2824			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2825				ifp->if_capenable &= ~IFCAP_TSO6;
2826				ifp->if_hwassist &= ~CSUM_IP6_TSO;
2827				if_printf(ifp,
2828				    "tso6 disabled due to -txcsum6.\n");
2829			}
2830		}
2831		if (mask & IFCAP_RXCSUM)
2832			ifp->if_capenable ^= IFCAP_RXCSUM;
2833		if (mask & IFCAP_RXCSUM_IPV6)
2834			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
2835		if (mask & IFCAP_TSO4) {
2836			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
2837			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
2838				if_printf(ifp, "enable txcsum first.\n");
2839				error = EAGAIN;
2840				goto out;
2841			}
2842			ifp->if_capenable ^= IFCAP_TSO4;
2843			ifp->if_hwassist ^= CSUM_IP_TSO;
2844		}
2845		if (mask & IFCAP_TSO6) {
2846			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
2847			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2848				if_printf(ifp, "enable txcsum6 first.\n");
2849				error = EAGAIN;
2850				goto out;
2851			}
2852			ifp->if_capenable ^= IFCAP_TSO6;
2853			ifp->if_hwassist ^= CSUM_IP6_TSO;
2854		}
2855		if (mask & IFCAP_VLAN_HWFILTER) {
2856			if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2857				mlx5e_disable_vlan_filter(priv);
2858			else
2859				mlx5e_enable_vlan_filter(priv);
2860
2861			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
2862		}
2863		if (mask & IFCAP_VLAN_HWTAGGING)
2864			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2865		if (mask & IFCAP_WOL_MAGIC)
2866			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2867
2868		VLAN_CAPABILITIES(ifp);
2869		/* turn off LRO means also turn of HW LRO - if it's on */
2870	/* turning off LRO also means turning off HW LRO, if it is enabled */
2871			int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2872			bool need_restart = false;
2873
2874			ifp->if_capenable ^= IFCAP_LRO;
2875
2876			/* figure out if updating HW LRO is needed */
2877			if (!(ifp->if_capenable & IFCAP_LRO)) {
2878				if (priv->params.hw_lro_en) {
2879					priv->params.hw_lro_en = false;
2880					need_restart = true;
2881				}
2882			} else {
2883				if (priv->params.hw_lro_en == false &&
2884				    priv->params_ethtool.hw_lro != 0) {
2885					priv->params.hw_lro_en = true;
2886					need_restart = true;
2887				}
2888			}
2889			if (was_opened && need_restart) {
2890				mlx5e_close_locked(ifp);
2891				mlx5e_open_locked(ifp);
2892			}
2893		}
2894out:
2895		PRIV_UNLOCK(priv);
2896		break;
2897
2898	case SIOCGI2C:
2899		ifr = (struct ifreq *)data;
2900
2901		/*
2902		 * Copy from the user-space address ifr_data to the
2903		 * kernel-space address i2c
2904		 */
2905		error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2906		if (error)
2907			break;
2908
2909		if (i2c.len > sizeof(i2c.data)) {
2910			error = EINVAL;
2911			break;
2912		}
2913
2914		PRIV_LOCK(priv);
2915		/* Get module_num which is required for the query_eeprom */
2916		error = mlx5_query_module_num(priv->mdev, &module_num);
2917		if (error) {
2918			if_printf(ifp, "Query module num failed, eeprom "
2919			    "reading is not supported\n");
2920			error = EINVAL;
2921			goto err_i2c;
2922		}
2923		/* Check if module is present before doing an access */
2924		module_status = mlx5_query_module_status(priv->mdev, module_num);
2925		if (module_status != MLX5_MODULE_STATUS_PLUGGED_ENABLED &&
2926		    module_status != MLX5_MODULE_STATUS_PLUGGED_DISABLED) {
2927			error = EINVAL;
2928			goto err_i2c;
2929		}
2930		/*
2931		 * Currently 0xA0 and 0xA2 are the only addresses permitted.
2932		 * The internal conversion is as follows:
2933		 */
2934		if (i2c.dev_addr == 0xA0)
2935			read_addr = MLX5E_I2C_ADDR_LOW;
2936		else if (i2c.dev_addr == 0xA2)
2937			read_addr = MLX5E_I2C_ADDR_HIGH;
2938		else {
2939			if_printf(ifp, "Query eeprom failed, "
2940			    "Invalid Address: %X\n", i2c.dev_addr);
2941			error = EINVAL;
2942			goto err_i2c;
2943		}
2944		error = mlx5_query_eeprom(priv->mdev,
2945		    read_addr, MLX5E_EEPROM_LOW_PAGE,
2946		    (uint32_t)i2c.offset, (uint32_t)i2c.len, module_num,
2947		    (uint32_t *)i2c.data, &size_read);
2948		if (error) {
2949			if_printf(ifp, "Query eeprom failed, eeprom "
2950			    "reading is not supported\n");
2951			error = EINVAL;
2952			goto err_i2c;
2953		}
2954
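		/*
		 * A single query is limited to MLX5_EEPROM_MAX_BYTES;
		 * issue a second read for any remaining bytes.
		 */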
2955		if (i2c.len > MLX5_EEPROM_MAX_BYTES) {
2956			error = mlx5_query_eeprom(priv->mdev,
2957			    read_addr, MLX5E_EEPROM_LOW_PAGE,
2958			    (uint32_t)(i2c.offset + size_read),
2959			    (uint32_t)(i2c.len - size_read), module_num,
2960			    (uint32_t *)(i2c.data + size_read), &size_read);
2961		}
2962		if (error) {
2963			if_printf(ifp, "Query eeprom failed, eeprom "
2964			    "reading is not supported\n");
2965			error = EINVAL;
2966			goto err_i2c;
2967		}
2968
2969		error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2970err_i2c:
2971		PRIV_UNLOCK(priv);
2972		break;
2973
2974	default:
2975		error = ether_ioctl(ifp, command, data);
2976		break;
2977	}
2978	return (error);
2979}
2980
2981static int
2982mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
2983{
2984	/*
2985	 * TODO: uncomment once FW really sets all these bits:
2986	 * if (!mdev->caps.eth.rss_ind_tbl_cap || !mdev->caps.eth.csum_cap ||
2987	 *     !mdev->caps.eth.max_lso_cap || !mdev->caps.eth.vlan_cap ||
2988	 *     !(mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD))
2989	 *	return (-ENOTSUPP);
2990	 */
2991
2992	/* TODO: add more must-to-have features */
2993
2994	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
2995		return (-ENODEV);
2996
2997	return (0);
2998}
2999
3000static u16
3001mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
3002{
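	/*
	 * The blueflame register is split into two equal buffers, so
	 * only half of its total size is usable for a single send.
	 */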
3003	uint32_t bf_buf_size = (1U << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2U;
3004
3005	bf_buf_size -= sizeof(struct mlx5e_tx_wqe) - 2;
3006
3007	/* verify against driver hardware limit */
3008	if (bf_buf_size > MLX5E_MAX_TX_INLINE)
3009		bf_buf_size = MLX5E_MAX_TX_INLINE;
3010
3011	return (bf_buf_size);
3012}
3013
3014static int
3015mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev,
3016    struct mlx5e_priv *priv,
3017    int num_comp_vectors)
3018{
3019	int err;
3020
3021	/*
3022	 * TODO: Consider link speed for setting "log_sq_size",
3023	 * "log_rq_size" and "cq_moderation_xxx":
3024	 */
3025	priv->params.log_sq_size =
3026	    MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
3027	priv->params.log_rq_size =
3028	    MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
3029	priv->params.rx_cq_moderation_usec =
3030	    MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
3031	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE :
3032	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
3033	priv->params.rx_cq_moderation_mode =
3034	    MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 1 : 0;
3035	priv->params.rx_cq_moderation_pkts =
3036	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
3037	priv->params.tx_cq_moderation_usec =
3038	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
3039	priv->params.tx_cq_moderation_pkts =
3040	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
3041	priv->params.min_rx_wqes =
3042	    MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
3043	priv->params.rx_hash_log_tbl_sz =
3044	    (order_base_2(num_comp_vectors) >
3045	    MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
3046	    order_base_2(num_comp_vectors) :
3047	    MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
3048	priv->params.num_tc = 1;
3049	priv->params.default_vlan_prio = 0;
3050	priv->counter_set_id = -1;
3051	priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
3052
3053	err = mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
3054	if (err)
3055		return (err);
3056
3057	/*
3058	 * HW LRO is currently defaulted to off. When that is no longer the case
3059	 * we will consider the HW capability: "!!MLX5_CAP_ETH(mdev, lro_cap)"
3060	 */
3061	priv->params.hw_lro_en = false;
3062	priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
3063
3064	/*
3065	 * CQE zipping is currently defaulted to off. When that is no
3066	 * longer the case we will consider the HW capability:
3067	 * "!!MLX5_CAP_GEN(mdev, cqe_compression)"
3068	 */
3069	priv->params.cqe_zipping_en = false;
3070
3071	priv->mdev = mdev;
3072	priv->params.num_channels = num_comp_vectors;
3073	priv->params.channels_rsss = 1;
3074	priv->order_base_2_num_channels = order_base_2(num_comp_vectors);
3075	priv->queue_mapping_channel_mask =
3076	    roundup_pow_of_two(num_comp_vectors) - 1;
3077	priv->num_tc = priv->params.num_tc;
3078	priv->default_vlan_prio = priv->params.default_vlan_prio;
3079
3080	INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
3081	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
3082	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
3083
3084	return (0);
3085}
3086
3087static int
3088mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
3089		  struct mlx5_core_mr *mkey)
3090{
3091	struct ifnet *ifp = priv->ifp;
3092	struct mlx5_core_dev *mdev = priv->mdev;
3093	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
3094	void *mkc;
3095	u32 *in;
3096	int err;
3097
3098	in = mlx5_vzalloc(inlen);
3099	if (in == NULL) {
3100		if_printf(ifp, "%s: failed to allocate inbox\n", __func__);
3101		return (-ENOMEM);
3102	}
3103
3104	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
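	/*
	 * Create a memory key in physical address mode covering the
	 * whole address space (length64), with local read and write
	 * access for the driver's DMA mappings.
	 */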
3105	MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA);
3106	MLX5_SET(mkc, mkc, lw, 1);
3107	MLX5_SET(mkc, mkc, lr, 1);
3108
3109	MLX5_SET(mkc, mkc, pd, pdn);
3110	MLX5_SET(mkc, mkc, length64, 1);
3111	MLX5_SET(mkc, mkc, qpn, 0xffffff);
3112
3113	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
3114	if (err)
3115		if_printf(ifp, "%s: mlx5_core_create_mkey failed, %d\n",
3116		    __func__, err);
3117
3118	kvfree(in);
3119	return (err);
3120}
3121
3122static const char *mlx5e_vport_stats_desc[] = {
3123	MLX5E_VPORT_STATS(MLX5E_STATS_DESC)
3124};
3125
3126static const char *mlx5e_pport_stats_desc[] = {
3127	MLX5E_PPORT_STATS(MLX5E_STATS_DESC)
3128};
3129
3130static void
3131mlx5e_priv_mtx_init(struct mlx5e_priv *priv)
3132{
3133	mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF);
3134	sx_init(&priv->state_lock, "mlx5state");
3135	callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0);
3136	MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
3137}
3138
3139static void
3140mlx5e_priv_mtx_destroy(struct mlx5e_priv *priv)
3141{
3142	mtx_destroy(&priv->async_events_mtx);
3143	sx_destroy(&priv->state_lock);
3144}
3145
3146static int
3147sysctl_firmware(SYSCTL_HANDLER_ARGS)
3148{
3149	/*
3150	 * "%d.%d.%d" is the string format.
3151	 * fw_rev_{maj,min,sub} each return a u16, and 2^16 = 65536 needs at
3152	 * most 5 chars to store. Adding the two "." separators and the
3153	 * terminating NUL, we need at most 18 (5*3 + 3) chars.
3155	 */
3156	char fw[18];
3157	struct mlx5e_priv *priv = arg1;
3158	int error;
3159
3160	snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev),
3161	    fw_rev_sub(priv->mdev));
3162	error = sysctl_handle_string(oidp, fw, sizeof(fw), req);
3163	return (error);
3164}
3165
3166static void
3167mlx5e_disable_tx_dma(struct mlx5e_channel *ch)
3168{
3169	int i;
3170
3171	for (i = 0; i < ch->num_tc; i++)
3172		mlx5e_drain_sq(&ch->sq[i]);
3173}
3174
3175static void
3176mlx5e_reset_sq_doorbell_record(struct mlx5e_sq *sq)
3177{
3178
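	/*
	 * Ring the doorbell with a NOP opcode so that the doorbell
	 * record agrees with the reset producer counter before the SQ
	 * is moved back to the ready state.
	 */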
3179	sq->doorbell.d32[0] = cpu_to_be32(MLX5_OPCODE_NOP);
3180	sq->doorbell.d32[1] = cpu_to_be32(sq->sqn << 8);
3181	mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
3182	sq->doorbell.d64 = 0;
3183}
3184
3185void
3186mlx5e_resume_sq(struct mlx5e_sq *sq)
3187{
3188	int err;
3189
3190	/* check if already enabled */
3191	if (READ_ONCE(sq->running) != 0)
3192		return;
3193
3194	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_ERR,
3195	    MLX5_SQC_STATE_RST);
3196	if (err != 0) {
3197		if_printf(sq->ifp,
3198		    "mlx5e_modify_sq() from ERR to RST failed: %d\n", err);
3199	}
3200
3201	sq->cc = 0;
3202	sq->pc = 0;
3203
3204	/* reset doorbell prior to moving from RST to RDY */
3205	mlx5e_reset_sq_doorbell_record(sq);
3206
3207	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST,
3208	    MLX5_SQC_STATE_RDY);
3209	if (err != 0) {
3210		if_printf(sq->ifp,
3211		    "mlx5e_modify_sq() from RST to RDY failed: %d\n", err);
3212	}
3213
3214	sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
3215	WRITE_ONCE(sq->running, 1);
3216}
3217
3218static void
3219mlx5e_enable_tx_dma(struct mlx5e_channel *ch)
3220{
3221	int i;
3222
3223	for (i = 0; i < ch->num_tc; i++)
3224		mlx5e_resume_sq(&ch->sq[i]);
3225}
3226
3227static void
3228mlx5e_disable_rx_dma(struct mlx5e_channel *ch)
3229{
3230	struct mlx5e_rq *rq = &ch->rq;
3231	int err;
3232
3233	mtx_lock(&rq->mtx);
3234	rq->enabled = 0;
3235	callout_stop(&rq->watchdog);
3236	mtx_unlock(&rq->mtx);
3237
3238	callout_drain(&rq->watchdog);
3239
3240	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
3241	if (err != 0) {
3242		if_printf(rq->ifp,
3243		    "mlx5e_modify_rq() from RDY to ERR failed: %d\n", err);
3244	}
3245
3246	while (!mlx5_wq_ll_is_empty(&rq->wq)) {
3247		msleep(1);
3248		rq->cq.mcq.comp(&rq->cq.mcq);
3249	}
3250
3251	/*
3252	 * Transitioning into the RST state allows the FW to track fewer ERR
3253	 * state queues, thus reducing the receive queue flushing time.
3254	 */
3255	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_ERR, MLX5_RQC_STATE_RST);
3256	if (err != 0) {
3257		if_printf(rq->ifp,
3258		    "mlx5e_modify_rq() from ERR to RST failed: %d\n", err);
3259	}
3260}
3261
3262static void
3263mlx5e_enable_rx_dma(struct mlx5e_channel *ch)
3264{
3265	struct mlx5e_rq *rq = &ch->rq;
3266	int err;
3267
3268	rq->wq.wqe_ctr = 0;
3269	mlx5_wq_ll_update_db_record(&rq->wq);
3270	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
3271	if (err != 0) {
3272		if_printf(rq->ifp,
3273		    "mlx5e_modify_rq() from RST to RDY failed: %d\n", err);
3274	}
3275
3276	rq->enabled = 1;
3277
3278	rq->cq.mcq.comp(&rq->cq.mcq);
3279}
3280
3281void
3282mlx5e_modify_tx_dma(struct mlx5e_priv *priv, uint8_t value)
3283{
3284	int i;
3285
3286	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
3287		return;
3288
3289	for (i = 0; i < priv->params.num_channels; i++) {
3290		if (value)
3291			mlx5e_disable_tx_dma(&priv->channel[i]);
3292		else
3293			mlx5e_enable_tx_dma(&priv->channel[i]);
3294	}
3295}
3296
3297void
3298mlx5e_modify_rx_dma(struct mlx5e_priv *priv, uint8_t value)
3299{
3300	int i;
3301
3302	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
3303		return;
3304
3305	for (i = 0; i < priv->params.num_channels; i++) {
3306		if (value)
3307			mlx5e_disable_rx_dma(&priv->channel[i]);
3308		else
3309			mlx5e_enable_rx_dma(&priv->channel[i]);
3310	}
3311}
3312
3313static void
3314mlx5e_add_hw_stats(struct mlx5e_priv *priv)
3315{
3316	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
3317	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, priv, 0,
3318	    sysctl_firmware, "A", "HCA firmware version");
3319
3320	SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
3321	    OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0,
3322	    "Board ID");
3323}
3324
3325static int
3326mlx5e_sysctl_tx_priority_flow_control(SYSCTL_HANDLER_ARGS)
3327{
3328	struct mlx5e_priv *priv = arg1;
3329	uint8_t temp[MLX5E_MAX_PRIORITY];
3330	uint32_t tx_pfc;
3331	int err;
3332	int i;
3333
3334	PRIV_LOCK(priv);
3335
3336	tx_pfc = priv->params.tx_priority_flow_control;
3337
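	/* export the current PFC configuration as one byte per priority */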
3338	for (i = 0; i != MLX5E_MAX_PRIORITY; i++)
3339		temp[i] = (tx_pfc >> i) & 1;
3340
3341	err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY);
3342	if (err || !req->newptr)
3343		goto done;
3344	err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY);
3345	if (err)
3346		goto done;
3347
3348	priv->params.tx_priority_flow_control = 0;
3349
3350	/* range check input value */
3351	for (i = 0; i != MLX5E_MAX_PRIORITY; i++) {
3352		if (temp[i] > 1) {
3353			err = ERANGE;
3354			goto done;
3355		}
3356		priv->params.tx_priority_flow_control |= (temp[i] << i);
3357	}
3358
3359	/* check if update is required */
3360	if (tx_pfc != priv->params.tx_priority_flow_control)
3361		err = -mlx5e_set_port_pfc(priv);
3362done:
3363	if (err != 0)
3364		priv->params.tx_priority_flow_control= tx_pfc;
3365	PRIV_UNLOCK(priv);
3366
3367	return (err);
3368}

static int
mlx5e_sysctl_rx_priority_flow_control(SYSCTL_HANDLER_ARGS)
{
	struct mlx5e_priv *priv = arg1;
	uint8_t temp[MLX5E_MAX_PRIORITY];
	uint32_t rx_pfc;
	int err;
	int i;

	PRIV_LOCK(priv);

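	/*
	 * Same pattern as the TX handler above: stash the current
	 * bitmask for rollback and export one byte per priority.
	 */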
	rx_pfc = priv->params.rx_priority_flow_control;

	for (i = 0; i != MLX5E_MAX_PRIORITY; i++)
		temp[i] = (rx_pfc >> i) & 1;

	err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY);
	if (err || !req->newptr)
		goto done;
	err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY);
	if (err)
		goto done;

	priv->params.rx_priority_flow_control = 0;

	/* range check input value */
	for (i = 0; i != MLX5E_MAX_PRIORITY; i++) {
		if (temp[i] > 1) {
			err = ERANGE;
			goto done;
		}
		priv->params.rx_priority_flow_control |= (temp[i] << i);
	}

	/* check if update is required */
	if (rx_pfc != priv->params.rx_priority_flow_control)
		err = -mlx5e_set_port_pfc(priv);
done:
	if (err != 0)
		priv->params.rx_priority_flow_control = rx_pfc;
	PRIV_UNLOCK(priv);

	return (err);
}

static void
mlx5e_setup_pauseframes(struct mlx5e_priv *priv)
{
#if (__FreeBSD_version < 1100000)
	char path[96];
#endif
	int error;

	/* enable pauseframes by default */
	priv->params.tx_pauseframe_control = 1;
	priv->params.rx_pauseframe_control = 1;

	/* disable priority flow control (PFC) by default */
	priv->params.tx_priority_flow_control = 0;
	priv->params.rx_priority_flow_control = 0;

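	/*
	 * Note that the firmware refuses to enable PFC while global
	 * pauseframes are active; see the EINVAL handling below.
	 */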
#if (__FreeBSD_version < 1100000)
	/* compute path for sysctl */
	snprintf(path, sizeof(path), "dev.mce.%d.tx_pauseframe_control",
	    device_get_unit(priv->mdev->pdev->dev.bsddev));

	/* try to fetch tunable, if any */
	TUNABLE_INT_FETCH(path, &priv->params.tx_pauseframe_control);

	/* compute path for sysctl */
	snprintf(path, sizeof(path), "dev.mce.%d.rx_pauseframe_control",
	    device_get_unit(priv->mdev->pdev->dev.bsddev));

	/* try to fetch tunable, if any */
	TUNABLE_INT_FETCH(path, &priv->params.rx_pauseframe_control);
#endif

	/* register pauseframe SYSCTLs */
	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    OID_AUTO, "tx_pauseframe_control", CTLFLAG_RDTUN,
	    &priv->params.tx_pauseframe_control, 0,
	    "Set to enable TX pause frames. Clear to disable.");

	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    OID_AUTO, "rx_pauseframe_control", CTLFLAG_RDTUN,
	    &priv->params.rx_pauseframe_control, 0,
	    "Set to enable RX pause frames. Clear to disable.");

	/* register priority flow control, PFC, SYSCTLs */
	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    OID_AUTO, "tx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN |
	    CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_tx_priority_flow_control, "CU",
	    "Set to enable TX priority flow control frames for priorities 0..7. Clear to disable.");

	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    OID_AUTO, "rx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN |
	    CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_rx_priority_flow_control, "CU",
	    "Set to enable RX priority flow control frames for priorities 0..7. Clear to disable.");

	PRIV_LOCK(priv);

	/* range check */
	priv->params.tx_pauseframe_control =
	    priv->params.tx_pauseframe_control ? 1 : 0;
	priv->params.rx_pauseframe_control =
	    priv->params.rx_pauseframe_control ? 1 : 0;

	/* update firmware */
	error = mlx5e_set_port_pause_and_pfc(priv);
	if (error == -EINVAL) {
		if_printf(priv->ifp,
		    "Global pauseframes must be disabled before enabling PFC.\n");
		priv->params.rx_priority_flow_control = 0;
		priv->params.tx_priority_flow_control = 0;

		/* update firmware */
		(void) mlx5e_set_port_pause_and_pfc(priv);
	}
	PRIV_UNLOCK(priv);
}

static void *
mlx5e_create_ifp(struct mlx5_core_dev *mdev)
{
	struct ifnet *ifp;
	struct mlx5e_priv *priv;
	u8 dev_addr[ETHER_ADDR_LEN] __aligned(4);
	struct sysctl_oid_list *child;
	int ncv = mdev->priv.eq_table.num_comp_vectors;
	char unit[16];
	int err;
	int i;
	u32 eth_proto_cap;

	if (mlx5e_check_required_hca_cap(mdev)) {
		mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n");
		return (NULL);
	}
	/*
	 * Try to allocate the priv and make room for worst-case
	 * number of channel structures:
	 */
	priv = malloc(sizeof(*priv) +
	    (sizeof(priv->channel[0]) * mdev->priv.eq_table.num_comp_vectors),
	    M_MLX5EN, M_WAITOK | M_ZERO);
	mlx5e_priv_mtx_init(priv);

	ifp = priv->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		mlx5_core_err(mdev, "if_alloc() failed\n");
		goto err_free_priv;
	}
	ifp->if_softc = priv;
	if_initname(ifp, "mce", device_get_unit(mdev->pdev->dev.bsddev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = mlx5e_open;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = mlx5e_ioctl;
	ifp->if_transmit = mlx5e_xmit;
	ifp->if_qflush = if_qflush;
#if (__FreeBSD_version >= 1100000)
	ifp->if_get_counter = mlx5e_get_counter;
#endif
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	/*
	 * Set driver features
	 */
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
	ifp->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_LRO;
	ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO;
	ifp->if_capabilities |= IFCAP_HWSTATS;

	/* set TSO limits so that we don't have to drop TX packets */
	ifp->if_hw_tsomax = MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */;
	ifp->if_hw_tsomaxsegsize = MLX5E_MAX_TX_MBUF_SIZE;

	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

	/* ifnet sysctl tree */
	sysctl_ctx_init(&priv->sysctl_ctx);
	priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev),
	    OID_AUTO, ifp->if_dname, CTLFLAG_RD, 0, "MLX5 ethernet - interface name");
	if (priv->sysctl_ifnet == NULL) {
		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
		goto err_free_sysctl;
	}
	snprintf(unit, sizeof(unit), "%d", ifp->if_dunit);
	priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    OID_AUTO, unit, CTLFLAG_RD, 0, "MLX5 ethernet - interface unit");
	if (priv->sysctl_ifnet == NULL) {
		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
		goto err_free_sysctl;
	}

	/* HW sysctl tree */
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev));
	priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child,
	    OID_AUTO, "hw", CTLFLAG_RD, 0, "MLX5 ethernet dev hw");
	if (priv->sysctl_hw == NULL) {
		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
		goto err_free_sysctl;
	}

	err = mlx5e_build_ifp_priv(mdev, priv, ncv);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_build_ifp_priv() failed (%d)\n", err);
		goto err_free_sysctl;
	}

	snprintf(unit, sizeof(unit), "mce%u_wq",
	    device_get_unit(mdev->pdev->dev.bsddev));
	priv->wq = alloc_workqueue(unit, 0, 1);
	if (priv->wq == NULL) {
		if_printf(ifp, "%s: alloc_workqueue failed\n", __func__);
		goto err_free_sysctl;
	}

	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
	if (err) {
		if_printf(ifp, "%s: mlx5_alloc_map_uar failed, %d\n",
		    __func__, err);
		goto err_free_wq;
	}
	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
	if (err) {
		if_printf(ifp, "%s: mlx5_core_alloc_pd failed, %d\n",
		    __func__, err);
		goto err_unmap_free_uar;
	}
	err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
	if (err) {
		if_printf(ifp, "%s: mlx5_alloc_transport_domain failed, %d\n",
		    __func__, err);
		goto err_dealloc_pd;
	}
	err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
	if (err) {
		if_printf(ifp, "%s: mlx5e_create_mkey failed, %d\n",
		    __func__, err);
		goto err_dealloc_transport_domain;
	}
	mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr);

	/* check if we should generate a random MAC address */
	if (MLX5_CAP_GEN(priv->mdev, vport_group_manager) == 0 &&
	    is_zero_ether_addr(dev_addr)) {
		random_ether_addr(dev_addr);
		if_printf(ifp, "Assigned random MAC address\n");
	}

	/* set default MTU */
	mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu);

	/* Set default media status */
	priv->media_status_last = IFM_AVALID;
	priv->media_active_last = IFM_ETHER | IFM_AUTO |
	    IFM_ETH_RXPAUSE | IFM_FDX;

	/* setup default pauseframes configuration */
	mlx5e_setup_pauseframes(priv);

	err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
	if (err) {
		eth_proto_cap = 0;
		if_printf(ifp, "%s: Query port media capability failed, %d\n",
		    __func__, err);
	}

	/* Setup supported media */
	ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
	    mlx5e_media_change, mlx5e_media_status);

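	/*
	 * Advertise every link mode the port reports as supported,
	 * both without modifiers and as full duplex with RX/TX pause.
	 */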
	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
		if (mlx5e_mode_table[i].baudrate == 0)
			continue;
		if (MLX5E_PROT_MASK(i) & eth_proto_cap) {
			ifmedia_add(&priv->media,
			    mlx5e_mode_table[i].subtype |
			    IFM_ETHER, 0, NULL);
			ifmedia_add(&priv->media,
			    mlx5e_mode_table[i].subtype |
			    IFM_ETHER | IFM_FDX |
			    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
		}
	}

	/* Additional supported media */
	ifmedia_add(&priv->media, IFM_10G_LR | IFM_ETHER, 0, NULL);
	ifmedia_add(&priv->media, IFM_10G_LR |
	    IFM_ETHER | IFM_FDX |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);

	ifmedia_add(&priv->media, IFM_40G_ER4 | IFM_ETHER, 0, NULL);
	ifmedia_add(&priv->media, IFM_40G_ER4 |
	    IFM_ETHER | IFM_FDX |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);

	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);

	/* Set autoselect by default */
	ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
	ether_ifattach(ifp, dev_addr);

	/* Register for VLAN events */
	priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
	priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    mlx5e_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);

	/* Link is down by default */
	if_link_state_change(ifp, LINK_STATE_DOWN);

	mlx5e_enable_async_events(priv);

	mlx5e_add_hw_stats(priv);

	mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    "vstats", mlx5e_vport_stats_desc, MLX5E_VPORT_STATS_NUM,
	    priv->stats.vport.arg);

	mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    "pstats", mlx5e_pport_stats_desc, MLX5E_PPORT_STATS_NUM,
	    priv->stats.pport.arg);

	mlx5e_create_ethtool(priv);

	mtx_lock(&priv->async_events_mtx);
	mlx5e_update_stats(priv);
	mtx_unlock(&priv->async_events_mtx);

	return (priv);

err_dealloc_transport_domain:
	mlx5_dealloc_transport_domain(mdev, priv->tdn);

err_dealloc_pd:
	mlx5_core_dealloc_pd(mdev, priv->pdn);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &priv->cq_uar);

err_free_wq:
	destroy_workqueue(priv->wq);

err_free_sysctl:
	sysctl_ctx_free(&priv->sysctl_ctx);
	if (priv->sysctl_debug)
		sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
	if_free(ifp);

err_free_priv:
	mlx5e_priv_mtx_destroy(priv);
	free(priv, M_MLX5EN);
	return (NULL);
}

static void
mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct ifnet *ifp = priv->ifp;

	/* don't allow more IOCTLs */
	priv->gone = 1;

	/* XXX wait a bit to allow IOCTL handlers to complete */
	pause("W", hz);

	/* stop watchdog timer */
	callout_drain(&priv->watchdog);

	if (priv->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
	if (priv->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);

	/* make sure device gets closed */
	PRIV_LOCK(priv);
	mlx5e_close_locked(ifp);
	PRIV_UNLOCK(priv);

	/* unregister device */
	ifmedia_removeall(&priv->media);
	ether_ifdetach(ifp);
	if_free(ifp);

	/* destroy all remaining sysctl nodes */
	sysctl_ctx_free(&priv->stats.vport.ctx);
	sysctl_ctx_free(&priv->stats.pport.ctx);
	if (priv->sysctl_debug)
		sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
	sysctl_ctx_free(&priv->sysctl_ctx);

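	/* release hardware resources in reverse order of allocation */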
	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
	mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
	mlx5e_disable_async_events(priv);
	destroy_workqueue(priv->wq);
	mlx5e_priv_mtx_destroy(priv);
	free(priv, M_MLX5EN);
}

static void *
mlx5e_get_ifp(void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	return (priv->ifp);
}

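/*
 * Hooks registered with the mlx5 core driver; the core invokes
 * "add" and "remove" as ethernet-capable mlx5 devices come and go.
 */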
static struct mlx5_interface mlx5e_interface = {
	.add = mlx5e_create_ifp,
	.remove = mlx5e_destroy_ifp,
	.event = mlx5e_async_event,
	.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev = mlx5e_get_ifp,
};

void
mlx5e_init(void)
{
	mlx5_register_interface(&mlx5e_interface);
}

void
mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}

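/* print the driver version string once at driver initialization time */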
static void
mlx5e_show_version(void __unused *arg)
{

	printf("%s", mlx5e_version);
}
SYSINIT(mlx5e_show_version, SI_SUB_DRIVERS, SI_ORDER_ANY, mlx5e_show_version, NULL);

module_init_order(mlx5e_init, SI_ORDER_THIRD);
module_exit_order(mlx5e_cleanup, SI_ORDER_THIRD);

#if (__FreeBSD_version >= 1100000)
MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1);
#endif
MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1);
MODULE_VERSION(mlx5en, 1);
