mlx5_en_main.c revision 341970
1/*-
2 * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_en/mlx5_en_main.c 341970 2018-12-12 12:58:05Z hselasky $
26 */
27
28#include "en.h"
29
30#include <sys/sockio.h>
31#include <machine/atomic.h>
32
33#ifndef ETH_DRIVER_VERSION
34#define	ETH_DRIVER_VERSION	"3.4.2"
35#endif
36
37static const char mlx5e_version[] = "mlx5en: Mellanox Ethernet driver "
38	ETH_DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
39
40static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs);
41
42struct mlx5e_channel_param {
43	struct mlx5e_rq_param rq;
44	struct mlx5e_sq_param sq;
45	struct mlx5e_cq_param rx_cq;
46	struct mlx5e_cq_param tx_cq;
47};
48
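/*
 * Table mapping firmware Ethernet link modes, i.e. bit positions in
 * the PTYS register, to ifmedia subtypes and nominal baudrates.
 * Entries with a zero baudrate are not supported and are skipped by
 * the lookup loops below.
 */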
49static const struct {
50	u32	subtype;
51	u64	baudrate;
52}	mlx5e_mode_table[MLX5E_LINK_MODES_NUMBER] = {
53
54	[MLX5E_1000BASE_CX_SGMII] = {
55		.subtype = IFM_1000_CX_SGMII,
56		.baudrate = IF_Mbps(1000ULL),
57	},
58	[MLX5E_1000BASE_KX] = {
59		.subtype = IFM_1000_KX,
60		.baudrate = IF_Mbps(1000ULL),
61	},
62	[MLX5E_10GBASE_CX4] = {
63		.subtype = IFM_10G_CX4,
64		.baudrate = IF_Gbps(10ULL),
65	},
66	[MLX5E_10GBASE_KX4] = {
67		.subtype = IFM_10G_KX4,
68		.baudrate = IF_Gbps(10ULL),
69	},
70	[MLX5E_10GBASE_KR] = {
71		.subtype = IFM_10G_KR,
72		.baudrate = IF_Gbps(10ULL),
73	},
74	[MLX5E_20GBASE_KR2] = {
75		.subtype = IFM_20G_KR2,
76		.baudrate = IF_Gbps(20ULL),
77	},
78	[MLX5E_40GBASE_CR4] = {
79		.subtype = IFM_40G_CR4,
80		.baudrate = IF_Gbps(40ULL),
81	},
82	[MLX5E_40GBASE_KR4] = {
83		.subtype = IFM_40G_KR4,
84		.baudrate = IF_Gbps(40ULL),
85	},
86	[MLX5E_56GBASE_R4] = {
87		.subtype = IFM_56G_R4,
88		.baudrate = IF_Gbps(56ULL),
89	},
90	[MLX5E_10GBASE_CR] = {
91		.subtype = IFM_10G_CR1,
92		.baudrate = IF_Gbps(10ULL),
93	},
94	[MLX5E_10GBASE_SR] = {
95		.subtype = IFM_10G_SR,
96		.baudrate = IF_Gbps(10ULL),
97	},
98	[MLX5E_10GBASE_ER] = {
99		.subtype = IFM_10G_ER,
100		.baudrate = IF_Gbps(10ULL),
101	},
102	[MLX5E_40GBASE_SR4] = {
103		.subtype = IFM_40G_SR4,
104		.baudrate = IF_Gbps(40ULL),
105	},
106	[MLX5E_40GBASE_LR4] = {
107		.subtype = IFM_40G_LR4,
108		.baudrate = IF_Gbps(40ULL),
109	},
110	[MLX5E_100GBASE_CR4] = {
111		.subtype = IFM_100G_CR4,
112		.baudrate = IF_Gbps(100ULL),
113	},
114	[MLX5E_100GBASE_SR4] = {
115		.subtype = IFM_100G_SR4,
116		.baudrate = IF_Gbps(100ULL),
117	},
118	[MLX5E_100GBASE_KR4] = {
119		.subtype = IFM_100G_KR4,
120		.baudrate = IF_Gbps(100ULL),
121	},
122	[MLX5E_100GBASE_LR4] = {
123		.subtype = IFM_100G_LR4,
124		.baudrate = IF_Gbps(100ULL),
125	},
126	[MLX5E_100BASE_TX] = {
127		.subtype = IFM_100_TX,
128		.baudrate = IF_Mbps(100ULL),
129	},
130	[MLX5E_1000BASE_T] = {
131		.subtype = IFM_1000_T,
132		.baudrate = IF_Mbps(1000ULL),
133	},
134	[MLX5E_10GBASE_T] = {
135		.subtype = IFM_10G_T,
136		.baudrate = IF_Gbps(10ULL),
137	},
138	[MLX5E_25GBASE_CR] = {
139		.subtype = IFM_25G_CR,
140		.baudrate = IF_Gbps(25ULL),
141	},
142	[MLX5E_25GBASE_KR] = {
143		.subtype = IFM_25G_KR,
144		.baudrate = IF_Gbps(25ULL),
145	},
146	[MLX5E_25GBASE_SR] = {
147		.subtype = IFM_25G_SR,
148		.baudrate = IF_Gbps(25ULL),
149	},
150	[MLX5E_50GBASE_CR2] = {
151		.subtype = IFM_50G_CR2,
152		.baudrate = IF_Gbps(50ULL),
153	},
154	[MLX5E_50GBASE_KR2] = {
155		.subtype = IFM_50G_KR2,
156		.baudrate = IF_Gbps(50ULL),
157	},
158};
159
160MALLOC_DEFINE(M_MLX5EN, "MLX5EN", "MLX5 Ethernet");
161
162static void
163mlx5e_update_carrier(struct mlx5e_priv *priv)
164{
165	struct mlx5_core_dev *mdev = priv->mdev;
166	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
167	u32 eth_proto_oper;
168	int error;
169	u8 port_state;
170	u8 i;
171
172	port_state = mlx5_query_vport_state(mdev,
173	    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
174
175	if (port_state == VPORT_STATE_UP) {
176		priv->media_status_last |= IFM_ACTIVE;
177	} else {
178		priv->media_status_last &= ~IFM_ACTIVE;
179		priv->media_active_last = IFM_ETHER;
180		if_link_state_change(priv->ifp, LINK_STATE_DOWN);
181		return;
182	}
183
184	error = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
185	if (error) {
186		priv->media_active_last = IFM_ETHER;
187		priv->ifp->if_baudrate = 1;
188		if_printf(priv->ifp, "%s: query port ptys failed: 0x%x\n",
189		    __func__, error);
190		return;
191	}
192	eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
193
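	/*
	 * "eth_proto_oper" is a bitmask of the operational link mode(s);
	 * scan the mode table and latch the baudrate and media word for
	 * any mode whose bit is set.
	 */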
194	for (i = 0; i != MLX5E_LINK_MODES_NUMBER; i++) {
195		if (mlx5e_mode_table[i].baudrate == 0)
196			continue;
197		if (MLX5E_PROT_MASK(i) & eth_proto_oper) {
198			priv->ifp->if_baudrate =
199			    mlx5e_mode_table[i].baudrate;
200			priv->media_active_last =
201			    mlx5e_mode_table[i].subtype | IFM_ETHER | IFM_FDX;
202		}
203	}
204	if_link_state_change(priv->ifp, LINK_STATE_UP);
205}
206
207static void
208mlx5e_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
209{
210	struct mlx5e_priv *priv = dev->if_softc;
211
212	ifmr->ifm_status = priv->media_status_last;
213	ifmr->ifm_active = priv->media_active_last |
214	    (priv->params.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) |
215	    (priv->params.tx_pauseframe_control ? IFM_ETH_TXPAUSE : 0);
216
217}
218
219static u32
220mlx5e_find_link_mode(u32 subtype)
221{
222	u32 i;
223	u32 link_mode = 0;
224
225	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
226		if (mlx5e_mode_table[i].baudrate == 0)
227			continue;
228		if (mlx5e_mode_table[i].subtype == subtype)
229			link_mode |= MLX5E_PROT_MASK(i);
230	}
231
232	return (link_mode);
233}
234
235static int
236mlx5e_set_port_pause_and_pfc(struct mlx5e_priv *priv)
237{
238	return (mlx5_set_port_pause_and_pfc(priv->mdev, 1,
239	    priv->params.rx_pauseframe_control,
240	    priv->params.tx_pauseframe_control,
241	    priv->params.rx_priority_flow_control,
242	    priv->params.tx_priority_flow_control));
243}
244
245static int
246mlx5e_set_port_pfc(struct mlx5e_priv *priv)
247{
248	int error;
249
250	if (priv->params.rx_pauseframe_control ||
251	    priv->params.tx_pauseframe_control) {
252		if_printf(priv->ifp,
253		    "Global pauseframes must be disabled before enabling PFC.\n");
254		error = -EINVAL;
255	} else {
256		error = mlx5e_set_port_pause_and_pfc(priv);
257	}
258	return (error);
259}
260
261static int
262mlx5e_media_change(struct ifnet *dev)
263{
264	struct mlx5e_priv *priv = dev->if_softc;
265	struct mlx5_core_dev *mdev = priv->mdev;
266	u32 eth_proto_cap;
267	u32 link_mode;
268	int was_opened;
269	int locked;
270	int error;
271
272	locked = PRIV_LOCKED(priv);
273	if (!locked)
274		PRIV_LOCK(priv);
275
276	if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) {
277		error = EINVAL;
278		goto done;
279	}
280	link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media));
281
282	/* query supported capabilities */
283	error = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
284	if (error != 0) {
285		if_printf(dev, "Query port media capability failed\n");
286		goto done;
287	}
288	/* check for autoselect */
289	if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO) {
290		link_mode = eth_proto_cap;
291		if (link_mode == 0) {
292			if_printf(dev, "Port media capability is zero\n");
293			error = EINVAL;
294			goto done;
295		}
296	} else {
297		link_mode = link_mode & eth_proto_cap;
298		if (link_mode == 0) {
299			if_printf(dev, "Unsupported link mode requested\n");
300			error = EINVAL;
301			goto done;
302		}
303	}
304	if (priv->media.ifm_media & (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
305		/* check if PFC is enabled */
306		if (priv->params.rx_priority_flow_control ||
307		    priv->params.tx_priority_flow_control) {
308			if_printf(dev, "PFC must be disabled before enabling global pauseframes.\n");
309			error = EINVAL;
310			goto done;
311		}
312	}
313	/* update pauseframe control bits */
314	priv->params.rx_pauseframe_control =
315	    (priv->media.ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0;
316	priv->params.tx_pauseframe_control =
317	    (priv->media.ifm_media & IFM_ETH_TXPAUSE) ? 1 : 0;
318
319	/* check if device is opened */
320	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
321
322	/* reconfigure the hardware */
323	mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
324	mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN);
325	error = -mlx5e_set_port_pause_and_pfc(priv);
326	if (was_opened)
327		mlx5_set_port_status(mdev, MLX5_PORT_UP);
328
329done:
330	if (!locked)
331		PRIV_UNLOCK(priv);
332	return (error);
333}
334
335static void
336mlx5e_update_carrier_work(struct work_struct *work)
337{
338	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
339	    update_carrier_work);
340
341	PRIV_LOCK(priv);
342	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
343		mlx5e_update_carrier(priv);
344	PRIV_UNLOCK(priv);
345}
346
347/*
348 * This function reads the physical port counters from the firmware
349 * using a predefined layout described by the various MLX5E_PPORT_XXX()
350 * macros. The output is converted from big-endian 64-bit values into
351 * host endian ones and stored in the "priv->stats.pport" structure.
352 */
353static void
354mlx5e_update_pport_counters(struct mlx5e_priv *priv)
355{
356	struct mlx5_core_dev *mdev = priv->mdev;
357	struct mlx5e_pport_stats *s = &priv->stats.pport;
358	struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
359	u32 *in;
360	u32 *out;
361	const u64 *ptr;
362	unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
363	unsigned x;
364	unsigned y;
365	unsigned z;
366
367	/* allocate firmware request structures */
368	in = mlx5_vzalloc(sz);
369	out = mlx5_vzalloc(sz);
370	if (in == NULL || out == NULL)
371		goto free_out;
372
373	/*
374	 * Get pointer to the 64-bit counter set which is located at a
375	 * fixed offset in the output firmware request structure:
376	 */
377	ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set);
378
379	MLX5_SET(ppcnt_reg, in, local_port, 1);
380
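	/*
	 * NOTE: The index "y" deliberately carries over between the
	 * counter groups below so that they land back-to-back in the
	 * "arg" arrays. The first MLX5E_PPORT_PER_PRIO_STATS_NUM slots
	 * are reserved for the per-priority counters, which are read
	 * last.
	 */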
381	/* read IEEE802_3 counter group using predefined counter layout */
382	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
383	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
384	for (x = 0, y = MLX5E_PPORT_PER_PRIO_STATS_NUM;
385	     x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++)
386		s->arg[y] = be64toh(ptr[x]);
387
388	/* read RFC2819 counter group using predefined counter layout */
389	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
390	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
391	for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++)
392		s->arg[y] = be64toh(ptr[x]);
393	for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM +
394	    MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++)
395		s_debug->arg[y] = be64toh(ptr[x]);
396
397	/* read RFC2863 counter group using predefined counter layout */
398	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
399	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
400	for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++)
401		s_debug->arg[y] = be64toh(ptr[x]);
402
403	/* read physical layer stats counter group using predefined counter layout */
404	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
405	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
406	for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++)
407		s_debug->arg[y] = be64toh(ptr[x]);
408
409	/* read per-priority counters */
410	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
411
412	/* iterate all the priorities */
413	for (y = z = 0; z != MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO; z++) {
414		MLX5_SET(ppcnt_reg, in, prio_tc, z);
415		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
416
417		/* read per priority stats counter group using predefined counter layout */
418		for (x = 0; x != (MLX5E_PPORT_PER_PRIO_STATS_NUM /
419		    MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO); x++, y++)
420			s->arg[y] = be64toh(ptr[x]);
421	}
422free_out:
423	/* free firmware request structures */
424	kvfree(in);
425	kvfree(out);
426}
427
428/*
429 * This function is called regularly to collect all statistics
430 * counters from the firmware. The values can be viewed through the
431 * sysctl interface. Execution is serialized using the priv's global
432 * configuration lock.
433 */
434static void
435mlx5e_update_stats_work(struct work_struct *work)
436{
437	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
438	    update_stats_work);
439	struct mlx5_core_dev *mdev = priv->mdev;
440	struct mlx5e_vport_stats *s = &priv->stats.vport;
441	struct mlx5e_rq_stats *rq_stats;
442	struct mlx5e_sq_stats *sq_stats;
443	struct buf_ring *sq_br;
444#if (__FreeBSD_version < 1100000)
445	struct ifnet *ifp = priv->ifp;
446#endif
447
448	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
449	u32 *out;
450	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
451	u64 tso_packets = 0;
452	u64 tso_bytes = 0;
453	u64 tx_queue_dropped = 0;
454	u64 tx_defragged = 0;
455	u64 tx_offload_none = 0;
456	u64 lro_packets = 0;
457	u64 lro_bytes = 0;
458	u64 sw_lro_queued = 0;
459	u64 sw_lro_flushed = 0;
460	u64 rx_csum_none = 0;
461	u64 rx_wqe_err = 0;
462	u32 rx_out_of_buffer = 0;
463	int i;
464	int j;
465
466	PRIV_LOCK(priv);
467	out = mlx5_vzalloc(outlen);
468	if (out == NULL)
469		goto free_out;
470	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
471		goto free_out;
472
473	/* Collect first the SW counters and then the HW counters for consistency */
474	for (i = 0; i < priv->params.num_channels; i++) {
475		struct mlx5e_rq *rq = &priv->channel[i]->rq;
476
477		rq_stats = &priv->channel[i]->rq.stats;
478
479		/* collect stats from LRO */
480		rq_stats->sw_lro_queued = rq->lro.lro_queued;
481		rq_stats->sw_lro_flushed = rq->lro.lro_flushed;
482		sw_lro_queued += rq_stats->sw_lro_queued;
483		sw_lro_flushed += rq_stats->sw_lro_flushed;
484		lro_packets += rq_stats->lro_packets;
485		lro_bytes += rq_stats->lro_bytes;
486		rx_csum_none += rq_stats->csum_none;
487		rx_wqe_err += rq_stats->wqe_err;
488
489		for (j = 0; j < priv->num_tc; j++) {
490			sq_stats = &priv->channel[i]->sq[j].stats;
491			sq_br = priv->channel[i]->sq[j].br;
492
493			tso_packets += sq_stats->tso_packets;
494			tso_bytes += sq_stats->tso_bytes;
495			tx_queue_dropped += sq_stats->dropped;
496			if (sq_br != NULL)
497				tx_queue_dropped += sq_br->br_drops;
498			tx_defragged += sq_stats->defragged;
499			tx_offload_none += sq_stats->csum_offload_none;
500		}
501	}
502
503	s->tx_jumbo_packets =
504	    priv->stats.port_stats_debug.p1519to2047octets +
505	    priv->stats.port_stats_debug.p2048to4095octets +
506	    priv->stats.port_stats_debug.p4096to8191octets +
507	    priv->stats.port_stats_debug.p8192to10239octets;
508
509	/* update counters */
510	s->tso_packets = tso_packets;
511	s->tso_bytes = tso_bytes;
512	s->tx_queue_dropped = tx_queue_dropped;
513	s->tx_defragged = tx_defragged;
514	s->lro_packets = lro_packets;
515	s->lro_bytes = lro_bytes;
516	s->sw_lro_queued = sw_lro_queued;
517	s->sw_lro_flushed = sw_lro_flushed;
518	s->rx_csum_none = rx_csum_none;
519	s->rx_wqe_err = rx_wqe_err;
520
521	/* HW counters */
522	memset(in, 0, sizeof(in));
523
524	MLX5_SET(query_vport_counter_in, in, opcode,
525	    MLX5_CMD_OP_QUERY_VPORT_COUNTER);
526	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
527	MLX5_SET(query_vport_counter_in, in, other_vport, 0);
528
529	memset(out, 0, outlen);
530
531	/* get number of out-of-buffer drops first */
532	if (mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id,
533	    &rx_out_of_buffer))
534		goto free_out;
535
536	/* accumulate difference into a 64-bit counter, handling 32-bit wraparound */
537	s->rx_out_of_buffer += (u64)(u32)(rx_out_of_buffer - s->rx_out_of_buffer_prev);
538	s->rx_out_of_buffer_prev = rx_out_of_buffer;
539
540	/* get port statistics */
541	if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
542		goto free_out;
543
544#define	MLX5_GET_CTR(out, x) \
545	MLX5_GET64(query_vport_counter_out, out, x)
546
547	s->rx_error_packets =
548	    MLX5_GET_CTR(out, received_errors.packets);
549	s->rx_error_bytes =
550	    MLX5_GET_CTR(out, received_errors.octets);
551	s->tx_error_packets =
552	    MLX5_GET_CTR(out, transmit_errors.packets);
553	s->tx_error_bytes =
554	    MLX5_GET_CTR(out, transmit_errors.octets);
555
556	s->rx_unicast_packets =
557	    MLX5_GET_CTR(out, received_eth_unicast.packets);
558	s->rx_unicast_bytes =
559	    MLX5_GET_CTR(out, received_eth_unicast.octets);
560	s->tx_unicast_packets =
561	    MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
562	s->tx_unicast_bytes =
563	    MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
564
565	s->rx_multicast_packets =
566	    MLX5_GET_CTR(out, received_eth_multicast.packets);
567	s->rx_multicast_bytes =
568	    MLX5_GET_CTR(out, received_eth_multicast.octets);
569	s->tx_multicast_packets =
570	    MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
571	s->tx_multicast_bytes =
572	    MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
573
574	s->rx_broadcast_packets =
575	    MLX5_GET_CTR(out, received_eth_broadcast.packets);
576	s->rx_broadcast_bytes =
577	    MLX5_GET_CTR(out, received_eth_broadcast.octets);
578	s->tx_broadcast_packets =
579	    MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
580	s->tx_broadcast_bytes =
581	    MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
582
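	/*
	 * The vport receive counters include packets which were later
	 * dropped for lack of receive WQEs; subtract the out-of-buffer
	 * count so that "rx_packets" reflects what was actually
	 * delivered.
	 */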
583	s->rx_packets =
584	    s->rx_unicast_packets +
585	    s->rx_multicast_packets +
586	    s->rx_broadcast_packets -
587	    s->rx_out_of_buffer;
588	s->rx_bytes =
589	    s->rx_unicast_bytes +
590	    s->rx_multicast_bytes +
591	    s->rx_broadcast_bytes;
592	s->tx_packets =
593	    s->tx_unicast_packets +
594	    s->tx_multicast_packets +
595	    s->tx_broadcast_packets;
596	s->tx_bytes =
597	    s->tx_unicast_bytes +
598	    s->tx_multicast_bytes +
599	    s->tx_broadcast_bytes;
600
601	/* Update calculated offload counters */
602	s->tx_csum_offload = s->tx_packets - tx_offload_none;
603	s->rx_csum_good = s->rx_packets - s->rx_csum_none;
604
605	/* Get physical port counters */
606	mlx5e_update_pport_counters(priv);
607
608#if (__FreeBSD_version < 1100000)
609	/* no get_counters interface in FreeBSD 10 */
610	ifp->if_ipackets = s->rx_packets;
611	ifp->if_ierrors = s->rx_error_packets +
612	    priv->stats.pport.alignment_err +
613	    priv->stats.pport.check_seq_err +
614	    priv->stats.pport.crc_align_errors +
615	    priv->stats.pport.in_range_len_errors +
616	    priv->stats.pport.jabbers +
617	    priv->stats.pport.out_of_range_len +
618	    priv->stats.pport.oversize_pkts +
619	    priv->stats.pport.symbol_err +
620	    priv->stats.pport.too_long_errors +
621	    priv->stats.pport.undersize_pkts +
622	    priv->stats.pport.unsupported_op_rx;
623	ifp->if_iqdrops = s->rx_out_of_buffer +
624	    priv->stats.pport.drop_events;
625	ifp->if_opackets = s->tx_packets;
626	ifp->if_oerrors = s->tx_error_packets;
627	ifp->if_snd.ifq_drops = s->tx_queue_dropped;
628	ifp->if_ibytes = s->rx_bytes;
629	ifp->if_obytes = s->tx_bytes;
630	ifp->if_collisions =
631	    priv->stats.pport.collisions;
632#endif
633
634free_out:
635	kvfree(out);
636
637	/* Update diagnostics, if any */
638	if (priv->params_ethtool.diag_pci_enable ||
639	    priv->params_ethtool.diag_general_enable) {
640		int error = mlx5_core_get_diagnostics_full(mdev,
641		    priv->params_ethtool.diag_pci_enable ? &priv->params_pci : NULL,
642		    priv->params_ethtool.diag_general_enable ? &priv->params_general : NULL);
643		if (error != 0)
644			if_printf(priv->ifp, "Failed reading diagnostics: %d\n", error);
645	}
646	PRIV_UNLOCK(priv);
647}
648
649static void
650mlx5e_update_stats(void *arg)
651{
652	struct mlx5e_priv *priv = arg;
653
654	queue_work(priv->wq, &priv->update_stats_work);
655
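	/* rearm the watchdog so that statistics are refreshed every second */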
656	callout_reset(&priv->watchdog, hz, &mlx5e_update_stats, priv);
657}
658
659static void
660mlx5e_async_event_sub(struct mlx5e_priv *priv,
661    enum mlx5_dev_event event)
662{
663	switch (event) {
664	case MLX5_DEV_EVENT_PORT_UP:
665	case MLX5_DEV_EVENT_PORT_DOWN:
666		queue_work(priv->wq, &priv->update_carrier_work);
667		break;
668
669	default:
670		break;
671	}
672}
673
674static void
675mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
676    enum mlx5_dev_event event, unsigned long param)
677{
678	struct mlx5e_priv *priv = vpriv;
679
680	mtx_lock(&priv->async_events_mtx);
681	if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
682		mlx5e_async_event_sub(priv, event);
683	mtx_unlock(&priv->async_events_mtx);
684}
685
686static void
687mlx5e_enable_async_events(struct mlx5e_priv *priv)
688{
689	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
690}
691
692static void
693mlx5e_disable_async_events(struct mlx5e_priv *priv)
694{
695	mtx_lock(&priv->async_events_mtx);
696	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
697	mtx_unlock(&priv->async_events_mtx);
698}
699
700static const char *mlx5e_rq_stats_desc[] = {
701	MLX5E_RQ_STATS(MLX5E_STATS_DESC)
702};
703
704static int
705mlx5e_create_rq(struct mlx5e_channel *c,
706    struct mlx5e_rq_param *param,
707    struct mlx5e_rq *rq)
708{
709	struct mlx5e_priv *priv = c->priv;
710	struct mlx5_core_dev *mdev = priv->mdev;
711	char buffer[16];
712	void *rqc = param->rqc;
713	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
714	int wq_sz;
715	int err;
716	int i;
717	u32 nsegs, wqe_sz;
718
719	err = mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
720	if (err != 0)
721		goto done;
722
723	/* Create DMA descriptor TAG */
724	if ((err = -bus_dma_tag_create(
725	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
726	    1,				/* any alignment */
727	    0,				/* no boundary */
728	    BUS_SPACE_MAXADDR,		/* lowaddr */
729	    BUS_SPACE_MAXADDR,		/* highaddr */
730	    NULL, NULL,			/* filter, filterarg */
731	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsize */
732	    nsegs,			/* nsegments */
733	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsegsize */
734	    0,				/* flags */
735	    NULL, NULL,			/* lockfunc, lockfuncarg */
736	    &rq->dma_tag)))
737		goto done;
738
739	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
740	    &rq->wq_ctrl);
741	if (err)
742		goto err_free_dma_tag;
743
744	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
745
746	err = mlx5e_get_wqe_sz(priv, &rq->wqe_sz, &rq->nsegs);
747	if (err != 0)
748		goto err_rq_wq_destroy;
749
750	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
751
752	err = -tcp_lro_init_args(&rq->lro, c->ifp, TCP_LRO_ENTRIES, wq_sz);
753	if (err)
754		goto err_rq_wq_destroy;
755
756	rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
757	for (i = 0; i != wq_sz; i++) {
758		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
759#if (MLX5E_MAX_RX_SEGS == 1)
760		uint32_t byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
761#else
762		int j;
763#endif
764
765		err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map);
766		if (err != 0) {
767			while (i--)
768				bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
769			goto err_rq_mbuf_free;
770		}
771
772		/* set value for constant fields */
773#if (MLX5E_MAX_RX_SEGS == 1)
774		wqe->data[0].lkey = c->mkey_be;
775		wqe->data[0].byte_count = cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
776#else
777		for (j = 0; j < rq->nsegs; j++)
778			wqe->data[j].lkey = c->mkey_be;
779#endif
780	}
781
782	rq->ifp = c->ifp;
783	rq->channel = c;
784	rq->ix = c->ix;
785
786	snprintf(buffer, sizeof(buffer), "rxstat%d", c->ix);
787	mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
788	    buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM,
789	    rq->stats.arg);
790	return (0);
791
792err_rq_mbuf_free:
793	free(rq->mbuf, M_MLX5EN);
794	tcp_lro_free(&rq->lro);
795err_rq_wq_destroy:
796	mlx5_wq_destroy(&rq->wq_ctrl);
797err_free_dma_tag:
798	bus_dma_tag_destroy(rq->dma_tag);
799done:
800	return (err);
801}
802
803static void
804mlx5e_destroy_rq(struct mlx5e_rq *rq)
805{
806	int wq_sz;
807	int i;
808
809	/* destroy all sysctl nodes */
810	sysctl_ctx_free(&rq->stats.ctx);
811
812	/* free leftover LRO packets, if any */
813	tcp_lro_free(&rq->lro);
814
815	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
816	for (i = 0; i != wq_sz; i++) {
817		if (rq->mbuf[i].mbuf != NULL) {
818			bus_dmamap_unload(rq->dma_tag, rq->mbuf[i].dma_map);
819			m_freem(rq->mbuf[i].mbuf);
820		}
821		bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
822	}
823	free(rq->mbuf, M_MLX5EN);
824	mlx5_wq_destroy(&rq->wq_ctrl);
825}
826
827static int
828mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
829{
830	struct mlx5e_channel *c = rq->channel;
831	struct mlx5e_priv *priv = c->priv;
832	struct mlx5_core_dev *mdev = priv->mdev;
833
834	void *in;
835	void *rqc;
836	void *wq;
837	int inlen;
838	int err;
839
840	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
841	    sizeof(u64) * rq->wq_ctrl.buf.npages;
842	in = mlx5_vzalloc(inlen);
843	if (in == NULL)
844		return (-ENOMEM);
845
846	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
847	wq = MLX5_ADDR_OF(rqc, rqc, wq);
848
849	memcpy(rqc, param->rqc, sizeof(param->rqc));
850
851	MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
852	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
853	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
854	if (priv->counter_set_id >= 0)
855		MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id);
856	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
857	    PAGE_SHIFT);
858	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
859
860	mlx5_fill_page_array(&rq->wq_ctrl.buf,
861	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));
862
863	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
864
865	kvfree(in);
866
867	return (err);
868}
869
870static int
871mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
872{
873	struct mlx5e_channel *c = rq->channel;
874	struct mlx5e_priv *priv = c->priv;
875	struct mlx5_core_dev *mdev = priv->mdev;
876
877	void *in;
878	void *rqc;
879	int inlen;
880	int err;
881
882	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
883	in = mlx5_vzalloc(inlen);
884	if (in == NULL)
885		return (-ENOMEM);
886
887	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
888
889	MLX5_SET(modify_rq_in, in, rqn, rq->rqn);
890	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
891	MLX5_SET(rqc, rqc, state, next_state);
892
893	err = mlx5_core_modify_rq(mdev, in, inlen);
894
895	kvfree(in);
896
897	return (err);
898}
899
900static void
901mlx5e_disable_rq(struct mlx5e_rq *rq)
902{
903	struct mlx5e_channel *c = rq->channel;
904	struct mlx5e_priv *priv = c->priv;
905	struct mlx5_core_dev *mdev = priv->mdev;
906
907	mlx5_core_destroy_rq(mdev, rq->rqn);
908}
909
910static int
911mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
912{
913	struct mlx5e_channel *c = rq->channel;
914	struct mlx5e_priv *priv = c->priv;
915	struct mlx5_wq_ll *wq = &rq->wq;
916	int i;
917
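	/*
	 * Poll for up to roughly four seconds, i.e. 1000 iterations with
	 * a 4ms sleep each, before giving up.
	 */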
918	for (i = 0; i < 1000; i++) {
919		if (wq->cur_sz >= priv->params.min_rx_wqes)
920			return (0);
921
922		msleep(4);
923	}
924	return (-ETIMEDOUT);
925}
926
927static int
928mlx5e_open_rq(struct mlx5e_channel *c,
929    struct mlx5e_rq_param *param,
930    struct mlx5e_rq *rq)
931{
932	int err;
933
934	err = mlx5e_create_rq(c, param, rq);
935	if (err)
936		return (err);
937
938	err = mlx5e_enable_rq(rq, param);
939	if (err)
940		goto err_destroy_rq;
941
942	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
943	if (err)
944		goto err_disable_rq;
945
946	c->rq.enabled = 1;
947
948	return (0);
949
950err_disable_rq:
951	mlx5e_disable_rq(rq);
952err_destroy_rq:
953	mlx5e_destroy_rq(rq);
954
955	return (err);
956}
957
958static void
959mlx5e_close_rq(struct mlx5e_rq *rq)
960{
961	mtx_lock(&rq->mtx);
962	rq->enabled = 0;
963	callout_stop(&rq->watchdog);
964	mtx_unlock(&rq->mtx);
965
966	callout_drain(&rq->watchdog);
967
968	mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
969}
970
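/*
 * RQ teardown is split in two: mlx5e_close_rq() above moves the RQ into
 * the error state, while this function polls the completion queue until
 * all outstanding WQEs have been reclaimed before freeing the queue.
 */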
971static void
972mlx5e_close_rq_wait(struct mlx5e_rq *rq)
973{
974	struct mlx5_core_dev *mdev = rq->channel->priv->mdev;
975
976	/* wait till RQ is empty */
977	while (!mlx5_wq_ll_is_empty(&rq->wq) &&
978	       (mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)) {
979		msleep(4);
980		rq->cq.mcq.comp(&rq->cq.mcq);
981	}
982
983	mlx5e_disable_rq(rq);
984	mlx5e_destroy_rq(rq);
985}
986
987void
988mlx5e_free_sq_db(struct mlx5e_sq *sq)
989{
990	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
991	int x;
992
993	for (x = 0; x != wq_sz; x++)
994		bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
995	free(sq->mbuf, M_MLX5EN);
996}
997
998int
999mlx5e_alloc_sq_db(struct mlx5e_sq *sq)
1000{
1001	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1002	int err;
1003	int x;
1004
1005	sq->mbuf = malloc(wq_sz * sizeof(sq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
1006
1007	/* Create DMA descriptor MAPs */
1008	for (x = 0; x != wq_sz; x++) {
1009		err = -bus_dmamap_create(sq->dma_tag, 0, &sq->mbuf[x].dma_map);
1010		if (err != 0) {
1011			while (x--)
1012				bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
1013			free(sq->mbuf, M_MLX5EN);
1014			return (err);
1015		}
1016	}
1017	return (0);
1018}
1019
1020static const char *mlx5e_sq_stats_desc[] = {
1021	MLX5E_SQ_STATS(MLX5E_STATS_DESC)
1022};
1023
1024static int
1025mlx5e_create_sq(struct mlx5e_channel *c,
1026    int tc,
1027    struct mlx5e_sq_param *param,
1028    struct mlx5e_sq *sq)
1029{
1030	struct mlx5e_priv *priv = c->priv;
1031	struct mlx5_core_dev *mdev = priv->mdev;
1032	char buffer[16];
1033	void *sqc = param->sqc;
1034	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
1035	int err;
1036
1037	/* Create DMA descriptor TAG */
1038	if ((err = -bus_dma_tag_create(
1039	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
1040	    1,				/* any alignment */
1041	    0,				/* no boundary */
1042	    BUS_SPACE_MAXADDR,		/* lowaddr */
1043	    BUS_SPACE_MAXADDR,		/* highaddr */
1044	    NULL, NULL,			/* filter, filterarg */
1045	    MLX5E_MAX_TX_PAYLOAD_SIZE,	/* maxsize */
1046	    MLX5E_MAX_TX_MBUF_FRAGS,	/* nsegments */
1047	    MLX5E_MAX_TX_MBUF_SIZE,	/* maxsegsize */
1048	    0,				/* flags */
1049	    NULL, NULL,			/* lockfunc, lockfuncarg */
1050	    &sq->dma_tag)))
1051		goto done;
1052
1053	err = mlx5_alloc_map_uar(mdev, &sq->uar);
1054	if (err)
1055		goto err_free_dma_tag;
1056
1057	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
1058	    &sq->wq_ctrl);
1059	if (err)
1060		goto err_unmap_free_uar;
1061
1062	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
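	/* the BlueFlame register is split in two halves, used alternately */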
1063	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
1064
1065	err = mlx5e_alloc_sq_db(sq);
1066	if (err)
1067		goto err_sq_wq_destroy;
1068
1069	sq->mkey_be = c->mkey_be;
1070	sq->ifp = priv->ifp;
1071	sq->priv = priv;
1072	sq->tc = tc;
1073	sq->max_inline = priv->params.tx_max_inline;
1074	sq->min_inline_mode = priv->params.tx_min_inline_mode;
1075	sq->vlan_inline_cap = MLX5_CAP_ETH(mdev, wqe_vlan_insert);
1076
1077	snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc);
1078	mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
1079	    buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM,
1080	    sq->stats.arg);
1081
1082	return (0);
1083
1084err_sq_wq_destroy:
1085	mlx5_wq_destroy(&sq->wq_ctrl);
1086
1087err_unmap_free_uar:
1088	mlx5_unmap_free_uar(mdev, &sq->uar);
1089
1090err_free_dma_tag:
1091	bus_dma_tag_destroy(sq->dma_tag);
1092done:
1093	return (err);
1094}
1095
1096static void
1097mlx5e_destroy_sq(struct mlx5e_sq *sq)
1098{
1099	/* destroy all sysctl nodes */
1100	sysctl_ctx_free(&sq->stats.ctx);
1101
1102	mlx5e_free_sq_db(sq);
1103	mlx5_wq_destroy(&sq->wq_ctrl);
1104	mlx5_unmap_free_uar(sq->priv->mdev, &sq->uar);
1105}
1106
1107int
1108mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param,
1109    int tis_num)
1110{
1111	void *in;
1112	void *sqc;
1113	void *wq;
1114	int inlen;
1115	int err;
1116
1117	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
1118	    sizeof(u64) * sq->wq_ctrl.buf.npages;
1119	in = mlx5_vzalloc(inlen);
1120	if (in == NULL)
1121		return (-ENOMEM);
1122
1123	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
1124	wq = MLX5_ADDR_OF(sqc, sqc, wq);
1125
1126	memcpy(sqc, param->sqc, sizeof(param->sqc));
1127
1128	MLX5_SET(sqc, sqc, tis_num_0, tis_num);
1129	MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
1130	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
1131	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
1132	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
1133
1134	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
1135	MLX5_SET(wq, wq, uar_page, sq->uar.index);
1136	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
1137	    PAGE_SHIFT);
1138	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
1139
1140	mlx5_fill_page_array(&sq->wq_ctrl.buf,
1141	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));
1142
1143	err = mlx5_core_create_sq(sq->priv->mdev, in, inlen, &sq->sqn);
1144
1145	kvfree(in);
1146
1147	return (err);
1148}
1149
1150int
1151mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
1152{
1153	void *in;
1154	void *sqc;
1155	int inlen;
1156	int err;
1157
1158	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
1159	in = mlx5_vzalloc(inlen);
1160	if (in == NULL)
1161		return (-ENOMEM);
1162
1163	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
1164
1165	MLX5_SET(modify_sq_in, in, sqn, sq->sqn);
1166	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
1167	MLX5_SET(sqc, sqc, state, next_state);
1168
1169	err = mlx5_core_modify_sq(sq->priv->mdev, in, inlen);
1170
1171	kvfree(in);
1172
1173	return (err);
1174}
1175
1176void
1177mlx5e_disable_sq(struct mlx5e_sq *sq)
1178{
1179
1180	mlx5_core_destroy_sq(sq->priv->mdev, sq->sqn);
1181}
1182
1183static int
1184mlx5e_open_sq(struct mlx5e_channel *c,
1185    int tc,
1186    struct mlx5e_sq_param *param,
1187    struct mlx5e_sq *sq)
1188{
1189	int err;
1190
1191	err = mlx5e_create_sq(c, tc, param, sq);
1192	if (err)
1193		return (err);
1194
1195	err = mlx5e_enable_sq(sq, param, c->priv->tisn[tc]);
1196	if (err)
1197		goto err_destroy_sq;
1198
1199	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
1200	if (err)
1201		goto err_disable_sq;
1202
1203	return (0);
1204
1205err_disable_sq:
1206	mlx5e_disable_sq(sq);
1207err_destroy_sq:
1208	mlx5e_destroy_sq(sq);
1209
1210	return (err);
1211}
1212
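/*
 * Pad the transmit ring with NOP WQEs until the completion event
 * counter reaches zero. This forces a completion event to be generated
 * for the last real traffic posted when multiple send WQEs share a
 * single completion event.
 */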
1213static void
1214mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep)
1215{
1216	/* fill up remainder with NOPs */
1217	while (sq->cev_counter != 0) {
1218		while (!mlx5e_sq_has_room_for(sq, 1)) {
1219			if (can_sleep != 0) {
1220				mtx_unlock(&sq->lock);
1221				msleep(4);
1222				mtx_lock(&sq->lock);
1223			} else {
1224				goto done;
1225			}
1226		}
1227		/* send a single NOP */
1228		mlx5e_send_nop(sq, 1);
1229		atomic_thread_fence_rel();
1230	}
1231done:
1232	/* Check if we need to write the doorbell */
1233	if (likely(sq->doorbell.d64 != 0)) {
1234		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
1235		sq->doorbell.d64 = 0;
1236	}
1237}
1238
1239void
1240mlx5e_sq_cev_timeout(void *arg)
1241{
1242	struct mlx5e_sq *sq = arg;
1243
1244	mtx_assert(&sq->lock, MA_OWNED);
1245
1246	/* check next state */
1247	switch (sq->cev_next_state) {
1248	case MLX5E_CEV_STATE_SEND_NOPS:
1249		/* fill TX ring with NOPs, if any */
1250		mlx5e_sq_send_nops_locked(sq, 0);
1251
1252		/* check if completed */
1253		if (sq->cev_counter == 0) {
1254			sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
1255			return;
1256		}
1257		break;
1258	default:
1259		/* send NOPs on next timeout */
1260		sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS;
1261		break;
1262	}
1263
1264	/* restart timer */
1265	callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq);
1266}
1267
1268void
1269mlx5e_drain_sq(struct mlx5e_sq *sq)
1270{
1271	int error;
1272	struct mlx5_core_dev *mdev = sq->priv->mdev;
1273
1274	/*
1275	 * Check if already stopped.
1276	 *
1277	 * NOTE: The "stopped" variable is only written when both the
1278	 * priv's configuration lock and the SQ's lock is locked. It
1279	 * can therefore safely be read when only one of the two locks
1280	 * is locked. This function is always called when the priv's
1281	 * configuration lock is locked.
1282	 */
1283	if (sq->stopped != 0)
1284		return;
1285
1286	mtx_lock(&sq->lock);
1287
1288	/* don't put more packets into the SQ */
1289	sq->stopped = 1;
1290
1291	/* teardown event factor timer, if any */
1292	sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
1293	callout_stop(&sq->cev_callout);
1294
1295	/* send dummy NOPs in order to flush the transmit ring */
1296	mlx5e_sq_send_nops_locked(sq, 1);
1297	mtx_unlock(&sq->lock);
1298
1299	/* make sure it is safe to free the callout */
1300	callout_drain(&sq->cev_callout);
1301
1302	/* wait till SQ is empty or link is down */
1303	mtx_lock(&sq->lock);
1304	while (sq->cc != sq->pc &&
1305	    (sq->priv->media_status_last & IFM_ACTIVE) != 0 &&
1306	    mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
1307		mtx_unlock(&sq->lock);
1308		msleep(1);
1309		sq->cq.mcq.comp(&sq->cq.mcq);
1310		mtx_lock(&sq->lock);
1311	}
1312	mtx_unlock(&sq->lock);
1313
1314	/* error out remaining requests */
1315	error = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
1316	if (error != 0) {
1317		if_printf(sq->ifp,
1318		    "mlx5e_modify_sq() from RDY to ERR failed: %d\n", error);
1319	}
1320
1321	/* wait till SQ is empty */
1322	mtx_lock(&sq->lock);
1323	while (sq->cc != sq->pc &&
1324	       mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
1325		mtx_unlock(&sq->lock);
1326		msleep(1);
1327		sq->cq.mcq.comp(&sq->cq.mcq);
1328		mtx_lock(&sq->lock);
1329	}
1330	mtx_unlock(&sq->lock);
1331}
1332
1333static void
1334mlx5e_close_sq_wait(struct mlx5e_sq *sq)
1335{
1336
1337	mlx5e_drain_sq(sq);
1338	mlx5e_disable_sq(sq);
1339	mlx5e_destroy_sq(sq);
1340}
1341
1342static int
1343mlx5e_create_cq(struct mlx5e_priv *priv,
1344    struct mlx5e_cq_param *param,
1345    struct mlx5e_cq *cq,
1346    mlx5e_cq_comp_t *comp,
1347    int eq_ix)
1348{
1349	struct mlx5_core_dev *mdev = priv->mdev;
1350	struct mlx5_core_cq *mcq = &cq->mcq;
1351	int eqn_not_used;
1352	int irqn;
1353	int err;
1354	u32 i;
1355
1356	param->wq.buf_numa_node = 0;
1357	param->wq.db_numa_node = 0;
1358
1359	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
1360	    &cq->wq_ctrl);
1361	if (err)
1362		return (err);
1363
1364	mlx5_vector2eqn(mdev, eq_ix, &eqn_not_used, &irqn);
1365
1366	mcq->cqe_sz = 64;
1367	mcq->set_ci_db = cq->wq_ctrl.db.db;
1368	mcq->arm_db = cq->wq_ctrl.db.db + 1;
1369	*mcq->set_ci_db = 0;
1370	*mcq->arm_db = 0;
1371	mcq->vector = eq_ix;
1372	mcq->comp = comp;
1373	mcq->event = mlx5e_cq_error_event;
1374	mcq->irqn = irqn;
1375	mcq->uar = &priv->cq_uar;
1376
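	/*
	 * Initialise the "op_own" field of all CQEs to an invalid value
	 * so that completions are not processed before the hardware has
	 * written them.
	 */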
1377	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
1378		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
1379
1380		cqe->op_own = 0xf1;
1381	}
1382
1383	cq->priv = priv;
1384
1385	return (0);
1386}
1387
1388static void
1389mlx5e_destroy_cq(struct mlx5e_cq *cq)
1390{
1391	mlx5_wq_destroy(&cq->wq_ctrl);
1392}
1393
1394static int
1395mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param, int eq_ix)
1396{
1397	struct mlx5_core_cq *mcq = &cq->mcq;
1398	void *in;
1399	void *cqc;
1400	int inlen;
1401	int irqn_not_used;
1402	int eqn;
1403	int err;
1404
1405	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
1406	    sizeof(u64) * cq->wq_ctrl.buf.npages;
1407	in = mlx5_vzalloc(inlen);
1408	if (in == NULL)
1409		return (-ENOMEM);
1410
1411	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
1412
1413	memcpy(cqc, param->cqc, sizeof(param->cqc));
1414
1415	mlx5_fill_page_array(&cq->wq_ctrl.buf,
1416	    (__be64 *) MLX5_ADDR_OF(create_cq_in, in, pas));
1417
1418	mlx5_vector2eqn(cq->priv->mdev, eq_ix, &eqn, &irqn_not_used);
1419
1420	MLX5_SET(cqc, cqc, c_eqn, eqn);
1421	MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
1422	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
1423	    PAGE_SHIFT);
1424	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
1425
1426	err = mlx5_core_create_cq(cq->priv->mdev, mcq, in, inlen);
1427
1428	kvfree(in);
1429
1430	if (err)
1431		return (err);
1432
1433	mlx5e_cq_arm(cq, MLX5_GET_DOORBELL_LOCK(&cq->priv->doorbell_lock));
1434
1435	return (0);
1436}
1437
1438static void
1439mlx5e_disable_cq(struct mlx5e_cq *cq)
1440{
1441
1442	mlx5_core_destroy_cq(cq->priv->mdev, &cq->mcq);
1443}
1444
1445int
1446mlx5e_open_cq(struct mlx5e_priv *priv,
1447    struct mlx5e_cq_param *param,
1448    struct mlx5e_cq *cq,
1449    mlx5e_cq_comp_t *comp,
1450    int eq_ix)
1451{
1452	int err;
1453
1454	err = mlx5e_create_cq(priv, param, cq, comp, eq_ix);
1455	if (err)
1456		return (err);
1457
1458	err = mlx5e_enable_cq(cq, param, eq_ix);
1459	if (err)
1460		goto err_destroy_cq;
1461
1462	return (0);
1463
1464err_destroy_cq:
1465	mlx5e_destroy_cq(cq);
1466
1467	return (err);
1468}
1469
1470void
1471mlx5e_close_cq(struct mlx5e_cq *cq)
1472{
1473	mlx5e_disable_cq(cq);
1474	mlx5e_destroy_cq(cq);
1475}
1476
1477static int
1478mlx5e_open_tx_cqs(struct mlx5e_channel *c,
1479    struct mlx5e_channel_param *cparam)
1480{
1481	int err;
1482	int tc;
1483
1484	for (tc = 0; tc < c->num_tc; tc++) {
1485		/* open completion queue */
1486		err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[tc].cq,
1487		    &mlx5e_tx_cq_comp, c->ix);
1488		if (err)
1489			goto err_close_tx_cqs;
1490	}
1491	return (0);
1492
1493err_close_tx_cqs:
1494	for (tc--; tc >= 0; tc--)
1495		mlx5e_close_cq(&c->sq[tc].cq);
1496
1497	return (err);
1498}
1499
1500static void
1501mlx5e_close_tx_cqs(struct mlx5e_channel *c)
1502{
1503	int tc;
1504
1505	for (tc = 0; tc < c->num_tc; tc++)
1506		mlx5e_close_cq(&c->sq[tc].cq);
1507}
1508
1509static int
1510mlx5e_open_sqs(struct mlx5e_channel *c,
1511    struct mlx5e_channel_param *cparam)
1512{
1513	int err;
1514	int tc;
1515
1516	for (tc = 0; tc < c->num_tc; tc++) {
1517		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
1518		if (err)
1519			goto err_close_sqs;
1520	}
1521
1522	return (0);
1523
1524err_close_sqs:
1525	for (tc--; tc >= 0; tc--)
1526		mlx5e_close_sq_wait(&c->sq[tc]);
1527
1528	return (err);
1529}
1530
1531static void
1532mlx5e_close_sqs_wait(struct mlx5e_channel *c)
1533{
1534	int tc;
1535
1536	for (tc = 0; tc < c->num_tc; tc++)
1537		mlx5e_close_sq_wait(&c->sq[tc]);
1538}
1539
1540static void
1541mlx5e_chan_mtx_init(struct mlx5e_channel *c)
1542{
1543	int tc;
1544
1545	mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF);
1546
1547	callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0);
1548
1549	for (tc = 0; tc < c->num_tc; tc++) {
1550		struct mlx5e_sq *sq = c->sq + tc;
1551
1552		mtx_init(&sq->lock, "mlx5tx",
1553		    MTX_NETWORK_LOCK " TX", MTX_DEF);
1554		mtx_init(&sq->comp_lock, "mlx5comp",
1555		    MTX_NETWORK_LOCK " TX", MTX_DEF);
1556
1557		callout_init_mtx(&sq->cev_callout, &sq->lock, 0);
1558
1559		sq->cev_factor = c->priv->params_ethtool.tx_completion_fact;
1560
1561		/* ensure the TX completion event factor is not zero */
1562		if (sq->cev_factor == 0)
1563			sq->cev_factor = 1;
1564	}
1565}
1566
1567static void
1568mlx5e_chan_mtx_destroy(struct mlx5e_channel *c)
1569{
1570	int tc;
1571
1572	mtx_destroy(&c->rq.mtx);
1573
1574	for (tc = 0; tc < c->num_tc; tc++) {
1575		mtx_destroy(&c->sq[tc].lock);
1576		mtx_destroy(&c->sq[tc].comp_lock);
1577	}
1578}
1579
1580static int
1581mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1582    struct mlx5e_channel_param *cparam,
1583    struct mlx5e_channel *volatile *cp)
1584{
1585	struct mlx5e_channel *c;
1586	int err;
1587
1588	c = malloc(sizeof(*c), M_MLX5EN, M_WAITOK | M_ZERO);
1589	c->priv = priv;
1590	c->ix = ix;
1591	c->cpu = 0;
1592	c->ifp = priv->ifp;
1593	c->mkey_be = cpu_to_be32(priv->mr.key);
1594	c->num_tc = priv->num_tc;
1595
1596	/* init mutexes */
1597	mlx5e_chan_mtx_init(c);
1598
1599	/* open transmit completion queue */
1600	err = mlx5e_open_tx_cqs(c, cparam);
1601	if (err)
1602		goto err_free;
1603
1604	/* open receive completion queue */
1605	err = mlx5e_open_cq(c->priv, &cparam->rx_cq, &c->rq.cq,
1606	    &mlx5e_rx_cq_comp, c->ix);
1607	if (err)
1608		goto err_close_tx_cqs;
1609
1610	err = mlx5e_open_sqs(c, cparam);
1611	if (err)
1612		goto err_close_rx_cq;
1613
1614	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
1615	if (err)
1616		goto err_close_sqs;
1617
1618	/* store channel pointer */
1619	*cp = c;
1620
1621	/* poll receive queue initially */
1622	c->rq.cq.mcq.comp(&c->rq.cq.mcq);
1623
1624	return (0);
1625
1626err_close_sqs:
1627	mlx5e_close_sqs_wait(c);
1628
1629err_close_rx_cq:
1630	mlx5e_close_cq(&c->rq.cq);
1631
1632err_close_tx_cqs:
1633	mlx5e_close_tx_cqs(c);
1634
1635err_free:
1636	/* destroy mutexes */
1637	mlx5e_chan_mtx_destroy(c);
1638	free(c, M_MLX5EN);
1639	return (err);
1640}
1641
1642static void
1643mlx5e_close_channel(struct mlx5e_channel *volatile *pp)
1644{
1645	struct mlx5e_channel *c = *pp;
1646
1647	/* check if channel is already closed */
1648	if (c == NULL)
1649		return;
1650	mlx5e_close_rq(&c->rq);
1651}
1652
1653static void
1654mlx5e_close_channel_wait(struct mlx5e_channel *volatile *pp)
1655{
1656	struct mlx5e_channel *c = *pp;
1657
1658	/* check if channel is already closed */
1659	if (c == NULL)
1660		return;
1661	/* ensure channel pointer is no longer used */
1662	*pp = NULL;
1663
1664	mlx5e_close_rq_wait(&c->rq);
1665	mlx5e_close_sqs_wait(c);
1666	mlx5e_close_cq(&c->rq.cq);
1667	mlx5e_close_tx_cqs(c);
1668	/* destroy mutexes */
1669	mlx5e_chan_mtx_destroy(c);
1670	free(c, M_MLX5EN);
1671}
1672
1673static int
1674mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs)
1675{
1676	u32 r, n;
1677
1678	r = priv->params.hw_lro_en ? priv->params.lro_wqe_sz :
1679	    MLX5E_SW2MB_MTU(priv->ifp->if_mtu);
1680	if (r > MJUM16BYTES)
1681		return (-ENOMEM);
1682
1683	if (r > MJUM9BYTES)
1684		r = MJUM16BYTES;
1685	else if (r > MJUMPAGESIZE)
1686		r = MJUM9BYTES;
1687	else if (r > MCLBYTES)
1688		r = MJUMPAGESIZE;
1689	else
1690		r = MCLBYTES;
1691
1692	/*
1693	 * n + 1 must be a power of two, because stride size must be.
1694	 * Stride size is 16 * (n + 1), as the first segment is
1695	 * Stride size is 16 * (n + 1), as the first segment is the
1696	 * control segment.
1697	for (n = howmany(r, MLX5E_MAX_RX_BYTES); !powerof2(n + 1); n++)
1698		;
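	/*
	 * Example, assuming MLX5E_MAX_RX_BYTES is 4096: a 9k buffer
	 * yields n = 3 data segments, and n + 1 = 4 is already a power
	 * of two, giving a 64-byte stride.
	 */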
1699
1700	*wqe_sz = r;
1701	*nsegs = n;
1702	return (0);
1703}
1704
1705static void
1706mlx5e_build_rq_param(struct mlx5e_priv *priv,
1707    struct mlx5e_rq_param *param)
1708{
1709	void *rqc = param->rqc;
1710	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
1711	u32 wqe_sz, nsegs;
1712
1713	mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
1714	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
1715	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
1716	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe) +
1717	    nsegs * sizeof(struct mlx5_wqe_data_seg)));
1718	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
1719	MLX5_SET(wq, wq, pd, priv->pdn);
1720
1721	param->wq.buf_numa_node = 0;
1722	param->wq.db_numa_node = 0;
1723	param->wq.linear = 1;
1724}
1725
1726static void
1727mlx5e_build_sq_param(struct mlx5e_priv *priv,
1728    struct mlx5e_sq_param *param)
1729{
1730	void *sqc = param->sqc;
1731	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1732
1733	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
1734	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
1735	MLX5_SET(wq, wq, pd, priv->pdn);
1736
1737	param->wq.buf_numa_node = 0;
1738	param->wq.db_numa_node = 0;
1739	param->wq.linear = 1;
1740}
1741
1742static void
1743mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
1744    struct mlx5e_cq_param *param)
1745{
1746	void *cqc = param->cqc;
1747
1748	MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
1749}
1750
1751static void
1752mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
1753    struct mlx5e_cq_param *param)
1754{
1755	void *cqc = param->cqc;
1756
1757
1758	/*
1759	 * TODO: The sysctl controlling this is a boolean for now, which means we
1760	 * only support CSUM; once HASH is implemented we will need to address that.
1761	 */
1762	if (priv->params.cqe_zipping_en) {
1763		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
1764		MLX5_SET(cqc, cqc, cqe_compression_en, 1);
1765	}
1766
1767	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
1768	MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
1769	MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);
1770
1771	switch (priv->params.rx_cq_moderation_mode) {
1772	case 0:
1773		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1774		break;
1775	default:
1776		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
1777			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
1778		else
1779			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1780		break;
1781	}
1782
1783	mlx5e_build_common_cq_param(priv, param);
1784}
1785
1786static void
1787mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
1788    struct mlx5e_cq_param *param)
1789{
1790	void *cqc = param->cqc;
1791
1792	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
1793	MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec);
1794	MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts);
1795
1796	switch (priv->params.tx_cq_moderation_mode) {
1797	case 0:
1798		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1799		break;
1800	default:
1801		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
1802			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
1803		else
1804			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1805		break;
1806	}
1807
1808	mlx5e_build_common_cq_param(priv, param);
1809}
1810
1811static void
1812mlx5e_build_channel_param(struct mlx5e_priv *priv,
1813    struct mlx5e_channel_param *cparam)
1814{
1815	memset(cparam, 0, sizeof(*cparam));
1816
1817	mlx5e_build_rq_param(priv, &cparam->rq);
1818	mlx5e_build_sq_param(priv, &cparam->sq);
1819	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
1820	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
1821}
1822
1823static int
1824mlx5e_open_channels(struct mlx5e_priv *priv)
1825{
1826	struct mlx5e_channel_param cparam;
1827	void *ptr;
1828	int err;
1829	int i;
1830	int j;
1831
1832	priv->channel = malloc(priv->params.num_channels *
1833	    sizeof(struct mlx5e_channel *), M_MLX5EN, M_WAITOK | M_ZERO);
1834
1835	mlx5e_build_channel_param(priv, &cparam);
1836	for (i = 0; i < priv->params.num_channels; i++) {
1837		err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
1838		if (err)
1839			goto err_close_channels;
1840	}
1841
1842	for (j = 0; j < priv->params.num_channels; j++) {
1843		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
1844		if (err)
1845			goto err_close_channels;
1846	}
1847
1848	return (0);
1849
1850err_close_channels:
1851	for (i--; i >= 0; i--) {
1852		mlx5e_close_channel(&priv->channel[i]);
1853		mlx5e_close_channel_wait(&priv->channel[i]);
1854	}
1855
1856	/* remove "volatile" attribute from "channel" pointer */
1857	ptr = __DECONST(void *, priv->channel);
1858	priv->channel = NULL;
1859
1860	free(ptr, M_MLX5EN);
1861
1862	return (err);
1863}
1864
1865static void
1866mlx5e_close_channels(struct mlx5e_priv *priv)
1867{
1868	void *ptr;
1869	int i;
1870
1871	if (priv->channel == NULL)
1872		return;
1873
1874	for (i = 0; i < priv->params.num_channels; i++)
1875		mlx5e_close_channel(&priv->channel[i]);
1876	for (i = 0; i < priv->params.num_channels; i++)
1877		mlx5e_close_channel_wait(&priv->channel[i]);
1878
1879	/* remove "volatile" attribute from "channel" pointer */
1880	ptr = __DECONST(void *, priv->channel);
1881	priv->channel = NULL;
1882
1883	free(ptr, M_MLX5EN);
1884}
1885
1886static int
1887mlx5e_refresh_sq_params(struct mlx5e_priv *priv, struct mlx5e_sq *sq)
1888{
1889
1890	if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
1891		uint8_t cq_mode;
1892
1893		switch (priv->params.tx_cq_moderation_mode) {
1894		case 0:
1895			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
1896			break;
1897		default:
1898			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
1899			break;
1900		}
1901
1902		return (mlx5_core_modify_cq_moderation_mode(priv->mdev, &sq->cq.mcq,
1903		    priv->params.tx_cq_moderation_usec,
1904		    priv->params.tx_cq_moderation_pkts,
1905		    cq_mode));
1906	}
1907
1908	return (mlx5_core_modify_cq_moderation(priv->mdev, &sq->cq.mcq,
1909	    priv->params.tx_cq_moderation_usec,
1910	    priv->params.tx_cq_moderation_pkts));
1911}
1912
1913static int
1914mlx5e_refresh_rq_params(struct mlx5e_priv *priv, struct mlx5e_rq *rq)
1915{
1916
1917	if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
1918		uint8_t cq_mode;
1919		int retval;
1920
1921		switch (priv->params.rx_cq_moderation_mode) {
1922		case 0:
1923			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
1924			break;
1925		default:
1926			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
1927			break;
1928		}
1929
1930		retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq,
1931		    priv->params.rx_cq_moderation_usec,
1932		    priv->params.rx_cq_moderation_pkts,
1933		    cq_mode);
1934
1935		return (retval);
1936	}
1937
1938	return (mlx5_core_modify_cq_moderation(priv->mdev, &rq->cq.mcq,
1939	    priv->params.rx_cq_moderation_usec,
1940	    priv->params.rx_cq_moderation_pkts));
1941}
1942
1943static int
1944mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
1945{
1946	int err;
1947	int i;
1948
1949	if (c == NULL)
1950		return (EINVAL);
1951
1952	err = mlx5e_refresh_rq_params(priv, &c->rq);
1953	if (err)
1954		goto done;
1955
1956	for (i = 0; i != c->num_tc; i++) {
1957		err = mlx5e_refresh_sq_params(priv, &c->sq[i]);
1958		if (err)
1959			goto done;
1960	}
1961done:
1962	return (err);
1963}
1964
1965int
1966mlx5e_refresh_channel_params(struct mlx5e_priv *priv)
1967{
1968	int i;
1969
1970	if (priv->channel == NULL)
1971		return (EINVAL);
1972
1973	for (i = 0; i < priv->params.num_channels; i++) {
1974		int err;
1975
1976		err = mlx5e_refresh_channel_params_sub(priv, priv->channel[i]);
1977		if (err)
1978			return (err);
1979	}
1980	return (0);
1981}
1982
1983static int
1984mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
1985{
1986	struct mlx5_core_dev *mdev = priv->mdev;
1987	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
1988	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
1989
1990	memset(in, 0, sizeof(in));
1991
1992	MLX5_SET(tisc, tisc, prio, tc);
1993	MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
1994
1995	return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]));
1996}
1997
1998static void
1999mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
2000{
2001	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
2002}
2003
2004static int
2005mlx5e_open_tises(struct mlx5e_priv *priv)
2006{
2007	int num_tc = priv->num_tc;
2008	int err;
2009	int tc;
2010
2011	for (tc = 0; tc < num_tc; tc++) {
2012		err = mlx5e_open_tis(priv, tc);
2013		if (err)
2014			goto err_close_tises;
2015	}
2016
2017	return (0);
2018
2019err_close_tises:
2020	for (tc--; tc >= 0; tc--)
2021		mlx5e_close_tis(priv, tc);
2022
2023	return (err);
2024}
2025
2026static void
2027mlx5e_close_tises(struct mlx5e_priv *priv)
2028{
2029	int num_tc = priv->num_tc;
2030	int tc;
2031
2032	for (tc = 0; tc < num_tc; tc++)
2033		mlx5e_close_tis(priv, tc);
2034}
2035
2036static int
2037mlx5e_open_rqt(struct mlx5e_priv *priv)
2038{
2039	struct mlx5_core_dev *mdev = priv->mdev;
2040	u32 *in;
2041	u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
2042	void *rqtc;
2043	int inlen;
2044	int err;
2045	int sz;
2046	int i;
2047
2048	sz = 1 << priv->params.rx_hash_log_tbl_sz;
2049
2050	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
2051	in = mlx5_vzalloc(inlen);
2052	if (in == NULL)
2053		return (-ENOMEM);
2054	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
2055
2056	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2057	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
2058
2059	for (i = 0; i < sz; i++) {
2060		int ix = i;
2061#ifdef RSS
2062		ix = rss_get_indirection_to_bucket(ix);
2063#endif
2064		/* ensure we don't overflow */
2065		ix %= priv->params.num_channels;
2066
2067		/* apply receive side scaling stride, if any */
2068		ix -= ix % (int)priv->params.channels_rsss;
2069
2070		MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
2071	}
2072
2073	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
2074
2075	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
2076	if (!err)
2077		priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
2078
2079	kvfree(in);
2080
2081	return (err);
2082}
2083
2084static void
2085mlx5e_close_rqt(struct mlx5e_priv *priv)
2086{
2087	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0};
2088	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0};
2089
2090	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
2091	MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
2092
2093	mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
2094}
2095
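/*
 * Build the context of one TIR (transport interface receive).
 * MLX5E_TT_ANY dispatches directly to channel 0's RQ; every other
 * traffic type dispatches indirectly through the RQT above, using a
 * Toeplitz hash over the per-type field selection set up below.
 */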
2096static void
2097mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
2098{
2099	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2100	__be32 *hkey;
2101
2102	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
2103
2104#define	ROUGH_MAX_L2_L3_HDR_SZ 256
2105
2106#define	MLX5_HASH_IP     (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2107			  MLX5_HASH_FIELD_SEL_DST_IP)
2108
2109#define	MLX5_HASH_ALL    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2110			  MLX5_HASH_FIELD_SEL_DST_IP   |\
2111			  MLX5_HASH_FIELD_SEL_L4_SPORT |\
2112			  MLX5_HASH_FIELD_SEL_L4_DPORT)
2113
2114#define	MLX5_HASH_IP_IPSEC_SPI	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
2115				 MLX5_HASH_FIELD_SEL_DST_IP   |\
2116				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2117
2118	if (priv->params.hw_lro_en) {
2119		MLX5_SET(tirc, tirc, lro_enable_mask,
2120		    MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2121		    MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
2122		MLX5_SET(tirc, tirc, lro_max_msg_sz,
2123		    (priv->params.lro_wqe_sz -
2124		    ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
2125		/* TODO: add the option to choose timer value dynamically */
2126		MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
2127		    MLX5_CAP_ETH(priv->mdev,
2128		    lro_timer_supported_periods[2]));
2129	}
2130
2131	/* setup parameters for hashing TIR type, if any */
2132	switch (tt) {
2133	case MLX5E_TT_ANY:
2134		MLX5_SET(tirc, tirc, disp_type,
2135		    MLX5_TIRC_DISP_TYPE_DIRECT);
2136		MLX5_SET(tirc, tirc, inline_rqn,
2137		    priv->channel[0]->rq.rqn);
2138		break;
2139	default:
2140		MLX5_SET(tirc, tirc, disp_type,
2141		    MLX5_TIRC_DISP_TYPE_INDIRECT);
2142		MLX5_SET(tirc, tirc, indirect_table,
2143		    priv->rqtn);
2144		MLX5_SET(tirc, tirc, rx_hash_fn,
2145		    MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
2146		hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
2147#ifdef RSS
2148		/*
2149		 * The FreeBSD RSS implementation does not currently
2150		 * support symmetric Toeplitz hashes:
2151		 */
2152		MLX5_SET(tirc, tirc, rx_hash_symmetric, 0);
2153		rss_getkey((uint8_t *)hkey);
2154#else
2155		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
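		/*
		 * Default static Toeplitz hash key, used when the
		 * kernel RSS option is not compiled in:
		 */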
2156		hkey[0] = cpu_to_be32(0xD181C62C);
2157		hkey[1] = cpu_to_be32(0xF7F4DB5B);
2158		hkey[2] = cpu_to_be32(0x1983A2FC);
2159		hkey[3] = cpu_to_be32(0x943E1ADB);
2160		hkey[4] = cpu_to_be32(0xD9389E6B);
2161		hkey[5] = cpu_to_be32(0xD1039C2C);
2162		hkey[6] = cpu_to_be32(0xA74499AD);
2163		hkey[7] = cpu_to_be32(0x593D56D9);
2164		hkey[8] = cpu_to_be32(0xF3253C06);
2165		hkey[9] = cpu_to_be32(0x2ADC1FFC);
2166#endif
2167		break;
2168	}
2169
2170	switch (tt) {
2171	case MLX5E_TT_IPV4_TCP:
2172		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2173		    MLX5_L3_PROT_TYPE_IPV4);
2174		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2175		    MLX5_L4_PROT_TYPE_TCP);
2176#ifdef RSS
2177		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) {
2178			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2179			    MLX5_HASH_IP);
2180		} else
2181#endif
2182		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2183		    MLX5_HASH_ALL);
2184		break;
2185
2186	case MLX5E_TT_IPV6_TCP:
2187		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2188		    MLX5_L3_PROT_TYPE_IPV6);
2189		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2190		    MLX5_L4_PROT_TYPE_TCP);
2191#ifdef RSS
2192		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) {
2193			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2194			    MLX5_HASH_IP);
2195		} else
2196#endif
2197		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2198		    MLX5_HASH_ALL);
2199		break;
2200
2201	case MLX5E_TT_IPV4_UDP:
2202		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2203		    MLX5_L3_PROT_TYPE_IPV4);
2204		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2205		    MLX5_L4_PROT_TYPE_UDP);
2206#ifdef RSS
2207		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) {
2208			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2209			    MLX5_HASH_IP);
2210		} else
2211#endif
2212		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2213		    MLX5_HASH_ALL);
2214		break;
2215
2216	case MLX5E_TT_IPV6_UDP:
2217		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2218		    MLX5_L3_PROT_TYPE_IPV6);
2219		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2220		    MLX5_L4_PROT_TYPE_UDP);
2221#ifdef RSS
2222		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) {
2223			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2224			    MLX5_HASH_IP);
2225		} else
2226#endif
2227		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2228		    MLX5_HASH_ALL);
2229		break;
2230
2231	case MLX5E_TT_IPV4_IPSEC_AH:
2232		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2233		    MLX5_L3_PROT_TYPE_IPV4);
2234		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2235		    MLX5_HASH_IP_IPSEC_SPI);
2236		break;
2237
2238	case MLX5E_TT_IPV6_IPSEC_AH:
2239		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2240		    MLX5_L3_PROT_TYPE_IPV6);
2241		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2242		    MLX5_HASH_IP_IPSEC_SPI);
2243		break;
2244
2245	case MLX5E_TT_IPV4_IPSEC_ESP:
2246		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2247		    MLX5_L3_PROT_TYPE_IPV4);
2248		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2249		    MLX5_HASH_IP_IPSEC_SPI);
2250		break;
2251
2252	case MLX5E_TT_IPV6_IPSEC_ESP:
2253		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2254		    MLX5_L3_PROT_TYPE_IPV6);
2255		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2256		    MLX5_HASH_IP_IPSEC_SPI);
2257		break;
2258
2259	case MLX5E_TT_IPV4:
2260		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2261		    MLX5_L3_PROT_TYPE_IPV4);
2262		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2263		    MLX5_HASH_IP);
2264		break;
2265
2266	case MLX5E_TT_IPV6:
2267		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2268		    MLX5_L3_PROT_TYPE_IPV6);
2269		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2270		    MLX5_HASH_IP);
2271		break;
2272
2273	default:
2274		break;
2275	}
2276}
2277
2278static int
2279mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
2280{
2281	struct mlx5_core_dev *mdev = priv->mdev;
2282	u32 *in;
2283	void *tirc;
2284	int inlen;
2285	int err;
2286
2287	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2288	in = mlx5_vzalloc(inlen);
2289	if (in == NULL)
2290		return (-ENOMEM);
2291	tirc = MLX5_ADDR_OF(create_tir_in, in, tir_context);
2292
2293	mlx5e_build_tir_ctx(priv, tirc, tt);
2294
2295	err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
2296
2297	kvfree(in);
2298
2299	return (err);
2300}
2301
2302static void
2303mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
2304{
2305	mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
2306}
2307
2308static int
2309mlx5e_open_tirs(struct mlx5e_priv *priv)
2310{
2311	int err;
2312	int i;
2313
2314	for (i = 0; i < MLX5E_NUM_TT; i++) {
2315		err = mlx5e_open_tir(priv, i);
2316		if (err)
2317			goto err_close_tirs;
2318	}
2319
2320	return (0);
2321
2322err_close_tirs:
2323	for (i--; i >= 0; i--)
2324		mlx5e_close_tir(priv, i);
2325
2326	return (err);
2327}
2328
2329static void
2330mlx5e_close_tirs(struct mlx5e_priv *priv)
2331{
2332	int i;
2333
2334	for (i = 0; i < MLX5E_NUM_TT; i++)
2335		mlx5e_close_tir(priv, i);
2336}
2337
2338/*
2339 * SW MTU does not include headers,
2340 * HW MTU includes all headers and checksums.
2341 */
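/*
 * The flow below programs both the port MTU and the vport context
 * MTU, then reads the effective MTU back (falling back to the port
 * oper MTU) and fails if it does not translate back to the requested
 * SW MTU.
 */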
2342static int
2343mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu)
2344{
2345	struct mlx5e_priv *priv = ifp->if_softc;
2346	struct mlx5_core_dev *mdev = priv->mdev;
2347	int hw_mtu;
2348	int err;
2349
2350	hw_mtu = MLX5E_SW2HW_MTU(sw_mtu);
2351
2352	err = mlx5_set_port_mtu(mdev, hw_mtu);
2353	if (err) {
2354		if_printf(ifp, "%s: mlx5_set_port_mtu failed setting %d, err=%d\n",
2355		    __func__, sw_mtu, err);
2356		return (err);
2357	}
2358
2359	/* Update vport context MTU */
2360	err = mlx5_set_vport_mtu(mdev, hw_mtu);
2361	if (err) {
2362		if_printf(ifp, "%s: Failed updating vport context with MTU size, err=%d\n",
2363		    __func__, err);
2364	}
2365
2366	ifp->if_mtu = sw_mtu;
2367
2368	err = mlx5_query_vport_mtu(mdev, &hw_mtu);
2369	if (err || !hw_mtu) {
2370		/* fallback to port oper mtu */
2371		err = mlx5_query_port_oper_mtu(mdev, &hw_mtu);
2372	}
2373	if (err) {
2374		if_printf(ifp, "Querying port MTU after setting new "
2375		    "MTU value failed\n");
2376		return (err);
2377	} else if (MLX5E_HW2SW_MTU(hw_mtu) < sw_mtu) {
2378		err = -E2BIG;
2379		if_printf(ifp, "Port MTU %d is smaller than "
2380		    "ifp mtu %d\n", hw_mtu, sw_mtu);
2381	} else if (MLX5E_HW2SW_MTU(hw_mtu) > sw_mtu) {
2382		err = -EINVAL;
2383		if_printf(ifp, "Port MTU %d is bigger than "
2384		    "ifp mtu %d\n", hw_mtu, sw_mtu);
2385	}
2386	priv->params_ethtool.hw_mtu = hw_mtu;
2387
2388	return (err);
2389}
2390
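/*
 * Bring-up order: TISes, queue counter, channels, RQT, TIRs, flow
 * table and finally the VLAN rules. The error labels below unwind in
 * exactly the reverse order, and mlx5e_close_locked() mirrors the
 * same order for regular teardown.
 */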
2391int
2392mlx5e_open_locked(struct ifnet *ifp)
2393{
2394	struct mlx5e_priv *priv = ifp->if_softc;
2395	int err;
2396	u16 set_id;
2397
2398	/* check if already opened */
2399	if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
2400		return (0);
2401
2402#ifdef RSS
2403	if (rss_getnumbuckets() > priv->params.num_channels) {
2404		if_printf(ifp, "NOTE: There are more RSS buckets(%u) than "
2405		    "channels(%u) available\n", rss_getnumbuckets(),
2406		    priv->params.num_channels);
2407	}
2408#endif
2409	err = mlx5e_open_tises(priv);
2410	if (err) {
2411		if_printf(ifp, "%s: mlx5e_open_tises failed, %d\n",
2412		    __func__, err);
2413		return (err);
2414	}
2415	err = mlx5_vport_alloc_q_counter(priv->mdev,
2416	    MLX5_INTERFACE_PROTOCOL_ETH, &set_id);
2417	if (err) {
2418		if_printf(priv->ifp,
2419		    "%s: mlx5_vport_alloc_q_counter failed: %d\n",
2420		    __func__, err);
2421		goto err_close_tises;
2422	}
2423	/* store counter set ID */
2424	priv->counter_set_id = set_id;
2425
2426	err = mlx5e_open_channels(priv);
2427	if (err) {
2428		if_printf(ifp, "%s: mlx5e_open_channels failed, %d\n",
2429		    __func__, err);
2430		goto err_dalloc_q_counter;
2431	}
2432	err = mlx5e_open_rqt(priv);
2433	if (err) {
2434		if_printf(ifp, "%s: mlx5e_open_rqt failed, %d\n",
2435		    __func__, err);
2436		goto err_close_channels;
2437	}
2438	err = mlx5e_open_tirs(priv);
2439	if (err) {
2440		if_printf(ifp, "%s: mlx5e_open_tirs failed, %d\n",
2441		    __func__, err);
2442		goto err_close_rqls;
2443	}
2444	err = mlx5e_open_flow_table(priv);
2445	if (err) {
2446		if_printf(ifp, "%s: mlx5e_open_flow_table failed, %d\n",
2447		    __func__, err);
2448		goto err_close_tirs;
2449	}
2450	err = mlx5e_add_all_vlan_rules(priv);
2451	if (err) {
2452		if_printf(ifp, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
2453		    __func__, err);
2454		goto err_close_flow_table;
2455	}
2456	set_bit(MLX5E_STATE_OPENED, &priv->state);
2457
2458	mlx5e_update_carrier(priv);
2459	mlx5e_set_rx_mode_core(priv);
2460
2461	return (0);
2462
2463err_close_flow_table:
2464	mlx5e_close_flow_table(priv);
2465
2466err_close_tirs:
2467	mlx5e_close_tirs(priv);
2468
2469err_close_rqls:
2470	mlx5e_close_rqt(priv);
2471
2472err_close_channels:
2473	mlx5e_close_channels(priv);
2474
2475err_dalloc_q_counter:
2476	mlx5_vport_dealloc_q_counter(priv->mdev,
2477	    MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
2478
2479err_close_tises:
2480	mlx5e_close_tises(priv);
2481
2482	return (err);
2483}
2484
2485static void
2486mlx5e_open(void *arg)
2487{
2488	struct mlx5e_priv *priv = arg;
2489
2490	PRIV_LOCK(priv);
2491	if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP))
2492		if_printf(priv->ifp,
2493		    "%s: Setting port status to up failed\n",
2494		    __func__);
2495
2496	mlx5e_open_locked(priv->ifp);
2497	priv->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2498	PRIV_UNLOCK(priv);
2499}
2500
2501int
2502mlx5e_close_locked(struct ifnet *ifp)
2503{
2504	struct mlx5e_priv *priv = ifp->if_softc;
2505
2506	/* check if already closed */
2507	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
2508		return (0);
2509
2510	clear_bit(MLX5E_STATE_OPENED, &priv->state);
2511
2512	mlx5e_set_rx_mode_core(priv);
2513	mlx5e_del_all_vlan_rules(priv);
2514	if_link_state_change(priv->ifp, LINK_STATE_DOWN);
2515	mlx5e_close_flow_table(priv);
2516	mlx5e_close_tirs(priv);
2517	mlx5e_close_rqt(priv);
2518	mlx5e_close_channels(priv);
2519	mlx5_vport_dealloc_q_counter(priv->mdev,
2520	    MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
2521	mlx5e_close_tises(priv);
2522
2523	return (0);
2524}
2525
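/*
 * if_get_counter() callback. It can be invoked from contexts where
 * sleeping is not allowed, so the counters are read without taking
 * PRIV_LOCK(); the values come from the periodically refreshed
 * vport/pport statistics and may slightly lag the hardware.
 */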
2526#if (__FreeBSD_version >= 1100000)
2527static uint64_t
2528mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt)
2529{
2530	struct mlx5e_priv *priv = ifp->if_softc;
2531	u64 retval;
2532
2533	/* PRIV_LOCK(priv); XXX not allowed */
2534	switch (cnt) {
2535	case IFCOUNTER_IPACKETS:
2536		retval = priv->stats.vport.rx_packets;
2537		break;
2538	case IFCOUNTER_IERRORS:
2539		retval = priv->stats.vport.rx_error_packets +
2540		    priv->stats.pport.alignment_err +
2541		    priv->stats.pport.check_seq_err +
2542		    priv->stats.pport.crc_align_errors +
2543		    priv->stats.pport.in_range_len_errors +
2544		    priv->stats.pport.jabbers +
2545		    priv->stats.pport.out_of_range_len +
2546		    priv->stats.pport.oversize_pkts +
2547		    priv->stats.pport.symbol_err +
2548		    priv->stats.pport.too_long_errors +
2549		    priv->stats.pport.undersize_pkts +
2550		    priv->stats.pport.unsupported_op_rx;
2551		break;
2552	case IFCOUNTER_IQDROPS:
2553		retval = priv->stats.vport.rx_out_of_buffer +
2554		    priv->stats.pport.drop_events;
2555		break;
2556	case IFCOUNTER_OPACKETS:
2557		retval = priv->stats.vport.tx_packets;
2558		break;
2559	case IFCOUNTER_OERRORS:
2560		retval = priv->stats.vport.tx_error_packets;
2561		break;
2562	case IFCOUNTER_IBYTES:
2563		retval = priv->stats.vport.rx_bytes;
2564		break;
2565	case IFCOUNTER_OBYTES:
2566		retval = priv->stats.vport.tx_bytes;
2567		break;
2568	case IFCOUNTER_IMCASTS:
2569		retval = priv->stats.vport.rx_multicast_packets;
2570		break;
2571	case IFCOUNTER_OMCASTS:
2572		retval = priv->stats.vport.tx_multicast_packets;
2573		break;
2574	case IFCOUNTER_OQDROPS:
2575		retval = priv->stats.vport.tx_queue_dropped;
2576		break;
2577	case IFCOUNTER_COLLISIONS:
2578		retval = priv->stats.pport.collisions;
2579		break;
2580	default:
2581		retval = if_get_counter_default(ifp, cnt);
2582		break;
2583	}
2584	/* PRIV_UNLOCK(priv); XXX not allowed */
2585	return (retval);
2586}
2587#endif
2588
2589static void
2590mlx5e_set_rx_mode(struct ifnet *ifp)
2591{
2592	struct mlx5e_priv *priv = ifp->if_softc;
2593
2594	queue_work(priv->wq, &priv->set_rx_mode_work);
2595}
2596
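/*
 * Note on SIOCSIFMTU below: the interface is closed and reopened
 * around the MTU change, presumably because the receive queues are
 * sized from the MTU and must be recreated.
 */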
2597static int
2598mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2599{
2600	struct mlx5e_priv *priv;
2601	struct ifreq *ifr;
2602	struct ifi2creq i2c;
2603	int error = 0;
2604	int mask = 0;
2605	int size_read = 0;
2606	int module_status;
2607	int module_num;
2608	int max_mtu;
2609	uint8_t read_addr;
2610
2611	priv = ifp->if_softc;
2612
2613	/* check if detaching */
2614	if (priv == NULL || priv->gone != 0)
2615		return (ENXIO);
2616
2617	switch (command) {
2618	case SIOCSIFMTU:
2619		ifr = (struct ifreq *)data;
2620
2621		PRIV_LOCK(priv);
2622		mlx5_query_port_max_mtu(priv->mdev, &max_mtu);
2623
2624		if (ifr->ifr_mtu >= MLX5E_MTU_MIN &&
2625		    ifr->ifr_mtu <= MIN(MLX5E_MTU_MAX, max_mtu)) {
2626			int was_opened;
2627
2628			was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2629			if (was_opened)
2630				mlx5e_close_locked(ifp);
2631
2632			/* set new MTU */
2633			mlx5e_set_dev_port_mtu(ifp, ifr->ifr_mtu);
2634
2635			if (was_opened)
2636				mlx5e_open_locked(ifp);
2637		} else {
2638			error = EINVAL;
2639			if_printf(ifp, "Invalid MTU value. Min val: %d, Max val: %d\n",
2640			    MLX5E_MTU_MIN, MIN(MLX5E_MTU_MAX, max_mtu));
2641		}
2642		PRIV_UNLOCK(priv);
2643		break;
2644	case SIOCSIFFLAGS:
2645		if ((ifp->if_flags & IFF_UP) &&
2646		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2647			mlx5e_set_rx_mode(ifp);
2648			break;
2649		}
2650		PRIV_LOCK(priv);
2651		if (ifp->if_flags & IFF_UP) {
2652			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2653				if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
2654					mlx5e_open_locked(ifp);
2655				ifp->if_drv_flags |= IFF_DRV_RUNNING;
2656				mlx5_set_port_status(priv->mdev, MLX5_PORT_UP);
2657			}
2658		} else {
2659			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2660				mlx5_set_port_status(priv->mdev,
2661				    MLX5_PORT_DOWN);
2662				if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
2663					mlx5e_close_locked(ifp);
2664				mlx5e_update_carrier(priv);
2665				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2666			}
2667		}
2668		PRIV_UNLOCK(priv);
2669		break;
2670	case SIOCADDMULTI:
2671	case SIOCDELMULTI:
2672		mlx5e_set_rx_mode(ifp);
2673		break;
2674	case SIOCSIFMEDIA:
2675	case SIOCGIFMEDIA:
2676	case SIOCGIFXMEDIA:
2677		ifr = (struct ifreq *)data;
2678		error = ifmedia_ioctl(ifp, ifr, &priv->media, command);
2679		break;
2680	case SIOCSIFCAP:
2681		ifr = (struct ifreq *)data;
2682		PRIV_LOCK(priv);
2683		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2684
2685		if (mask & IFCAP_TXCSUM) {
2686			ifp->if_capenable ^= IFCAP_TXCSUM;
2687			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2688
2689			if (IFCAP_TSO4 & ifp->if_capenable &&
2690			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
2691				ifp->if_capenable &= ~IFCAP_TSO4;
2692				ifp->if_hwassist &= ~CSUM_IP_TSO;
2693				if_printf(ifp,
2694				    "tso4 disabled due to -txcsum.\n");
2695			}
2696		}
2697		if (mask & IFCAP_TXCSUM_IPV6) {
2698			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
2699			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2700
2701			if (IFCAP_TSO6 & ifp->if_capenable &&
2702			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2703				ifp->if_capenable &= ~IFCAP_TSO6;
2704				ifp->if_hwassist &= ~CSUM_IP6_TSO;
2705				if_printf(ifp,
2706				    "tso6 disabled due to -txcsum6.\n");
2707			}
2708		}
2709		if (mask & IFCAP_RXCSUM)
2710			ifp->if_capenable ^= IFCAP_RXCSUM;
2711		if (mask & IFCAP_RXCSUM_IPV6)
2712			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
2713		if (mask & IFCAP_TSO4) {
2714			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
2715			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
2716				if_printf(ifp, "enable txcsum first.\n");
2717				error = EAGAIN;
2718				goto out;
2719			}
2720			ifp->if_capenable ^= IFCAP_TSO4;
2721			ifp->if_hwassist ^= CSUM_IP_TSO;
2722		}
2723		if (mask & IFCAP_TSO6) {
2724			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
2725			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2726				if_printf(ifp, "enable txcsum6 first.\n");
2727				error = EAGAIN;
2728				goto out;
2729			}
2730			ifp->if_capenable ^= IFCAP_TSO6;
2731			ifp->if_hwassist ^= CSUM_IP6_TSO;
2732		}
2733		if (mask & IFCAP_VLAN_HWFILTER) {
2734			if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2735				mlx5e_disable_vlan_filter(priv);
2736			else
2737				mlx5e_enable_vlan_filter(priv);
2738
2739			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
2740		}
2741		if (mask & IFCAP_VLAN_HWTAGGING)
2742			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2743		if (mask & IFCAP_WOL_MAGIC)
2744			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2745
2746		VLAN_CAPABILITIES(ifp);
2747		/* Turning off LRO also turns off HW LRO, if enabled. */
2748		if (mask & IFCAP_LRO) {
2749			int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2750			bool need_restart = false;
2751
2752			ifp->if_capenable ^= IFCAP_LRO;
2753			if (!(ifp->if_capenable & IFCAP_LRO)) {
2754				if (priv->params.hw_lro_en) {
2755					priv->params.hw_lro_en = false;
2756					need_restart = true;
2757					/* Not sure this is the correct way */
2758					priv->params_ethtool.hw_lro = priv->params.hw_lro_en;
2759				}
2760			}
2761			if (was_opened && need_restart) {
2762				mlx5e_close_locked(ifp);
2763				mlx5e_open_locked(ifp);
2764			}
2765		}
2766out:
2767		PRIV_UNLOCK(priv);
2768		break;
2769
2770	case SIOCGI2C:
2771		ifr = (struct ifreq *)data;
2772
2773		/*
2774		 * Copy from the user-space address ifr_data to the
2775		 * kernel-space address i2c
2776		 */
2777		error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2778		if (error)
2779			break;
2780
2781		if (i2c.len > sizeof(i2c.data)) {
2782			error = EINVAL;
2783			break;
2784		}
2785
2786		PRIV_LOCK(priv);
2787		/* Get module_num which is required for the query_eeprom */
2788		error = mlx5_query_module_num(priv->mdev, &module_num);
2789		if (error) {
2790			if_printf(ifp, "Query module num failed, eeprom "
2791			    "reading is not supported\n");
2792			error = EINVAL;
2793			goto err_i2c;
2794		}
2795		/* Check if module is present before doing an access */
2796		module_status = mlx5_query_module_status(priv->mdev, module_num);
2797		if (module_status != MLX5_MODULE_STATUS_PLUGGED_ENABLED &&
2798		    module_status != MLX5_MODULE_STATUS_PLUGGED_DISABLED) {
2799			error = EINVAL;
2800			goto err_i2c;
2801		}
2802		/*
2803		 * Currently 0xA0 and 0xA2 are the only addresses permitted.
2804		 * The internal conversion is as follows:
2805		 */
2806		if (i2c.dev_addr == 0xA0)
2807			read_addr = MLX5E_I2C_ADDR_LOW;
2808		else if (i2c.dev_addr == 0xA2)
2809			read_addr = MLX5E_I2C_ADDR_HIGH;
2810		else {
2811			if_printf(ifp, "Query eeprom failed, "
2812			    "Invalid Address: %X\n", i2c.dev_addr);
2813			error = EINVAL;
2814			goto err_i2c;
2815		}
2816		error = mlx5_query_eeprom(priv->mdev,
2817		    read_addr, MLX5E_EEPROM_LOW_PAGE,
2818		    (uint32_t)i2c.offset, (uint32_t)i2c.len, module_num,
2819		    (uint32_t *)i2c.data, &size_read);
2820		if (error) {
2821			if_printf(ifp, "Query eeprom failed, eeprom "
2822			    "reading is not supported\n");
2823			error = EINVAL;
2824			goto err_i2c;
2825		}
2826
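		/*
		 * A single query returns at most MLX5_EEPROM_MAX_BYTES;
		 * larger requests are completed by a second query that
		 * continues at the offset already read.
		 */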
2827		if (i2c.len > MLX5_EEPROM_MAX_BYTES) {
2828			error = mlx5_query_eeprom(priv->mdev,
2829			    read_addr, MLX5E_EEPROM_LOW_PAGE,
2830			    (uint32_t)(i2c.offset + size_read),
2831			    (uint32_t)(i2c.len - size_read), module_num,
2832			    (uint32_t *)(i2c.data + size_read), &size_read);
2833		}
2834		if (error) {
2835			if_printf(ifp, "Query eeprom failed, eeprom "
2836			    "reading is not supported\n");
2837			error = EINVAL;
2838			goto err_i2c;
2839		}
2840
2841		error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2842err_i2c:
2843		PRIV_UNLOCK(priv);
2844		break;
2845
2846	default:
2847		error = ether_ioctl(ifp, command, data);
2848		break;
2849	}
2850	return (error);
2851}
2852
2853static int
2854mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
2855{
2856	/*
2857	 * TODO: uncomment once FW really sets all these bits:
2858	 * if (!mdev->caps.eth.rss_ind_tbl_cap || !mdev->caps.eth.csum_cap ||
2859	 *     !mdev->caps.eth.max_lso_cap || !mdev->caps.eth.vlan_cap ||
2860	 *     !(mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD))
2861	 *	return (-ENOTSUPP);
2862	 */
2863
2864	/* TODO: add more must-have features */
2865
2866	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
2867		return (-ENODEV);
2868
2869	return (0);
2870}
2871
2872static u16
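/*
 * The maximum inline size is derived from the blue flame register:
 * half of (1 << log_bf_reg_size) is one send buffer, and the maximum
 * inline payload is that buffer size minus the fixed TX WQE overhead,
 * plus the two bytes of inline_hdr_start already counted inside the
 * WQE.
 */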
2873mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
2874{
2875	int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
2876
2877	return (bf_buf_size -
2878	    sizeof(struct mlx5e_tx_wqe) +
2879	    2 /* sizeof(mlx5e_tx_wqe.inline_hdr_start) */);
2880}
2881
2882static void
2883mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev,
2884    struct mlx5e_priv *priv,
2885    int num_comp_vectors)
2886{
2887	/*
2888	 * TODO: Consider link speed for setting "log_sq_size",
2889	 * "log_rq_size" and "cq_moderation_xxx":
2890	 */
2891	priv->params.log_sq_size =
2892	    MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
2893	priv->params.log_rq_size =
2894	    MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
2895	priv->params.rx_cq_moderation_usec =
2896	    MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
2897	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE :
2898	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
2899	priv->params.rx_cq_moderation_mode =
2900	    MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 1 : 0;
2901	priv->params.rx_cq_moderation_pkts =
2902	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
2903	priv->params.tx_cq_moderation_usec =
2904	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
2905	priv->params.tx_cq_moderation_pkts =
2906	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
2907	priv->params.min_rx_wqes =
2908	    MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
2909	priv->params.rx_hash_log_tbl_sz =
2910	    (order_base_2(num_comp_vectors) >
2911	    MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
2912	    order_base_2(num_comp_vectors) :
2913	    MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
2914	priv->params.num_tc = 1;
2915	priv->params.default_vlan_prio = 0;
2916	priv->counter_set_id = -1;
2917	priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
2918	mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
2919
2920	/*
2921	 * HW LRO is currently defaulted to off. Once that changes, the HW
2922	 * capability "!!MLX5_CAP_ETH(mdev, lro_cap)" should be consulted.
2923	 */
2924	priv->params.hw_lro_en = false;
2925	priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
2926
2927	priv->params.cqe_zipping_en = !!MLX5_CAP_GEN(mdev, cqe_compression);
2928
2929	priv->mdev = mdev;
2930	priv->params.num_channels = num_comp_vectors;
2931	priv->params.channels_rsss = 1;
2932	priv->order_base_2_num_channels = order_base_2(num_comp_vectors);
2933	priv->queue_mapping_channel_mask =
2934	    roundup_pow_of_two(num_comp_vectors) - 1;
2935	priv->num_tc = priv->params.num_tc;
2936	priv->default_vlan_prio = priv->params.default_vlan_prio;
2937
2938	INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
2939	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
2940	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
2941}
2942
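/*
 * Create a physical-address (PA) memory key spanning the whole
 * address space (length64 = 1), so packet buffers can be referenced
 * by physical address directly; qpn = 0xffffff marks the key as not
 * bound to a particular QP.
 */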
2943static int
2944mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
2945		  struct mlx5_core_mr *mkey)
2946{
2947	struct ifnet *ifp = priv->ifp;
2948	struct mlx5_core_dev *mdev = priv->mdev;
2949	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
2950	void *mkc;
2951	u32 *in;
2952	int err;
2953
2954	in = mlx5_vzalloc(inlen);
2955	if (in == NULL) {
2956		if_printf(ifp, "%s: failed to allocate inbox\n", __func__);
2957		return (-ENOMEM);
2958	}
2959
2960	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
2961	MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA);
2962	MLX5_SET(mkc, mkc, lw, 1);
2963	MLX5_SET(mkc, mkc, lr, 1);
2964
2965	MLX5_SET(mkc, mkc, pd, pdn);
2966	MLX5_SET(mkc, mkc, length64, 1);
2967	MLX5_SET(mkc, mkc, qpn, 0xffffff);
2968
2969	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
2970	if (err)
2971		if_printf(ifp, "%s: mlx5_core_create_mkey failed, %d\n",
2972		    __func__, err);
2973
2974	kvfree(in);
2975	return (err);
2976}
2977
2978static const char *mlx5e_vport_stats_desc[] = {
2979	MLX5E_VPORT_STATS(MLX5E_STATS_DESC)
2980};
2981
2982static const char *mlx5e_pport_stats_desc[] = {
2983	MLX5E_PPORT_STATS(MLX5E_STATS_DESC)
2984};
2985
2986static void
2987mlx5e_priv_mtx_init(struct mlx5e_priv *priv)
2988{
2989	mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF);
2990	sx_init(&priv->state_lock, "mlx5state");
2991	callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0);
2992	MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
2993}
2994
2995static void
2996mlx5e_priv_mtx_destroy(struct mlx5e_priv *priv)
2997{
2998	mtx_destroy(&priv->async_events_mtx);
2999	sx_destroy(&priv->state_lock);
3000}
3001
3002static int
3003sysctl_firmware(SYSCTL_HANDLER_ARGS)
3004{
3005	/*
3006	 * "%d.%d.%d" is the string format.
3007	 * fw_rev_{maj,min,sub} return u16, and 2^16 = 65536, so we need
3008	 * at most 5 chars to store each field.
3009	 * Adding the two "." separators and the terminating NUL means we
3010	 * need at most 18 (5*3 + 3) chars.
3011	 */
3012	char fw[18];
3013	struct mlx5e_priv *priv = arg1;
3014	int error;
3015
3016	snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev),
3017	    fw_rev_sub(priv->mdev));
3018	error = sysctl_handle_string(oidp, fw, sizeof(fw), req);
3019	return (error);
3020}
3021
3022static void
3023mlx5e_disable_tx_dma(struct mlx5e_channel *ch)
3024{
3025	int i;
3026
3027	for (i = 0; i < ch->num_tc; i++)
3028		mlx5e_drain_sq(&ch->sq[i]);
3029}
3030
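/*
 * Writing a NOP doorbell record resynchronizes the doorbell state
 * with the hardware before an SQ is moved from RST back to RDY.
 */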
3031static void
3032mlx5e_reset_sq_doorbell_record(struct mlx5e_sq *sq)
3033{
3034
3035	sq->doorbell.d32[0] = cpu_to_be32(MLX5_OPCODE_NOP);
3036	sq->doorbell.d32[1] = cpu_to_be32(sq->sqn << 8);
3037	mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
3038	sq->doorbell.d64 = 0;
3039}
3040
3041void
3042mlx5e_resume_sq(struct mlx5e_sq *sq)
3043{
3044	int err;
3045
3046	/* check if already enabled */
3047	if (sq->stopped == 0)
3048		return;
3049
3050	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_ERR,
3051	    MLX5_SQC_STATE_RST);
3052	if (err != 0) {
3053		if_printf(sq->ifp,
3054		    "mlx5e_modify_sq() from ERR to RST failed: %d\n", err);
3055	}
3056
3057	sq->cc = 0;
3058	sq->pc = 0;
3059
3060	/* reset doorbell prior to moving from RST to RDY */
3061	mlx5e_reset_sq_doorbell_record(sq);
3062
3063	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST,
3064	    MLX5_SQC_STATE_RDY);
3065	if (err != 0) {
3066		if_printf(sq->ifp,
3067		    "mlx5e_modify_sq() from RST to RDY failed: %d\n", err);
3068	}
3069
3070	mtx_lock(&sq->lock);
3071	sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
3072	sq->stopped = 0;
3073	mtx_unlock(&sq->lock);
3075}
3076
3077static void
3078mlx5e_enable_tx_dma(struct mlx5e_channel *ch)
3079{
3080	int i;
3081
3082	for (i = 0; i < ch->num_tc; i++)
3083		mlx5e_resume_sq(&ch->sq[i]);
3084}
3085
3086static void
3087mlx5e_disable_rx_dma(struct mlx5e_channel *ch)
3088{
3089	struct mlx5e_rq *rq = &ch->rq;
3090	int err;
3091
3092	mtx_lock(&rq->mtx);
3093	rq->enabled = 0;
3094	callout_stop(&rq->watchdog);
3095	mtx_unlock(&rq->mtx);
3096
3097	callout_drain(&rq->watchdog);
3098
3099	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
3100	if (err != 0) {
3101		if_printf(rq->ifp,
3102		    "mlx5e_modify_rq() from RDY to ERR failed: %d\n", err);
3103	}
3104
3105	while (!mlx5_wq_ll_is_empty(&rq->wq)) {
3106		msleep(1);
3107		rq->cq.mcq.comp(&rq->cq.mcq);
3108	}
3109
3110	/*
3111	 * Transitioning into the RST state allows the FW to track fewer
3112	 * ERR-state queues, thus reducing the receive queue flushing time.
3113	 */
3114	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_ERR, MLX5_RQC_STATE_RST);
3115	if (err != 0) {
3116		if_printf(rq->ifp,
3117		    "mlx5e_modify_rq() from ERR to RST failed: %d\n", err);
3118	}
3119}
3120
3121static void
3122mlx5e_enable_rx_dma(struct mlx5e_channel *ch)
3123{
3124	struct mlx5e_rq *rq = &ch->rq;
3125	int err;
3126
3127	rq->wq.wqe_ctr = 0;
3128	mlx5_wq_ll_update_db_record(&rq->wq);
3129	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
3130	if (err != 0) {
3131		if_printf(rq->ifp,
3132		    "mlx5e_modify_rq() from RST to RDY failed: %d\n", err);
3133	}
3134
3135	rq->enabled = 1;
3136
3137	rq->cq.mcq.comp(&rq->cq.mcq);
3138}
3139
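/*
 * For the two helpers below, a non-zero "value" disables DMA (drains
 * the queues) and zero re-enables it, on every open channel.
 */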
3140void
3141mlx5e_modify_tx_dma(struct mlx5e_priv *priv, uint8_t value)
3142{
3143	int i;
3144
3145	if (priv->channel == NULL)
3146		return;
3147
3148	for (i = 0; i < priv->params.num_channels; i++) {
3149
3150		if (!priv->channel[i])
3151			continue;
3152
3153		if (value)
3154			mlx5e_disable_tx_dma(priv->channel[i]);
3155		else
3156			mlx5e_enable_tx_dma(priv->channel[i]);
3157	}
3158}
3159
3160void
3161mlx5e_modify_rx_dma(struct mlx5e_priv *priv, uint8_t value)
3162{
3163	int i;
3164
3165	if (priv->channel == NULL)
3166		return;
3167
3168	for (i = 0; i < priv->params.num_channels; i++) {
3169
3170		if (!priv->channel[i])
3171			continue;
3172
3173		if (value)
3174			mlx5e_disable_rx_dma(priv->channel[i]);
3175		else
3176			mlx5e_enable_rx_dma(priv->channel[i]);
3177	}
3178}
3179
3180u8
3181mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev)
3182{
3183	u8 min_inline_mode;
3184
3185	min_inline_mode = MLX5_INLINE_MODE_L2;
3186	mlx5_query_min_inline(mdev, &min_inline_mode);
3187	if (min_inline_mode == MLX5_INLINE_MODE_NONE &&
3188	    !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
3189		min_inline_mode = MLX5_INLINE_MODE_L2;
3190
3191	return (min_inline_mode);
3192}
3193
3194static void
3195mlx5e_add_hw_stats(struct mlx5e_priv *priv)
3196{
3197	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
3198	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, priv, 0,
3199	    sysctl_firmware, "A", "HCA firmware version");
3200
3201	SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
3202	    OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0,
3203	    "Board ID");
3204}
3205
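/*
 * Sysctl handler for the per-priority TX flow control bits; arg2 is
 * the priority (0-7) and selects one bit in
 * priv->params.tx_priority_flow_control. For example, assuming unit
 * number 0, "sysctl dev.mce.0.tx_priority_flow_control_3=1" would
 * enable PFC for priority 3.
 */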
3206static int
3207mlx5e_sysctl_tx_priority_flow_control(SYSCTL_HANDLER_ARGS)
3208{
3209	struct mlx5e_priv *priv = arg1;
3210	uint32_t tx_pfc;
3211	uint32_t value;
3212	int error;
3213
3214	PRIV_LOCK(priv);
3215
3216	tx_pfc = priv->params.tx_priority_flow_control;
3217
3218	/* get current value */
3219	value = (tx_pfc >> arg2) & 1;
3220
3221	error = sysctl_handle_32(oidp, &value, 0, req);
3222
3223	/* range check value */
3224	if (value != 0)
3225		priv->params.tx_priority_flow_control |= (1 << arg2);
3226	else
3227		priv->params.tx_priority_flow_control &= ~(1 << arg2);
3228
3229	/* check if update is required */
3230	if (error == 0 && priv->gone == 0 &&
3231	    tx_pfc != priv->params.tx_priority_flow_control) {
3232		error = -mlx5e_set_port_pfc(priv);
3233		/* restore previous value */
3234		if (error != 0)
3235			priv->params.tx_priority_flow_control = tx_pfc;
3236	}
3237	PRIV_UNLOCK(priv);
3238
3239	return (error);
3240}
3241
3242static int
3243mlx5e_sysctl_rx_priority_flow_control(SYSCTL_HANDLER_ARGS)
3244{
3245	struct mlx5e_priv *priv = arg1;
3246	uint32_t rx_pfc;
3247	uint32_t value;
3248	int error;
3249
3250	PRIV_LOCK(priv);
3251
3252	rx_pfc = priv->params.rx_priority_flow_control;
3253
3254	/* get current value */
3255	value = (rx_pfc >> arg2) & 1;
3256
3257	error = sysctl_handle_32(oidp, &value, 0, req);
3258
3259	/* range check value */
3260	if (value != 0)
3261		priv->params.rx_priority_flow_control |= (1 << arg2);
3262	else
3263		priv->params.rx_priority_flow_control &= ~(1 << arg2);
3264
3265	/* check if update is required */
3266	if (error == 0 && priv->gone == 0 &&
3267	    rx_pfc != priv->params.rx_priority_flow_control) {
3268		error = -mlx5e_set_port_pfc(priv);
3269		/* restore previous value */
3270		if (error != 0)
3271			priv->params.rx_priority_flow_control = rx_pfc;
3272	}
3273	PRIV_UNLOCK(priv);
3274
3275	return (error);
3276}
3277
3278static void
3279mlx5e_setup_pauseframes(struct mlx5e_priv *priv)
3280{
3281	unsigned int x;
3282	char path[96];
3283	int error;
3284
3285	/* enable pauseframes by default */
3286	priv->params.tx_pauseframe_control = 1;
3287	priv->params.rx_pauseframe_control = 1;
3288
3289	/* disable ports flow control, PFC, by default */
3290	priv->params.tx_priority_flow_control = 0;
3291	priv->params.rx_priority_flow_control = 0;
3292
3293#if (__FreeBSD_version < 1100000)
3294	/* compute path for sysctl */
3295	snprintf(path, sizeof(path), "dev.mce.%d.tx_pauseframe_control",
3296	    device_get_unit(priv->mdev->pdev->dev.bsddev));
3297
3298	/* try to fetch tunable, if any */
3299	TUNABLE_INT_FETCH(path, &priv->params.tx_pauseframe_control);
3300
3301	/* compute path for sysctl */
3302	snprintf(path, sizeof(path), "dev.mce.%d.rx_pauseframe_control",
3303	    device_get_unit(priv->mdev->pdev->dev.bsddev));
3304
3305	/* try to fetch tunable, if any */
3306	TUNABLE_INT_FETCH(path, &priv->params.rx_pauseframe_control);
3307
3308	for (x = 0; x != 8; x++) {
		int value;
3309
3310		/* compute path for sysctl */
3311		snprintf(path, sizeof(path), "dev.mce.%d.tx_priority_flow_control_%u",
3312		    device_get_unit(priv->mdev->pdev->dev.bsddev), x);
3313
3314		/* try to fetch tunable, if any */
3315		if (TUNABLE_INT_FETCH(path, &value) != 0 && value != 0)
3316			priv->params.tx_priority_flow_control |= 1 << x;
3317
3318		/* compute path for sysctl */
3319		snprintf(path, sizeof(path), "dev.mce.%d.rx_priority_flow_control_%u",
3320		    device_get_unit(priv->mdev->pdev->dev.bsddev), x);
3321
3322		/* try to fetch tunable, if any */
3323		if (TUNABLE_INT_FETCH(path, &value) != 0 && value != 0)
3324			priv->params.rx_priority_flow_control |= 1 << x;
3325	}
3326#endif
3327
3328	/* register pauseframe SYSCTLs */
3329	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3330	    OID_AUTO, "tx_pauseframe_control", CTLFLAG_RDTUN,
3331	    &priv->params.tx_pauseframe_control, 0,
3332	    "Set to enable TX pause frames. Clear to disable.");
3333
3334	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3335	    OID_AUTO, "rx_pauseframe_control", CTLFLAG_RDTUN,
3336	    &priv->params.rx_pauseframe_control, 0,
3337	    "Set to enable RX pause frames. Clear to disable.");
3338
3339	/* register priority_flow control, PFC, SYSCTLs */
3340	for (x = 0; x != 8; x++) {
3341		snprintf(path, sizeof(path), "tx_priority_flow_control_%u", x);
3342
3343		SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3344		    OID_AUTO, path, CTLTYPE_UINT | CTLFLAG_RWTUN |
3345		    CTLFLAG_MPSAFE, priv, x, &mlx5e_sysctl_tx_priority_flow_control, "IU",
3346		    "Set to enable TX ports flow control frames for given priority. Clear to disable.");
3347
3348		snprintf(path, sizeof(path), "rx_priority_flow_control_%u", x);
3349
3350		SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3351		    OID_AUTO, path, CTLTYPE_UINT | CTLFLAG_RWTUN |
3352		    CTLFLAG_MPSAFE, priv, x, &mlx5e_sysctl_rx_priority_flow_control, "IU",
3353		    "Set to enable RX ports flow control frames for given priority. Clear to disable.");
3354	}
3355
3356	PRIV_LOCK(priv);
3357
3358	/* range check */
3359	priv->params.tx_pauseframe_control =
3360	    priv->params.tx_pauseframe_control ? 1 : 0;
3361	priv->params.rx_pauseframe_control =
3362	    priv->params.rx_pauseframe_control ? 1 : 0;
3363
3364	/* update firmware */
3365	error = mlx5e_set_port_pause_and_pfc(priv);
3366	if (error == -EINVAL) {
3367		if_printf(priv->ifp,
3368		    "Global pauseframes must be disabled before enabling PFC.\n");
3369		priv->params.rx_priority_flow_control = 0;
3370		priv->params.tx_priority_flow_control = 0;
3371
3372		/* update firmware */
3373		(void) mlx5e_set_port_pause_and_pfc(priv);
3374	}
3375	PRIV_UNLOCK(priv);
3376}
3377
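/*
 * Device add callback: allocate the ifnet, register the sysctl
 * trees, set capabilities and TSO limits, allocate UAR, PD,
 * transport domain and mkey, configure MTU, pause frames and the
 * supported media, and finally attach the ethernet interface. The
 * labels at the end unwind failures in reverse allocation order.
 */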
3378static void *
3379mlx5e_create_ifp(struct mlx5_core_dev *mdev)
3380{
3381	struct ifnet *ifp;
3382	struct mlx5e_priv *priv;
3383	u8 dev_addr[ETHER_ADDR_LEN] __aligned(4);
3384	struct sysctl_oid_list *child;
3385	int ncv = mdev->priv.eq_table.num_comp_vectors;
3386	char unit[16];
3387	int err;
3388	int i;
3389	u32 eth_proto_cap;
3390
3391	if (mlx5e_check_required_hca_cap(mdev)) {
3392		mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n");
3393		return (NULL);
3394	}
3395	priv = malloc(sizeof(*priv), M_MLX5EN, M_WAITOK | M_ZERO);
3396	mlx5e_priv_mtx_init(priv);
3397
3398	ifp = priv->ifp = if_alloc(IFT_ETHER);
3399	if (ifp == NULL) {
3400		mlx5_core_err(mdev, "if_alloc() failed\n");
3401		goto err_free_priv;
3402	}
3403	ifp->if_softc = priv;
3404	if_initname(ifp, "mce", device_get_unit(mdev->pdev->dev.bsddev));
3405	ifp->if_mtu = ETHERMTU;
3406	ifp->if_init = mlx5e_open;
3407	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3408	ifp->if_ioctl = mlx5e_ioctl;
3409	ifp->if_transmit = mlx5e_xmit;
3410	ifp->if_qflush = if_qflush;
3411#if (__FreeBSD_version >= 1100000)
3412	ifp->if_get_counter = mlx5e_get_counter;
3413#endif
3414	ifp->if_snd.ifq_maxlen = ifqmaxlen;
3415	/*
3416	 * Set driver features
3417	 */
3418	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
3419	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
3420	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
3421	ifp->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
3422	ifp->if_capabilities |= IFCAP_LRO;
3423	ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO;
3424	ifp->if_capabilities |= IFCAP_HWSTATS;
3425
3426	/* set TSO limits so that we don't have to drop TX packets */
3427	ifp->if_hw_tsomax = MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
3428	ifp->if_hw_tsomaxsegcount = MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */;
3429	ifp->if_hw_tsomaxsegsize = MLX5E_MAX_TX_MBUF_SIZE;
3430
3431	ifp->if_capenable = ifp->if_capabilities;
3432	ifp->if_hwassist = 0;
3433	if (ifp->if_capenable & IFCAP_TSO)
3434		ifp->if_hwassist |= CSUM_TSO;
3435	if (ifp->if_capenable & IFCAP_TXCSUM)
3436		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
3437	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
3438		ifp->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
3439
3440	sysctl_ctx_init(&priv->sysctl_ctx_channel_debug);
3441
3442	/* ifnet sysctl tree */
3443	sysctl_ctx_init(&priv->sysctl_ctx);
3444	priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev),
3445	    OID_AUTO, ifp->if_dname, CTLFLAG_RD, 0, "MLX5 ethernet - interface name");
3446	if (priv->sysctl_ifnet == NULL) {
3447		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
3448		goto err_free_sysctl;
3449	}
3450	snprintf(unit, sizeof(unit), "%d", ifp->if_dunit);
3451	priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3452	    OID_AUTO, unit, CTLFLAG_RD, 0, "MLX5 ethernet - interface unit");
3453	if (priv->sysctl_ifnet == NULL) {
3454		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
3455		goto err_free_sysctl;
3456	}
3457
3458	/* HW sysctl tree */
3459	child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev));
3460	priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child,
3461	    OID_AUTO, "hw", CTLFLAG_RD, 0, "MLX5 ethernet dev hw");
3462	if (priv->sysctl_hw == NULL) {
3463		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
3464		goto err_free_sysctl;
3465	}
3466	mlx5e_build_ifp_priv(mdev, priv, ncv);
3467
3468	snprintf(unit, sizeof(unit), "mce%u_wq",
3469	    device_get_unit(mdev->pdev->dev.bsddev));
3470	priv->wq = alloc_workqueue(unit, 0, 1);
3471	if (priv->wq == NULL) {
3472		if_printf(ifp, "%s: alloc_workqueue failed\n", __func__);
3473		goto err_free_sysctl;
3474	}
3475
3476	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
3477	if (err) {
3478		if_printf(ifp, "%s: mlx5_alloc_map_uar failed, %d\n",
3479		    __func__, err);
3480		goto err_free_wq;
3481	}
3482	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
3483	if (err) {
3484		if_printf(ifp, "%s: mlx5_core_alloc_pd failed, %d\n",
3485		    __func__, err);
3486		goto err_unmap_free_uar;
3487	}
3488	err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
3489	if (err) {
3490		if_printf(ifp, "%s: mlx5_alloc_transport_domain failed, %d\n",
3491		    __func__, err);
3492		goto err_dealloc_pd;
3493	}
3494	err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
3495	if (err) {
3496		if_printf(ifp, "%s: mlx5e_create_mkey failed, %d\n",
3497		    __func__, err);
3498		goto err_dealloc_transport_domain;
3499	}
3500	mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr);
3501
3502	/* check if we should generate a random MAC address */
3503	if (MLX5_CAP_GEN(priv->mdev, vport_group_manager) == 0 &&
3504	    is_zero_ether_addr(dev_addr)) {
3505		random_ether_addr(dev_addr);
3506		if_printf(ifp, "Assigned random MAC address\n");
3507	}
3508
3509	/* set default MTU */
3510	mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu);
3511
3512	/* Set default media status */
3513	priv->media_status_last = IFM_AVALID;
3514	priv->media_active_last = IFM_ETHER | IFM_AUTO |
3515	    IFM_ETH_RXPAUSE | IFM_FDX;
3516
3517	/* setup default pauseframes configuration */
3518	mlx5e_setup_pauseframes(priv);
3519
3520	err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
3521	if (err) {
3522		eth_proto_cap = 0;
3523		if_printf(ifp, "%s: Query port media capability failed, %d\n",
3524		    __func__, err);
3525	}
3526
3527	/* Setup supported medias */
3528	ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
3529	    mlx5e_media_change, mlx5e_media_status);
3530
3531	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
3532		if (mlx5e_mode_table[i].baudrate == 0)
3533			continue;
3534		if (MLX5E_PROT_MASK(i) & eth_proto_cap) {
3535			ifmedia_add(&priv->media,
3536			    mlx5e_mode_table[i].subtype |
3537			    IFM_ETHER, 0, NULL);
3538			ifmedia_add(&priv->media,
3539			    mlx5e_mode_table[i].subtype |
3540			    IFM_ETHER | IFM_FDX |
3541			    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
3542		}
3543	}
3544
3545	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
3546	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
3547	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
3548
3549	/* Set autoselect by default */
3550	ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
3551	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
3552	ether_ifattach(ifp, dev_addr);
3553
3554	/* Register for VLAN events */
3555	priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
3556	    mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
3557	priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
3558	    mlx5e_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);
3559
3560	/* Link is down by default */
3561	if_link_state_change(ifp, LINK_STATE_DOWN);
3562
3563	mlx5e_enable_async_events(priv);
3564
3565	mlx5e_add_hw_stats(priv);
3566
3567	mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3568	    "vstats", mlx5e_vport_stats_desc, MLX5E_VPORT_STATS_NUM,
3569	    priv->stats.vport.arg);
3570
3571	mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3572	    "pstats", mlx5e_pport_stats_desc, MLX5E_PPORT_STATS_NUM,
3573	    priv->stats.pport.arg);
3574
3575	mlx5e_create_ethtool(priv);
3576
3577	mtx_lock(&priv->async_events_mtx);
3578	mlx5e_update_stats(priv);
3579	mtx_unlock(&priv->async_events_mtx);
3580
3581	return (priv);
3582
3583err_dealloc_transport_domain:
3584	mlx5_dealloc_transport_domain(mdev, priv->tdn);
3585
3586err_dealloc_pd:
3587	mlx5_core_dealloc_pd(mdev, priv->pdn);
3588
3589err_unmap_free_uar:
3590	mlx5_unmap_free_uar(mdev, &priv->cq_uar);
3591
3592err_free_wq:
3593	destroy_workqueue(priv->wq);
3594
3595err_free_sysctl:
3596	sysctl_ctx_free(&priv->sysctl_ctx);
3597	sysctl_ctx_free(&priv->sysctl_ctx_channel_debug);
3598
3599	if_free(ifp);
3600
3601err_free_priv:
3602	mlx5e_priv_mtx_destroy(priv);
3603	free(priv, M_MLX5EN);
3604	return (NULL);
3605}
3606
3607static void
3608mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
3609{
3610	struct mlx5e_priv *priv = vpriv;
3611	struct ifnet *ifp = priv->ifp;
3612
3613	/* don't allow more IOCTLs */
3614	priv->gone = 1;
3615
3616	/* XXX wait a bit to allow IOCTL handlers to complete */
3617	pause("W", hz);
3618
3619	/* stop watchdog timer */
3620	callout_drain(&priv->watchdog);
3621
3622	if (priv->vlan_attach != NULL)
3623		EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
3624	if (priv->vlan_detach != NULL)
3625		EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);
3626
3627	/* make sure device gets closed */
3628	PRIV_LOCK(priv);
3629	mlx5e_close_locked(ifp);
3630	PRIV_UNLOCK(priv);
3631
3632	/* unregister device */
3633	ifmedia_removeall(&priv->media);
3634	ether_ifdetach(ifp);
3635	if_free(ifp);
3636
3637	/* destroy all remaining sysctl nodes */
3638	if (priv->sysctl_debug) {
3639		sysctl_ctx_free(&priv->sysctl_ctx_channel_debug);
3640		sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
3641	}
3642	sysctl_ctx_free(&priv->stats.vport.ctx);
3643	sysctl_ctx_free(&priv->stats.pport.ctx);
3644	sysctl_ctx_free(&priv->sysctl_ctx);
3645
3646	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
3647	mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
3648	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
3649	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
3650	mlx5e_disable_async_events(priv);
3651	destroy_workqueue(priv->wq);
3652	mlx5e_priv_mtx_destroy(priv);
3653	free(priv, M_MLX5EN);
3654}
3655
3656static void *
3657mlx5e_get_ifp(void *vpriv)
3658{
3659	struct mlx5e_priv *priv = vpriv;
3660
3661	return (priv->ifp);
3662}
3663
3664static struct mlx5_interface mlx5e_interface = {
3665	.add = mlx5e_create_ifp,
3666	.remove = mlx5e_destroy_ifp,
3667	.event = mlx5e_async_event,
3668	.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
3669	.get_dev = mlx5e_get_ifp,
3670};
3671
3672void
3673mlx5e_init(void)
3674{
3675	mlx5_register_interface(&mlx5e_interface);
3676}
3677
3678void
3679mlx5e_cleanup(void)
3680{
3681	mlx5_unregister_interface(&mlx5e_interface);
3682}
3683
3684static void
3685mlx5e_show_version(void __unused *arg)
3686{
3687
3688	printf("%s", mlx5e_version);
3689}
3690SYSINIT(mlx5e_show_version, SI_SUB_DRIVERS, SI_ORDER_ANY, mlx5e_show_version, NULL);
3691
3692module_init_order(mlx5e_init, SI_ORDER_THIRD);
3693module_exit_order(mlx5e_cleanup, SI_ORDER_THIRD);
3694
3695#if (__FreeBSD_version >= 1100000)
3696MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1);
3697#endif
3698MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1);
3699MODULE_VERSION(mlx5en, 1);
3700