mlx5_en_main.c revision 337115
/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_en/mlx5_en_main.c 337115 2018-08-02 08:56:27Z hselasky $
 */

#include "en.h"

#include <sys/sockio.h>
#include <machine/atomic.h>

#ifndef ETH_DRIVER_VERSION
#define	ETH_DRIVER_VERSION	"3.4.2"
#endif

char mlx5e_version[] = "Mellanox Ethernet driver"
    " (" ETH_DRIVER_VERSION ")";

static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs);

struct mlx5e_channel_param {
	struct mlx5e_rq_param rq;
	struct mlx5e_sq_param sq;
	struct mlx5e_cq_param rx_cq;
	struct mlx5e_cq_param tx_cq;
};

static const struct {
	u32	subtype;
	u64	baudrate;
}	mlx5e_mode_table[MLX5E_LINK_MODES_NUMBER] = {

	[MLX5E_1000BASE_CX_SGMII] = {
		.subtype = IFM_1000_CX_SGMII,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_1000BASE_KX] = {
		.subtype = IFM_1000_KX,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_10GBASE_CX4] = {
		.subtype = IFM_10G_CX4,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_KX4] = {
		.subtype = IFM_10G_KX4,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_KR] = {
		.subtype = IFM_10G_KR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_20GBASE_KR2] = {
		.subtype = IFM_20G_KR2,
		.baudrate = IF_Gbps(20ULL),
	},
	[MLX5E_40GBASE_CR4] = {
		.subtype = IFM_40G_CR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_KR4] = {
		.subtype = IFM_40G_KR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_56GBASE_R4] = {
		.subtype = IFM_56G_R4,
		.baudrate = IF_Gbps(56ULL),
	},
	[MLX5E_10GBASE_CR] = {
		.subtype = IFM_10G_CR1,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_SR] = {
		.subtype = IFM_10G_SR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_ER] = {
		.subtype = IFM_10G_ER,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_40GBASE_SR4] = {
		.subtype = IFM_40G_SR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_LR4] = {
		.subtype = IFM_40G_LR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_100GBASE_CR4] = {
		.subtype = IFM_100G_CR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_SR4] = {
		.subtype = IFM_100G_SR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_KR4] = {
		.subtype = IFM_100G_KR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_LR4] = {
		.subtype = IFM_100G_LR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100BASE_TX] = {
		.subtype = IFM_100_TX,
		.baudrate = IF_Mbps(100ULL),
	},
	[MLX5E_1000BASE_T] = {
		.subtype = IFM_1000_T,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_10GBASE_T] = {
		.subtype = IFM_10G_T,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_25GBASE_CR] = {
		.subtype = IFM_25G_CR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GBASE_KR] = {
		.subtype = IFM_25G_KR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GBASE_SR] = {
		.subtype = IFM_25G_SR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_50GBASE_CR2] = {
		.subtype = IFM_50G_CR2,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GBASE_KR2] = {
		.subtype = IFM_50G_KR2,
		.baudrate = IF_Gbps(50ULL),
	},
};
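
/*
 * Note (illustrative, not from the original source): the table above is
 * indexed by the firmware's link mode enumeration, so a link mode "i"
 * corresponds to bit MLX5E_PROT_MASK(i) in the PTYS register's
 * eth_proto_* bitmasks. A decode sketch, assuming "proto" holds any
 * such bitmask:
 *
 *	for (i = 0; i != MLX5E_LINK_MODES_NUMBER; i++) {
 *		if (mlx5e_mode_table[i].baudrate != 0 &&
 *		    (MLX5E_PROT_MASK(i) & proto) != 0)
 *			printf("mode %u at %ju bit/s\n", i,
 *			    (uintmax_t)mlx5e_mode_table[i].baudrate);
 *	}
 */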

MALLOC_DEFINE(M_MLX5EN, "MLX5EN", "MLX5 Ethernet");

static void
mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
	u32 eth_proto_oper;
	int error;
	u8 port_state;
	u8 i;

	port_state = mlx5_query_vport_state(mdev,
	    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

	if (port_state == VPORT_STATE_UP) {
		priv->media_status_last |= IFM_ACTIVE;
	} else {
		priv->media_status_last &= ~IFM_ACTIVE;
		priv->media_active_last = IFM_ETHER;
		if_link_state_change(priv->ifp, LINK_STATE_DOWN);
		return;
	}

	error = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
	if (error) {
		priv->media_active_last = IFM_ETHER;
		priv->ifp->if_baudrate = 1;
		if_printf(priv->ifp, "%s: query port ptys failed: 0x%x\n",
		    __func__, error);
		return;
	}
	eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);

	for (i = 0; i != MLX5E_LINK_MODES_NUMBER; i++) {
		if (mlx5e_mode_table[i].baudrate == 0)
			continue;
		if (MLX5E_PROT_MASK(i) & eth_proto_oper) {
			priv->ifp->if_baudrate =
			    mlx5e_mode_table[i].baudrate;
			priv->media_active_last =
			    mlx5e_mode_table[i].subtype | IFM_ETHER | IFM_FDX;
		}
	}
	if_link_state_change(priv->ifp, LINK_STATE_UP);
}

static void
mlx5e_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
{
	struct mlx5e_priv *priv = dev->if_softc;

	ifmr->ifm_status = priv->media_status_last;
	ifmr->ifm_active = priv->media_active_last |
	    (priv->params.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) |
	    (priv->params.tx_pauseframe_control ? IFM_ETH_TXPAUSE : 0);
}

static u32
mlx5e_find_link_mode(u32 subtype)
{
	u32 i;
	u32 link_mode = 0;

	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
		if (mlx5e_mode_table[i].baudrate == 0)
			continue;
		if (mlx5e_mode_table[i].subtype == subtype)
			link_mode |= MLX5E_PROT_MASK(i);
	}

	return (link_mode);
}
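
/*
 * Usage sketch (illustrative only): translating the ifmedia subtype
 * requested by e.g. "ifconfig ... media 40Gbase-CR4" back into the
 * firmware's protocol bitmask:
 *
 *	u32 mask = mlx5e_find_link_mode(IFM_40G_CR4);
 *
 * A zero return value means no entry of mlx5e_mode_table matched the
 * requested subtype.
 */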

static int
mlx5e_set_port_pause_and_pfc(struct mlx5e_priv *priv)
{
	return (mlx5_set_port_pause_and_pfc(priv->mdev, 1,
	    priv->params.rx_pauseframe_control,
	    priv->params.tx_pauseframe_control,
	    priv->params.rx_priority_flow_control,
	    priv->params.tx_priority_flow_control));
}

static int
mlx5e_set_port_pfc(struct mlx5e_priv *priv)
{
	int error;

	if (priv->params.rx_pauseframe_control ||
	    priv->params.tx_pauseframe_control) {
		if_printf(priv->ifp,
		    "Global pauseframes must be disabled before enabling PFC.\n");
		error = -EINVAL;
	} else {
		error = mlx5e_set_port_pause_and_pfc(priv);
	}
	return (error);
}

static int
mlx5e_media_change(struct ifnet *dev)
{
	struct mlx5e_priv *priv = dev->if_softc;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 eth_proto_cap;
	u32 link_mode;
	int was_opened;
	int locked;
	int error;

	locked = PRIV_LOCKED(priv);
	if (!locked)
		PRIV_LOCK(priv);

	if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) {
		error = EINVAL;
		goto done;
	}
	link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media));

	/* query supported capabilities */
	error = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
	if (error != 0) {
		if_printf(dev, "Query port media capability failed\n");
		goto done;
	}
	/* check for autoselect */
	if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO) {
		link_mode = eth_proto_cap;
		if (link_mode == 0) {
			if_printf(dev, "Port media capability is zero\n");
			error = EINVAL;
			goto done;
		}
	} else {
		link_mode = link_mode & eth_proto_cap;
		if (link_mode == 0) {
			if_printf(dev, "Not supported link mode requested\n");
			error = EINVAL;
			goto done;
		}
	}
	if (priv->media.ifm_media & (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
		/* check if PFC is enabled */
		if (priv->params.rx_priority_flow_control ||
		    priv->params.tx_priority_flow_control) {
			if_printf(dev, "PFC must be disabled before enabling global pauseframes.\n");
			error = EINVAL;
			goto done;
		}
	}
	/* update pauseframe control bits */
	priv->params.rx_pauseframe_control =
	    (priv->media.ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0;
	priv->params.tx_pauseframe_control =
	    (priv->media.ifm_media & IFM_ETH_TXPAUSE) ? 1 : 0;

	/* check if device is opened */
	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);

	/* reconfigure the hardware */
	mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
	mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN);
	error = -mlx5e_set_port_pause_and_pfc(priv);
	if (was_opened)
		mlx5_set_port_status(mdev, MLX5_PORT_UP);

done:
	if (!locked)
		PRIV_UNLOCK(priv);
	return (error);
}

static void
mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
	    update_carrier_work);

	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_update_carrier(priv);
	PRIV_UNLOCK(priv);
}

/*
 * This function reads the physical port counters from the firmware
 * using a pre-defined layout defined by various MLX5E_PPORT_XXX()
 * macros. The output is converted from big-endian 64-bit values into
 * host endian ones and stored in the "priv->stats.pport" structure.
 */
static void
mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_pport_stats *s = &priv->stats.pport;
	struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
	u32 *in;
	u32 *out;
	const u64 *ptr;
	unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	unsigned x;
	unsigned y;
	unsigned z;

	/* allocate firmware request structures */
	in = mlx5_vzalloc(sz);
	out = mlx5_vzalloc(sz);
	if (in == NULL || out == NULL)
		goto free_out;

	/*
	 * Get pointer to the 64-bit counter set which is located at a
	 * fixed offset in the output firmware request structure:
	 */
	ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set);

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	/* read IEEE802_3 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0, y = MLX5E_PPORT_PER_PRIO_STATS_NUM;
	     x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++)
		s->arg[y] = be64toh(ptr[x]);

	/* read RFC2819 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++)
		s->arg[y] = be64toh(ptr[x]);
	for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM +
	    MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read RFC2863 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read physical layer stats counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read per-priority counters */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);

	/* iterate all the priorities */
	for (y = z = 0; z != MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO; z++) {
		MLX5_SET(ppcnt_reg, in, prio_tc, z);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

		/* read per priority stats counter group using predefined counter layout */
		for (x = 0; x != (MLX5E_PPORT_PER_PRIO_STATS_NUM /
		    MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO); x++, y++)
			s->arg[y] = be64toh(ptr[x]);
	}
free_out:
	/* free firmware request structures */
	kvfree(in);
	kvfree(out);
}
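
/*
 * Layout note for the function above: the "y" index runs linearly
 * across the flat "arg[]" arrays, appending each counter group at a
 * fixed offset. The per-priority counters occupy arg[0] onwards,
 * which is why the IEEE802.3 loop starts at
 * y = MLX5E_PPORT_PER_PRIO_STATS_NUM.
 */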

/*
 * This function is called regularly to collect all statistics
 * counters from the firmware. The values can be viewed through the
 * sysctl interface. Execution is serialized using the priv's global
 * configuration lock.
 */
static void
mlx5e_update_stats_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
	    update_stats_work);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_vport_stats *s = &priv->stats.vport;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	struct buf_ring *sq_br;
#if (__FreeBSD_version < 1100000)
	struct ifnet *ifp = priv->ifp;
#endif

	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u64 tso_packets = 0;
	u64 tso_bytes = 0;
	u64 tx_queue_dropped = 0;
	u64 tx_defragged = 0;
	u64 tx_offload_none = 0;
	u64 lro_packets = 0;
	u64 lro_bytes = 0;
	u64 sw_lro_queued = 0;
	u64 sw_lro_flushed = 0;
	u64 rx_csum_none = 0;
	u64 rx_wqe_err = 0;
	u32 rx_out_of_buffer = 0;
	int i;
	int j;

	PRIV_LOCK(priv);
	out = mlx5_vzalloc(outlen);
	if (out == NULL)
		goto free_out;
	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
		goto free_out;

	/* Collect the SW counters first and then the HW counters for consistency */
	for (i = 0; i < priv->params.num_channels; i++) {
		struct mlx5e_rq *rq = &priv->channel[i]->rq;

		rq_stats = &priv->channel[i]->rq.stats;

		/* collect stats from LRO */
		rq_stats->sw_lro_queued = rq->lro.lro_queued;
		rq_stats->sw_lro_flushed = rq->lro.lro_flushed;
		sw_lro_queued += rq_stats->sw_lro_queued;
		sw_lro_flushed += rq_stats->sw_lro_flushed;
		lro_packets += rq_stats->lro_packets;
		lro_bytes += rq_stats->lro_bytes;
		rx_csum_none += rq_stats->csum_none;
		rx_wqe_err += rq_stats->wqe_err;

		for (j = 0; j < priv->num_tc; j++) {
			sq_stats = &priv->channel[i]->sq[j].stats;
			sq_br = priv->channel[i]->sq[j].br;

			tso_packets += sq_stats->tso_packets;
			tso_bytes += sq_stats->tso_bytes;
			tx_queue_dropped += sq_stats->dropped;
			if (sq_br != NULL)
				tx_queue_dropped += sq_br->br_drops;
			tx_defragged += sq_stats->defragged;
			tx_offload_none += sq_stats->csum_offload_none;
		}
	}

	/* update counters */
	s->tso_packets = tso_packets;
	s->tso_bytes = tso_bytes;
	s->tx_queue_dropped = tx_queue_dropped;
	s->tx_defragged = tx_defragged;
	s->lro_packets = lro_packets;
	s->lro_bytes = lro_bytes;
	s->sw_lro_queued = sw_lro_queued;
	s->sw_lro_flushed = sw_lro_flushed;
	s->rx_csum_none = rx_csum_none;
	s->rx_wqe_err = rx_wqe_err;

	/* HW counters */
	memset(in, 0, sizeof(in));

	MLX5_SET(query_vport_counter_in, in, opcode,
	    MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	memset(out, 0, outlen);

	/* get number of out-of-buffer drops first */
	if (mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id,
	    &rx_out_of_buffer))
		goto free_out;

	/* accumulate difference into a 64-bit counter */
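	/*
	 * (The u32 subtraction below is modulo 2**32, so wraparound of
	 * the 32-bit hardware counter is handled correctly.)
	 */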
	s->rx_out_of_buffer += (u64)(u32)(rx_out_of_buffer - s->rx_out_of_buffer_prev);
	s->rx_out_of_buffer_prev = rx_out_of_buffer;

	/* get port statistics */
	if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
		goto free_out;

#define	MLX5_GET_CTR(out, x) \
	MLX5_GET64(query_vport_counter_out, out, x)

	s->rx_error_packets =
	    MLX5_GET_CTR(out, received_errors.packets);
	s->rx_error_bytes =
	    MLX5_GET_CTR(out, received_errors.octets);
	s->tx_error_packets =
	    MLX5_GET_CTR(out, transmit_errors.packets);
	s->tx_error_bytes =
	    MLX5_GET_CTR(out, transmit_errors.octets);

	s->rx_unicast_packets =
	    MLX5_GET_CTR(out, received_eth_unicast.packets);
	s->rx_unicast_bytes =
	    MLX5_GET_CTR(out, received_eth_unicast.octets);
	s->tx_unicast_packets =
	    MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
	s->tx_unicast_bytes =
	    MLX5_GET_CTR(out, transmitted_eth_unicast.octets);

	s->rx_multicast_packets =
	    MLX5_GET_CTR(out, received_eth_multicast.packets);
	s->rx_multicast_bytes =
	    MLX5_GET_CTR(out, received_eth_multicast.octets);
	s->tx_multicast_packets =
	    MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
	s->tx_multicast_bytes =
	    MLX5_GET_CTR(out, transmitted_eth_multicast.octets);

	s->rx_broadcast_packets =
	    MLX5_GET_CTR(out, received_eth_broadcast.packets);
	s->rx_broadcast_bytes =
	    MLX5_GET_CTR(out, received_eth_broadcast.octets);
	s->tx_broadcast_packets =
	    MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
	s->tx_broadcast_bytes =
	    MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	s->rx_packets =
	    s->rx_unicast_packets +
	    s->rx_multicast_packets +
	    s->rx_broadcast_packets -
	    s->rx_out_of_buffer;
	s->rx_bytes =
	    s->rx_unicast_bytes +
	    s->rx_multicast_bytes +
	    s->rx_broadcast_bytes;
	s->tx_packets =
	    s->tx_unicast_packets +
	    s->tx_multicast_packets +
	    s->tx_broadcast_packets;
	s->tx_bytes =
	    s->tx_unicast_bytes +
	    s->tx_multicast_bytes +
	    s->tx_broadcast_bytes;

	/* Update calculated offload counters */
	s->tx_csum_offload = s->tx_packets - tx_offload_none;
	s->rx_csum_good = s->rx_packets - s->rx_csum_none;

	/* Get physical port counters */
	mlx5e_update_pport_counters(priv);

#if (__FreeBSD_version < 1100000)
	/* no get_counters interface in fbsd 10 */
	ifp->if_ipackets = s->rx_packets;
	ifp->if_ierrors = s->rx_error_packets +
	    priv->stats.pport.alignment_err +
	    priv->stats.pport.check_seq_err +
	    priv->stats.pport.crc_align_errors +
	    priv->stats.pport.in_range_len_errors +
	    priv->stats.pport.jabbers +
	    priv->stats.pport.out_of_range_len +
	    priv->stats.pport.oversize_pkts +
	    priv->stats.pport.symbol_err +
	    priv->stats.pport.too_long_errors +
	    priv->stats.pport.undersize_pkts +
	    priv->stats.pport.unsupported_op_rx;
	ifp->if_iqdrops = s->rx_out_of_buffer +
	    priv->stats.pport.drop_events;
	ifp->if_opackets = s->tx_packets;
	ifp->if_oerrors = s->tx_error_packets;
	ifp->if_snd.ifq_drops = s->tx_queue_dropped;
	ifp->if_ibytes = s->rx_bytes;
	ifp->if_obytes = s->tx_bytes;
	ifp->if_collisions =
	    priv->stats.pport.collisions;
#endif

free_out:
	kvfree(out);

	/* Update diagnostics, if any */
	if (priv->params_ethtool.diag_pci_enable ||
	    priv->params_ethtool.diag_general_enable) {
		int error = mlx5_core_get_diagnostics_full(mdev,
		    priv->params_ethtool.diag_pci_enable ? &priv->params_pci : NULL,
		    priv->params_ethtool.diag_general_enable ? &priv->params_general : NULL);
		if (error != 0)
			if_printf(priv->ifp, "Failed reading diagnostics: %d\n", error);
	}
	PRIV_UNLOCK(priv);
}

static void
mlx5e_update_stats(void *arg)
{
	struct mlx5e_priv *priv = arg;

	queue_work(priv->wq, &priv->update_stats_work);

	callout_reset(&priv->watchdog, hz, &mlx5e_update_stats, priv);
}
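
/*
 * Note on the callout above: it runs from softclock context, which
 * must not sleep, so the firmware queries are deferred to the
 * "update_stats_work" task and the callout simply re-arms itself
 * every "hz" ticks, i.e. roughly once per second.
 */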

static void
mlx5e_async_event_sub(struct mlx5e_priv *priv,
    enum mlx5_dev_event event)
{
	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;

	default:
		break;
	}
}

static void
mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
    enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	mtx_lock(&priv->async_events_mtx);
	if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
		mlx5e_async_event_sub(priv, event);
	mtx_unlock(&priv->async_events_mtx);
}

static void
mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}

static void
mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	mtx_lock(&priv->async_events_mtx);
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
	mtx_unlock(&priv->async_events_mtx);
}

static const char *mlx5e_rq_stats_desc[] = {
	MLX5E_RQ_STATS(MLX5E_STATS_DESC)
};

static int
mlx5e_create_rq(struct mlx5e_channel *c,
    struct mlx5e_rq_param *param,
    struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	char buffer[16];
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int wq_sz;
	int err;
	int i;
	u32 nsegs, wqe_sz;

	err = mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
	if (err != 0)
		goto done;

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsize */
	    nsegs,			/* nsegments */
	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &rq->dma_tag)))
		goto done;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
	    &rq->wq_ctrl);
	if (err)
		goto err_free_dma_tag;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	err = mlx5e_get_wqe_sz(priv, &rq->wqe_sz, &rq->nsegs);
	if (err != 0)
		goto err_rq_wq_destroy;

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);

	err = -tcp_lro_init_args(&rq->lro, c->ifp, TCP_LRO_ENTRIES, wq_sz);
	if (err)
		goto err_rq_wq_destroy;

	rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
	for (i = 0; i != wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
#if (MLX5E_MAX_RX_SEGS == 1)
		uint32_t byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
#else
		int j;
#endif

		err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map);
		if (err != 0) {
			while (i--)
				bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
			goto err_rq_mbuf_free;
		}

		/* set value for constant fields */
#if (MLX5E_MAX_RX_SEGS == 1)
		wqe->data[0].lkey = c->mkey_be;
		wqe->data[0].byte_count = cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
#else
		for (j = 0; j < rq->nsegs; j++)
			wqe->data[j].lkey = c->mkey_be;
#endif
	}

	rq->ifp = c->ifp;
	rq->channel = c;
	rq->ix = c->ix;

	snprintf(buffer, sizeof(buffer), "rxstat%d", c->ix);
	mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM,
	    rq->stats.arg);
	return (0);

err_rq_mbuf_free:
	free(rq->mbuf, M_MLX5EN);
	tcp_lro_free(&rq->lro);
err_rq_wq_destroy:
	mlx5_wq_destroy(&rq->wq_ctrl);
err_free_dma_tag:
	bus_dma_tag_destroy(rq->dma_tag);
done:
	return (err);
}
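
/*
 * Note on the DMA tag above: "maxsize" and "maxsegsize" are both
 * nsegs * MLX5E_MAX_RX_BYTES with up to "nsegs" segments, so a single
 * receive mbuf chain can cover the full WQE scatter list computed by
 * mlx5e_get_wqe_sz().
 */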

static void
mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	int wq_sz;
	int i;

	/* destroy all sysctl nodes */
	sysctl_ctx_free(&rq->stats.ctx);

	/* free leftover LRO packets, if any */
	tcp_lro_free(&rq->lro);

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	for (i = 0; i != wq_sz; i++) {
		if (rq->mbuf[i].mbuf != NULL) {
			bus_dmamap_unload(rq->dma_tag, rq->mbuf[i].dma_map);
			m_freem(rq->mbuf[i].mbuf);
		}
		bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
	}
	free(rq->mbuf, M_MLX5EN);
	mlx5_wq_destroy(&rq->wq_ctrl);
}

static int
mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
	    sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
	if (priv->counter_set_id >= 0)
		MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id);
	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
	    PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return (err);
}

static int
mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rqn, rq->rqn);
	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, in, inlen);

	kvfree(in);

	return (err);
}

static void
mlx5e_disable_rq(struct mlx5e_rq *rq)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_rq(mdev, rq->rqn);
}

static int
mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_wq_ll *wq = &rq->wq;
	int i;

	for (i = 0; i < 1000; i++) {
		if (wq->cur_sz >= priv->params.min_rx_wqes)
			return (0);

		msleep(4);
	}
	return (-ETIMEDOUT);
}

static int
mlx5e_open_rq(struct mlx5e_channel *c,
    struct mlx5e_rq_param *param,
    struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_create_rq(c, param, rq);
	if (err)
		return (err);

	err = mlx5e_enable_rq(rq, param);
	if (err)
		goto err_destroy_rq;

	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_disable_rq;

	c->rq.enabled = 1;

	return (0);

err_disable_rq:
	mlx5e_disable_rq(rq);
err_destroy_rq:
	mlx5e_destroy_rq(rq);

	return (err);
}
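
/*
 * Summary of the RQ bringup sequence above: mlx5e_create_rq() sets up
 * the software state (DMA tag, work queue, mbuf array), then
 * mlx5e_enable_rq() creates the firmware object in the RST state, and
 * mlx5e_modify_rq() finally transitions it RST -> RDY so the hardware
 * starts consuming receive WQEs.
 */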

static void
mlx5e_close_rq(struct mlx5e_rq *rq)
{
	mtx_lock(&rq->mtx);
	rq->enabled = 0;
	callout_stop(&rq->watchdog);
	mtx_unlock(&rq->mtx);

	callout_drain(&rq->watchdog);

	mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
}

static void
mlx5e_close_rq_wait(struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = rq->channel->priv->mdev;

	/* wait till RQ is empty */
	while (!mlx5_wq_ll_is_empty(&rq->wq) &&
	       (mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)) {
		msleep(4);
		rq->cq.mcq.comp(&rq->cq.mcq);
	}

	mlx5e_disable_rq(rq);
	mlx5e_destroy_rq(rq);
}

void
mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int x;

	for (x = 0; x != wq_sz; x++)
		bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
	free(sq->mbuf, M_MLX5EN);
}

int
mlx5e_alloc_sq_db(struct mlx5e_sq *sq)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int err;
	int x;

	sq->mbuf = malloc(wq_sz * sizeof(sq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);

	/* Create DMA descriptor MAPs */
	for (x = 0; x != wq_sz; x++) {
		err = -bus_dmamap_create(sq->dma_tag, 0, &sq->mbuf[x].dma_map);
		if (err != 0) {
			while (x--)
				bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
			free(sq->mbuf, M_MLX5EN);
			return (err);
		}
	}
	return (0);
}

static const char *mlx5e_sq_stats_desc[] = {
	MLX5E_SQ_STATS(MLX5E_STATS_DESC)
};

static int
mlx5e_create_sq(struct mlx5e_channel *c,
    int tc,
    struct mlx5e_sq_param *param,
    struct mlx5e_sq *sq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	char buffer[16];

	void *sqc = param->sqc;
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
#ifdef RSS
	cpuset_t cpu_mask;
	int cpu_id;
#endif
	int err;

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MLX5E_MAX_TX_PAYLOAD_SIZE,	/* maxsize */
	    MLX5E_MAX_TX_MBUF_FRAGS,	/* nsegments */
	    MLX5E_MAX_TX_MBUF_SIZE,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sq->dma_tag)))
		goto done;

	err = mlx5_alloc_map_uar(mdev, &sq->uar);
	if (err)
		goto err_free_dma_tag;

	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
	    &sq->wq_ctrl);
	if (err)
		goto err_unmap_free_uar;

	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	err = mlx5e_alloc_sq_db(sq);
	if (err)
		goto err_sq_wq_destroy;

	sq->mkey_be = c->mkey_be;
	sq->ifp = priv->ifp;
	sq->priv = priv;
	sq->tc = tc;
	sq->max_inline = priv->params.tx_max_inline;
	sq->min_inline_mode = priv->params.tx_min_inline_mode;
	sq->vlan_inline_cap = MLX5_CAP_ETH(mdev, wqe_vlan_insert);

	/* check if we should allocate a second packet buffer */
	if (priv->params_ethtool.tx_bufring_disable == 0) {
		sq->br = buf_ring_alloc(MLX5E_SQ_TX_QUEUE_SIZE, M_MLX5EN,
		    M_WAITOK, &sq->lock);
		if (sq->br == NULL) {
			if_printf(c->ifp, "%s: Failed allocating sq drbr buffer\n",
			    __func__);
			err = -ENOMEM;
			goto err_free_sq_db;
		}

		sq->sq_tq = taskqueue_create_fast("mlx5e_que", M_WAITOK,
		    taskqueue_thread_enqueue, &sq->sq_tq);
		if (sq->sq_tq == NULL) {
			if_printf(c->ifp, "%s: Failed allocating taskqueue\n",
			    __func__);
			err = -ENOMEM;
			goto err_free_drbr;
		}

		TASK_INIT(&sq->sq_task, 0, mlx5e_tx_que, sq);
#ifdef RSS
		cpu_id = rss_getcpu(c->ix % rss_getnumbuckets());
		CPU_SETOF(cpu_id, &cpu_mask);
		taskqueue_start_threads_cpuset(&sq->sq_tq, 1, PI_NET, &cpu_mask,
		    "%s TX SQ%d.%d CPU%d", c->ifp->if_xname, c->ix, tc, cpu_id);
#else
		taskqueue_start_threads(&sq->sq_tq, 1, PI_NET,
		    "%s TX SQ%d.%d", c->ifp->if_xname, c->ix, tc);
#endif
	}
	snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc);
	mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM,
	    sq->stats.arg);

	return (0);

err_free_drbr:
	buf_ring_free(sq->br, M_MLX5EN);
err_free_sq_db:
	mlx5e_free_sq_db(sq);
err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &sq->uar);

err_free_dma_tag:
	bus_dma_tag_destroy(sq->dma_tag);
done:
	return (err);
}

static void
mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
	/* destroy all sysctl nodes */
	sysctl_ctx_free(&sq->stats.ctx);

	mlx5e_free_sq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
	mlx5_unmap_free_uar(sq->priv->mdev, &sq->uar);
	if (sq->sq_tq != NULL) {
		taskqueue_drain(sq->sq_tq, &sq->sq_task);
		taskqueue_free(sq->sq_tq);
	}
	if (sq->br != NULL)
		buf_ring_free(sq->br, M_MLX5EN);
}

int
mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param,
    int tis_num)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
	    sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));

	MLX5_SET(sqc, sqc, tis_num_0, tis_num);
	MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, sq->uar.index);
	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
	    PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(sq->priv->mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return (err);
}

int
mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
{
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sqn, sq->sqn);
	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
	MLX5_SET(sqc, sqc, state, next_state);

	err = mlx5_core_modify_sq(sq->priv->mdev, in, inlen);

	kvfree(in);

	return (err);
}

void
mlx5e_disable_sq(struct mlx5e_sq *sq)
{

	mlx5_core_destroy_sq(sq->priv->mdev, sq->sqn);
}

static int
mlx5e_open_sq(struct mlx5e_channel *c,
    int tc,
    struct mlx5e_sq_param *param,
    struct mlx5e_sq *sq)
{
	int err;

	err = mlx5e_create_sq(c, tc, param, sq);
	if (err)
		return (err);

	err = mlx5e_enable_sq(sq, param, c->priv->tisn[tc]);
	if (err)
		goto err_destroy_sq;

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
	if (err)
		goto err_disable_sq;

	WRITE_ONCE(sq->queue_state, MLX5E_SQ_READY);

	return (0);

err_disable_sq:
	mlx5e_disable_sq(sq);
err_destroy_sq:
	mlx5e_destroy_sq(sq);

	return (err);
}

static void
mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep)
{
	/* fill up remainder with NOPs */
	while (sq->cev_counter != 0) {
		while (!mlx5e_sq_has_room_for(sq, 1)) {
			if (can_sleep != 0) {
				mtx_unlock(&sq->lock);
				msleep(4);
				mtx_lock(&sq->lock);
			} else {
				goto done;
			}
		}
		/* send a single NOP */
		mlx5e_send_nop(sq, 1);
		atomic_thread_fence_rel();
	}
done:
	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}
}
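
/*
 * Note on the doorbell handling above: the NOP WQEs are only queued
 * into the send ring; the accumulated 64-bit doorbell record is
 * written to the hardware once at the end, so a burst of NOPs costs a
 * single doorbell write.
 */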

void
mlx5e_sq_cev_timeout(void *arg)
{
	struct mlx5e_sq *sq = arg;

	mtx_assert(&sq->lock, MA_OWNED);

	/* check next state */
	switch (sq->cev_next_state) {
	case MLX5E_CEV_STATE_SEND_NOPS:
		/* fill TX ring with NOPs, if any */
		mlx5e_sq_send_nops_locked(sq, 0);

		/* check if completed */
		if (sq->cev_counter == 0) {
			sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
			return;
		}
		break;
	default:
		/* send NOPs on next timeout */
		sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS;
		break;
	}

	/* restart timer */
	callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq);
}
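
/*
 * The completion event factor timer above is a small two-step state
 * machine: the first timeout merely arms MLX5E_CEV_STATE_SEND_NOPS,
 * and the next one pads the ring with NOPs until "cev_counter"
 * reaches zero, at which point the timer stops rearming itself.
 */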

void
mlx5e_drain_sq(struct mlx5e_sq *sq)
{
	int error;
	struct mlx5_core_dev *mdev = sq->priv->mdev;

	/*
	 * Check if already stopped.
	 *
	 * NOTE: The "stopped" variable is only written when both the
	 * priv's configuration lock and the SQ's lock is locked. It
	 * can therefore safely be read when only one of the two locks
	 * is locked. This function is always called when the priv's
	 * configuration lock is locked.
	 */
	if (sq->stopped != 0)
		return;

	mtx_lock(&sq->lock);

	/* don't put more packets into the SQ */
	sq->stopped = 1;

	/* teardown event factor timer, if any */
	sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	callout_stop(&sq->cev_callout);

	/* send dummy NOPs in order to flush the transmit ring */
	mlx5e_sq_send_nops_locked(sq, 1);
	mtx_unlock(&sq->lock);

	/* make sure it is safe to free the callout */
	callout_drain(&sq->cev_callout);

	/* wait till SQ is empty or link is down */
	mtx_lock(&sq->lock);
	while (sq->cc != sq->pc &&
	    (sq->priv->media_status_last & IFM_ACTIVE) != 0 &&
	    mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mtx_unlock(&sq->lock);
		msleep(1);
		sq->cq.mcq.comp(&sq->cq.mcq);
		mtx_lock(&sq->lock);
	}
	mtx_unlock(&sq->lock);

	/* error out remaining requests */
	error = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
	if (error != 0) {
		if_printf(sq->ifp,
		    "mlx5e_modify_sq() from RDY to ERR failed: %d\n", error);
	}

	/* wait till SQ is empty */
	mtx_lock(&sq->lock);
	while (sq->cc != sq->pc &&
	       mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mtx_unlock(&sq->lock);
		msleep(1);
		sq->cq.mcq.comp(&sq->cq.mcq);
		mtx_lock(&sq->lock);
	}
	mtx_unlock(&sq->lock);
}

static void
mlx5e_close_sq_wait(struct mlx5e_sq *sq)
{

	mlx5e_drain_sq(sq);
	mlx5e_disable_sq(sq);
	mlx5e_destroy_sq(sq);
}

static int
mlx5e_create_cq(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param,
    struct mlx5e_cq *cq,
    mlx5e_cq_comp_t *comp,
    int eq_ix)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	int irqn;
	int err;
	u32 i;

	param->wq.buf_numa_node = 0;
	param->wq.db_numa_node = 0;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
	    &cq->wq_ctrl);
	if (err)
		return (err);

	mlx5_vector2eqn(mdev, eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz = 64;
	mcq->set_ci_db = cq->wq_ctrl.db.db;
	mcq->arm_db = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db = 0;
	mcq->vector = eq_ix;
	mcq->comp = comp;
	mcq->event = mlx5e_cq_error_event;
	mcq->irqn = irqn;
	mcq->uar = &priv->cq_uar;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->priv = priv;

	return (0);
}

static void
mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static int
mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param, int eq_ix)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	void *in;
	void *cqc;
	int inlen;
	int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
	    sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_array(&cq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(cq->priv->mdev, eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
	    PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(cq->priv->mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return (err);

	mlx5e_cq_arm(cq, MLX5_GET_DOORBELL_LOCK(&cq->priv->doorbell_lock));

	return (0);
}

static void
mlx5e_disable_cq(struct mlx5e_cq *cq)
{

	mlx5_core_destroy_cq(cq->priv->mdev, &cq->mcq);
}

int
mlx5e_open_cq(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param,
    struct mlx5e_cq *cq,
    mlx5e_cq_comp_t *comp,
    int eq_ix)
{
	int err;

	err = mlx5e_create_cq(priv, param, cq, comp, eq_ix);
	if (err)
		return (err);

	err = mlx5e_enable_cq(cq, param, eq_ix);
	if (err)
		goto err_destroy_cq;

	return (0);

err_destroy_cq:
	mlx5e_destroy_cq(cq);

	return (err);
}

void
mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_disable_cq(cq);
	mlx5e_destroy_cq(cq);
}

static int
mlx5e_open_tx_cqs(struct mlx5e_channel *c,
    struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		/* open completion queue */
		err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[tc].cq,
		    &mlx5e_tx_cq_comp, c->ix);
		if (err)
			goto err_close_tx_cqs;
	}
	return (0);

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return (err);
}

static void
mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}

static int
mlx5e_open_sqs(struct mlx5e_channel *c,
    struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return (0);

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_sq_wait(&c->sq[tc]);

	return (err);
}

static void
mlx5e_close_sqs_wait(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_sq_wait(&c->sq[tc]);
}

static void
mlx5e_chan_mtx_init(struct mlx5e_channel *c)
{
	int tc;

	mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF);

	callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0);

	for (tc = 0; tc < c->num_tc; tc++) {
		struct mlx5e_sq *sq = c->sq + tc;

		mtx_init(&sq->lock, "mlx5tx",
		    MTX_NETWORK_LOCK " TX", MTX_DEF);
		mtx_init(&sq->comp_lock, "mlx5comp",
		    MTX_NETWORK_LOCK " TX", MTX_DEF);

		callout_init_mtx(&sq->cev_callout, &sq->lock, 0);

		sq->cev_factor = c->priv->params_ethtool.tx_completion_fact;

		/* ensure the TX completion event factor is not zero */
		if (sq->cev_factor == 0)
			sq->cev_factor = 1;
	}
}

static void
mlx5e_chan_mtx_destroy(struct mlx5e_channel *c)
{
	int tc;

	mtx_destroy(&c->rq.mtx);

	for (tc = 0; tc < c->num_tc; tc++) {
		mtx_destroy(&c->sq[tc].lock);
		mtx_destroy(&c->sq[tc].comp_lock);
	}
}

static int
mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
    struct mlx5e_channel_param *cparam,
    struct mlx5e_channel *volatile *cp)
{
	struct mlx5e_channel *c;
	int err;

	c = malloc(sizeof(*c), M_MLX5EN, M_WAITOK | M_ZERO);
	c->priv = priv;
	c->ix = ix;
	c->cpu = 0;
	c->ifp = priv->ifp;
	c->mkey_be = cpu_to_be32(priv->mr.key);
	c->num_tc = priv->num_tc;

	/* init mutexes */
	mlx5e_chan_mtx_init(c);

	/* open transmit completion queue */
	err = mlx5e_open_tx_cqs(c, cparam);
	if (err)
		goto err_free;

	/* open receive completion queue */
	err = mlx5e_open_cq(c->priv, &cparam->rx_cq, &c->rq.cq,
	    &mlx5e_rx_cq_comp, c->ix);
	if (err)
		goto err_close_tx_cqs;

	err = mlx5e_open_sqs(c, cparam);
	if (err)
		goto err_close_rx_cq;

	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
	if (err)
		goto err_close_sqs;

	/* store channel pointer */
	*cp = c;

	/* poll receive queue initially */
	c->rq.cq.mcq.comp(&c->rq.cq.mcq);

	return (0);

err_close_sqs:
	mlx5e_close_sqs_wait(c);

err_close_rx_cq:
	mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_free:
	/* destroy mutexes */
	mlx5e_chan_mtx_destroy(c);
	free(c, M_MLX5EN);
	return (err);
}

static void
mlx5e_close_channel(struct mlx5e_channel *volatile *pp)
{
	struct mlx5e_channel *c = *pp;

	/* check if channel is already closed */
	if (c == NULL)
		return;
	mlx5e_close_rq(&c->rq);
}

static void
mlx5e_close_channel_wait(struct mlx5e_channel *volatile *pp)
{
	struct mlx5e_channel *c = *pp;

	/* check if channel is already closed */
	if (c == NULL)
		return;
	/* ensure channel pointer is no longer used */
	*pp = NULL;

	mlx5e_close_rq_wait(&c->rq);
	mlx5e_close_sqs_wait(c);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	/* destroy mutexes */
	mlx5e_chan_mtx_destroy(c);
	free(c, M_MLX5EN);
}

static int
mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs)
{
	u32 r, n;

	r = priv->params.hw_lro_en ? priv->params.lro_wqe_sz :
	    MLX5E_SW2MB_MTU(priv->ifp->if_mtu);
	if (r > MJUM16BYTES)
		return (-ENOMEM);

	if (r > MJUM9BYTES)
		r = MJUM16BYTES;
	else if (r > MJUMPAGESIZE)
		r = MJUM9BYTES;
	else if (r > MCLBYTES)
		r = MJUMPAGESIZE;
	else
		r = MCLBYTES;

	/*
	 * n + 1 must be a power of two, because stride size must be.
	 * Stride size is 16 * (n + 1), as the first segment is
	 * control.
	 */
	for (n = howmany(r, MLX5E_MAX_RX_BYTES); !powerof2(n + 1); n++)
		;

	*wqe_sz = r;
	*nsegs = n;
	return (0);
}
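
/*
 * Worked example for the function above (illustrative, assuming
 * MLX5E_MAX_RX_BYTES is 4096): with hardware LRO disabled and an MTU
 * that rounds "r" up to MJUM9BYTES (9216 bytes), howmany(9216, 4096)
 * yields n = 3, and n + 1 = 4 is already a power of two, so the
 * receive WQE describes three data segments with a
 * 16 * (3 + 1) = 64 byte stride.
 */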

static void
mlx5e_build_rq_param(struct mlx5e_priv *priv,
    struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 wqe_sz, nsegs;

	mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe) +
	    nsegs * sizeof(struct mlx5_wqe_data_seg)));
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
	MLX5_SET(wq, wq, pd, priv->pdn);

	param->wq.buf_numa_node = 0;
	param->wq.db_numa_node = 0;
	param->wq.linear = 1;
}

static void
mlx5e_build_sq_param(struct mlx5e_priv *priv,
    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, priv->pdn);

	param->wq.buf_numa_node = 0;
	param->wq.db_numa_node = 0;
	param->wq.linear = 1;
}

static void
mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
}

static void
mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;
	/*
	 * TODO: The sysctl controlling this is a boolean for now, which
	 * means only the CSUM mini-CQE format is supported. Once HASH is
	 * implemented, this will need to be revisited.
	 */
	if (priv->params.cqe_zipping_en) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_compression_en, 1);
	}

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
	MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
	MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);

	switch (priv->params.rx_cq_moderation_mode) {
	case 0:
		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
		break;
	default:
		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
		else
			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
		break;
	}

	mlx5e_build_common_cq_param(priv, param);
}
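
/*
 * Interrupt moderation note for the RX completion queue built above:
 * mode 0 restarts the moderation timer on any EQE, while any other
 * mode prefers restarting it on a CQE when the device reports the
 * "cq_period_start_from_cqe" capability, falling back to EQE-based
 * moderation otherwise. The TX variant below uses the same scheme.
 */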
1827
1828static void
1829mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
1830    struct mlx5e_cq_param *param)
1831{
1832	void *cqc = param->cqc;
1833
1834	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
1835	MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec);
1836	MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts);
1837
1838	switch (priv->params.tx_cq_moderation_mode) {
1839	case 0:
1840		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1841		break;
1842	default:
1843		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
1844			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
1845		else
1846			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1847		break;
1848	}
1849
1850	mlx5e_build_common_cq_param(priv, param);
1851}
1852
1853static void
1854mlx5e_build_channel_param(struct mlx5e_priv *priv,
1855    struct mlx5e_channel_param *cparam)
1856{
1857	memset(cparam, 0, sizeof(*cparam));
1858
1859	mlx5e_build_rq_param(priv, &cparam->rq);
1860	mlx5e_build_sq_param(priv, &cparam->sq);
1861	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
1862	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
1863}
1864
1865static int
1866mlx5e_open_channels(struct mlx5e_priv *priv)
1867{
1868	struct mlx5e_channel_param cparam;
1869	void *ptr;
1870	int err;
1871	int i;
1872	int j;
1873
1874	priv->channel = malloc(priv->params.num_channels *
1875	    sizeof(struct mlx5e_channel *), M_MLX5EN, M_WAITOK | M_ZERO);
1876
1877	mlx5e_build_channel_param(priv, &cparam);
1878	for (i = 0; i < priv->params.num_channels; i++) {
1879		err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
1880		if (err)
1881			goto err_close_channels;
1882	}
1883
1884	for (j = 0; j < priv->params.num_channels; j++) {
1885		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
1886		if (err)
1887			goto err_close_channels;
1888	}
1889
1890	return (0);
1891
1892err_close_channels:
1893	for (i--; i >= 0; i--) {
1894		mlx5e_close_channel(&priv->channel[i]);
1895		mlx5e_close_channel_wait(&priv->channel[i]);
1896	}
1897
1898	/* remove "volatile" attribute from "channel" pointer */
1899	ptr = __DECONST(void *, priv->channel);
1900	priv->channel = NULL;
1901
1902	free(ptr, M_MLX5EN);
1903
1904	return (err);
1905}
1906
1907static void
1908mlx5e_close_channels(struct mlx5e_priv *priv)
1909{
1910	void *ptr;
1911	int i;
1912
1913	if (priv->channel == NULL)
1914		return;
1915
1916	for (i = 0; i < priv->params.num_channels; i++)
1917		mlx5e_close_channel(&priv->channel[i]);
1918	for (i = 0; i < priv->params.num_channels; i++)
1919		mlx5e_close_channel_wait(&priv->channel[i]);
1920
1921	/* remove "volatile" attribute from "channel" pointer */
1922	ptr = __DECONST(void *, priv->channel);
1923	priv->channel = NULL;
1924
1925	free(ptr, M_MLX5EN);
1926}
1927
1928static int
1929mlx5e_refresh_sq_params(struct mlx5e_priv *priv, struct mlx5e_sq *sq)
1930{
1931
1932	if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
1933		uint8_t cq_mode;
1934
1935		switch (priv->params.tx_cq_moderation_mode) {
1936		case 0:
1937			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
1938			break;
1939		default:
1940			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
1941			break;
1942		}
1943
1944		return (mlx5_core_modify_cq_moderation_mode(priv->mdev, &sq->cq.mcq,
1945		    priv->params.tx_cq_moderation_usec,
1946		    priv->params.tx_cq_moderation_pkts,
1947		    cq_mode));
1948	}
1949
1950	return (mlx5_core_modify_cq_moderation(priv->mdev, &sq->cq.mcq,
1951	    priv->params.tx_cq_moderation_usec,
1952	    priv->params.tx_cq_moderation_pkts));
1953}
1954
1955static int
1956mlx5e_refresh_rq_params(struct mlx5e_priv *priv, struct mlx5e_rq *rq)
1957{
1958
1959	if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
1960		uint8_t cq_mode;
1961		int retval;
1962
1963		switch (priv->params.rx_cq_moderation_mode) {
1964		case 0:
1965			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
1966			break;
1967		default:
1968			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
1969			break;
1970		}
1971
1972		retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq,
1973		    priv->params.rx_cq_moderation_usec,
1974		    priv->params.rx_cq_moderation_pkts,
1975		    cq_mode);
1976
1977		return (retval);
1978	}
1979
1980	return (mlx5_core_modify_cq_moderation(priv->mdev, &rq->cq.mcq,
1981	    priv->params.rx_cq_moderation_usec,
1982	    priv->params.rx_cq_moderation_pkts));
1983}
1984
1985static int
1986mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
1987{
1988	int err;
1989	int i;
1990
1991	if (c == NULL)
1992		return (EINVAL);
1993
1994	err = mlx5e_refresh_rq_params(priv, &c->rq);
1995	if (err)
1996		goto done;
1997
1998	for (i = 0; i != c->num_tc; i++) {
1999		err = mlx5e_refresh_sq_params(priv, &c->sq[i]);
2000		if (err)
2001			goto done;
2002	}
2003done:
2004	return (err);
2005}
2006
2007int
2008mlx5e_refresh_channel_params(struct mlx5e_priv *priv)
2009{
2010	int i;
2011
2012	if (priv->channel == NULL)
2013		return (EINVAL);
2014
2015	for (i = 0; i < priv->params.num_channels; i++) {
2016		int err;
2017
2018		err = mlx5e_refresh_channel_params_sub(priv, priv->channel[i]);
2019		if (err)
2020			return (err);
2021	}
2022	return (0);
2023}
2024
2025static int
2026mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
2027{
2028	struct mlx5_core_dev *mdev = priv->mdev;
2029	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
2030	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
2031
2032	memset(in, 0, sizeof(in));
2033
2034	MLX5_SET(tisc, tisc, prio, tc);
2035	MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
2036
2037	return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]));
2038}
2039
2040static void
2041mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
2042{
2043	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
2044}
2045
2046static int
2047mlx5e_open_tises(struct mlx5e_priv *priv)
2048{
2049	int num_tc = priv->num_tc;
2050	int err;
2051	int tc;
2052
2053	for (tc = 0; tc < num_tc; tc++) {
2054		err = mlx5e_open_tis(priv, tc);
2055		if (err)
2056			goto err_close_tises;
2057	}
2058
2059	return (0);
2060
2061err_close_tises:
2062	for (tc--; tc >= 0; tc--)
2063		mlx5e_close_tis(priv, tc);
2064
2065	return (err);
2066}
2067
2068static void
2069mlx5e_close_tises(struct mlx5e_priv *priv)
2070{
2071	int num_tc = priv->num_tc;
2072	int tc;
2073
2074	for (tc = 0; tc < num_tc; tc++)
2075		mlx5e_close_tis(priv, tc);
2076}
2077
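/*
 * The RQT (receive queue table) implements the RSS indirection: each
 * of the "sz" slots holds an RQ number and the receive hash selects a
 * slot. As a sketch, with 4 channels and a table size of 8 the slots
 * below would reference the RQs of channels 0,1,2,3,0,1,2,3, because
 * the index is reduced modulo the number of channels.
 */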
2078static int
2079mlx5e_open_rqt(struct mlx5e_priv *priv)
2080{
2081	struct mlx5_core_dev *mdev = priv->mdev;
2082	u32 *in;
2083	u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
2084	void *rqtc;
2085	int inlen;
2086	int err;
2087	int sz;
2088	int i;
2089
2090	sz = 1 << priv->params.rx_hash_log_tbl_sz;
2091
2092	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
2093	in = mlx5_vzalloc(inlen);
2094	if (in == NULL)
2095		return (-ENOMEM);
2096	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
2097
2098	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2099	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
2100
2101	for (i = 0; i < sz; i++) {
2102		int ix;
2103#ifdef RSS
2104		ix = rss_get_indirection_to_bucket(i);
2105#else
2106		ix = i;
2107#endif
2108		/* ensure we don't overflow */
2109		ix %= priv->params.num_channels;
2110		MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
2111	}
2112
2113	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
2114
2115	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
2116	if (!err)
2117		priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
2118
2119	kvfree(in);
2120
2121	return (err);
2122}
2123
2124static void
2125mlx5e_close_rqt(struct mlx5e_priv *priv)
2126{
2127	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0};
2128	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0};
2129
2130	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
2131	MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
2132
2133	mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
2134}
2135
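/*
 * A TIR (transport interface receive) object describes how received
 * traffic of a given type is dispatched: MLX5E_TT_ANY uses a direct
 * TIR pointing at channel 0's RQ, while the other traffic types use
 * an indirect TIR which spreads flows across the RQT by a Toeplitz
 * hash over the fields selected below. The fixed, driver-chosen key
 * used in the non-RSS case together with "rx_hash_symmetric" should
 * make both directions of a flow land on the same queue.
 */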
2136static void
2137mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
2138{
2139	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2140	__be32 *hkey;
2141
2142	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
2143
2144#define	ROUGH_MAX_L2_L3_HDR_SZ 256
2145
2146#define	MLX5_HASH_IP     (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2147			  MLX5_HASH_FIELD_SEL_DST_IP)
2148
2149#define	MLX5_HASH_ALL    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2150			  MLX5_HASH_FIELD_SEL_DST_IP   |\
2151			  MLX5_HASH_FIELD_SEL_L4_SPORT |\
2152			  MLX5_HASH_FIELD_SEL_L4_DPORT)
2153
2154#define	MLX5_HASH_IP_IPSEC_SPI	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
2155				 MLX5_HASH_FIELD_SEL_DST_IP   |\
2156				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2157
2158	if (priv->params.hw_lro_en) {
2159		MLX5_SET(tirc, tirc, lro_enable_mask,
2160		    MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2161		    MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
2162		MLX5_SET(tirc, tirc, lro_max_msg_sz,
2163		    (priv->params.lro_wqe_sz -
2164		    ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
2165		/* TODO: add the option to choose timer value dynamically */
2166		MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
2167		    MLX5_CAP_ETH(priv->mdev,
2168		    lro_timer_supported_periods[2]));
2169	}
2170
2171	/* setup parameters for hashing TIR type, if any */
2172	switch (tt) {
2173	case MLX5E_TT_ANY:
2174		MLX5_SET(tirc, tirc, disp_type,
2175		    MLX5_TIRC_DISP_TYPE_DIRECT);
2176		MLX5_SET(tirc, tirc, inline_rqn,
2177		    priv->channel[0]->rq.rqn);
2178		break;
2179	default:
2180		MLX5_SET(tirc, tirc, disp_type,
2181		    MLX5_TIRC_DISP_TYPE_INDIRECT);
2182		MLX5_SET(tirc, tirc, indirect_table,
2183		    priv->rqtn);
2184		MLX5_SET(tirc, tirc, rx_hash_fn,
2185		    MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
2186		hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
2187#ifdef RSS
2188		/*
2189		 * The FreeBSD RSS implementation does not currently
2190		 * support symmetric Toeplitz hashes:
2191		 */
2192		MLX5_SET(tirc, tirc, rx_hash_symmetric, 0);
2193		rss_getkey((uint8_t *)hkey);
2194#else
2195		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
2196		hkey[0] = cpu_to_be32(0xD181C62C);
2197		hkey[1] = cpu_to_be32(0xF7F4DB5B);
2198		hkey[2] = cpu_to_be32(0x1983A2FC);
2199		hkey[3] = cpu_to_be32(0x943E1ADB);
2200		hkey[4] = cpu_to_be32(0xD9389E6B);
2201		hkey[5] = cpu_to_be32(0xD1039C2C);
2202		hkey[6] = cpu_to_be32(0xA74499AD);
2203		hkey[7] = cpu_to_be32(0x593D56D9);
2204		hkey[8] = cpu_to_be32(0xF3253C06);
2205		hkey[9] = cpu_to_be32(0x2ADC1FFC);
2206#endif
2207		break;
2208	}
2209
2210	switch (tt) {
2211	case MLX5E_TT_IPV4_TCP:
2212		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2213		    MLX5_L3_PROT_TYPE_IPV4);
2214		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2215		    MLX5_L4_PROT_TYPE_TCP);
2216#ifdef RSS
2217		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) {
2218			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2219			    MLX5_HASH_IP);
2220		} else
2221#endif
2222		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2223		    MLX5_HASH_ALL);
2224		break;
2225
2226	case MLX5E_TT_IPV6_TCP:
2227		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2228		    MLX5_L3_PROT_TYPE_IPV6);
2229		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2230		    MLX5_L4_PROT_TYPE_TCP);
2231#ifdef RSS
2232		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) {
2233			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2234			    MLX5_HASH_IP);
2235		} else
2236#endif
2237		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2238		    MLX5_HASH_ALL);
2239		break;
2240
2241	case MLX5E_TT_IPV4_UDP:
2242		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2243		    MLX5_L3_PROT_TYPE_IPV4);
2244		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2245		    MLX5_L4_PROT_TYPE_UDP);
2246#ifdef RSS
2247		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) {
2248			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2249			    MLX5_HASH_IP);
2250		} else
2251#endif
2252		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2253		    MLX5_HASH_ALL);
2254		break;
2255
2256	case MLX5E_TT_IPV6_UDP:
2257		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2258		    MLX5_L3_PROT_TYPE_IPV6);
2259		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2260		    MLX5_L4_PROT_TYPE_UDP);
2261#ifdef RSS
2262		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) {
2263			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2264			    MLX5_HASH_IP);
2265		} else
2266#endif
2267		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2268		    MLX5_HASH_ALL);
2269		break;
2270
2271	case MLX5E_TT_IPV4_IPSEC_AH:
2272		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2273		    MLX5_L3_PROT_TYPE_IPV4);
2274		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2275		    MLX5_HASH_IP_IPSEC_SPI);
2276		break;
2277
2278	case MLX5E_TT_IPV6_IPSEC_AH:
2279		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2280		    MLX5_L3_PROT_TYPE_IPV6);
2281		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2282		    MLX5_HASH_IP_IPSEC_SPI);
2283		break;
2284
2285	case MLX5E_TT_IPV4_IPSEC_ESP:
2286		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2287		    MLX5_L3_PROT_TYPE_IPV4);
2288		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2289		    MLX5_HASH_IP_IPSEC_SPI);
2290		break;
2291
2292	case MLX5E_TT_IPV6_IPSEC_ESP:
2293		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2294		    MLX5_L3_PROT_TYPE_IPV6);
2295		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2296		    MLX5_HASH_IP_IPSEC_SPI);
2297		break;
2298
2299	case MLX5E_TT_IPV4:
2300		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2301		    MLX5_L3_PROT_TYPE_IPV4);
2302		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2303		    MLX5_HASH_IP);
2304		break;
2305
2306	case MLX5E_TT_IPV6:
2307		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2308		    MLX5_L3_PROT_TYPE_IPV6);
2309		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2310		    MLX5_HASH_IP);
2311		break;
2312
2313	default:
2314		break;
2315	}
2316}
2317
2318static int
2319mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
2320{
2321	struct mlx5_core_dev *mdev = priv->mdev;
2322	u32 *in;
2323	void *tirc;
2324	int inlen;
2325	int err;
2326
2327	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2328	in = mlx5_vzalloc(inlen);
2329	if (in == NULL)
2330		return (-ENOMEM);
2331	tirc = MLX5_ADDR_OF(create_tir_in, in, tir_context);
2332
2333	mlx5e_build_tir_ctx(priv, tirc, tt);
2334
2335	err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
2336
2337	kvfree(in);
2338
2339	return (err);
2340}
2341
2342static void
2343mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
2344{
2345	mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
2346}
2347
2348static int
2349mlx5e_open_tirs(struct mlx5e_priv *priv)
2350{
2351	int err;
2352	int i;
2353
2354	for (i = 0; i < MLX5E_NUM_TT; i++) {
2355		err = mlx5e_open_tir(priv, i);
2356		if (err)
2357			goto err_close_tirs;
2358	}
2359
2360	return (0);
2361
2362err_close_tirs:
2363	for (i--; i >= 0; i--)
2364		mlx5e_close_tir(priv, i);
2365
2366	return (err);
2367}
2368
2369static void
2370mlx5e_close_tirs(struct mlx5e_priv *priv)
2371{
2372	int i;
2373
2374	for (i = 0; i < MLX5E_NUM_TT; i++)
2375		mlx5e_close_tir(priv, i);
2376}
2377
2378/*
2379 * SW MTU does not include headers,
2380 * HW MTU includes all headers and checksums.
2381 */
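/*
 * As an example, assuming MLX5E_SW2HW_MTU() adds the Ethernet header
 * (14 bytes), one VLAN tag (4 bytes) and the FCS (4 bytes), a SW MTU
 * of 1500 corresponds to a HW MTU of 1522. The authoritative offsets
 * are the MLX5E_SW2HW_MTU()/MLX5E_HW2SW_MTU() macros in en.h.
 */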
2382static int
2383mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu)
2384{
2385	struct mlx5e_priv *priv = ifp->if_softc;
2386	struct mlx5_core_dev *mdev = priv->mdev;
2387	int hw_mtu;
2388	int err;
2389
2390	hw_mtu = MLX5E_SW2HW_MTU(sw_mtu);
2391
2392	err = mlx5_set_port_mtu(mdev, hw_mtu);
2393	if (err) {
2394		if_printf(ifp, "%s: mlx5_set_port_mtu failed setting %d, err=%d\n",
2395		    __func__, sw_mtu, err);
2396		return (err);
2397	}
2398
2399	/* Update vport context MTU */
2400	err = mlx5_set_vport_mtu(mdev, hw_mtu);
2401	if (err) {
2402		if_printf(ifp, "%s: Failed updating vport context with MTU size, err=%d\n",
2403		    __func__, err);
2404	}
2405
2406	ifp->if_mtu = sw_mtu;
2407
2408	err = mlx5_query_vport_mtu(mdev, &hw_mtu);
2409	if (err || !hw_mtu) {
2410		/* fallback to port oper mtu */
2411		err = mlx5_query_port_oper_mtu(mdev, &hw_mtu);
2412	}
2413	if (err) {
2414		if_printf(ifp, "Querying port MTU after setting the new "
2415		    "MTU value failed\n");
2416		return (err);
2417	} else if (MLX5E_HW2SW_MTU(hw_mtu) < sw_mtu) {
2418		err = -E2BIG;
2419		if_printf(ifp, "Port MTU %d is smaller than "
2420		    "ifp mtu %d\n", hw_mtu, sw_mtu);
2421	} else if (MLX5E_HW2SW_MTU(hw_mtu) > sw_mtu) {
2422		err = -EINVAL;
2423		if_printf(ifp, "Port MTU %d is bigger than "
2424		    "ifp mtu %d\n", hw_mtu, sw_mtu);
2425	}
2426	priv->params_ethtool.hw_mtu = hw_mtu;
2427
2428	return (err);
2429}
2430
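/*
 * The bring-up order below matters: TISes and the queue counter are
 * created first, then the channels (RQs, SQs and CQs), then the RQT
 * which references the RQs, then the TIRs which reference the RQT,
 * and finally the flow table rules which reference the TIRs. The
 * error unwind and mlx5e_close_locked() tear everything down in the
 * reverse order.
 */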
2431int
2432mlx5e_open_locked(struct ifnet *ifp)
2433{
2434	struct mlx5e_priv *priv = ifp->if_softc;
2435	int err;
2436	u16 set_id;
2437
2438	/* check if already opened */
2439	if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
2440		return (0);
2441
2442#ifdef RSS
2443	if (rss_getnumbuckets() > priv->params.num_channels) {
2444		if_printf(ifp, "NOTE: There are more RSS buckets (%u) than "
2445		    "channels (%u) available\n", rss_getnumbuckets(),
2446		    priv->params.num_channels);
2447	}
2448#endif
2449	err = mlx5e_open_tises(priv);
2450	if (err) {
2451		if_printf(ifp, "%s: mlx5e_open_tises failed, %d\n",
2452		    __func__, err);
2453		return (err);
2454	}
2455	err = mlx5_vport_alloc_q_counter(priv->mdev,
2456	    MLX5_INTERFACE_PROTOCOL_ETH, &set_id);
2457	if (err) {
2458		if_printf(priv->ifp,
2459		    "%s: mlx5_vport_alloc_q_counter failed: %d\n",
2460		    __func__, err);
2461		goto err_close_tises;
2462	}
2463	/* store counter set ID */
2464	priv->counter_set_id = set_id;
2465
2466	err = mlx5e_open_channels(priv);
2467	if (err) {
2468		if_printf(ifp, "%s: mlx5e_open_channels failed, %d\n",
2469		    __func__, err);
2470		goto err_dealloc_q_counter;
2471	}
2472	err = mlx5e_open_rqt(priv);
2473	if (err) {
2474		if_printf(ifp, "%s: mlx5e_open_rqt failed, %d\n",
2475		    __func__, err);
2476		goto err_close_channels;
2477	}
2478	err = mlx5e_open_tirs(priv);
2479	if (err) {
2480		if_printf(ifp, "%s: mlx5e_open_tirs failed, %d\n",
2481		    __func__, err);
2482		goto err_close_rqt;
2483	}
2484	err = mlx5e_open_flow_table(priv);
2485	if (err) {
2486		if_printf(ifp, "%s: mlx5e_open_flow_table failed, %d\n",
2487		    __func__, err);
2488		goto err_close_tirs;
2489	}
2490	err = mlx5e_add_all_vlan_rules(priv);
2491	if (err) {
2492		if_printf(ifp, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
2493		    __func__, err);
2494		goto err_close_flow_table;
2495	}
2496	set_bit(MLX5E_STATE_OPENED, &priv->state);
2497
2498	mlx5e_update_carrier(priv);
2499	mlx5e_set_rx_mode_core(priv);
2500
2501	return (0);
2502
2503err_close_flow_table:
2504	mlx5e_close_flow_table(priv);
2505
2506err_close_tirs:
2507	mlx5e_close_tirs(priv);
2508
2509err_close_rqt:
2510	mlx5e_close_rqt(priv);
2511
2512err_close_channels:
2513	mlx5e_close_channels(priv);
2514
2515err_dealloc_q_counter:
2516	mlx5_vport_dealloc_q_counter(priv->mdev,
2517	    MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
2518
2519err_close_tises:
2520	mlx5e_close_tises(priv);
2521
2522	return (err);
2523}
2524
2525static void
2526mlx5e_open(void *arg)
2527{
2528	struct mlx5e_priv *priv = arg;
2529
2530	PRIV_LOCK(priv);
2531	if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP))
2532		if_printf(priv->ifp,
2533		    "%s: Setting port status to up failed\n",
2534		    __func__);
2535
2536	mlx5e_open_locked(priv->ifp);
2537	priv->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2538	PRIV_UNLOCK(priv);
2539}
2540
2541int
2542mlx5e_close_locked(struct ifnet *ifp)
2543{
2544	struct mlx5e_priv *priv = ifp->if_softc;
2545
2546	/* check if already closed */
2547	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
2548		return (0);
2549
2550	clear_bit(MLX5E_STATE_OPENED, &priv->state);
2551
2552	mlx5e_set_rx_mode_core(priv);
2553	mlx5e_del_all_vlan_rules(priv);
2554	if_link_state_change(priv->ifp, LINK_STATE_DOWN);
2555	mlx5e_close_flow_table(priv);
2556	mlx5e_close_tirs(priv);
2557	mlx5e_close_rqt(priv);
2558	mlx5e_close_channels(priv);
2559	mlx5_vport_dealloc_q_counter(priv->mdev,
2560	    MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
2561	mlx5e_close_tises(priv);
2562
2563	return (0);
2564}
2565
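/*
 * The if_get_counter() callback may be invoked in contexts where
 * taking the private lock is not allowed, hence the XXX comments
 * below; the counters returned are therefore only as fresh as the
 * last statistics update and individual reads may be slightly stale.
 */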
2566#if (__FreeBSD_version >= 1100000)
2567static uint64_t
2568mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt)
2569{
2570	struct mlx5e_priv *priv = ifp->if_softc;
2571	u64 retval;
2572
2573	/* PRIV_LOCK(priv); XXX not allowed */
2574	switch (cnt) {
2575	case IFCOUNTER_IPACKETS:
2576		retval = priv->stats.vport.rx_packets;
2577		break;
2578	case IFCOUNTER_IERRORS:
2579		retval = priv->stats.vport.rx_error_packets +
2580		    priv->stats.pport.alignment_err +
2581		    priv->stats.pport.check_seq_err +
2582		    priv->stats.pport.crc_align_errors +
2583		    priv->stats.pport.in_range_len_errors +
2584		    priv->stats.pport.jabbers +
2585		    priv->stats.pport.out_of_range_len +
2586		    priv->stats.pport.oversize_pkts +
2587		    priv->stats.pport.symbol_err +
2588		    priv->stats.pport.too_long_errors +
2589		    priv->stats.pport.undersize_pkts +
2590		    priv->stats.pport.unsupported_op_rx;
2591		break;
2592	case IFCOUNTER_IQDROPS:
2593		retval = priv->stats.vport.rx_out_of_buffer +
2594		    priv->stats.pport.drop_events;
2595		break;
2596	case IFCOUNTER_OPACKETS:
2597		retval = priv->stats.vport.tx_packets;
2598		break;
2599	case IFCOUNTER_OERRORS:
2600		retval = priv->stats.vport.tx_error_packets;
2601		break;
2602	case IFCOUNTER_IBYTES:
2603		retval = priv->stats.vport.rx_bytes;
2604		break;
2605	case IFCOUNTER_OBYTES:
2606		retval = priv->stats.vport.tx_bytes;
2607		break;
2608	case IFCOUNTER_IMCASTS:
2609		retval = priv->stats.vport.rx_multicast_packets;
2610		break;
2611	case IFCOUNTER_OMCASTS:
2612		retval = priv->stats.vport.tx_multicast_packets;
2613		break;
2614	case IFCOUNTER_OQDROPS:
2615		retval = priv->stats.vport.tx_queue_dropped;
2616		break;
2617	case IFCOUNTER_COLLISIONS:
2618		retval = priv->stats.pport.collisions;
2619		break;
2620	default:
2621		retval = if_get_counter_default(ifp, cnt);
2622		break;
2623	}
2624	/* PRIV_UNLOCK(priv); XXX not allowed */
2625	return (retval);
2626}
2627#endif
2628
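/*
 * Reprogramming the RX mode involves firmware commands which may
 * sleep, so the actual update is deferred to the driver's workqueue
 * instead of being performed directly in the caller's context.
 */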
2629static void
2630mlx5e_set_rx_mode(struct ifnet *ifp)
2631{
2632	struct mlx5e_priv *priv = ifp->if_softc;
2633
2634	queue_work(priv->wq, &priv->set_rx_mode_work);
2635}
2636
2637static int
2638mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2639{
2640	struct mlx5e_priv *priv;
2641	struct ifreq *ifr;
2642	struct ifi2creq i2c;
2643	int error = 0;
2644	int mask = 0;
2645	int size_read = 0;
2646	int module_status;
2647	int module_num;
2648	int max_mtu;
2649	uint8_t read_addr;
2650
2651	priv = ifp->if_softc;
2652
2653	/* check if detaching */
2654	if (priv == NULL || priv->gone != 0)
2655		return (ENXIO);
2656
2657	switch (command) {
2658	case SIOCSIFMTU:
2659		ifr = (struct ifreq *)data;
2660
2661		PRIV_LOCK(priv);
2662		mlx5_query_port_max_mtu(priv->mdev, &max_mtu);
2663
2664		if (ifr->ifr_mtu >= MLX5E_MTU_MIN &&
2665		    ifr->ifr_mtu <= MIN(MLX5E_MTU_MAX, max_mtu)) {
2666			int was_opened;
2667
2668			was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2669			if (was_opened)
2670				mlx5e_close_locked(ifp);
2671
2672			/* set new MTU */
2673			mlx5e_set_dev_port_mtu(ifp, ifr->ifr_mtu);
2674
2675			if (was_opened)
2676				mlx5e_open_locked(ifp);
2677		} else {
2678			error = EINVAL;
2679			if_printf(ifp, "Invalid MTU value. Min val: %d, Max val: %d\n",
2680			    MLX5E_MTU_MIN, MIN(MLX5E_MTU_MAX, max_mtu));
2681		}
2682		PRIV_UNLOCK(priv);
2683		break;
2684	case SIOCSIFFLAGS:
2685		if ((ifp->if_flags & IFF_UP) &&
2686		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2687			mlx5e_set_rx_mode(ifp);
2688			break;
2689		}
2690		PRIV_LOCK(priv);
2691		if (ifp->if_flags & IFF_UP) {
2692			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2693				if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
2694					mlx5e_open_locked(ifp);
2695				ifp->if_drv_flags |= IFF_DRV_RUNNING;
2696				mlx5_set_port_status(priv->mdev, MLX5_PORT_UP);
2697			}
2698		} else {
2699			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2700				mlx5_set_port_status(priv->mdev,
2701				    MLX5_PORT_DOWN);
2702				if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
2703					mlx5e_close_locked(ifp);
2704				mlx5e_update_carrier(priv);
2705				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2706			}
2707		}
2708		PRIV_UNLOCK(priv);
2709		break;
2710	case SIOCADDMULTI:
2711	case SIOCDELMULTI:
2712		mlx5e_set_rx_mode(ifp);
2713		break;
2714	case SIOCSIFMEDIA:
2715	case SIOCGIFMEDIA:
2716	case SIOCGIFXMEDIA:
2717		ifr = (struct ifreq *)data;
2718		error = ifmedia_ioctl(ifp, ifr, &priv->media, command);
2719		break;
2720	case SIOCSIFCAP:
2721		ifr = (struct ifreq *)data;
2722		PRIV_LOCK(priv);
2723		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2724
2725		if (mask & IFCAP_TXCSUM) {
2726			ifp->if_capenable ^= IFCAP_TXCSUM;
2727			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2728
2729			if (IFCAP_TSO4 & ifp->if_capenable &&
2730			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
2731				ifp->if_capenable &= ~IFCAP_TSO4;
2732				ifp->if_hwassist &= ~CSUM_IP_TSO;
2733				if_printf(ifp,
2734				    "tso4 disabled due to -txcsum.\n");
2735			}
2736		}
2737		if (mask & IFCAP_TXCSUM_IPV6) {
2738			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
2739			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2740
2741			if (IFCAP_TSO6 & ifp->if_capenable &&
2742			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2743				ifp->if_capenable &= ~IFCAP_TSO6;
2744				ifp->if_hwassist &= ~CSUM_IP6_TSO;
2745				if_printf(ifp,
2746				    "tso6 disabled due to -txcsum6.\n");
2747			}
2748		}
2749		if (mask & IFCAP_RXCSUM)
2750			ifp->if_capenable ^= IFCAP_RXCSUM;
2751		if (mask & IFCAP_RXCSUM_IPV6)
2752			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
2753		if (mask & IFCAP_TSO4) {
2754			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
2755			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
2756				if_printf(ifp, "enable txcsum first.\n");
2757				error = EAGAIN;
2758				goto out;
2759			}
2760			ifp->if_capenable ^= IFCAP_TSO4;
2761			ifp->if_hwassist ^= CSUM_IP_TSO;
2762		}
2763		if (mask & IFCAP_TSO6) {
2764			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
2765			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2766				if_printf(ifp, "enable txcsum6 first.\n");
2767				error = EAGAIN;
2768				goto out;
2769			}
2770			ifp->if_capenable ^= IFCAP_TSO6;
2771			ifp->if_hwassist ^= CSUM_IP6_TSO;
2772		}
2773		if (mask & IFCAP_VLAN_HWFILTER) {
2774			if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2775				mlx5e_disable_vlan_filter(priv);
2776			else
2777				mlx5e_enable_vlan_filter(priv);
2778
2779			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
2780		}
2781		if (mask & IFCAP_VLAN_HWTAGGING)
2782			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2783		if (mask & IFCAP_WOL_MAGIC)
2784			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2785
2786		VLAN_CAPABILITIES(ifp);
2787		/* Turning off LRO also turns off HW LRO, if enabled. */
2788		if (mask & IFCAP_LRO) {
2789			int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2790			bool need_restart = false;
2791
2792			ifp->if_capenable ^= IFCAP_LRO;
2793			if (!(ifp->if_capenable & IFCAP_LRO)) {
2794				if (priv->params.hw_lro_en) {
2795					priv->params.hw_lro_en = false;
2796					need_restart = true;
2797					/* Not sure this is the correct way */
2798					priv->params_ethtool.hw_lro = priv->params.hw_lro_en;
2799				}
2800			}
2801			if (was_opened && need_restart) {
2802				mlx5e_close_locked(ifp);
2803				mlx5e_open_locked(ifp);
2804			}
2805		}
2806out:
2807		PRIV_UNLOCK(priv);
2808		break;
2809
2810	case SIOCGI2C:
2811		ifr = (struct ifreq *)data;
2812
2813		/*
2814		 * Copy from the user-space address ifr_data to the
2815		 * kernel-space address i2c
2816		 */
2817		error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2818		if (error)
2819			break;
2820
2821		if (i2c.len > sizeof(i2c.data)) {
2822			error = EINVAL;
2823			break;
2824		}
2825
2826		PRIV_LOCK(priv);
2827		/* Get module_num which is required for the query_eeprom */
2828		error = mlx5_query_module_num(priv->mdev, &module_num);
2829		if (error) {
2830			if_printf(ifp, "Query module num failed, eeprom "
2831			    "reading is not supported\n");
2832			error = EINVAL;
2833			goto err_i2c;
2834		}
2835		/* Check if module is present before doing an access */
2836		module_status = mlx5_query_module_status(priv->mdev, module_num);
2837		if (module_status != MLX5_MODULE_STATUS_PLUGGED_ENABLED &&
2838		    module_status != MLX5_MODULE_STATUS_PLUGGED_DISABLED) {
2839			error = EINVAL;
2840			goto err_i2c;
2841		}
2842		/*
2843		 * Currently 0xA0 and 0xA2 are the only addresses permitted.
2844		 * The internal conversion is as follows:
2845		 */
2846		if (i2c.dev_addr == 0xA0)
2847			read_addr = MLX5E_I2C_ADDR_LOW;
2848		else if (i2c.dev_addr == 0xA2)
2849			read_addr = MLX5E_I2C_ADDR_HIGH;
2850		else {
2851			if_printf(ifp, "Query eeprom failed, "
2852			    "Invalid Address: %X\n", i2c.dev_addr);
2853			error = EINVAL;
2854			goto err_i2c;
2855		}
2856		error = mlx5_query_eeprom(priv->mdev,
2857		    read_addr, MLX5E_EEPROM_LOW_PAGE,
2858		    (uint32_t)i2c.offset, (uint32_t)i2c.len, module_num,
2859		    (uint32_t *)i2c.data, &size_read);
2860		if (error) {
2861			if_printf(ifp, "Query eeprom failed, eeprom "
2862			    "reading is not supported\n");
2863			error = EINVAL;
2864			goto err_i2c;
2865		}
2866
2867		if (i2c.len > MLX5_EEPROM_MAX_BYTES) {
2868			error = mlx5_query_eeprom(priv->mdev,
2869			    read_addr, MLX5E_EEPROM_LOW_PAGE,
2870			    (uint32_t)(i2c.offset + size_read),
2871			    (uint32_t)(i2c.len - size_read), module_num,
2872			    (uint32_t *)(i2c.data + size_read), &size_read);
2873		}
2874		if (error) {
2875			if_printf(ifp, "Query eeprom failed, eeprom "
2876			    "reading is not supported\n");
2877			error = EINVAL;
2878			goto err_i2c;
2879		}
2880
2881		error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2882err_i2c:
2883		PRIV_UNLOCK(priv);
2884		break;
2885
2886	default:
2887		error = ether_ioctl(ifp, command, data);
2888		break;
2889	}
2890	return (error);
2891}
2892
2893static int
2894mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
2895{
2896	/*
2897	 * TODO: uncomment once FW really sets all these bits if
2898	 * (!mdev->caps.eth.rss_ind_tbl_cap || !mdev->caps.eth.csum_cap ||
2899	 * !mdev->caps.eth.max_lso_cap || !mdev->caps.eth.vlan_cap ||
2900	 * !(mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD)) return
2901	 * -ENOTSUPP;
2902	 */
2903
2904	/* TODO: add more must-have features */
2905
2906	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
2907		return (-ENODEV);
2908
2909	return (0);
2910}
2911
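/*
 * The amount of packet headers which can be inlined into a send WQE
 * is roughly bounded by half of the blueflame register size minus the
 * fixed TX WQE overhead; the two bytes added back account for the
 * inline_hdr_start field which is already part of the WQE.
 */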
2912static u16
2913mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
2914{
2915	int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
2916
2917	return (bf_buf_size -
2918	    sizeof(struct mlx5e_tx_wqe) +
2919	    2 /* sizeof(mlx5e_tx_wqe.inline_hdr_start) */);
2920}
2921
2922static void
2923mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev,
2924    struct mlx5e_priv *priv,
2925    int num_comp_vectors)
2926{
2927	/*
2928	 * TODO: Consider link speed for setting "log_sq_size",
2929	 * "log_rq_size" and "cq_moderation_xxx":
2930	 */
2931	priv->params.log_sq_size =
2932	    MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
2933	priv->params.log_rq_size =
2934	    MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
2935	priv->params.rx_cq_moderation_usec =
2936	    MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
2937	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE :
2938	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
2939	priv->params.rx_cq_moderation_mode =
2940	    MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 1 : 0;
2941	priv->params.rx_cq_moderation_pkts =
2942	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
2943	priv->params.tx_cq_moderation_usec =
2944	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
2945	priv->params.tx_cq_moderation_pkts =
2946	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
2947	priv->params.min_rx_wqes =
2948	    MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
2949	priv->params.rx_hash_log_tbl_sz =
2950	    (order_base_2(num_comp_vectors) >
2951	    MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
2952	    order_base_2(num_comp_vectors) :
2953	    MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
2954	priv->params.num_tc = 1;
2955	priv->params.default_vlan_prio = 0;
2956	priv->counter_set_id = -1;
2957	priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
2958	mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
2959
2960	/*
2961	 * HW LRO is currently defaulted to off. When that changes, the
2962	 * HW capability "!!MLX5_CAP_ETH(mdev, lro_cap)" will be considered.
2963	 */
2964	priv->params.hw_lro_en = false;
2965	priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
2966
2967	priv->params.cqe_zipping_en = !!MLX5_CAP_GEN(mdev, cqe_compression);
2968
2969	priv->mdev = mdev;
2970	priv->params.num_channels = num_comp_vectors;
2971	priv->order_base_2_num_channels = order_base_2(num_comp_vectors);
2972	priv->queue_mapping_channel_mask =
2973	    roundup_pow_of_two(num_comp_vectors) - 1;
2974	priv->num_tc = priv->params.num_tc;
2975	priv->default_vlan_prio = priv->params.default_vlan_prio;
2976
2977	INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
2978	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
2979	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
2980}
2981
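/*
 * Create a memory key in physical address (PA) access mode with local
 * read and write permissions. Setting "length64" makes the mkey span
 * the whole address space, so in effect one key covers all DMA
 * mappings done under this protection domain.
 */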
2982static int
2983mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
2984		  struct mlx5_core_mr *mkey)
2985{
2986	struct ifnet *ifp = priv->ifp;
2987	struct mlx5_core_dev *mdev = priv->mdev;
2988	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
2989	void *mkc;
2990	u32 *in;
2991	int err;
2992
2993	in = mlx5_vzalloc(inlen);
2994	if (in == NULL) {
2995		if_printf(ifp, "%s: failed to allocate inbox\n", __func__);
2996		return (-ENOMEM);
2997	}
2998
2999	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
3000	MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA);
3001	MLX5_SET(mkc, mkc, lw, 1);
3002	MLX5_SET(mkc, mkc, lr, 1);
3003
3004	MLX5_SET(mkc, mkc, pd, pdn);
3005	MLX5_SET(mkc, mkc, length64, 1);
3006	MLX5_SET(mkc, mkc, qpn, 0xffffff);
3007
3008	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
3009	if (err)
3010		if_printf(ifp, "%s: mlx5_core_create_mkey failed, %d\n",
3011		    __func__, err);
3012
3013	kvfree(in);
3014	return (err);
3015}
3016
3017static const char *mlx5e_vport_stats_desc[] = {
3018	MLX5E_VPORT_STATS(MLX5E_STATS_DESC)
3019};
3020
3021static const char *mlx5e_pport_stats_desc[] = {
3022	MLX5E_PPORT_STATS(MLX5E_STATS_DESC)
3023};
3024
3025static void
3026mlx5e_priv_mtx_init(struct mlx5e_priv *priv)
3027{
3028	mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF);
3029	sx_init(&priv->state_lock, "mlx5state");
3030	callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0);
3031	MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
3032}
3033
3034static void
3035mlx5e_priv_mtx_destroy(struct mlx5e_priv *priv)
3036{
3037	mtx_destroy(&priv->async_events_mtx);
3038	sx_destroy(&priv->state_lock);
3039}
3040
3041static int
3042sysctl_firmware(SYSCTL_HANDLER_ARGS)
3043{
3044	/*
3045	 * The firmware version is formatted as "%d.%d.%d".
3046	 * fw_rev_{maj,min,sub} each return a u16; 2^16 - 1 = 65535,
3047	 * so at most 5 chars are needed to store each number.
3048	 * Adding the two "." separators and the terminating NUL means
3049	 * at most 18 (3*5 + 3) chars are needed.
3050	 */
3051	char fw[18];
3052	struct mlx5e_priv *priv = arg1;
3053	int error;
3054
3055	snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev),
3056	    fw_rev_sub(priv->mdev));
3057	error = sysctl_handle_string(oidp, fw, sizeof(fw), req);
3058	return (error);
3059}
3060
3061static void
3062mlx5e_disable_tx_dma(struct mlx5e_channel *ch)
3063{
3064	int i;
3065
3066	for (i = 0; i < ch->num_tc; i++)
3067		mlx5e_drain_sq(&ch->sq[i]);
3068}
3069
3070static void
3071mlx5e_reset_sq_doorbell_record(struct mlx5e_sq *sq)
3072{
3073
3074	sq->doorbell.d32[0] = cpu_to_be32(MLX5_OPCODE_NOP);
3075	sq->doorbell.d32[1] = cpu_to_be32(sq->sqn << 8);
3076	mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
3077	sq->doorbell.d64 = 0;
3078}
3079
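/*
 * Recover a stopped send queue by walking it through the firmware
 * state machine, ERR -> RST -> RDY, resetting the software producer
 * and consumer counters in between. The NOP doorbell written by
 * mlx5e_reset_sq_doorbell_record() above serves to bring the hardware
 * doorbell record back in sync before the queue is made ready again.
 */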
3080void
3081mlx5e_resume_sq(struct mlx5e_sq *sq)
3082{
3083	int err;
3084
3085	/* check if already enabled */
3086	if (sq->stopped == 0)
3087		return;
3088
3089	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_ERR,
3090	    MLX5_SQC_STATE_RST);
3091	if (err != 0) {
3092		if_printf(sq->ifp,
3093		    "mlx5e_modify_sq() from ERR to RST failed: %d\n", err);
3094	}
3095
3096	sq->cc = 0;
3097	sq->pc = 0;
3098
3099	/* reset doorbell prior to moving from RST to RDY */
3100	mlx5e_reset_sq_doorbell_record(sq);
3101
3102	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST,
3103	    MLX5_SQC_STATE_RDY);
3104	if (err != 0) {
3105		if_printf(sq->ifp,
3106		    "mlx5e_modify_sq() from RST to RDY failed: %d\n", err);
3107	}
3108
3109	mtx_lock(&sq->lock);
3110	sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
3111	sq->stopped = 0;
3112	mtx_unlock(&sq->lock);
3114}
3115
3116static void
3117mlx5e_enable_tx_dma(struct mlx5e_channel *ch)
3118{
3119	int i;
3120
3121	for (i = 0; i < ch->num_tc; i++)
3122		mlx5e_resume_sq(&ch->sq[i]);
3123}
3124
3125static void
3126mlx5e_disable_rx_dma(struct mlx5e_channel *ch)
3127{
3128	struct mlx5e_rq *rq = &ch->rq;
3129	int err;
3130
3131	mtx_lock(&rq->mtx);
3132	rq->enabled = 0;
3133	callout_stop(&rq->watchdog);
3134	mtx_unlock(&rq->mtx);
3135
3136	callout_drain(&rq->watchdog);
3137
3138	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
3139	if (err != 0) {
3140		if_printf(rq->ifp,
3141		    "mlx5e_modify_rq() from RDY to ERR failed: %d\n", err);
3142	}
3143
3144	while (!mlx5_wq_ll_is_empty(&rq->wq)) {
3145		msleep(1);
3146		rq->cq.mcq.comp(&rq->cq.mcq);
3147	}
3148
3149	/*
3150	 * Transitioning into the RST state allows the FW to track fewer
3151	 * ERR-state queues, thus reducing the receive queue flushing time.
3152	 */
3153	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_ERR, MLX5_RQC_STATE_RST);
3154	if (err != 0) {
3155		if_printf(rq->ifp,
3156		    "mlx5e_modify_rq() from ERR to RST failed: %d\n", err);
3157	}
3158}
3159
3160static void
3161mlx5e_enable_rx_dma(struct mlx5e_channel *ch)
3162{
3163	struct mlx5e_rq *rq = &ch->rq;
3164	int err;
3165
3166	rq->wq.wqe_ctr = 0;
3167	mlx5_wq_ll_update_db_record(&rq->wq);
3168	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
3169	if (err != 0) {
3170		if_printf(rq->ifp,
3171		    "mlx5e_modify_rq() from RST to RDY failed: %d\n", err);
3172	}
3173
3174	rq->enabled = 1;
3175
3176	rq->cq.mcq.comp(&rq->cq.mcq);
3177}
3178
3179void
3180mlx5e_modify_tx_dma(struct mlx5e_priv *priv, uint8_t value)
3181{
3182	int i;
3183
3184	if (priv->channel == NULL)
3185		return;
3186
3187	for (i = 0; i < priv->params.num_channels; i++) {
3189		if (!priv->channel[i])
3190			continue;
3191
3192		if (value)
3193			mlx5e_disable_tx_dma(priv->channel[i]);
3194		else
3195			mlx5e_enable_tx_dma(priv->channel[i]);
3196	}
3197}
3198
3199void
3200mlx5e_modify_rx_dma(struct mlx5e_priv *priv, uint8_t value)
3201{
3202	int i;
3203
3204	if (priv->channel == NULL)
3205		return;
3206
3207	for (i = 0; i < priv->params.num_channels; i++) {
3209		if (!priv->channel[i])
3210			continue;
3211
3212		if (value)
3213			mlx5e_disable_rx_dma(priv->channel[i]);
3214		else
3215			mlx5e_enable_rx_dma(priv->channel[i]);
3216	}
3217}
3218
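/*
 * Select the minimal TX inline mode: when the device reports that no
 * inlining is required but cannot insert VLAN tags by itself, fall
 * back to inlining the L2 header so that VLAN tagging keeps working.
 */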
3219u8
3220mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev)
3221{
3222	u8 min_inline_mode;
3223
3224	min_inline_mode = MLX5_INLINE_MODE_L2;
3225	mlx5_query_min_inline(mdev, &min_inline_mode);
3226	if (min_inline_mode == MLX5_INLINE_MODE_NONE &&
3227	    !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
3228		min_inline_mode = MLX5_INLINE_MODE_L2;
3229
3230	return (min_inline_mode);
3231}
3232
3233static void
3234mlx5e_add_hw_stats(struct mlx5e_priv *priv)
3235{
3236	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
3237	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, priv, 0,
3238	    sysctl_firmware, "A", "HCA firmware version");
3239
3240	SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
3241	    OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0,
3242	    "Board ID");
3243}
3244
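/*
 * The PFC sysctl handlers below each expose one bit of the 8-bit
 * per-priority flow control mask, with "arg2" selecting the priority.
 * For example, assuming interface unit 0, enabling TX PFC for
 * priority 3 would look like:
 *
 *	sysctl dev.mce.0.tx_priority_flow_control_3=1
 */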
3245static int
3246mlx5e_sysctl_tx_priority_flow_control(SYSCTL_HANDLER_ARGS)
3247{
3248	struct mlx5e_priv *priv = arg1;
3249	uint32_t tx_pfc;
3250	uint32_t value;
3251	int error;
3252
3253	PRIV_LOCK(priv);
3254
3255	tx_pfc = priv->params.tx_priority_flow_control;
3256
3257	/* get current value */
3258	value = (tx_pfc >> arg2) & 1;
3259
3260	error = sysctl_handle_32(oidp, &value, 0, req);
3261
3262	/* set or clear the PFC bit for the given priority */
3263	if (value != 0)
3264		priv->params.tx_priority_flow_control |= (1 << arg2);
3265	else
3266		priv->params.tx_priority_flow_control &= ~(1 << arg2);
3267
3268	/* check if update is required */
3269	if (error == 0 && priv->gone == 0 &&
3270	    tx_pfc != priv->params.tx_priority_flow_control) {
3271		error = -mlx5e_set_port_pfc(priv);
3272		/* restore previous value */
3273		if (error != 0)
3274			priv->params.tx_priority_flow_control = tx_pfc;
3275	}
3276	PRIV_UNLOCK(priv);
3277
3278	return (error);
3279}
3280
3281static int
3282mlx5e_sysctl_rx_priority_flow_control(SYSCTL_HANDLER_ARGS)
3283{
3284	struct mlx5e_priv *priv = arg1;
3285	uint32_t rx_pfc;
3286	uint32_t value;
3287	int error;
3288
3289	PRIV_LOCK(priv);
3290
3291	rx_pfc = priv->params.rx_priority_flow_control;
3292
3293	/* get current value */
3294	value = (rx_pfc >> arg2) & 1;
3295
3296	error = sysctl_handle_32(oidp, &value, 0, req);
3297
3298	/* set or clear the PFC bit for the given priority */
3299	if (value != 0)
3300		priv->params.rx_priority_flow_control |= (1 << arg2);
3301	else
3302		priv->params.rx_priority_flow_control &= ~(1 << arg2);
3303
3304	/* check if update is required */
3305	if (error == 0 && priv->gone == 0 &&
3306	    rx_pfc != priv->params.rx_priority_flow_control) {
3307		error = -mlx5e_set_port_pfc(priv);
3308		/* restore previous value */
3309		if (error != 0)
3310			priv->params.rx_priority_flow_control = rx_pfc;
3311	}
3312	PRIV_UNLOCK(priv);
3313
3314	return (error);
3315}
3316
3317static void
3318mlx5e_setup_pauseframes(struct mlx5e_priv *priv)
3319{
3320	unsigned int x;
3321	char path[96];
3322	int error;
#if (__FreeBSD_version < 1100000)
	int value;
#endif
3323
3324	/* enable pauseframes by default */
3325	priv->params.tx_pauseframe_control = 1;
3326	priv->params.rx_pauseframe_control = 1;
3327
3328	/* disable priority flow control, PFC, by default */
3329	priv->params.tx_priority_flow_control = 0;
3330	priv->params.rx_priority_flow_control = 0;
3331
3332#if (__FreeBSD_version < 1100000)
3333	/* compute path for sysctl */
3334	snprintf(path, sizeof(path), "dev.mce.%d.tx_pauseframe_control",
3335	    device_get_unit(priv->mdev->pdev->dev.bsddev));
3336
3337	/* try to fetch tunable, if any */
3338	TUNABLE_INT_FETCH(path, &priv->params.tx_pauseframe_control);
3339
3340	/* compute path for sysctl */
3341	snprintf(path, sizeof(path), "dev.mce.%d.rx_pauseframe_control",
3342	    device_get_unit(priv->mdev->pdev->dev.bsddev));
3343
3344	/* try to fetch tunable, if any */
3345	TUNABLE_INT_FETCH(path, &priv->params.rx_pauseframe_control);
3346
3347	for (x = 0; x != 8; x++) {
3349		/* compute path for sysctl */
3350		snprintf(path, sizeof(path), "dev.mce.%d.tx_priority_flow_control_%u",
3351		    device_get_unit(priv->mdev->pdev->dev.bsddev), x);
3352
3353		/* try to fetch tunable, if any */
3354		if (TUNABLE_INT_FETCH(path, &value) != 0 && value != 0)
3355			priv->params.tx_priority_flow_control |= 1 << x;
3356
3357		/* compute path for sysctl */
3358		snprintf(path, sizeof(path), "dev.mce.%d.rx_priority_flow_control_%u",
3359		    device_get_unit(priv->mdev->pdev->dev.bsddev), x);
3360
3361		/* try to fetch tunable, if any */
3362		if (TUNABLE_INT_FETCH(path, &value) != 0 && value != 0)
3363			priv->params.rx_priority_flow_control |= 1 << x;
3364	}
3365#endif
3366
3367	/* register pauseframe SYSCTLs */
3368	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3369	    OID_AUTO, "tx_pauseframe_control", CTLFLAG_RDTUN,
3370	    &priv->params.tx_pauseframe_control, 0,
3371	    "Set to enable TX pause frames. Clear to disable.");
3372
3373	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3374	    OID_AUTO, "rx_pauseframe_control", CTLFLAG_RDTUN,
3375	    &priv->params.rx_pauseframe_control, 0,
3376	    "Set to enable RX pause frames. Clear to disable.");
3377
3378	/* register priority_flow control, PFC, SYSCTLs */
3379	for (x = 0; x != 8; x++) {
3380		snprintf(path, sizeof(path), "tx_priority_flow_control_%u", x);
3381
3382		SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3383		    OID_AUTO, path, CTLTYPE_UINT | CTLFLAG_RWTUN |
3384		    CTLFLAG_MPSAFE, priv, x, &mlx5e_sysctl_tx_priority_flow_control, "IU",
3385		    "Set to enable TX ports flow control frames for given priority. Clear to disable.");
3386
3387		snprintf(path, sizeof(path), "rx_priority_flow_control_%u", x);
3388
3389		SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3390		    OID_AUTO, path, CTLTYPE_UINT | CTLFLAG_RWTUN |
3391		    CTLFLAG_MPSAFE, priv, x, &mlx5e_sysctl_rx_priority_flow_control, "IU",
3392		    "Set to enable RX ports flow control frames for given priority. Clear to disable.");
3393	}
3394
3395	PRIV_LOCK(priv);
3396
3397	/* range check */
3398	priv->params.tx_pauseframe_control =
3399	    priv->params.tx_pauseframe_control ? 1 : 0;
3400	priv->params.rx_pauseframe_control =
3401	    priv->params.rx_pauseframe_control ? 1 : 0;
3402
3403	/* update firmware */
3404	error = mlx5e_set_port_pause_and_pfc(priv);
3405	if (error == -EINVAL) {
3406		if_printf(priv->ifp,
3407		    "Global pauseframes must be disabled before enabling PFC.\n");
3408		priv->params.rx_priority_flow_control = 0;
3409		priv->params.tx_priority_flow_control = 0;
3410
3411		/* update firmware */
3412		(void) mlx5e_set_port_pause_and_pfc(priv);
3413	}
3414	PRIV_UNLOCK(priv);
3415}
3416
3417static void *
3418mlx5e_create_ifp(struct mlx5_core_dev *mdev)
3419{
3420	struct ifnet *ifp;
3421	struct mlx5e_priv *priv;
3422	u8 dev_addr[ETHER_ADDR_LEN] __aligned(4);
3423	struct sysctl_oid_list *child;
3424	int ncv = mdev->priv.eq_table.num_comp_vectors;
3425	char unit[16];
3426	int err;
3427	int i;
3428	u32 eth_proto_cap;
3429
3430	if (mlx5e_check_required_hca_cap(mdev)) {
3431		mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n");
3432		return (NULL);
3433	}
3434	priv = malloc(sizeof(*priv), M_MLX5EN, M_WAITOK | M_ZERO);
3435	mlx5e_priv_mtx_init(priv);
3436
3437	ifp = priv->ifp = if_alloc(IFT_ETHER);
3438	if (ifp == NULL) {
3439		mlx5_core_err(mdev, "if_alloc() failed\n");
3440		goto err_free_priv;
3441	}
3442	ifp->if_softc = priv;
3443	if_initname(ifp, "mce", device_get_unit(mdev->pdev->dev.bsddev));
3444	ifp->if_mtu = ETHERMTU;
3445	ifp->if_init = mlx5e_open;
3446	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3447	ifp->if_ioctl = mlx5e_ioctl;
3448	ifp->if_transmit = mlx5e_xmit;
3449	ifp->if_qflush = if_qflush;
3450#if (__FreeBSD_version >= 1100000)
3451	ifp->if_get_counter = mlx5e_get_counter;
3452#endif
3453	ifp->if_snd.ifq_maxlen = ifqmaxlen;
3454	/*
3455	 * Set driver features
3456	 */
3457	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
3458	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
3459	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
3460	ifp->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
3461	ifp->if_capabilities |= IFCAP_LRO;
3462	ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO;
3463	ifp->if_capabilities |= IFCAP_HWSTATS;
3464
3465	/* set TSO limits so that we don't have to drop TX packets */
3466	ifp->if_hw_tsomax = MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
3467	ifp->if_hw_tsomaxsegcount = MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */;
3468	ifp->if_hw_tsomaxsegsize = MLX5E_MAX_TX_MBUF_SIZE;
3469
3470	ifp->if_capenable = ifp->if_capabilities;
3471	ifp->if_hwassist = 0;
3472	if (ifp->if_capenable & IFCAP_TSO)
3473		ifp->if_hwassist |= CSUM_TSO;
3474	if (ifp->if_capenable & IFCAP_TXCSUM)
3475		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
3476	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
3477		ifp->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
3478
3479	sysctl_ctx_init(&priv->sysctl_ctx_channel_debug);
3480
3481	/* ifnet sysctl tree */
3482	sysctl_ctx_init(&priv->sysctl_ctx);
3483	priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev),
3484	    OID_AUTO, ifp->if_dname, CTLFLAG_RD, 0, "MLX5 ethernet - interface name");
3485	if (priv->sysctl_ifnet == NULL) {
3486		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
3487		goto err_free_sysctl;
3488	}
3489	snprintf(unit, sizeof(unit), "%d", ifp->if_dunit);
3490	priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3491	    OID_AUTO, unit, CTLFLAG_RD, 0, "MLX5 ethernet - interface unit");
3492	if (priv->sysctl_ifnet == NULL) {
3493		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
3494		goto err_free_sysctl;
3495	}
3496
3497	/* HW sysctl tree */
3498	child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev));
3499	priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child,
3500	    OID_AUTO, "hw", CTLFLAG_RD, 0, "MLX5 ethernet dev hw");
3501	if (priv->sysctl_hw == NULL) {
3502		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
3503		goto err_free_sysctl;
3504	}
3505	mlx5e_build_ifp_priv(mdev, priv, ncv);
3506
3507	snprintf(unit, sizeof(unit), "mce%u_wq",
3508	    device_get_unit(mdev->pdev->dev.bsddev));
3509	priv->wq = alloc_workqueue(unit, 0, 1);
3510	if (priv->wq == NULL) {
3511		if_printf(ifp, "%s: alloc_workqueue failed\n", __func__);
3512		goto err_free_sysctl;
3513	}
3514
3515	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
3516	if (err) {
3517		if_printf(ifp, "%s: mlx5_alloc_map_uar failed, %d\n",
3518		    __func__, err);
3519		goto err_free_wq;
3520	}
3521	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
3522	if (err) {
3523		if_printf(ifp, "%s: mlx5_core_alloc_pd failed, %d\n",
3524		    __func__, err);
3525		goto err_unmap_free_uar;
3526	}
3527	err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
3528	if (err) {
3529		if_printf(ifp, "%s: mlx5_alloc_transport_domain failed, %d\n",
3530		    __func__, err);
3531		goto err_dealloc_pd;
3532	}
3533	err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
3534	if (err) {
3535		if_printf(ifp, "%s: mlx5e_create_mkey failed, %d\n",
3536		    __func__, err);
3537		goto err_dealloc_transport_domain;
3538	}
3539	mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr);
3540
3541	/* check if we should generate a random MAC address */
3542	if (MLX5_CAP_GEN(priv->mdev, vport_group_manager) == 0 &&
3543	    is_zero_ether_addr(dev_addr)) {
3544		random_ether_addr(dev_addr);
3545		if_printf(ifp, "Assigned random MAC address\n");
3546	}
3547
3548	/* set default MTU */
3549	mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu);
3550
3551	/* Set desc */
3552	device_set_desc(mdev->pdev->dev.bsddev, mlx5e_version);
3553
3554	/* Set default media status */
3555	priv->media_status_last = IFM_AVALID;
3556	priv->media_active_last = IFM_ETHER | IFM_AUTO |
3557	    IFM_ETH_RXPAUSE | IFM_FDX;
3558
3559	/* setup default pauseframes configuration */
3560	mlx5e_setup_pauseframes(priv);
3561
3562	err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
3563	if (err) {
3564		eth_proto_cap = 0;
3565		if_printf(ifp, "%s: Query port media capability failed, %d\n",
3566		    __func__, err);
3567	}
3568
3569	/* Setup supported medias */
3570	ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
3571	    mlx5e_media_change, mlx5e_media_status);
3572
3573	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
3574		if (mlx5e_mode_table[i].baudrate == 0)
3575			continue;
3576		if (MLX5E_PROT_MASK(i) & eth_proto_cap) {
3577			ifmedia_add(&priv->media,
3578			    mlx5e_mode_table[i].subtype |
3579			    IFM_ETHER, 0, NULL);
3580			ifmedia_add(&priv->media,
3581			    mlx5e_mode_table[i].subtype |
3582			    IFM_ETHER | IFM_FDX |
3583			    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
3584		}
3585	}
3586
3587	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
3588	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
3589	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
3590
3591	/* Set autoselect by default */
3592	ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
3593	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
3594	ether_ifattach(ifp, dev_addr);
3595
3596	/* Register for VLAN events */
3597	priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
3598	    mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
3599	priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
3600	    mlx5e_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);
3601
3602	/* Link is down by default */
3603	if_link_state_change(ifp, LINK_STATE_DOWN);
3604
3605	mlx5e_enable_async_events(priv);
3606
3607	mlx5e_add_hw_stats(priv);
3608
3609	mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3610	    "vstats", mlx5e_vport_stats_desc, MLX5E_VPORT_STATS_NUM,
3611	    priv->stats.vport.arg);
3612
3613	mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3614	    "pstats", mlx5e_pport_stats_desc, MLX5E_PPORT_STATS_NUM,
3615	    priv->stats.pport.arg);
3616
3617	mlx5e_create_ethtool(priv);
3618
3619	mtx_lock(&priv->async_events_mtx);
3620	mlx5e_update_stats(priv);
3621	mtx_unlock(&priv->async_events_mtx);
3622
3623	return (priv);
3624
3625err_dealloc_transport_domain:
3626	mlx5_dealloc_transport_domain(mdev, priv->tdn);
3627
3628err_dealloc_pd:
3629	mlx5_core_dealloc_pd(mdev, priv->pdn);
3630
3631err_unmap_free_uar:
3632	mlx5_unmap_free_uar(mdev, &priv->cq_uar);
3633
3634err_free_wq:
3635	destroy_workqueue(priv->wq);
3636
3637err_free_sysctl:
3638	sysctl_ctx_free(&priv->sysctl_ctx);
3639	sysctl_ctx_free(&priv->sysctl_ctx_channel_debug);
3640
3641	if_free(ifp);
3642
3643err_free_priv:
3644	mlx5e_priv_mtx_destroy(priv);
3645	free(priv, M_MLX5EN);
3646	return (NULL);
3647}
3648
3649static void
3650mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
3651{
3652	struct mlx5e_priv *priv = vpriv;
3653	struct ifnet *ifp = priv->ifp;
3654
3655	/* don't allow more IOCTLs */
3656	priv->gone = 1;
3657
3658	/*
3659	 * Clear the device description to avoid use after free,
3660	 * because the bsddev is not destroyed when this module is
3661	 * unloaded:
3662	 */
3663	device_set_desc(mdev->pdev->dev.bsddev, NULL);
3664
3665	/* XXX wait a bit to allow IOCTL handlers to complete */
3666	pause("W", hz);
3667
3668	/* stop watchdog timer */
3669	callout_drain(&priv->watchdog);
3670
3671	if (priv->vlan_attach != NULL)
3672		EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
3673	if (priv->vlan_detach != NULL)
3674		EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);
3675
3676	/* make sure device gets closed */
3677	PRIV_LOCK(priv);
3678	mlx5e_close_locked(ifp);
3679	PRIV_UNLOCK(priv);
3680
3681	/* unregister device */
3682	ifmedia_removeall(&priv->media);
3683	ether_ifdetach(ifp);
3684	if_free(ifp);
3685
3686	/* destroy all remaining sysctl nodes */
3687	if (priv->sysctl_debug) {
3688		sysctl_ctx_free(&priv->sysctl_ctx_channel_debug);
3689		sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
3690	}
3691	sysctl_ctx_free(&priv->stats.vport.ctx);
3692	sysctl_ctx_free(&priv->stats.pport.ctx);
3693	sysctl_ctx_free(&priv->sysctl_ctx);
3694
3695	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
3696	mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
3697	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
3698	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
3699	mlx5e_disable_async_events(priv);
3700	destroy_workqueue(priv->wq);
3701	mlx5e_priv_mtx_destroy(priv);
3702	free(priv, M_MLX5EN);
3703}
3704
3705static void *
3706mlx5e_get_ifp(void *vpriv)
3707{
3708	struct mlx5e_priv *priv = vpriv;
3709
3710	return (priv->ifp);
3711}
3712
3713static struct mlx5_interface mlx5e_interface = {
3714	.add = mlx5e_create_ifp,
3715	.remove = mlx5e_destroy_ifp,
3716	.event = mlx5e_async_event,
3717	.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
3718	.get_dev = mlx5e_get_ifp,
3719};
3720
3721void
3722mlx5e_init(void)
3723{
3724	mlx5_register_interface(&mlx5e_interface);
3725}
3726
3727void
3728mlx5e_cleanup(void)
3729{
3730	mlx5_unregister_interface(&mlx5e_interface);
3731}
3732
3733module_init_order(mlx5e_init, SI_ORDER_THIRD);
3734module_exit_order(mlx5e_cleanup, SI_ORDER_THIRD);
3735
3736#if (__FreeBSD_version >= 1100000)
3737MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1);
3738#endif
3739MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1);
3740MODULE_VERSION(mlx5en, 1);
3741