mlx5_en_main.c revision 347805
/*-
 * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_en/mlx5_en_main.c 347805 2019-05-16 17:17:12Z hselasky $
 */

#include "en.h"

#include <sys/sockio.h>
#include <machine/atomic.h>

#ifndef ETH_DRIVER_VERSION
#define	ETH_DRIVER_VERSION	"3.5.0"
#endif
#define DRIVER_RELDATE	"November 2018"

static const char mlx5e_version[] = "mlx5en: Mellanox Ethernet driver "
	ETH_DRIVER_VERSION " (" DRIVER_RELDATE ")\n";

static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs);

struct mlx5e_channel_param {
	struct mlx5e_rq_param rq;
	struct mlx5e_sq_param sq;
	struct mlx5e_cq_param rx_cq;
	struct mlx5e_cq_param tx_cq;
};

static const struct {
	u32	subtype;
	u64	baudrate;
}	mlx5e_mode_table[MLX5E_LINK_MODES_NUMBER] = {

	[MLX5E_1000BASE_CX_SGMII] = {
		.subtype = IFM_1000_CX_SGMII,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_1000BASE_KX] = {
		.subtype = IFM_1000_KX,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_10GBASE_CX4] = {
		.subtype = IFM_10G_CX4,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_KX4] = {
		.subtype = IFM_10G_KX4,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_KR] = {
		.subtype = IFM_10G_KR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_20GBASE_KR2] = {
		.subtype = IFM_20G_KR2,
		.baudrate = IF_Gbps(20ULL),
	},
	[MLX5E_40GBASE_CR4] = {
		.subtype = IFM_40G_CR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_KR4] = {
		.subtype = IFM_40G_KR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_56GBASE_R4] = {
		.subtype = IFM_56G_R4,
		.baudrate = IF_Gbps(56ULL),
	},
	[MLX5E_10GBASE_CR] = {
		.subtype = IFM_10G_CR1,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_SR] = {
		.subtype = IFM_10G_SR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_ER] = {
		.subtype = IFM_10G_ER,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_40GBASE_SR4] = {
		.subtype = IFM_40G_SR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_LR4] = {
		.subtype = IFM_40G_LR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_100GBASE_CR4] = {
		.subtype = IFM_100G_CR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_SR4] = {
		.subtype = IFM_100G_SR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_KR4] = {
		.subtype = IFM_100G_KR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_LR4] = {
		.subtype = IFM_100G_LR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100BASE_TX] = {
		.subtype = IFM_100_TX,
		.baudrate = IF_Mbps(100ULL),
	},
	[MLX5E_1000BASE_T] = {
		.subtype = IFM_1000_T,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_10GBASE_T] = {
		.subtype = IFM_10G_T,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_25GBASE_CR] = {
		.subtype = IFM_25G_CR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GBASE_KR] = {
		.subtype = IFM_25G_KR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GBASE_SR] = {
		.subtype = IFM_25G_SR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_50GBASE_CR2] = {
		.subtype = IFM_50G_CR2,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GBASE_KR2] = {
		.subtype = IFM_50G_KR2,
		.baudrate = IF_Gbps(50ULL),
	},
};

MALLOC_DEFINE(M_MLX5EN, "MLX5EN", "MLX5 Ethernet");

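/*
 * Resolve the current link state and media type. The vport state and
 * the PTYS register are queried to derive the interface baudrate and
 * media subtype, which are then reported to the network stack. The
 * 10GBASE-ER and 40GBASE-LR4 table entries share PTYS bits with
 * their LR/ER siblings and are told apart using the cable range
 * information from the PDDR register.
 */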
static void
mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
	u32 eth_proto_oper;
	int error;
	u8 port_state;
	u8 is_er_type;
	u8 i;

	port_state = mlx5_query_vport_state(mdev,
	    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

	if (port_state == VPORT_STATE_UP) {
		priv->media_status_last |= IFM_ACTIVE;
	} else {
		priv->media_status_last &= ~IFM_ACTIVE;
		priv->media_active_last = IFM_ETHER;
		if_link_state_change(priv->ifp, LINK_STATE_DOWN);
		return;
	}

	error = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
	if (error) {
		priv->media_active_last = IFM_ETHER;
		priv->ifp->if_baudrate = 1;
		if_printf(priv->ifp, "%s: query port ptys failed: 0x%x\n",
		    __func__, error);
		return;
	}
	eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);

	for (i = 0; i != MLX5E_LINK_MODES_NUMBER; i++) {
		if (mlx5e_mode_table[i].baudrate == 0)
			continue;
		if (MLX5E_PROT_MASK(i) & eth_proto_oper) {
			u32 subtype = mlx5e_mode_table[i].subtype;

			priv->ifp->if_baudrate =
			    mlx5e_mode_table[i].baudrate;

			switch (subtype) {
			case IFM_10G_ER:
				error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type);
				if (error != 0) {
					if_printf(priv->ifp, "%s: query port pddr failed: %d\n",
					    __func__, error);
				}
				if (error != 0 || is_er_type == 0)
					subtype = IFM_10G_LR;
				break;
			case IFM_40G_LR4:
				error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type);
				if (error != 0) {
					if_printf(priv->ifp, "%s: query port pddr failed: %d\n",
					    __func__, error);
				}
				if (error == 0 && is_er_type != 0)
					subtype = IFM_40G_ER4;
				break;
			}
			priv->media_active_last = subtype | IFM_ETHER | IFM_FDX;
			break;
		}
	}
	if_link_state_change(priv->ifp, LINK_STATE_UP);
}

static void
mlx5e_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
{
	struct mlx5e_priv *priv = dev->if_softc;

	ifmr->ifm_status = priv->media_status_last;
	ifmr->ifm_active = priv->media_active_last |
	    (priv->params.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) |
	    (priv->params.tx_pauseframe_control ? IFM_ETH_TXPAUSE : 0);
}

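/*
 * Convert an ifmedia subtype into a PTYS protocol bitmask by
 * scanning the mode table. IFM_10G_LR and IFM_40G_ER4 have no table
 * entries of their own and are remapped onto the entries of their
 * ER/LR siblings, mirroring the mapping in mlx5e_update_carrier()
 * above.
 */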
static u32
mlx5e_find_link_mode(u32 subtype)
{
	u32 i;
	u32 link_mode = 0;

	switch (subtype) {
	case IFM_10G_LR:
		subtype = IFM_10G_ER;
		break;
	case IFM_40G_ER4:
		subtype = IFM_40G_LR4;
		break;
	}

	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
		if (mlx5e_mode_table[i].baudrate == 0)
			continue;
		if (mlx5e_mode_table[i].subtype == subtype)
			link_mode |= MLX5E_PROT_MASK(i);
	}

	return (link_mode);
}

static int
mlx5e_set_port_pause_and_pfc(struct mlx5e_priv *priv)
{
	return (mlx5_set_port_pause_and_pfc(priv->mdev, 1,
	    priv->params.rx_pauseframe_control,
	    priv->params.tx_pauseframe_control,
	    priv->params.rx_priority_flow_control,
	    priv->params.tx_priority_flow_control));
}

static int
mlx5e_set_port_pfc(struct mlx5e_priv *priv)
{
	int error;

	if (priv->params.rx_pauseframe_control ||
	    priv->params.tx_pauseframe_control) {
		if_printf(priv->ifp,
		    "Global pauseframes must be disabled before enabling PFC.\n");
		error = -EINVAL;
	} else {
		error = mlx5e_set_port_pause_and_pfc(priv);
	}
	return (error);
}

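/*
 * Handler for media change requests. The requested media is
 * validated against the port capabilities, the pauseframe settings
 * are updated and the port is reprogrammed, briefly taking the link
 * down in the process.
 */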
static int
mlx5e_media_change(struct ifnet *dev)
{
	struct mlx5e_priv *priv = dev->if_softc;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 eth_proto_cap;
	u32 link_mode;
	int was_opened;
	int locked;
	int error;

	locked = PRIV_LOCKED(priv);
	if (!locked)
		PRIV_LOCK(priv);

	if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) {
		error = EINVAL;
		goto done;
	}
	link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media));

	/* query supported capabilities */
	error = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
	if (error != 0) {
		if_printf(dev, "Query port media capability failed\n");
		goto done;
	}
	/* check for autoselect */
	if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO) {
		link_mode = eth_proto_cap;
		if (link_mode == 0) {
			if_printf(dev, "Port media capability is zero\n");
			error = EINVAL;
			goto done;
		}
	} else {
		link_mode = link_mode & eth_proto_cap;
		if (link_mode == 0) {
			if_printf(dev, "Not supported link mode requested\n");
			error = EINVAL;
			goto done;
		}
	}
	if (priv->media.ifm_media & (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
		/* check if PFC is enabled */
		if (priv->params.rx_priority_flow_control ||
		    priv->params.tx_priority_flow_control) {
			if_printf(dev, "PFC must be disabled before enabling global pauseframes.\n");
			error = EINVAL;
			goto done;
		}
	}
	/* update pauseframe control bits */
	priv->params.rx_pauseframe_control =
	    (priv->media.ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0;
	priv->params.tx_pauseframe_control =
	    (priv->media.ifm_media & IFM_ETH_TXPAUSE) ? 1 : 0;

	/* check if device is opened */
	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);

	/* reconfigure the hardware */
	mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
	mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN);
	error = -mlx5e_set_port_pause_and_pfc(priv);
	if (was_opened)
		mlx5_set_port_status(mdev, MLX5_PORT_UP);

done:
	if (!locked)
		PRIV_UNLOCK(priv);
	return (error);
}

static void
mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
	    update_carrier_work);

	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_update_carrier(priv);
	PRIV_UNLOCK(priv);
}

/*
 * This function reads the physical port counters from the firmware
 * using a pre-defined layout defined by various MLX5E_PPORT_XXX()
 * macros. The output is converted from big-endian 64-bit values into
 * host endian ones and stored in the "priv->stats.pport" structure.
 */
static void
mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_pport_stats *s = &priv->stats.pport;
	struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
	u32 *in;
	u32 *out;
	const u64 *ptr;
	unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	unsigned x;
	unsigned y;
	unsigned z;

	/* allocate firmware request structures */
	in = mlx5_vzalloc(sz);
	out = mlx5_vzalloc(sz);
	if (in == NULL || out == NULL)
		goto free_out;

	/*
	 * Get pointer to the 64-bit counter set which is located at a
	 * fixed offset in the output firmware request structure:
	 */
	ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set);

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	/* read IEEE802_3 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0, y = MLX5E_PPORT_PER_PRIO_STATS_NUM;
	     x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++)
		s->arg[y] = be64toh(ptr[x]);

	/* read RFC2819 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++)
		s->arg[y] = be64toh(ptr[x]);
	for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM +
	    MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read RFC2863 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read physical layer stats counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read Extended Ethernet counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_ETHERNET_EXTENDED_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read per-priority counters */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);

	/* iterate all the priorities */
	for (y = z = 0; z != MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO; z++) {
		MLX5_SET(ppcnt_reg, in, prio_tc, z);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

		/* read per priority stats counter group using predefined counter layout */
		for (x = 0; x != (MLX5E_PPORT_PER_PRIO_STATS_NUM /
		    MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO); x++, y++)
			s->arg[y] = be64toh(ptr[x]);
	}

free_out:
	/* free firmware request structures */
	kvfree(in);
	kvfree(out);
}

/*
 * This function is called regularly to collect all statistics
 * counters from the firmware. The values can be viewed through the
 * sysctl interface. Execution is serialized using the priv's global
 * configuration lock.
 */
static void
mlx5e_update_stats_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
	    update_stats_work);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_vport_stats *s = &priv->stats.vport;
	struct mlx5e_sq_stats *sq_stats;
	struct buf_ring *sq_br;
#if (__FreeBSD_version < 1100000)
	struct ifnet *ifp = priv->ifp;
#endif

	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u64 tso_packets = 0;
	u64 tso_bytes = 0;
	u64 tx_queue_dropped = 0;
	u64 tx_defragged = 0;
	u64 tx_offload_none = 0;
	u64 lro_packets = 0;
	u64 lro_bytes = 0;
	u64 sw_lro_queued = 0;
	u64 sw_lro_flushed = 0;
	u64 rx_csum_none = 0;
	u64 rx_wqe_err = 0;
	u32 rx_out_of_buffer = 0;
	int i;
	int j;

	PRIV_LOCK(priv);
	out = mlx5_vzalloc(outlen);
	if (out == NULL)
		goto free_out;
	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
		goto free_out;

	/* Collect the SW counters first and then the HW counters for consistency */
	for (i = 0; i < priv->params.num_channels; i++) {
		struct mlx5e_channel *pch = priv->channel + i;
		struct mlx5e_rq *rq = &pch->rq;
		struct mlx5e_rq_stats *rq_stats = &pch->rq.stats;

		/* collect stats from LRO */
		rq_stats->sw_lro_queued = rq->lro.lro_queued;
		rq_stats->sw_lro_flushed = rq->lro.lro_flushed;
		sw_lro_queued += rq_stats->sw_lro_queued;
		sw_lro_flushed += rq_stats->sw_lro_flushed;
		lro_packets += rq_stats->lro_packets;
		lro_bytes += rq_stats->lro_bytes;
		rx_csum_none += rq_stats->csum_none;
		rx_wqe_err += rq_stats->wqe_err;

		for (j = 0; j < priv->num_tc; j++) {
			sq_stats = &pch->sq[j].stats;
			sq_br = pch->sq[j].br;

			tso_packets += sq_stats->tso_packets;
			tso_bytes += sq_stats->tso_bytes;
			tx_queue_dropped += sq_stats->dropped;
			if (sq_br != NULL)
				tx_queue_dropped += sq_br->br_drops;
			tx_defragged += sq_stats->defragged;
			tx_offload_none += sq_stats->csum_offload_none;
		}
	}

	/* update counters */
	s->tso_packets = tso_packets;
	s->tso_bytes = tso_bytes;
	s->tx_queue_dropped = tx_queue_dropped;
	s->tx_defragged = tx_defragged;
	s->lro_packets = lro_packets;
	s->lro_bytes = lro_bytes;
	s->sw_lro_queued = sw_lro_queued;
	s->sw_lro_flushed = sw_lro_flushed;
	s->rx_csum_none = rx_csum_none;
	s->rx_wqe_err = rx_wqe_err;

	/* HW counters */
	memset(in, 0, sizeof(in));

	MLX5_SET(query_vport_counter_in, in, opcode,
	    MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	memset(out, 0, outlen);

	/* get number of out-of-buffer drops first */
	if (mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id,
	    &rx_out_of_buffer))
		goto free_out;

	/* accumulate difference into a 64-bit counter */
	s->rx_out_of_buffer += (u64)(u32)(rx_out_of_buffer - s->rx_out_of_buffer_prev);
	s->rx_out_of_buffer_prev = rx_out_of_buffer;

	/* get port statistics */
	if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
		goto free_out;

#define	MLX5_GET_CTR(out, x) \
	MLX5_GET64(query_vport_counter_out, out, x)

	s->rx_error_packets =
	    MLX5_GET_CTR(out, received_errors.packets);
	s->rx_error_bytes =
	    MLX5_GET_CTR(out, received_errors.octets);
	s->tx_error_packets =
	    MLX5_GET_CTR(out, transmit_errors.packets);
	s->tx_error_bytes =
	    MLX5_GET_CTR(out, transmit_errors.octets);

	s->rx_unicast_packets =
	    MLX5_GET_CTR(out, received_eth_unicast.packets);
	s->rx_unicast_bytes =
	    MLX5_GET_CTR(out, received_eth_unicast.octets);
	s->tx_unicast_packets =
	    MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
	s->tx_unicast_bytes =
	    MLX5_GET_CTR(out, transmitted_eth_unicast.octets);

	s->rx_multicast_packets =
	    MLX5_GET_CTR(out, received_eth_multicast.packets);
	s->rx_multicast_bytes =
	    MLX5_GET_CTR(out, received_eth_multicast.octets);
	s->tx_multicast_packets =
	    MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
	s->tx_multicast_bytes =
	    MLX5_GET_CTR(out, transmitted_eth_multicast.octets);

	s->rx_broadcast_packets =
	    MLX5_GET_CTR(out, received_eth_broadcast.packets);
	s->rx_broadcast_bytes =
	    MLX5_GET_CTR(out, received_eth_broadcast.octets);
	s->tx_broadcast_packets =
	    MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
	s->tx_broadcast_bytes =
	    MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	s->rx_packets =
	    s->rx_unicast_packets +
	    s->rx_multicast_packets +
	    s->rx_broadcast_packets -
	    s->rx_out_of_buffer;
	s->rx_bytes =
	    s->rx_unicast_bytes +
	    s->rx_multicast_bytes +
	    s->rx_broadcast_bytes;
	s->tx_packets =
	    s->tx_unicast_packets +
	    s->tx_multicast_packets +
	    s->tx_broadcast_packets;
	s->tx_bytes =
	    s->tx_unicast_bytes +
	    s->tx_multicast_bytes +
	    s->tx_broadcast_bytes;

	/* Update calculated offload counters */
	s->tx_csum_offload = s->tx_packets - tx_offload_none;
	s->rx_csum_good = s->rx_packets - s->rx_csum_none;

	/* Get physical port counters */
	mlx5e_update_pport_counters(priv);

	s->tx_jumbo_packets =
	    priv->stats.port_stats_debug.tx_stat_p1519to2047octets +
	    priv->stats.port_stats_debug.tx_stat_p2048to4095octets +
	    priv->stats.port_stats_debug.tx_stat_p4096to8191octets +
	    priv->stats.port_stats_debug.tx_stat_p8192to10239octets;

#if (__FreeBSD_version < 1100000)
	/* no get_counters interface in fbsd 10 */
	ifp->if_ipackets = s->rx_packets;
	ifp->if_ierrors = s->rx_error_packets +
	    priv->stats.pport.alignment_err +
	    priv->stats.pport.check_seq_err +
	    priv->stats.pport.crc_align_errors +
	    priv->stats.pport.in_range_len_errors +
	    priv->stats.pport.jabbers +
	    priv->stats.pport.out_of_range_len +
	    priv->stats.pport.oversize_pkts +
	    priv->stats.pport.symbol_err +
	    priv->stats.pport.too_long_errors +
	    priv->stats.pport.undersize_pkts +
	    priv->stats.pport.unsupported_op_rx;
	ifp->if_iqdrops = s->rx_out_of_buffer +
	    priv->stats.pport.drop_events;
	ifp->if_opackets = s->tx_packets;
	ifp->if_oerrors = s->tx_error_packets;
	ifp->if_snd.ifq_drops = s->tx_queue_dropped;
	ifp->if_ibytes = s->rx_bytes;
	ifp->if_obytes = s->tx_bytes;
	ifp->if_collisions =
	    priv->stats.pport.collisions;
#endif

free_out:
	kvfree(out);

	/* Update diagnostics, if any */
	if (priv->params_ethtool.diag_pci_enable ||
	    priv->params_ethtool.diag_general_enable) {
		int error = mlx5_core_get_diagnostics_full(mdev,
		    priv->params_ethtool.diag_pci_enable ? &priv->params_pci : NULL,
		    priv->params_ethtool.diag_general_enable ? &priv->params_general : NULL);
		if (error != 0)
			if_printf(priv->ifp, "Failed reading diagnostics: %d\n", error);
	}
	PRIV_UNLOCK(priv);
}

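/*
 * Watchdog callout handler which queues the statistics update work
 * and re-arms itself once per second.
 */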
static void
mlx5e_update_stats(void *arg)
{
	struct mlx5e_priv *priv = arg;

	queue_work(priv->wq, &priv->update_stats_work);

	callout_reset(&priv->watchdog, hz, &mlx5e_update_stats, priv);
}

static void
mlx5e_async_event_sub(struct mlx5e_priv *priv,
    enum mlx5_dev_event event)
{
	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;

	default:
		break;
	}
}

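/*
 * Firmware event callback. Dispatching is serialized by the async
 * events mutex so that mlx5e_disable_async_events() can shut off
 * delivery synchronously.
 */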
static void
mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
    enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	mtx_lock(&priv->async_events_mtx);
	if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
		mlx5e_async_event_sub(priv, event);
	mtx_unlock(&priv->async_events_mtx);
}

static void
mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}

static void
mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	mtx_lock(&priv->async_events_mtx);
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
	mtx_unlock(&priv->async_events_mtx);
}

static const char *mlx5e_rq_stats_desc[] = {
	MLX5E_RQ_STATS(MLX5E_STATS_DESC)
};

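/*
 * Allocate all software resources needed by a receive queue: the
 * DMA tag and maps, the firmware work queue, the LRO state, the
 * per-WQE mbuf array and the per-ring statistics sysctl nodes.
 */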
static int
mlx5e_create_rq(struct mlx5e_channel *c,
    struct mlx5e_rq_param *param,
    struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	char buffer[16];
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int wq_sz;
	int err;
	int i;
	u32 nsegs, wqe_sz;

	err = mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
	if (err != 0)
		goto done;

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsize */
	    nsegs,			/* nsegments */
	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &rq->dma_tag)))
		goto done;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
	    &rq->wq_ctrl);
	if (err)
		goto err_free_dma_tag;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	err = mlx5e_get_wqe_sz(priv, &rq->wqe_sz, &rq->nsegs);
	if (err != 0)
		goto err_rq_wq_destroy;

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);

	err = -tcp_lro_init_args(&rq->lro, c->ifp, TCP_LRO_ENTRIES, wq_sz);
	if (err)
		goto err_rq_wq_destroy;

	rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
	for (i = 0; i != wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
#if (MLX5E_MAX_RX_SEGS == 1)
		uint32_t byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
#else
		int j;
#endif

		err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map);
		if (err != 0) {
			while (i--)
				bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
			goto err_rq_mbuf_free;
		}

		/* set value for constant fields */
#if (MLX5E_MAX_RX_SEGS == 1)
		wqe->data[0].lkey = c->mkey_be;
		wqe->data[0].byte_count = cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
#else
		for (j = 0; j < rq->nsegs; j++)
			wqe->data[j].lkey = c->mkey_be;
#endif
	}

	INIT_WORK(&rq->dim.work, mlx5e_dim_work);
	if (priv->params.rx_cq_moderation_mode < 2) {
		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
	} else {
		void *cqc = container_of(param,
		    struct mlx5e_channel_param, rq)->rx_cq.cqc;

		switch (MLX5_GET(cqc, cqc, cq_period_mode)) {
		case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
			rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
			break;
		case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
			rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
			break;
		default:
			rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
			break;
		}
	}

	rq->ifp = c->ifp;
	rq->channel = c;
	rq->ix = c->ix;

	snprintf(buffer, sizeof(buffer), "rxstat%d", c->ix);
	mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM,
	    rq->stats.arg);
	return (0);

err_rq_mbuf_free:
	free(rq->mbuf, M_MLX5EN);
	tcp_lro_free(&rq->lro);
err_rq_wq_destroy:
	mlx5_wq_destroy(&rq->wq_ctrl);
err_free_dma_tag:
	bus_dma_tag_destroy(rq->dma_tag);
done:
	return (err);
}

static void
mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	int wq_sz;
	int i;

	/* destroy all sysctl nodes */
	sysctl_ctx_free(&rq->stats.ctx);

	/* free leftover LRO packets, if any */
	tcp_lro_free(&rq->lro);

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	for (i = 0; i != wq_sz; i++) {
		if (rq->mbuf[i].mbuf != NULL) {
			bus_dmamap_unload(rq->dma_tag, rq->mbuf[i].dma_map);
			m_freem(rq->mbuf[i].mbuf);
		}
		bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
	}
	free(rq->mbuf, M_MLX5EN);
	mlx5_wq_destroy(&rq->wq_ctrl);
}

static int
mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
	    sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
	if (priv->counter_set_id >= 0)
		MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id);
	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
	    PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return (err);
}

static int
mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rqn, rq->rqn);
	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, in, inlen);

	kvfree(in);

	return (err);
}

static void
mlx5e_disable_rq(struct mlx5e_rq *rq)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_rq(mdev, rq->rqn);
}

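/*
 * Poll for up to about four seconds until the receive queue has
 * been filled with at least the configured minimum number of WQEs.
 */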
static int
mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_wq_ll *wq = &rq->wq;
	int i;

	for (i = 0; i < 1000; i++) {
		if (wq->cur_sz >= priv->params.min_rx_wqes)
			return (0);

		msleep(4);
	}
	return (-ETIMEDOUT);
}

static int
mlx5e_open_rq(struct mlx5e_channel *c,
    struct mlx5e_rq_param *param,
    struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_create_rq(c, param, rq);
	if (err)
		return (err);

	err = mlx5e_enable_rq(rq, param);
	if (err)
		goto err_destroy_rq;

	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_disable_rq;

	c->rq.enabled = 1;

	return (0);

err_disable_rq:
	mlx5e_disable_rq(rq);
err_destroy_rq:
	mlx5e_destroy_rq(rq);

	return (err);
}

static void
mlx5e_close_rq(struct mlx5e_rq *rq)
{
	mtx_lock(&rq->mtx);
	rq->enabled = 0;
	callout_stop(&rq->watchdog);
	mtx_unlock(&rq->mtx);

	callout_drain(&rq->watchdog);

	mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
}

static void
mlx5e_close_rq_wait(struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = rq->channel->priv->mdev;

	/* wait till RQ is empty */
	while (!mlx5_wq_ll_is_empty(&rq->wq) &&
	       (mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)) {
		msleep(4);
		rq->cq.mcq.comp(&rq->cq.mcq);
	}

	cancel_work_sync(&rq->dim.work);
	mlx5e_disable_rq(rq);
	mlx5e_destroy_rq(rq);
}

void
mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int x;

	for (x = 0; x != wq_sz; x++)
		bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
	free(sq->mbuf, M_MLX5EN);
}

int
mlx5e_alloc_sq_db(struct mlx5e_sq *sq)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int err;
	int x;

	sq->mbuf = malloc(wq_sz * sizeof(sq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);

	/* Create DMA descriptor MAPs */
	for (x = 0; x != wq_sz; x++) {
		err = -bus_dmamap_create(sq->dma_tag, 0, &sq->mbuf[x].dma_map);
		if (err != 0) {
			while (x--)
				bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
			free(sq->mbuf, M_MLX5EN);
			return (err);
		}
	}
	return (0);
}

static const char *mlx5e_sq_stats_desc[] = {
	MLX5E_SQ_STATS(MLX5E_STATS_DESC)
};

void
mlx5e_update_sq_inline(struct mlx5e_sq *sq)
{
	sq->max_inline = sq->priv->params.tx_max_inline;
	sq->min_inline_mode = sq->priv->params.tx_min_inline_mode;

	/*
	 * Check if trust state is DSCP or if inline mode is NONE which
	 * indicates CX-5 or newer hardware.
	 */
	if (sq->priv->params_ethtool.trust_state != MLX5_QPTS_TRUST_PCP ||
	    sq->min_inline_mode == MLX5_INLINE_MODE_NONE) {
		if (MLX5_CAP_ETH(sq->priv->mdev, wqe_vlan_insert))
			sq->min_insert_caps = MLX5E_INSERT_VLAN | MLX5E_INSERT_NON_VLAN;
		else
			sq->min_insert_caps = MLX5E_INSERT_NON_VLAN;
	} else {
		sq->min_insert_caps = 0;
	}
}

static void
mlx5e_refresh_sq_inline_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
{
	int i;

	for (i = 0; i != c->num_tc; i++) {
		mtx_lock(&c->sq[i].lock);
		mlx5e_update_sq_inline(&c->sq[i]);
		mtx_unlock(&c->sq[i].lock);
	}
}

void
mlx5e_refresh_sq_inline(struct mlx5e_priv *priv)
{
	int i;

	/* check if channels are closed */
	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
		return;

	for (i = 0; i < priv->params.num_channels; i++)
		mlx5e_refresh_sq_inline_sub(priv, &priv->channel[i]);
}

static int
mlx5e_create_sq(struct mlx5e_channel *c,
    int tc,
    struct mlx5e_sq_param *param,
    struct mlx5e_sq *sq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	char buffer[16];
	void *sqc = param->sqc;
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
	int err;

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MLX5E_MAX_TX_PAYLOAD_SIZE,	/* maxsize */
	    MLX5E_MAX_TX_MBUF_FRAGS,	/* nsegments */
	    MLX5E_MAX_TX_MBUF_SIZE,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sq->dma_tag)))
		goto done;

	err = mlx5_alloc_map_uar(mdev, &sq->uar);
	if (err)
		goto err_free_dma_tag;

	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
	    &sq->wq_ctrl);
	if (err)
		goto err_unmap_free_uar;

	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	err = mlx5e_alloc_sq_db(sq);
	if (err)
		goto err_sq_wq_destroy;

	sq->mkey_be = c->mkey_be;
	sq->ifp = priv->ifp;
	sq->priv = priv;
	sq->tc = tc;

	mlx5e_update_sq_inline(sq);

	snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc);
	mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM,
	    sq->stats.arg);

	return (0);

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &sq->uar);

err_free_dma_tag:
	bus_dma_tag_destroy(sq->dma_tag);
done:
	return (err);
}

static void
mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
	/* destroy all sysctl nodes */
	sysctl_ctx_free(&sq->stats.ctx);

	mlx5e_free_sq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
	mlx5_unmap_free_uar(sq->priv->mdev, &sq->uar);
}

int
mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param,
    int tis_num)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
	    sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));

	MLX5_SET(sqc, sqc, tis_num_0, tis_num);
	MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, sq->uar.index);
	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
	    PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(sq->priv->mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return (err);
}

int
mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
{
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sqn, sq->sqn);
	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
	MLX5_SET(sqc, sqc, state, next_state);

	err = mlx5_core_modify_sq(sq->priv->mdev, in, inlen);

	kvfree(in);

	return (err);
}

void
mlx5e_disable_sq(struct mlx5e_sq *sq)
{

	mlx5_core_destroy_sq(sq->priv->mdev, sq->sqn);
}

static int
mlx5e_open_sq(struct mlx5e_channel *c,
    int tc,
    struct mlx5e_sq_param *param,
    struct mlx5e_sq *sq)
{
	int err;

	err = mlx5e_create_sq(c, tc, param, sq);
	if (err)
		return (err);

	err = mlx5e_enable_sq(sq, param, c->priv->tisn[tc]);
	if (err)
		goto err_destroy_sq;

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
	if (err)
		goto err_disable_sq;

	WRITE_ONCE(sq->running, 1);

	return (0);

err_disable_sq:
	mlx5e_disable_sq(sq);
err_destroy_sq:
	mlx5e_destroy_sq(sq);

	return (err);
}

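/*
 * Pad the send queue with NOP WQEs until the completion event
 * counter reaches zero, forcing the hardware to generate a
 * completion event for any outstanding WQEs. Called with the SQ
 * lock held; when "can_sleep" is set the lock may be dropped while
 * waiting for ring space.
 */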
static void
mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep)
{
	/* fill up remainder with NOPs */
	while (sq->cev_counter != 0) {
		while (!mlx5e_sq_has_room_for(sq, 1)) {
			if (can_sleep != 0) {
				mtx_unlock(&sq->lock);
				msleep(4);
				mtx_lock(&sq->lock);
			} else {
				goto done;
			}
		}
		/* send a single NOP */
		mlx5e_send_nop(sq, 1);
		atomic_thread_fence_rel();
	}
done:
	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}
}

void
mlx5e_sq_cev_timeout(void *arg)
{
	struct mlx5e_sq *sq = arg;

	mtx_assert(&sq->lock, MA_OWNED);

	/* check next state */
	switch (sq->cev_next_state) {
	case MLX5E_CEV_STATE_SEND_NOPS:
		/* fill TX ring with NOPs, if any */
		mlx5e_sq_send_nops_locked(sq, 0);

		/* check if completed */
		if (sq->cev_counter == 0) {
			sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
			return;
		}
		break;
	default:
		/* send NOPs on next timeout */
		sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS;
		break;
	}

	/* restart timer */
	callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq);
}

void
mlx5e_drain_sq(struct mlx5e_sq *sq)
{
	int error;
	struct mlx5_core_dev *mdev = sq->priv->mdev;

	/*
	 * Check if already stopped.
	 *
	 * NOTE: Serialization of this function is managed by the
	 * caller ensuring the priv's state lock is locked or in case
	 * of rate limit support, a single thread manages drain and
	 * resume of SQs. The "running" variable can therefore safely
	 * be read without any locks.
	 */
	if (READ_ONCE(sq->running) == 0)
		return;

	/* don't put more packets into the SQ */
	WRITE_ONCE(sq->running, 0);

	/* serialize access to DMA rings */
	mtx_lock(&sq->lock);

	/* teardown event factor timer, if any */
	sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	callout_stop(&sq->cev_callout);

	/* send dummy NOPs in order to flush the transmit ring */
	mlx5e_sq_send_nops_locked(sq, 1);
	mtx_unlock(&sq->lock);

	/* make sure it is safe to free the callout */
	callout_drain(&sq->cev_callout);

	/* wait till SQ is empty or link is down */
	mtx_lock(&sq->lock);
	while (sq->cc != sq->pc &&
	    (sq->priv->media_status_last & IFM_ACTIVE) != 0 &&
	    mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mtx_unlock(&sq->lock);
		msleep(1);
		sq->cq.mcq.comp(&sq->cq.mcq);
		mtx_lock(&sq->lock);
	}
	mtx_unlock(&sq->lock);

	/* error out remaining requests */
	error = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
	if (error != 0) {
		if_printf(sq->ifp,
		    "mlx5e_modify_sq() from RDY to ERR failed: %d\n", error);
	}

	/* wait till SQ is empty */
	mtx_lock(&sq->lock);
	while (sq->cc != sq->pc &&
	       mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mtx_unlock(&sq->lock);
		msleep(1);
		sq->cq.mcq.comp(&sq->cq.mcq);
		mtx_lock(&sq->lock);
	}
	mtx_unlock(&sq->lock);
}

static void
mlx5e_close_sq_wait(struct mlx5e_sq *sq)
{

	mlx5e_drain_sq(sq);
	mlx5e_disable_sq(sq);
	mlx5e_destroy_sq(sq);
}

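/*
 * Allocate a completion queue in host memory and initialize its
 * fields. The "op_own" field of all CQEs is preset to an invalid
 * value so that stale entries are never mistaken for valid
 * completions.
 */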
static int
mlx5e_create_cq(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param,
    struct mlx5e_cq *cq,
    mlx5e_cq_comp_t *comp,
    int eq_ix)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	int irqn;
	int err;
	u32 i;

	param->wq.buf_numa_node = 0;
	param->wq.db_numa_node = 0;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
	    &cq->wq_ctrl);
	if (err)
		return (err);

	mlx5_vector2eqn(mdev, eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz = 64;
	mcq->set_ci_db = cq->wq_ctrl.db.db;
	mcq->arm_db = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db = 0;
	mcq->vector = eq_ix;
	mcq->comp = comp;
	mcq->event = mlx5e_cq_error_event;
	mcq->irqn = irqn;
	mcq->uar = &priv->cq_uar;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->priv = priv;

	return (0);
}

static void
mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static int
mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param, int eq_ix)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	void *in;
	void *cqc;
	int inlen;
	int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
	    sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_array(&cq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(cq->priv->mdev, eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
	    PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(cq->priv->mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return (err);

	mlx5e_cq_arm(cq, MLX5_GET_DOORBELL_LOCK(&cq->priv->doorbell_lock));

	return (0);
}

static void
mlx5e_disable_cq(struct mlx5e_cq *cq)
{

	mlx5_core_destroy_cq(cq->priv->mdev, &cq->mcq);
}

int
mlx5e_open_cq(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param,
    struct mlx5e_cq *cq,
    mlx5e_cq_comp_t *comp,
    int eq_ix)
{
	int err;

	err = mlx5e_create_cq(priv, param, cq, comp, eq_ix);
	if (err)
		return (err);

	err = mlx5e_enable_cq(cq, param, eq_ix);
	if (err)
		goto err_destroy_cq;

	return (0);

err_destroy_cq:
	mlx5e_destroy_cq(cq);

	return (err);
}

void
mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_disable_cq(cq);
	mlx5e_destroy_cq(cq);
}

static int
mlx5e_open_tx_cqs(struct mlx5e_channel *c,
    struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		/* open completion queue */
		err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[tc].cq,
		    &mlx5e_tx_cq_comp, c->ix);
		if (err)
			goto err_close_tx_cqs;
	}
	return (0);

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return (err);
}

static void
mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}

static int
mlx5e_open_sqs(struct mlx5e_channel *c,
    struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return (0);

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_sq_wait(&c->sq[tc]);

	return (err);
}

static void
mlx5e_close_sqs_wait(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_sq_wait(&c->sq[tc]);
}

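/*
 * Initialize the per-channel RQ and SQ mutexes and callouts, and
 * load the TX completion event factor, which must be non-zero.
 */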
static void
mlx5e_chan_mtx_init(struct mlx5e_channel *c)
{
	int tc;

	mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF);

	callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0);

	for (tc = 0; tc < c->num_tc; tc++) {
		struct mlx5e_sq *sq = c->sq + tc;

		mtx_init(&sq->lock, "mlx5tx",
		    MTX_NETWORK_LOCK " TX", MTX_DEF);
		mtx_init(&sq->comp_lock, "mlx5comp",
		    MTX_NETWORK_LOCK " TX", MTX_DEF);

		callout_init_mtx(&sq->cev_callout, &sq->lock, 0);

		sq->cev_factor = c->priv->params_ethtool.tx_completion_fact;

		/* ensure the TX completion event factor is not zero */
		if (sq->cev_factor == 0)
			sq->cev_factor = 1;
	}
}

static void
mlx5e_chan_mtx_destroy(struct mlx5e_channel *c)
{
	int tc;

	mtx_destroy(&c->rq.mtx);

	for (tc = 0; tc < c->num_tc; tc++) {
		mtx_destroy(&c->sq[tc].lock);
		mtx_destroy(&c->sq[tc].comp_lock);
	}
}

static int
mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
    struct mlx5e_channel_param *cparam,
    struct mlx5e_channel *c)
{
	int err;

	memset(c, 0, sizeof(*c));

	c->priv = priv;
	c->ix = ix;
	c->ifp = priv->ifp;
	c->mkey_be = cpu_to_be32(priv->mr.key);
	c->num_tc = priv->num_tc;

	/* init mutexes */
	mlx5e_chan_mtx_init(c);

	/* open transmit completion queue */
	err = mlx5e_open_tx_cqs(c, cparam);
	if (err)
		goto err_free;

	/* open receive completion queue */
	err = mlx5e_open_cq(c->priv, &cparam->rx_cq, &c->rq.cq,
	    &mlx5e_rx_cq_comp, c->ix);
	if (err)
		goto err_close_tx_cqs;

	err = mlx5e_open_sqs(c, cparam);
	if (err)
		goto err_close_rx_cq;

	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
	if (err)
		goto err_close_sqs;

	/* poll receive queue initially */
	c->rq.cq.mcq.comp(&c->rq.cq.mcq);

	return (0);

err_close_sqs:
	mlx5e_close_sqs_wait(c);

err_close_rx_cq:
	mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_free:
	/* destroy mutexes */
	mlx5e_chan_mtx_destroy(c);
	return (err);
}

static void
mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_rq(&c->rq);
}

static void
mlx5e_close_channel_wait(struct mlx5e_channel *c)
{
	mlx5e_close_rq_wait(&c->rq);
	mlx5e_close_sqs_wait(c);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	/* destroy mutexes */
	mlx5e_chan_mtx_destroy(c);
}

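/*
 * Compute the receive buffer size and the number of data segments
 * per receive WQE. The buffer size is rounded up to the next
 * supported mbuf cluster size, and the segment count is chosen such
 * that the resulting WQE stride is a power of two.
 */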
static int
mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs)
{
	u32 r, n;

	r = priv->params.hw_lro_en ? priv->params.lro_wqe_sz :
	    MLX5E_SW2MB_MTU(priv->ifp->if_mtu);
	if (r > MJUM16BYTES)
		return (-ENOMEM);

	if (r > MJUM9BYTES)
		r = MJUM16BYTES;
	else if (r > MJUMPAGESIZE)
		r = MJUM9BYTES;
	else if (r > MCLBYTES)
		r = MJUMPAGESIZE;
	else
		r = MCLBYTES;

	/*
	 * n + 1 must be a power of two, because stride size must be.
	 * Stride size is 16 * (n + 1), as the first segment is
	 * control.
	 */
	for (n = howmany(r, MLX5E_MAX_RX_BYTES); !powerof2(n + 1); n++)
		;

	*wqe_sz = r;
	*nsegs = n;
	return (0);
}

static void
mlx5e_build_rq_param(struct mlx5e_priv *priv,
    struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 wqe_sz, nsegs;

	mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe) +
	    nsegs * sizeof(struct mlx5_wqe_data_seg)));
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
	MLX5_SET(wq, wq, pd, priv->pdn);

	param->wq.buf_numa_node = 0;
	param->wq.db_numa_node = 0;
	param->wq.linear = 1;
}

static void
mlx5e_build_sq_param(struct mlx5e_priv *priv,
    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, priv->pdn);

	param->wq.buf_numa_node = 0;
	param->wq.db_numa_node = 0;
	param->wq.linear = 1;
}

static void
mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
}

static void
mlx5e_get_default_profile(struct mlx5e_priv *priv, int mode, struct net_dim_cq_moder *ptr)
{

	*ptr = net_dim_get_profile(mode, MLX5E_DIM_DEFAULT_PROFILE);

	/* apply LRO restrictions */
	if (priv->params.hw_lro_en &&
	    ptr->pkts > MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO) {
		ptr->pkts = MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO;
	}
}

static void
mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param)
{
	struct net_dim_cq_moder curr;
	void *cqc = param->cqc;

	/*
	 * TODO: The sysctl to control on/off is a boolean value for now,
	 * which means we only support CSUM. Once HASH is implemented
	 * we'll need to address that.
	 */
1866	if (priv->params.cqe_zipping_en) {
1867		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
1868		MLX5_SET(cqc, cqc, cqe_compression_en, 1);
1869	}
1870
1871	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
1872
1873	switch (priv->params.rx_cq_moderation_mode) {
1874	case 0:
1875		MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
1876		MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);
1877		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1878		break;
1879	case 1:
1880		MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
1881		MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);
1882		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
1883			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
1884		else
1885			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1886		break;
1887	case 2:
1888		mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE, &curr);
1889		MLX5_SET(cqc, cqc, cq_period, curr.usec);
1890		MLX5_SET(cqc, cqc, cq_max_count, curr.pkts);
1891		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1892		break;
1893	case 3:
1894		mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE, &curr);
1895		MLX5_SET(cqc, cqc, cq_period, curr.usec);
1896		MLX5_SET(cqc, cqc, cq_max_count, curr.pkts);
1897		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
1898			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
1899		else
1900			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1901		break;
1902	default:
1903		break;
1904	}
1905
1906	mlx5e_dim_build_cq_param(priv, param);
1907
1908	mlx5e_build_common_cq_param(priv, param);
1909}
1910
1911static void
1912mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
1913    struct mlx5e_cq_param *param)
1914{
1915	void *cqc = param->cqc;
1916
1917	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
1918	MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec);
1919	MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts);
1920
1921	switch (priv->params.tx_cq_moderation_mode) {
1922	case 0:
1923		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1924		break;
1925	default:
1926		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
1927			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
1928		else
1929			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1930		break;
1931	}
1932
1933	mlx5e_build_common_cq_param(priv, param);
1934}
1935
1936static void
1937mlx5e_build_channel_param(struct mlx5e_priv *priv,
1938    struct mlx5e_channel_param *cparam)
1939{
1940	memset(cparam, 0, sizeof(*cparam));
1941
1942	mlx5e_build_rq_param(priv, &cparam->rq);
1943	mlx5e_build_sq_param(priv, &cparam->sq);
1944	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
1945	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
1946}
1947
1948static int
1949mlx5e_open_channels(struct mlx5e_priv *priv)
1950{
1951	struct mlx5e_channel_param cparam;
1952	int err;
1953	int i;
1954	int j;
1955
1956	mlx5e_build_channel_param(priv, &cparam);
1957	for (i = 0; i < priv->params.num_channels; i++) {
1958		err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
1959		if (err)
1960			goto err_close_channels;
1961	}
1962
1963	for (j = 0; j < priv->params.num_channels; j++) {
1964		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j].rq);
1965		if (err)
1966			goto err_close_channels;
1967	}
1968
1969	return (0);
1970
1971err_close_channels:
1972	while (i--) {
1973		mlx5e_close_channel(&priv->channel[i]);
1974		mlx5e_close_channel_wait(&priv->channel[i]);
1975	}
1976	return (err);
1977}
1978
1979static void
1980mlx5e_close_channels(struct mlx5e_priv *priv)
1981{
1982	int i;
1983
1984	for (i = 0; i < priv->params.num_channels; i++)
1985		mlx5e_close_channel(&priv->channel[i]);
1986	for (i = 0; i < priv->params.num_channels; i++)
1987		mlx5e_close_channel_wait(&priv->channel[i]);
1988}
1989
1990static int
1991mlx5e_refresh_sq_params(struct mlx5e_priv *priv, struct mlx5e_sq *sq)
1992{
1993
1994	if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
1995		uint8_t cq_mode;
1996
1997		switch (priv->params.tx_cq_moderation_mode) {
1998		case 0:
1999		case 2:
2000			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
2001			break;
2002		default:
2003			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
2004			break;
2005		}
2006
2007		return (mlx5_core_modify_cq_moderation_mode(priv->mdev, &sq->cq.mcq,
2008		    priv->params.tx_cq_moderation_usec,
2009		    priv->params.tx_cq_moderation_pkts,
2010		    cq_mode));
2011	}
2012
2013	return (mlx5_core_modify_cq_moderation(priv->mdev, &sq->cq.mcq,
2014	    priv->params.tx_cq_moderation_usec,
2015	    priv->params.tx_cq_moderation_pkts));
2016}
2017
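/*
 * Apply the current RX completion moderation parameters to an RQ's
 * CQ. Dynamic interrupt moderation (DIM) is disabled and its work
 * task drained first; for moderation modes >= 2 DIM is then re-armed
 * with the default profile, else the static usec/packet limits apply.
 */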
2018static int
2019mlx5e_refresh_rq_params(struct mlx5e_priv *priv, struct mlx5e_rq *rq)
2020{
2021
2022	if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
2023		uint8_t cq_mode;
2024		uint8_t dim_mode;
2025		int retval;
2026
2027		switch (priv->params.rx_cq_moderation_mode) {
2028		case 0:
2029		case 2:
2030			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
2031			dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
2032			break;
2033		default:
2034			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
2035			dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
2036			break;
2037		}
2038
2039		/* tear down dynamic interrupt moderation */
2040		mtx_lock(&rq->mtx);
2041		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
2042		mtx_unlock(&rq->mtx);
2043
2044		/* wait for dynamic interrupt moderation work task, if any */
2045		cancel_work_sync(&rq->dim.work);
2046
2047		if (priv->params.rx_cq_moderation_mode >= 2) {
2048			struct net_dim_cq_moder curr;
2049
2050			mlx5e_get_default_profile(priv, dim_mode, &curr);
2051
2052			retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq,
2053			    curr.usec, curr.pkts, cq_mode);
2054
2055			/* set dynamic interrupt moderation mode and zero defaults */
2056			mtx_lock(&rq->mtx);
2057			rq->dim.mode = dim_mode;
2058			rq->dim.state = 0;
2059			rq->dim.profile_ix = MLX5E_DIM_DEFAULT_PROFILE;
2060			mtx_unlock(&rq->mtx);
2061		} else {
2062			retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq,
2063			    priv->params.rx_cq_moderation_usec,
2064			    priv->params.rx_cq_moderation_pkts,
2065			    cq_mode);
2066		}
2067		return (retval);
2068	}
2069
2070	return (mlx5_core_modify_cq_moderation(priv->mdev, &rq->cq.mcq,
2071	    priv->params.rx_cq_moderation_usec,
2072	    priv->params.rx_cq_moderation_pkts));
2073}
2074
2075static int
2076mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
2077{
2078	int err;
2079	int i;
2080
2081	err = mlx5e_refresh_rq_params(priv, &c->rq);
2082	if (err)
2083		goto done;
2084
2085	for (i = 0; i != c->num_tc; i++) {
2086		err = mlx5e_refresh_sq_params(priv, &c->sq[i]);
2087		if (err)
2088			goto done;
2089	}
2090done:
2091	return (err);
2092}
2093
2094int
2095mlx5e_refresh_channel_params(struct mlx5e_priv *priv)
2096{
2097	int i;
2098
2099	/* check if channels are closed */
2100	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
2101		return (EINVAL);
2102
2103	for (i = 0; i < priv->params.num_channels; i++) {
2104		int err;
2105
2106		err = mlx5e_refresh_channel_params_sub(priv, &priv->channel[i]);
2107		if (err)
2108			return (err);
2109	}
2110	return (0);
2111}
2112
2113static int
2114mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
2115{
2116	struct mlx5_core_dev *mdev = priv->mdev;
2117	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
2118	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
2119
2120	memset(in, 0, sizeof(in));
2121
2122	MLX5_SET(tisc, tisc, prio, tc);
2123	MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
2124
2125	return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]));
2126}
2127
2128static void
2129mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
2130{
2131	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
2132}
2133
2134static int
2135mlx5e_open_tises(struct mlx5e_priv *priv)
2136{
2137	int num_tc = priv->num_tc;
2138	int err;
2139	int tc;
2140
2141	for (tc = 0; tc < num_tc; tc++) {
2142		err = mlx5e_open_tis(priv, tc);
2143		if (err)
2144			goto err_close_tises;
2145	}
2146
2147	return (0);
2148
2149err_close_tises:
2150	for (tc--; tc >= 0; tc--)
2151		mlx5e_close_tis(priv, tc);
2152
2153	return (err);
2154}
2155
2156static void
2157mlx5e_close_tises(struct mlx5e_priv *priv)
2158{
2159	int num_tc = priv->num_tc;
2160	int tc;
2161
2162	for (tc = 0; tc < num_tc; tc++)
2163		mlx5e_close_tis(priv, tc);
2164}
2165
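/*
 * Create the receive queue table (RQT) used for RSS. The table has
 * (1 << rx_hash_log_tbl_sz) entries, each pointing at the RQ of one
 * of the open channels.
 */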
2166static int
2167mlx5e_open_rqt(struct mlx5e_priv *priv)
2168{
2169	struct mlx5_core_dev *mdev = priv->mdev;
2170	u32 *in;
2171	u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
2172	void *rqtc;
2173	int inlen;
2174	int err;
2175	int sz;
2176	int i;
2177
2178	sz = 1 << priv->params.rx_hash_log_tbl_sz;
2179
2180	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
2181	in = mlx5_vzalloc(inlen);
2182	if (in == NULL)
2183		return (-ENOMEM);
2184	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
2185
2186	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2187	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
2188
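	/*
	 * Fill the indirection table. For example, with four channels
	 * and channels_rsss == 1, entry i maps to channel (i % 4);
	 * with a stride of two, entries map pairwise to channels
	 * 0 and 2 only.
	 */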
2189	for (i = 0; i < sz; i++) {
2190		int ix = i;
2191#ifdef RSS
2192		ix = rss_get_indirection_to_bucket(ix);
2193#endif
2194		/* ensure we don't overflow */
2195		ix %= priv->params.num_channels;
2196
2197		/* apply receive side scaling stride, if any */
2198		ix -= ix % (int)priv->params.channels_rsss;
2199
2200		MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix].rq.rqn);
2201	}
2202
2203	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
2204
2205	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
2206	if (!err)
2207		priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
2208
2209	kvfree(in);
2210
2211	return (err);
2212}
2213
2214static void
2215mlx5e_close_rqt(struct mlx5e_priv *priv)
2216{
2217	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0};
2218	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0};
2219
2220	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
2221	MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
2222
2223	mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
2224}
2225
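/*
 * Build the TIR context for one traffic type (tt): enable HW LRO
 * when configured, use direct dispatch for MLX5E_TT_ANY or Toeplitz
 * RSS through the RQT for all other types, and select the header
 * fields to hash on.
 */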
2226static void
2227mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
2228{
2229	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2230	__be32 *hkey;
2231
2232	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
2233
2234#define	ROUGH_MAX_L2_L3_HDR_SZ 256
2235
2236#define	MLX5_HASH_IP     (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2237			  MLX5_HASH_FIELD_SEL_DST_IP)
2238
2239#define	MLX5_HASH_ALL    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2240			  MLX5_HASH_FIELD_SEL_DST_IP   |\
2241			  MLX5_HASH_FIELD_SEL_L4_SPORT |\
2242			  MLX5_HASH_FIELD_SEL_L4_DPORT)
2243
2244#define	MLX5_HASH_IP_IPSEC_SPI	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
2245				 MLX5_HASH_FIELD_SEL_DST_IP   |\
2246				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2247
2248	if (priv->params.hw_lro_en) {
2249		MLX5_SET(tirc, tirc, lro_enable_mask,
2250		    MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2251		    MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
2252		MLX5_SET(tirc, tirc, lro_max_msg_sz,
2253		    (priv->params.lro_wqe_sz -
2254		    ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
2255		/* TODO: add the option to choose timer value dynamically */
2256		MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
2257		    MLX5_CAP_ETH(priv->mdev,
2258		    lro_timer_supported_periods[2]));
2259	}
2260
2261	/* setup parameters for hashing TIR type, if any */
2262	switch (tt) {
2263	case MLX5E_TT_ANY:
2264		MLX5_SET(tirc, tirc, disp_type,
2265		    MLX5_TIRC_DISP_TYPE_DIRECT);
2266		MLX5_SET(tirc, tirc, inline_rqn,
2267		    priv->channel[0].rq.rqn);
2268		break;
2269	default:
2270		MLX5_SET(tirc, tirc, disp_type,
2271		    MLX5_TIRC_DISP_TYPE_INDIRECT);
2272		MLX5_SET(tirc, tirc, indirect_table,
2273		    priv->rqtn);
2274		MLX5_SET(tirc, tirc, rx_hash_fn,
2275		    MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
2276		hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
2277#ifdef RSS
2278		/*
2279		 * The FreeBSD RSS implementation currently does not
2280		 * support symmetric Toeplitz hashes:
2281		 */
2282		MLX5_SET(tirc, tirc, rx_hash_symmetric, 0);
2283		rss_getkey((uint8_t *)hkey);
2284#else
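		/*
		 * Without kernel RSS support, use a fixed Toeplitz
		 * hash key and enable symmetric hashing, so both
		 * directions of a connection hash to the same queue.
		 */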
2285		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
2286		hkey[0] = cpu_to_be32(0xD181C62C);
2287		hkey[1] = cpu_to_be32(0xF7F4DB5B);
2288		hkey[2] = cpu_to_be32(0x1983A2FC);
2289		hkey[3] = cpu_to_be32(0x943E1ADB);
2290		hkey[4] = cpu_to_be32(0xD9389E6B);
2291		hkey[5] = cpu_to_be32(0xD1039C2C);
2292		hkey[6] = cpu_to_be32(0xA74499AD);
2293		hkey[7] = cpu_to_be32(0x593D56D9);
2294		hkey[8] = cpu_to_be32(0xF3253C06);
2295		hkey[9] = cpu_to_be32(0x2ADC1FFC);
2296#endif
2297		break;
2298	}
2299
2300	switch (tt) {
2301	case MLX5E_TT_IPV4_TCP:
2302		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2303		    MLX5_L3_PROT_TYPE_IPV4);
2304		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2305		    MLX5_L4_PROT_TYPE_TCP);
2306#ifdef RSS
2307		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) {
2308			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2309			    MLX5_HASH_IP);
2310		} else
2311#endif
2312		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2313		    MLX5_HASH_ALL);
2314		break;
2315
2316	case MLX5E_TT_IPV6_TCP:
2317		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2318		    MLX5_L3_PROT_TYPE_IPV6);
2319		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2320		    MLX5_L4_PROT_TYPE_TCP);
2321#ifdef RSS
2322		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) {
2323			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2324			    MLX5_HASH_IP);
2325		} else
2326#endif
2327		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2328		    MLX5_HASH_ALL);
2329		break;
2330
2331	case MLX5E_TT_IPV4_UDP:
2332		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2333		    MLX5_L3_PROT_TYPE_IPV4);
2334		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2335		    MLX5_L4_PROT_TYPE_UDP);
2336#ifdef RSS
2337		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) {
2338			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2339			    MLX5_HASH_IP);
2340		} else
2341#endif
2342		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2343		    MLX5_HASH_ALL);
2344		break;
2345
2346	case MLX5E_TT_IPV6_UDP:
2347		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2348		    MLX5_L3_PROT_TYPE_IPV6);
2349		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2350		    MLX5_L4_PROT_TYPE_UDP);
2351#ifdef RSS
2352		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) {
2353			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2354			    MLX5_HASH_IP);
2355		} else
2356#endif
2357		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2358		    MLX5_HASH_ALL);
2359		break;
2360
2361	case MLX5E_TT_IPV4_IPSEC_AH:
2362		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2363		    MLX5_L3_PROT_TYPE_IPV4);
2364		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2365		    MLX5_HASH_IP_IPSEC_SPI);
2366		break;
2367
2368	case MLX5E_TT_IPV6_IPSEC_AH:
2369		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2370		    MLX5_L3_PROT_TYPE_IPV6);
2371		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2372		    MLX5_HASH_IP_IPSEC_SPI);
2373		break;
2374
2375	case MLX5E_TT_IPV4_IPSEC_ESP:
2376		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2377		    MLX5_L3_PROT_TYPE_IPV4);
2378		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2379		    MLX5_HASH_IP_IPSEC_SPI);
2380		break;
2381
2382	case MLX5E_TT_IPV6_IPSEC_ESP:
2383		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2384		    MLX5_L3_PROT_TYPE_IPV6);
2385		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2386		    MLX5_HASH_IP_IPSEC_SPI);
2387		break;
2388
2389	case MLX5E_TT_IPV4:
2390		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2391		    MLX5_L3_PROT_TYPE_IPV4);
2392		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2393		    MLX5_HASH_IP);
2394		break;
2395
2396	case MLX5E_TT_IPV6:
2397		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2398		    MLX5_L3_PROT_TYPE_IPV6);
2399		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2400		    MLX5_HASH_IP);
2401		break;
2402
2403	default:
2404		break;
2405	}
2406}
2407
2408static int
2409mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
2410{
2411	struct mlx5_core_dev *mdev = priv->mdev;
2412	u32 *in;
2413	void *tirc;
2414	int inlen;
2415	int err;
2416
2417	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2418	in = mlx5_vzalloc(inlen);
2419	if (in == NULL)
2420		return (-ENOMEM);
2421	tirc = MLX5_ADDR_OF(create_tir_in, in, tir_context);
2422
2423	mlx5e_build_tir_ctx(priv, tirc, tt);
2424
2425	err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
2426
2427	kvfree(in);
2428
2429	return (err);
2430}
2431
2432static void
2433mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
2434{
2435	mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
2436}
2437
2438static int
2439mlx5e_open_tirs(struct mlx5e_priv *priv)
2440{
2441	int err;
2442	int i;
2443
2444	for (i = 0; i < MLX5E_NUM_TT; i++) {
2445		err = mlx5e_open_tir(priv, i);
2446		if (err)
2447			goto err_close_tirs;
2448	}
2449
2450	return (0);
2451
2452err_close_tirs:
2453	for (i--; i >= 0; i--)
2454		mlx5e_close_tir(priv, i);
2455
2456	return (err);
2457}
2458
2459static void
2460mlx5e_close_tirs(struct mlx5e_priv *priv)
2461{
2462	int i;
2463
2464	for (i = 0; i < MLX5E_NUM_TT; i++)
2465		mlx5e_close_tir(priv, i);
2466}
2467
2468/*
2469 * SW MTU does not include headers,
2470 * HW MTU includes all headers and checksums.
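 * For example, a SW MTU of 1500 maps to a HW MTU that additionally
 * counts the Ethernet header, any VLAN tag and the FCS.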
2471 */
2472static int
2473mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu)
2474{
2475	struct mlx5e_priv *priv = ifp->if_softc;
2476	struct mlx5_core_dev *mdev = priv->mdev;
2477	int hw_mtu;
2478	int err;
2479
2480	hw_mtu = MLX5E_SW2HW_MTU(sw_mtu);
2481
2482	err = mlx5_set_port_mtu(mdev, hw_mtu);
2483	if (err) {
2484		if_printf(ifp, "%s: mlx5_set_port_mtu failed setting %d, err=%d\n",
2485		    __func__, sw_mtu, err);
2486		return (err);
2487	}
2488
2489	/* Update vport context MTU */
2490	err = mlx5_set_vport_mtu(mdev, hw_mtu);
2491	if (err) {
2492		if_printf(ifp, "%s: Failed updating vport context with MTU size, err=%d\n",
2493		    __func__, err);
2494	}
2495
2496	ifp->if_mtu = sw_mtu;
2497
2498	err = mlx5_query_vport_mtu(mdev, &hw_mtu);
2499	if (err || !hw_mtu) {
2500		/* fallback to port oper mtu */
2501		err = mlx5_query_port_oper_mtu(mdev, &hw_mtu);
2502	}
2503	if (err) {
2504		if_printf(ifp, "Querying port MTU after setting the new "
2505		    "MTU value failed\n");
2506		return (err);
2507	} else if (MLX5E_HW2SW_MTU(hw_mtu) < sw_mtu) {
2508		err = -E2BIG;
2509		if_printf(ifp, "Port MTU %d is smaller than "
2510		    "ifp mtu %d\n", hw_mtu, sw_mtu);
2511	} else if (MLX5E_HW2SW_MTU(hw_mtu) > sw_mtu) {
2512		err = -EINVAL;
2513		if_printf(ifp, "Port MTU %d is bigger than "
2514		    "ifp mtu %d\n", hw_mtu, sw_mtu);
2515	}
2516	priv->params_ethtool.hw_mtu = hw_mtu;
2517
2518	return (err);
2519}
2520
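/*
 * Bring the interface up: create the TISes, allocate a queue
 * counter, open the channels, create the RQT and TIRs, and install
 * the flow table and VLAN rules, in that order, unwinding all
 * previous steps on any failure.
 */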
2521int
2522mlx5e_open_locked(struct ifnet *ifp)
2523{
2524	struct mlx5e_priv *priv = ifp->if_softc;
2525	int err;
2526	u16 set_id;
2527
2528	/* check if already opened */
2529	if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
2530		return (0);
2531
2532#ifdef RSS
2533	if (rss_getnumbuckets() > priv->params.num_channels) {
2534		if_printf(ifp, "NOTE: There are more RSS buckets (%u) than "
2535		    "channels (%u) available\n", rss_getnumbuckets(),
2536		    priv->params.num_channels);
2537	}
2538#endif
2539	err = mlx5e_open_tises(priv);
2540	if (err) {
2541		if_printf(ifp, "%s: mlx5e_open_tises failed, %d\n",
2542		    __func__, err);
2543		return (err);
2544	}
2545	err = mlx5_vport_alloc_q_counter(priv->mdev,
2546	    MLX5_INTERFACE_PROTOCOL_ETH, &set_id);
2547	if (err) {
2548		if_printf(priv->ifp,
2549		    "%s: mlx5_vport_alloc_q_counter failed: %d\n",
2550		    __func__, err);
2551		goto err_close_tises;
2552	}
2553	/* store counter set ID */
2554	priv->counter_set_id = set_id;
2555
2556	err = mlx5e_open_channels(priv);
2557	if (err) {
2558		if_printf(ifp, "%s: mlx5e_open_channels failed, %d\n",
2559		    __func__, err);
2560		goto err_dalloc_q_counter;
2561	}
2562	err = mlx5e_open_rqt(priv);
2563	if (err) {
2564		if_printf(ifp, "%s: mlx5e_open_rqt failed, %d\n",
2565		    __func__, err);
2566		goto err_close_channels;
2567	}
2568	err = mlx5e_open_tirs(priv);
2569	if (err) {
2570		if_printf(ifp, "%s: mlx5e_open_tir failed, %d\n",
2571		    __func__, err);
2572		goto err_close_rqls;
2573	}
2574	err = mlx5e_open_flow_table(priv);
2575	if (err) {
2576		if_printf(ifp, "%s: mlx5e_open_flow_table failed, %d\n",
2577		    __func__, err);
2578		goto err_close_tirs;
2579	}
2580	err = mlx5e_add_all_vlan_rules(priv);
2581	if (err) {
2582		if_printf(ifp, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
2583		    __func__, err);
2584		goto err_close_flow_table;
2585	}
2586	set_bit(MLX5E_STATE_OPENED, &priv->state);
2587
2588	mlx5e_update_carrier(priv);
2589	mlx5e_set_rx_mode_core(priv);
2590
2591	return (0);
2592
2593err_close_flow_table:
2594	mlx5e_close_flow_table(priv);
2595
2596err_close_tirs:
2597	mlx5e_close_tirs(priv);
2598
2599err_close_rqls:
2600	mlx5e_close_rqt(priv);
2601
2602err_close_channels:
2603	mlx5e_close_channels(priv);
2604
2605err_dalloc_q_counter:
2606	mlx5_vport_dealloc_q_counter(priv->mdev,
2607	    MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
2608
2609err_close_tises:
2610	mlx5e_close_tises(priv);
2611
2612	return (err);
2613}
2614
2615static void
2616mlx5e_open(void *arg)
2617{
2618	struct mlx5e_priv *priv = arg;
2619
2620	PRIV_LOCK(priv);
2621	if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP))
2622		if_printf(priv->ifp,
2623		    "%s: Setting port status to up failed\n",
2624		    __func__);
2625
2626	mlx5e_open_locked(priv->ifp);
2627	priv->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2628	PRIV_UNLOCK(priv);
2629}
2630
2631int
2632mlx5e_close_locked(struct ifnet *ifp)
2633{
2634	struct mlx5e_priv *priv = ifp->if_softc;
2635
2636	/* check if already closed */
2637	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
2638		return (0);
2639
2640	clear_bit(MLX5E_STATE_OPENED, &priv->state);
2641
2642	mlx5e_set_rx_mode_core(priv);
2643	mlx5e_del_all_vlan_rules(priv);
2644	if_link_state_change(priv->ifp, LINK_STATE_DOWN);
2645	mlx5e_close_flow_table(priv);
2646	mlx5e_close_tirs(priv);
2647	mlx5e_close_rqt(priv);
2648	mlx5e_close_channels(priv);
2649	mlx5_vport_dealloc_q_counter(priv->mdev,
2650	    MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
2651	mlx5e_close_tises(priv);
2652
2653	return (0);
2654}
2655
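/*
 * Return interface counters from the cached vport and pport
 * statistics. This runs without the private lock, so a reader may
 * see values belonging to two different statistics updates.
 */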
2656#if (__FreeBSD_version >= 1100000)
2657static uint64_t
2658mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt)
2659{
2660	struct mlx5e_priv *priv = ifp->if_softc;
2661	u64 retval;
2662
2663	/* PRIV_LOCK(priv); XXX not allowed */
2664	switch (cnt) {
2665	case IFCOUNTER_IPACKETS:
2666		retval = priv->stats.vport.rx_packets;
2667		break;
2668	case IFCOUNTER_IERRORS:
2669		retval = priv->stats.vport.rx_error_packets +
2670		    priv->stats.pport.alignment_err +
2671		    priv->stats.pport.check_seq_err +
2672		    priv->stats.pport.crc_align_errors +
2673		    priv->stats.pport.in_range_len_errors +
2674		    priv->stats.pport.jabbers +
2675		    priv->stats.pport.out_of_range_len +
2676		    priv->stats.pport.oversize_pkts +
2677		    priv->stats.pport.symbol_err +
2678		    priv->stats.pport.too_long_errors +
2679		    priv->stats.pport.undersize_pkts +
2680		    priv->stats.pport.unsupported_op_rx;
2681		break;
2682	case IFCOUNTER_IQDROPS:
2683		retval = priv->stats.vport.rx_out_of_buffer +
2684		    priv->stats.pport.drop_events;
2685		break;
2686	case IFCOUNTER_OPACKETS:
2687		retval = priv->stats.vport.tx_packets;
2688		break;
2689	case IFCOUNTER_OERRORS:
2690		retval = priv->stats.vport.tx_error_packets;
2691		break;
2692	case IFCOUNTER_IBYTES:
2693		retval = priv->stats.vport.rx_bytes;
2694		break;
2695	case IFCOUNTER_OBYTES:
2696		retval = priv->stats.vport.tx_bytes;
2697		break;
2698	case IFCOUNTER_IMCASTS:
2699		retval = priv->stats.vport.rx_multicast_packets;
2700		break;
2701	case IFCOUNTER_OMCASTS:
2702		retval = priv->stats.vport.tx_multicast_packets;
2703		break;
2704	case IFCOUNTER_OQDROPS:
2705		retval = priv->stats.vport.tx_queue_dropped;
2706		break;
2707	case IFCOUNTER_COLLISIONS:
2708		retval = priv->stats.pport.collisions;
2709		break;
2710	default:
2711		retval = if_get_counter_default(ifp, cnt);
2712		break;
2713	}
2714	/* PRIV_UNLOCK(priv); XXX not allowed */
2715	return (retval);
2716}
2717#endif
2718
2719static void
2720mlx5e_set_rx_mode(struct ifnet *ifp)
2721{
2722	struct mlx5e_priv *priv = ifp->if_softc;
2723
2724	queue_work(priv->wq, &priv->set_rx_mode_work);
2725}
2726
2727static int
2728mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2729{
2730	struct mlx5e_priv *priv;
2731	struct ifreq *ifr;
2732	struct ifi2creq i2c;
2733	int error = 0;
2734	int mask = 0;
2735	int size_read = 0;
2736	int module_status;
2737	int module_num;
2738	int max_mtu;
2739	uint8_t read_addr;
2740
2741	priv = ifp->if_softc;
2742
2743	/* check if detaching */
2744	if (priv == NULL || priv->gone != 0)
2745		return (ENXIO);
2746
2747	switch (command) {
2748	case SIOCSIFMTU:
2749		ifr = (struct ifreq *)data;
2750
2751		PRIV_LOCK(priv);
2752		mlx5_query_port_max_mtu(priv->mdev, &max_mtu);
2753
2754		if (ifr->ifr_mtu >= MLX5E_MTU_MIN &&
2755		    ifr->ifr_mtu <= MIN(MLX5E_MTU_MAX, max_mtu)) {
2756			int was_opened;
2757
2758			was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2759			if (was_opened)
2760				mlx5e_close_locked(ifp);
2761
2762			/* set new MTU */
2763			mlx5e_set_dev_port_mtu(ifp, ifr->ifr_mtu);
2764
2765			if (was_opened)
2766				mlx5e_open_locked(ifp);
2767		} else {
2768			error = EINVAL;
2769			if_printf(ifp, "Invalid MTU value. Min val: %d, Max val: %d\n",
2770			    MLX5E_MTU_MIN, MIN(MLX5E_MTU_MAX, max_mtu));
2771		}
2772		PRIV_UNLOCK(priv);
2773		break;
2774	case SIOCSIFFLAGS:
2775		if ((ifp->if_flags & IFF_UP) &&
2776		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2777			mlx5e_set_rx_mode(ifp);
2778			break;
2779		}
2780		PRIV_LOCK(priv);
2781		if (ifp->if_flags & IFF_UP) {
2782			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2783				if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
2784					mlx5e_open_locked(ifp);
2785				ifp->if_drv_flags |= IFF_DRV_RUNNING;
2786				mlx5_set_port_status(priv->mdev, MLX5_PORT_UP);
2787			}
2788		} else {
2789			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2790				mlx5_set_port_status(priv->mdev,
2791				    MLX5_PORT_DOWN);
2792				if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
2793					mlx5e_close_locked(ifp);
2794				mlx5e_update_carrier(priv);
2795				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2796			}
2797		}
2798		PRIV_UNLOCK(priv);
2799		break;
2800	case SIOCADDMULTI:
2801	case SIOCDELMULTI:
2802		mlx5e_set_rx_mode(ifp);
2803		break;
2804	case SIOCSIFMEDIA:
2805	case SIOCGIFMEDIA:
2806	case SIOCGIFXMEDIA:
2807		ifr = (struct ifreq *)data;
2808		error = ifmedia_ioctl(ifp, ifr, &priv->media, command);
2809		break;
2810	case SIOCSIFCAP:
2811		ifr = (struct ifreq *)data;
2812		PRIV_LOCK(priv);
2813		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2814
2815		if (mask & IFCAP_TXCSUM) {
2816			ifp->if_capenable ^= IFCAP_TXCSUM;
2817			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2818
2819			if (IFCAP_TSO4 & ifp->if_capenable &&
2820			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
2821				ifp->if_capenable &= ~IFCAP_TSO4;
2822				ifp->if_hwassist &= ~CSUM_IP_TSO;
2823				if_printf(ifp,
2824				    "tso4 disabled due to -txcsum.\n");
2825			}
2826		}
2827		if (mask & IFCAP_TXCSUM_IPV6) {
2828			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
2829			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2830
2831			if (IFCAP_TSO6 & ifp->if_capenable &&
2832			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2833				ifp->if_capenable &= ~IFCAP_TSO6;
2834				ifp->if_hwassist &= ~CSUM_IP6_TSO;
2835				if_printf(ifp,
2836				    "tso6 disabled due to -txcsum6.\n");
2837			}
2838		}
2839		if (mask & IFCAP_RXCSUM)
2840			ifp->if_capenable ^= IFCAP_RXCSUM;
2841		if (mask & IFCAP_RXCSUM_IPV6)
2842			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
2843		if (mask & IFCAP_TSO4) {
2844			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
2845			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
2846				if_printf(ifp, "enable txcsum first.\n");
2847				error = EAGAIN;
2848				goto out;
2849			}
2850			ifp->if_capenable ^= IFCAP_TSO4;
2851			ifp->if_hwassist ^= CSUM_IP_TSO;
2852		}
2853		if (mask & IFCAP_TSO6) {
2854			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
2855			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2856				if_printf(ifp, "enable txcsum6 first.\n");
2857				error = EAGAIN;
2858				goto out;
2859			}
2860			ifp->if_capenable ^= IFCAP_TSO6;
2861			ifp->if_hwassist ^= CSUM_IP6_TSO;
2862		}
2863		if (mask & IFCAP_VLAN_HWFILTER) {
2864			if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2865				mlx5e_disable_vlan_filter(priv);
2866			else
2867				mlx5e_enable_vlan_filter(priv);
2868
2869			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
2870		}
2871		if (mask & IFCAP_VLAN_HWTAGGING)
2872			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2873		if (mask & IFCAP_WOL_MAGIC)
2874			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2875
2876		VLAN_CAPABILITIES(ifp);
2877		/* turning off LRO also turns off HW LRO, if it is enabled */
2878		if (mask & IFCAP_LRO) {
2879			int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2880			bool need_restart = false;
2881
2882			ifp->if_capenable ^= IFCAP_LRO;
2883
2884			/* figure out if updating HW LRO is needed */
2885			if (!(ifp->if_capenable & IFCAP_LRO)) {
2886				if (priv->params.hw_lro_en) {
2887					priv->params.hw_lro_en = false;
2888					need_restart = true;
2889				}
2890			} else {
2891				if (priv->params.hw_lro_en == false &&
2892				    priv->params_ethtool.hw_lro != 0) {
2893					priv->params.hw_lro_en = true;
2894					need_restart = true;
2895				}
2896			}
2897			if (was_opened && need_restart) {
2898				mlx5e_close_locked(ifp);
2899				mlx5e_open_locked(ifp);
2900			}
2901		}
2902out:
2903		PRIV_UNLOCK(priv);
2904		break;
2905
2906	case SIOCGI2C:
2907		ifr = (struct ifreq *)data;
2908
2909		/*
2910		 * Copy from the user-space address ifr_data to the
2911		 * kernel-space address i2c
2912		 */
2913		error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2914		if (error)
2915			break;
2916
2917		if (i2c.len > sizeof(i2c.data)) {
2918			error = EINVAL;
2919			break;
2920		}
2921
2922		PRIV_LOCK(priv);
2923		/* Get module_num which is required for the query_eeprom */
2924		error = mlx5_query_module_num(priv->mdev, &module_num);
2925		if (error) {
2926			if_printf(ifp, "Query module num failed, eeprom "
2927			    "reading is not supported\n");
2928			error = EINVAL;
2929			goto err_i2c;
2930		}
2931		/* Check if module is present before doing an access */
2932		module_status = mlx5_query_module_status(priv->mdev, module_num);
2933		if (module_status != MLX5_MODULE_STATUS_PLUGGED_ENABLED &&
2934		    module_status != MLX5_MODULE_STATUS_PLUGGED_DISABLED) {
2935			error = EINVAL;
2936			goto err_i2c;
2937		}
2938		/*
2939		 * Currently 0xA0 and 0xA2 are the only addresses permitted.
2940		 * The internal conversion is as follows:
2941		 */
2942		if (i2c.dev_addr == 0xA0)
2943			read_addr = MLX5E_I2C_ADDR_LOW;
2944		else if (i2c.dev_addr == 0xA2)
2945			read_addr = MLX5E_I2C_ADDR_HIGH;
2946		else {
2947			if_printf(ifp, "Query eeprom failed, "
2948			    "Invalid Address: %X\n", i2c.dev_addr);
2949			error = EINVAL;
2950			goto err_i2c;
2951		}
2952		error = mlx5_query_eeprom(priv->mdev,
2953		    read_addr, MLX5E_EEPROM_LOW_PAGE,
2954		    (uint32_t)i2c.offset, (uint32_t)i2c.len, module_num,
2955		    (uint32_t *)i2c.data, &size_read);
2956		if (error) {
2957			if_printf(ifp, "Query eeprom failed, eeprom "
2958			    "reading is not supported\n");
2959			error = EINVAL;
2960			goto err_i2c;
2961		}
2962
2963		if (i2c.len > MLX5_EEPROM_MAX_BYTES) {
2964			error = mlx5_query_eeprom(priv->mdev,
2965			    read_addr, MLX5E_EEPROM_LOW_PAGE,
2966			    (uint32_t)(i2c.offset + size_read),
2967			    (uint32_t)(i2c.len - size_read), module_num,
2968			    (uint32_t *)(i2c.data + size_read), &size_read);
2969		}
2970		if (error) {
2971			if_printf(ifp, "Query eeprom failed, eeprom "
2972			    "reading is not supported\n");
2973			error = EINVAL;
2974			goto err_i2c;
2975		}
2976
2977		error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2978err_i2c:
2979		PRIV_UNLOCK(priv);
2980		break;
2981
2982	default:
2983		error = ether_ioctl(ifp, command, data);
2984		break;
2985	}
2986	return (error);
2987}
2988
2989static int
2990mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
2991{
2992	/*
2993	 * TODO: uncomment once FW really sets all these bits:
2994	 * if (!mdev->caps.eth.rss_ind_tbl_cap || !mdev->caps.eth.csum_cap ||
2995	 * !mdev->caps.eth.max_lso_cap || !mdev->caps.eth.vlan_cap ||
2996	 * !(mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD)) return
2997	 * -ENOTSUPP;
2998	 */
2999
3000	/* TODO: add more must-have features */
3001
3002	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
3003		return (-ENODEV);
3004
3005	return (0);
3006}
3007
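/*
 * Derive the maximum inline TX size from half of the BlueFlame
 * register size, minus the send WQE overhead, clamped to the
 * driver's own MLX5E_MAX_TX_INLINE limit.
 */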
3008static u16
3009mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
3010{
3011	uint32_t bf_buf_size = (1U << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2U;
3012
3013	bf_buf_size -= sizeof(struct mlx5e_tx_wqe) - 2;
3014
3015	/* verify against driver hardware limit */
3016	if (bf_buf_size > MLX5E_MAX_TX_INLINE)
3017		bf_buf_size = MLX5E_MAX_TX_INLINE;
3018
3019	return (bf_buf_size);
3020}
3021
3022static int
3023mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev,
3024    struct mlx5e_priv *priv,
3025    int num_comp_vectors)
3026{
3027	int err;
3028
3029	/*
3030	 * TODO: Consider link speed for setting "log_sq_size",
3031	 * "log_rq_size" and "cq_moderation_xxx":
3032	 */
3033	priv->params.log_sq_size =
3034	    MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
3035	priv->params.log_rq_size =
3036	    MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
3037	priv->params.rx_cq_moderation_usec =
3038	    MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
3039	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE :
3040	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
3041	priv->params.rx_cq_moderation_mode =
3042	    MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 1 : 0;
3043	priv->params.rx_cq_moderation_pkts =
3044	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
3045	priv->params.tx_cq_moderation_usec =
3046	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
3047	priv->params.tx_cq_moderation_pkts =
3048	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
3049	priv->params.min_rx_wqes =
3050	    MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
3051	priv->params.rx_hash_log_tbl_sz =
3052	    (order_base_2(num_comp_vectors) >
3053	    MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
3054	    order_base_2(num_comp_vectors) :
3055	    MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
3056	priv->params.num_tc = 1;
3057	priv->params.default_vlan_prio = 0;
3058	priv->counter_set_id = -1;
3059	priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
3060
3061	err = mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
3062	if (err)
3063		return (err);
3064
3065	/*
3066	 * HW LRO is currently off by default. Once it defaults to on, we
3067	 * will consider the HW capability: "!!MLX5_CAP_ETH(mdev, lro_cap)"
3068	 */
3069	priv->params.hw_lro_en = false;
3070	priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
3071
3072	priv->params.cqe_zipping_en = !!MLX5_CAP_GEN(mdev, cqe_compression);
3073
3074	priv->mdev = mdev;
3075	priv->params.num_channels = num_comp_vectors;
3076	priv->params.channels_rsss = 1;
3077	priv->order_base_2_num_channels = order_base_2(num_comp_vectors);
3078	priv->queue_mapping_channel_mask =
3079	    roundup_pow_of_two(num_comp_vectors) - 1;
3080	priv->num_tc = priv->params.num_tc;
3081	priv->default_vlan_prio = priv->params.default_vlan_prio;
3082
3083	INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
3084	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
3085	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
3086
3087	return (0);
3088}
3089
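/*
 * Create a physical-address (PA) memory key covering the whole
 * address space (length64), so packet buffers can be referenced by
 * physical address without per-buffer memory registration.
 */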
3090static int
3091mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
3092		  struct mlx5_core_mr *mkey)
3093{
3094	struct ifnet *ifp = priv->ifp;
3095	struct mlx5_core_dev *mdev = priv->mdev;
3096	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
3097	void *mkc;
3098	u32 *in;
3099	int err;
3100
3101	in = mlx5_vzalloc(inlen);
3102	if (in == NULL) {
3103		if_printf(ifp, "%s: failed to allocate inbox\n", __func__);
3104		return (-ENOMEM);
3105	}
3106
3107	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
3108	MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA);
3109	MLX5_SET(mkc, mkc, lw, 1);
3110	MLX5_SET(mkc, mkc, lr, 1);
3111
3112	MLX5_SET(mkc, mkc, pd, pdn);
3113	MLX5_SET(mkc, mkc, length64, 1);
3114	MLX5_SET(mkc, mkc, qpn, 0xffffff);
3115
3116	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
3117	if (err)
3118		if_printf(ifp, "%s: mlx5_core_create_mkey failed, %d\n",
3119		    __func__, err);
3120
3121	kvfree(in);
3122	return (err);
3123}
3124
3125static const char *mlx5e_vport_stats_desc[] = {
3126	MLX5E_VPORT_STATS(MLX5E_STATS_DESC)
3127};
3128
3129static const char *mlx5e_pport_stats_desc[] = {
3130	MLX5E_PPORT_STATS(MLX5E_STATS_DESC)
3131};
3132
3133static void
3134mlx5e_priv_mtx_init(struct mlx5e_priv *priv)
3135{
3136	mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF);
3137	sx_init(&priv->state_lock, "mlx5state");
3138	callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0);
3139	MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
3140}
3141
3142static void
3143mlx5e_priv_mtx_destroy(struct mlx5e_priv *priv)
3144{
3145	mtx_destroy(&priv->async_events_mtx);
3146	sx_destroy(&priv->state_lock);
3147}
3148
3149static int
3150sysctl_firmware(SYSCTL_HANDLER_ARGS)
3151{
3152	/*
3153	 * %d.%d.%d is the string format.
3154	 * fw_rev_{maj,min,sub} return u16, 2^16 = 65536.
3155	 * We need at most 5 chars to store that.
3156	 * It also has two "." separators and a terminating NUL, which
3157	 * means we need at most 18 (5*3 + 3) chars.
3158	 */
3159	char fw[18];
3160	struct mlx5e_priv *priv = arg1;
3161	int error;
3162
3163	snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev),
3164	    fw_rev_sub(priv->mdev));
3165	error = sysctl_handle_string(oidp, fw, sizeof(fw), req);
3166	return (error);
3167}
3168
3169static void
3170mlx5e_disable_tx_dma(struct mlx5e_channel *ch)
3171{
3172	int i;
3173
3174	for (i = 0; i < ch->num_tc; i++)
3175		mlx5e_drain_sq(&ch->sq[i]);
3176}
3177
3178static void
3179mlx5e_reset_sq_doorbell_record(struct mlx5e_sq *sq)
3180{
3181
3182	sq->doorbell.d32[0] = cpu_to_be32(MLX5_OPCODE_NOP);
3183	sq->doorbell.d32[1] = cpu_to_be32(sq->sqn << 8);
3184	mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
3185	sq->doorbell.d64 = 0;
3186}
3187
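/*
 * Recover a stopped send queue by moving it from the ERR state
 * through RST back to RDY, resetting the producer and consumer
 * counters and the doorbell record along the way.
 */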
3188void
3189mlx5e_resume_sq(struct mlx5e_sq *sq)
3190{
3191	int err;
3192
3193	/* check if already enabled */
3194	if (READ_ONCE(sq->running) != 0)
3195		return;
3196
3197	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_ERR,
3198	    MLX5_SQC_STATE_RST);
3199	if (err != 0) {
3200		if_printf(sq->ifp,
3201		    "mlx5e_modify_sq() from ERR to RST failed: %d\n", err);
3202	}
3203
3204	sq->cc = 0;
3205	sq->pc = 0;
3206
3207	/* reset doorbell prior to moving from RST to RDY */
3208	mlx5e_reset_sq_doorbell_record(sq);
3209
3210	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST,
3211	    MLX5_SQC_STATE_RDY);
3212	if (err != 0) {
3213		if_printf(sq->ifp,
3214		    "mlx5e_modify_sq() from RST to RDY failed: %d\n", err);
3215	}
3216
3217	sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
3218	WRITE_ONCE(sq->running, 1);
3219}
3220
3221static void
3222mlx5e_enable_tx_dma(struct mlx5e_channel *ch)
3223{
3224	int i;
3225
3226	for (i = 0; i < ch->num_tc; i++)
3227		mlx5e_resume_sq(&ch->sq[i]);
3228}
3229
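/*
 * Stop RX DMA on a channel: move the RQ from RDY to ERR, poll its
 * completion queue until the work queue is empty, and finally park
 * the RQ in the RST state.
 */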
3230static void
3231mlx5e_disable_rx_dma(struct mlx5e_channel *ch)
3232{
3233	struct mlx5e_rq *rq = &ch->rq;
3234	int err;
3235
3236	mtx_lock(&rq->mtx);
3237	rq->enabled = 0;
3238	callout_stop(&rq->watchdog);
3239	mtx_unlock(&rq->mtx);
3240
3241	callout_drain(&rq->watchdog);
3242
3243	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
3244	if (err != 0) {
3245		if_printf(rq->ifp,
3246		    "mlx5e_modify_rq() from RDY to ERR failed: %d\n", err);
3247	}
3248
3249	while (!mlx5_wq_ll_is_empty(&rq->wq)) {
3250		msleep(1);
3251		rq->cq.mcq.comp(&rq->cq.mcq);
3252	}
3253
3254	/*
3255	 * Transitioning into the RST state allows the FW to track fewer
3256	 * ERR state queues, thus reducing the receive queue flushing time.
3257	 */
3258	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_ERR, MLX5_RQC_STATE_RST);
3259	if (err != 0) {
3260		if_printf(rq->ifp,
3261		    "mlx5e_modify_rq() from ERR to RST failed: %d\n", err);
3262	}
3263}
3264
3265static void
3266mlx5e_enable_rx_dma(struct mlx5e_channel *ch)
3267{
3268	struct mlx5e_rq *rq = &ch->rq;
3269	int err;
3270
3271	rq->wq.wqe_ctr = 0;
3272	mlx5_wq_ll_update_db_record(&rq->wq);
3273	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
3274	if (err != 0) {
3275		if_printf(rq->ifp,
3276		    "mlx5e_modify_rq() from RST to RDY failed: %d\n", err);
3277	}
3278
3279	rq->enabled = 1;
3280
3281	rq->cq.mcq.comp(&rq->cq.mcq);
3282}
3283
3284void
3285mlx5e_modify_tx_dma(struct mlx5e_priv *priv, uint8_t value)
3286{
3287	int i;
3288
3289	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
3290		return;
3291
3292	for (i = 0; i < priv->params.num_channels; i++) {
3293		if (value)
3294			mlx5e_disable_tx_dma(&priv->channel[i]);
3295		else
3296			mlx5e_enable_tx_dma(&priv->channel[i]);
3297	}
3298}
3299
3300void
3301mlx5e_modify_rx_dma(struct mlx5e_priv *priv, uint8_t value)
3302{
3303	int i;
3304
3305	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
3306		return;
3307
3308	for (i = 0; i < priv->params.num_channels; i++) {
3309		if (value)
3310			mlx5e_disable_rx_dma(&priv->channel[i]);
3311		else
3312			mlx5e_enable_rx_dma(&priv->channel[i]);
3313	}
3314}
3315
3316static void
3317mlx5e_add_hw_stats(struct mlx5e_priv *priv)
3318{
3319	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
3320	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, priv, 0,
3321	    sysctl_firmware, "A", "HCA firmware version");
3322
3323	SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
3324	    OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0,
3325	    "Board ID");
3326}
3327
3328static int
3329mlx5e_sysctl_tx_priority_flow_control(SYSCTL_HANDLER_ARGS)
3330{
3331	struct mlx5e_priv *priv = arg1;
3332	uint32_t tx_pfc;
3333	uint32_t value;
3334	int error;
3335
3336	PRIV_LOCK(priv);
3337
3338	tx_pfc = priv->params.tx_priority_flow_control;
3339
3340	/* get current value */
3341	value = (tx_pfc >> arg2) & 1;
3342
3343	error = sysctl_handle_32(oidp, &value, 0, req);
3344
3345	/* range check value */
3346	if (value != 0)
3347		priv->params.tx_priority_flow_control |= (1 << arg2);
3348	else
3349		priv->params.tx_priority_flow_control &= ~(1 << arg2);
3350
3351	/* check if update is required */
3352	if (error == 0 && priv->gone == 0 &&
3353	    tx_pfc != priv->params.tx_priority_flow_control) {
3354		error = -mlx5e_set_port_pfc(priv);
3355		/* restore previous value */
3356		if (error != 0)
3357			priv->params.tx_priority_flow_control = tx_pfc;
3358	}
3359	PRIV_UNLOCK(priv);
3360
3361	return (error);
3362}
3363
3364static int
3365mlx5e_sysctl_rx_priority_flow_control(SYSCTL_HANDLER_ARGS)
3366{
3367	struct mlx5e_priv *priv = arg1;
3368	uint32_t rx_pfc;
3369	uint32_t value;
3370	int error;
3371
3372	PRIV_LOCK(priv);
3373
3374	rx_pfc = priv->params.rx_priority_flow_control;
3375
3376	/* get current value */
3377	value = (rx_pfc >> arg2) & 1;
3378
3379	error = sysctl_handle_32(oidp, &value, 0, req);
3380
3381	/* range check value */
3382	if (value != 0)
3383		priv->params.rx_priority_flow_control |= (1 << arg2);
3384	else
3385		priv->params.rx_priority_flow_control &= ~(1 << arg2);
3386
3387	/* check if update is required */
3388	if (error == 0 && priv->gone == 0 &&
3389	    rx_pfc != priv->params.rx_priority_flow_control) {
3390		error = -mlx5e_set_port_pfc(priv);
3391		/* restore previous value */
3392		if (error != 0)
3393			priv->params.rx_priority_flow_control = rx_pfc;
3394	}
3395	PRIV_UNLOCK(priv);
3396
3397	return (error);
3398}
3399
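/*
 * Set defaults and sysctl handlers for global pause frames and
 * per-priority flow control (PFC). The two are mutually exclusive;
 * if the firmware rejects the combination, PFC is cleared and the
 * global pause frame settings are reapplied.
 */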
3400static void
3401mlx5e_setup_pauseframes(struct mlx5e_priv *priv)
3402{
3403	unsigned int x;
3404	char path[96];
3405	int error;
3406
3407	/* enable pauseframes by default */
3408	priv->params.tx_pauseframe_control = 1;
3409	priv->params.rx_pauseframe_control = 1;
3410
3411	/* disable ports flow control, PFC, by default */
3412	priv->params.tx_priority_flow_control = 0;
3413	priv->params.rx_priority_flow_control = 0;
3414
3415#if (__FreeBSD_version < 1100000)
3416	/* compute path for sysctl */
3417	snprintf(path, sizeof(path), "dev.mce.%d.tx_pauseframe_control",
3418	    device_get_unit(priv->mdev->pdev->dev.bsddev));
3419
3420	/* try to fetch tunable, if any */
3421	TUNABLE_INT_FETCH(path, &priv->params.tx_pauseframe_control);
3422
3423	/* compute path for sysctl */
3424	snprintf(path, sizeof(path), "dev.mce.%d.rx_pauseframe_control",
3425	    device_get_unit(priv->mdev->pdev->dev.bsddev));
3426
3427	/* try to fetch tunable, if any */
3428	TUNABLE_INT_FETCH(path, &priv->params.rx_pauseframe_control);
3429
3430	for (x = 0; x != 8; x++) {
3431		int value;

3432		/* compute path for sysctl */
3433		snprintf(path, sizeof(path), "dev.mce.%d.tx_priority_flow_control_%u",
3434		    device_get_unit(priv->mdev->pdev->dev.bsddev), x);
3435
3436		/* try to fetch tunable, if any */
3437		if (TUNABLE_INT_FETCH(path, &value) != 0 && value != 0)
3438			priv->params.tx_priority_flow_control |= 1 << x;
3439
3440		/* compute path for sysctl */
3441		snprintf(path, sizeof(path), "dev.mce.%d.rx_priority_flow_control_%u",
3442		    device_get_unit(priv->mdev->pdev->dev.bsddev), x);
3443
3444		/* try to fetch tunable, if any */
3445		if (TUNABLE_INT_FETCH(path, &value) != 0 && value != 0)
3446			priv->params.rx_priority_flow_control |= 1 << x;
3447	}
3448#endif
3449
3450	/* register pauseframe SYSCTLs */
3451	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3452	    OID_AUTO, "tx_pauseframe_control", CTLFLAG_RDTUN,
3453	    &priv->params.tx_pauseframe_control, 0,
3454	    "Set to enable TX pause frames. Clear to disable.");
3455
3456	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3457	    OID_AUTO, "rx_pauseframe_control", CTLFLAG_RDTUN,
3458	    &priv->params.rx_pauseframe_control, 0,
3459	    "Set to enable RX pause frames. Clear to disable.");
3460
3461	/* register priority_flow control, PFC, SYSCTLs */
3462	for (x = 0; x != 8; x++) {
3463		snprintf(path, sizeof(path), "tx_priority_flow_control_%u", x);
3464
3465		SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3466		    OID_AUTO, path, CTLTYPE_UINT | CTLFLAG_RWTUN |
3467		    CTLFLAG_MPSAFE, priv, x, &mlx5e_sysctl_tx_priority_flow_control, "IU",
3468		    "Set to enable TX ports flow control frames for given priority. Clear to disable.");
3469
3470		snprintf(path, sizeof(path), "rx_priority_flow_control_%u", x);
3471
3472		SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3473		    OID_AUTO, path, CTLTYPE_UINT | CTLFLAG_RWTUN |
3474		    CTLFLAG_MPSAFE, priv, x, &mlx5e_sysctl_rx_priority_flow_control, "IU",
3475		    "Set to enable RX ports flow control frames for given priority. Clear to disable.");
3476	}
3477
3478	PRIV_LOCK(priv);
3479
3480	/* range check */
3481	priv->params.tx_pauseframe_control =
3482	    priv->params.tx_pauseframe_control ? 1 : 0;
3483	priv->params.rx_pauseframe_control =
3484	    priv->params.rx_pauseframe_control ? 1 : 0;
3485
3486	/* update firmware */
3487	error = mlx5e_set_port_pause_and_pfc(priv);
3488	if (error == -EINVAL) {
3489		if_printf(priv->ifp,
3490		    "Global pauseframes must be disabled before enabling PFC.\n");
3491		priv->params.rx_priority_flow_control = 0;
3492		priv->params.tx_priority_flow_control = 0;
3493
3494		/* update firmware */
3495		(void) mlx5e_set_port_pause_and_pfc(priv);
3496	}
3497	PRIV_UNLOCK(priv);
3498}
3499
3500static void *
3501mlx5e_create_ifp(struct mlx5_core_dev *mdev)
3502{
3503	struct ifnet *ifp;
3504	struct mlx5e_priv *priv;
3505	u8 dev_addr[ETHER_ADDR_LEN] __aligned(4);
3506	struct sysctl_oid_list *child;
3507	int ncv = mdev->priv.eq_table.num_comp_vectors;
3508	char unit[16];
3509	int err;
3510	int i;
3511	u32 eth_proto_cap;
3512
3513	if (mlx5e_check_required_hca_cap(mdev)) {
3514		mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n");
3515		return (NULL);
3516	}
3517	/*
3518	 * Try to allocate the priv and make room for worst-case
3519	 * number of channel structures:
3520	 */
3521	priv = malloc(sizeof(*priv) +
3522	    (sizeof(priv->channel[0]) * mdev->priv.eq_table.num_comp_vectors),
3523	    M_MLX5EN, M_WAITOK | M_ZERO);
3524	mlx5e_priv_mtx_init(priv);
3525
3526	ifp = priv->ifp = if_alloc(IFT_ETHER);
3527	if (ifp == NULL) {
3528		mlx5_core_err(mdev, "if_alloc() failed\n");
3529		goto err_free_priv;
3530	}
3531	ifp->if_softc = priv;
3532	if_initname(ifp, "mce", device_get_unit(mdev->pdev->dev.bsddev));
3533	ifp->if_mtu = ETHERMTU;
3534	ifp->if_init = mlx5e_open;
3535	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3536	ifp->if_ioctl = mlx5e_ioctl;
3537	ifp->if_transmit = mlx5e_xmit;
3538	ifp->if_qflush = if_qflush;
3539#if (__FreeBSD_version >= 1100000)
3540	ifp->if_get_counter = mlx5e_get_counter;
3541#endif
3542	ifp->if_snd.ifq_maxlen = ifqmaxlen;
3543	/*
3544	 * Set driver features
3545	 */
3546	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
3547	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
3548	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
3549	ifp->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
3550	ifp->if_capabilities |= IFCAP_LRO;
3551	ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO;
3552	ifp->if_capabilities |= IFCAP_HWSTATS;
3553
3554	/* set TSO limits so that we don't have to drop TX packets */
3555	ifp->if_hw_tsomax = MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
3556	ifp->if_hw_tsomaxsegcount = MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */;
3557	ifp->if_hw_tsomaxsegsize = MLX5E_MAX_TX_MBUF_SIZE;
3558
3559	ifp->if_capenable = ifp->if_capabilities;
3560	ifp->if_hwassist = 0;
3561	if (ifp->if_capenable & IFCAP_TSO)
3562		ifp->if_hwassist |= CSUM_TSO;
3563	if (ifp->if_capenable & IFCAP_TXCSUM)
3564		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
3565	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
3566		ifp->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
3567
3568	/* ifnet sysctl tree */
3569	sysctl_ctx_init(&priv->sysctl_ctx);
3570	priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev),
3571	    OID_AUTO, ifp->if_dname, CTLFLAG_RD, 0, "MLX5 ethernet - interface name");
3572	if (priv->sysctl_ifnet == NULL) {
3573		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
3574		goto err_free_sysctl;
3575	}
3576	snprintf(unit, sizeof(unit), "%d", ifp->if_dunit);
3577	priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3578	    OID_AUTO, unit, CTLFLAG_RD, 0, "MLX5 ethernet - interface unit");
3579	if (priv->sysctl_ifnet == NULL) {
3580		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
3581		goto err_free_sysctl;
3582	}
3583
3584	/* HW sysctl tree */
3585	child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev));
3586	priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child,
3587	    OID_AUTO, "hw", CTLFLAG_RD, 0, "MLX5 ethernet dev hw");
3588	if (priv->sysctl_hw == NULL) {
3589		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
3590		goto err_free_sysctl;
3591	}
3592
3593	err = mlx5e_build_ifp_priv(mdev, priv, ncv);
3594	if (err) {
3595		mlx5_core_err(mdev, "mlx5e_build_ifp_priv() failed (%d)\n", err);
3596		goto err_free_sysctl;
3597	}
3598
3599	snprintf(unit, sizeof(unit), "mce%u_wq",
3600	    device_get_unit(mdev->pdev->dev.bsddev));
3601	priv->wq = alloc_workqueue(unit, 0, 1);
3602	if (priv->wq == NULL) {
3603		if_printf(ifp, "%s: alloc_workqueue failed\n", __func__);
3604		goto err_free_sysctl;
3605	}
3606
3607	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
3608	if (err) {
3609		if_printf(ifp, "%s: mlx5_alloc_map_uar failed, %d\n",
3610		    __func__, err);
3611		goto err_free_wq;
3612	}
3613	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
3614	if (err) {
3615		if_printf(ifp, "%s: mlx5_core_alloc_pd failed, %d\n",
3616		    __func__, err);
3617		goto err_unmap_free_uar;
3618	}
3619	err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
3620	if (err) {
3621		if_printf(ifp, "%s: mlx5_alloc_transport_domain failed, %d\n",
3622		    __func__, err);
3623		goto err_dealloc_pd;
3624	}
3625	err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
3626	if (err) {
3627		if_printf(ifp, "%s: mlx5e_create_mkey failed, %d\n",
3628		    __func__, err);
3629		goto err_dealloc_transport_domain;
3630	}
3631	mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr);
3632
3633	/* check if we should generate a random MAC address */
3634	if (MLX5_CAP_GEN(priv->mdev, vport_group_manager) == 0 &&
3635	    is_zero_ether_addr(dev_addr)) {
3636		random_ether_addr(dev_addr);
3637		if_printf(ifp, "Assigned random MAC address\n");
3638	}
3639
3640	/* set default MTU */
3641	mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu);
3642
3643	/* Set default media status */
3644	priv->media_status_last = IFM_AVALID;
3645	priv->media_active_last = IFM_ETHER | IFM_AUTO |
3646	    IFM_ETH_RXPAUSE | IFM_FDX;
3647
3648	/* setup default pauseframes configuration */
3649	mlx5e_setup_pauseframes(priv);
3650
3651	err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
3652	if (err) {
3653		eth_proto_cap = 0;
3654		if_printf(ifp, "%s: Query port media capability failed, %d\n",
3655		    __func__, err);
3656	}
3657
3658	/* Setup supported medias */
3659	ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
3660	    mlx5e_media_change, mlx5e_media_status);
3661
3662	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
3663		if (mlx5e_mode_table[i].baudrate == 0)
3664			continue;
3665		if (MLX5E_PROT_MASK(i) & eth_proto_cap) {
3666			ifmedia_add(&priv->media,
3667			    mlx5e_mode_table[i].subtype |
3668			    IFM_ETHER, 0, NULL);
3669			ifmedia_add(&priv->media,
3670			    mlx5e_mode_table[i].subtype |
3671			    IFM_ETHER | IFM_FDX |
3672			    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
3673		}
3674	}
3675
3676	/* Additional supported medias */
3677	ifmedia_add(&priv->media, IFM_10G_LR | IFM_ETHER, 0, NULL);
3678	ifmedia_add(&priv->media, IFM_10G_LR |
3679	    IFM_ETHER | IFM_FDX |
3680	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
3681
3682	ifmedia_add(&priv->media, IFM_40G_ER4 | IFM_ETHER, 0, NULL);
3683	ifmedia_add(&priv->media, IFM_40G_ER4 |
3684	    IFM_ETHER | IFM_FDX |
3685	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
3686
3687	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
3688	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
3689	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
3690
3691	/* Set autoselect by default */
3692	ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
3693	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
3694	ether_ifattach(ifp, dev_addr);
3695
3696	/* Register for VLAN events */
3697	priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
3698	    mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
3699	priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
3700	    mlx5e_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);
3701
3702	/* Link is down by default */
3703	if_link_state_change(ifp, LINK_STATE_DOWN);
3704
3705	mlx5e_enable_async_events(priv);
3706
3707	mlx5e_add_hw_stats(priv);
3708
3709	mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3710	    "vstats", mlx5e_vport_stats_desc, MLX5E_VPORT_STATS_NUM,
3711	    priv->stats.vport.arg);
3712
3713	mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3714	    "pstats", mlx5e_pport_stats_desc, MLX5E_PPORT_STATS_NUM,
3715	    priv->stats.pport.arg);
3716
3717	mlx5e_create_ethtool(priv);
3718
3719	mtx_lock(&priv->async_events_mtx);
3720	mlx5e_update_stats(priv);
3721	mtx_unlock(&priv->async_events_mtx);
3722
3723	return (priv);
3724
3725err_dealloc_transport_domain:
3726	mlx5_dealloc_transport_domain(mdev, priv->tdn);
3727
3728err_dealloc_pd:
3729	mlx5_core_dealloc_pd(mdev, priv->pdn);
3730
3731err_unmap_free_uar:
3732	mlx5_unmap_free_uar(mdev, &priv->cq_uar);
3733
3734err_free_wq:
3735	destroy_workqueue(priv->wq);
3736
3737err_free_sysctl:
3738	sysctl_ctx_free(&priv->sysctl_ctx);
3739	if (priv->sysctl_debug)
3740		sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
3741	if_free(ifp);
3742
3743err_free_priv:
3744	mlx5e_priv_mtx_destroy(priv);
3745	free(priv, M_MLX5EN);
3746	return (NULL);
3747}
3748
3749static void
3750mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
3751{
3752	struct mlx5e_priv *priv = vpriv;
3753	struct ifnet *ifp = priv->ifp;
3754
3755	/* don't allow more IOCTLs */
3756	priv->gone = 1;
3757
3758	/* XXX wait a bit to allow IOCTL handlers to complete */
3759	pause("W", hz);
3760
3761	/* stop watchdog timer */
3762	callout_drain(&priv->watchdog);
3763
3764	if (priv->vlan_attach != NULL)
3765		EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
3766	if (priv->vlan_detach != NULL)
3767		EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);
3768
3769	/* make sure device gets closed */
3770	PRIV_LOCK(priv);
3771	mlx5e_close_locked(ifp);
3772	PRIV_UNLOCK(priv);
3773
3774	/* unregister device */
3775	ifmedia_removeall(&priv->media);
3776	ether_ifdetach(ifp);
3777	if_free(ifp);
3778
3779	/* destroy all remaining sysctl nodes */
3780	sysctl_ctx_free(&priv->stats.vport.ctx);
3781	sysctl_ctx_free(&priv->stats.pport.ctx);
3782	sysctl_ctx_free(&priv->sysctl_ctx);
3783	if (priv->sysctl_debug)
3784		sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
3785
3786	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
3787	mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
3788	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
3789	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
3790	mlx5e_disable_async_events(priv);
3791	destroy_workqueue(priv->wq);
3792	mlx5e_priv_mtx_destroy(priv);
3793	free(priv, M_MLX5EN);
3794}
3795
3796static void *
3797mlx5e_get_ifp(void *vpriv)
3798{
3799	struct mlx5e_priv *priv = vpriv;
3800
3801	return (priv->ifp);
3802}
3803
3804static struct mlx5_interface mlx5e_interface = {
3805	.add = mlx5e_create_ifp,
3806	.remove = mlx5e_destroy_ifp,
3807	.event = mlx5e_async_event,
3808	.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
3809	.get_dev = mlx5e_get_ifp,
3810};
3811
3812void
3813mlx5e_init(void)
3814{
3815	mlx5_register_interface(&mlx5e_interface);
3816}
3817
3818void
3819mlx5e_cleanup(void)
3820{
3821	mlx5_unregister_interface(&mlx5e_interface);
3822}
3823
3824static void
3825mlx5e_show_version(void __unused *arg)
3826{
3827
3828	printf("%s", mlx5e_version);
3829}
3830SYSINIT(mlx5e_show_version, SI_SUB_DRIVERS, SI_ORDER_ANY, mlx5e_show_version, NULL);
3831
3832module_init_order(mlx5e_init, SI_ORDER_THIRD);
3833module_exit_order(mlx5e_cleanup, SI_ORDER_THIRD);
3834
3835#if (__FreeBSD_version >= 1100000)
3836MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1);
3837#endif
3838MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1);
3839MODULE_VERSION(mlx5en, 1);
3840