1/*-
2 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/mlx5_en_main.c 359847 2020-04-13 08:33:49Z hselasky $
26 */
27
28#include "en.h"
29
30#include <sys/sockio.h>
31#include <machine/atomic.h>
32
33#define	ETH_DRIVER_VERSION	"3.2.1"
34char mlx5e_version[] = "Mellanox Ethernet driver"
35    " (" ETH_DRIVER_VERSION ")";
36
37static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs);
38
39struct mlx5e_channel_param {
40	struct mlx5e_rq_param rq;
41	struct mlx5e_sq_param sq;
42	struct mlx5e_cq_param rx_cq;
43	struct mlx5e_cq_param tx_cq;
44};
45
46static const struct {
47	u32	subtype;
48	u64	baudrate;
49}	mlx5e_mode_table[MLX5E_LINK_MODES_NUMBER] = {
50
51	[MLX5E_1000BASE_CX_SGMII] = {
52		.subtype = IFM_1000_CX_SGMII,
53		.baudrate = IF_Mbps(1000ULL),
54	},
55	[MLX5E_1000BASE_KX] = {
56		.subtype = IFM_1000_KX,
57		.baudrate = IF_Mbps(1000ULL),
58	},
59	[MLX5E_10GBASE_CX4] = {
60		.subtype = IFM_10G_CX4,
61		.baudrate = IF_Gbps(10ULL),
62	},
63	[MLX5E_10GBASE_KX4] = {
64		.subtype = IFM_10G_KX4,
65		.baudrate = IF_Gbps(10ULL),
66	},
67	[MLX5E_10GBASE_KR] = {
68		.subtype = IFM_10G_KR,
69		.baudrate = IF_Gbps(10ULL),
70	},
71	[MLX5E_20GBASE_KR2] = {
72		.subtype = IFM_20G_KR2,
73		.baudrate = IF_Gbps(20ULL),
74	},
75	[MLX5E_40GBASE_CR4] = {
76		.subtype = IFM_40G_CR4,
77		.baudrate = IF_Gbps(40ULL),
78	},
79	[MLX5E_40GBASE_KR4] = {
80		.subtype = IFM_40G_KR4,
81		.baudrate = IF_Gbps(40ULL),
82	},
83	[MLX5E_56GBASE_R4] = {
84		.subtype = IFM_56G_R4,
85		.baudrate = IF_Gbps(56ULL),
86	},
87	[MLX5E_10GBASE_CR] = {
88		.subtype = IFM_10G_CR1,
89		.baudrate = IF_Gbps(10ULL),
90	},
91	[MLX5E_10GBASE_SR] = {
92		.subtype = IFM_10G_SR,
93		.baudrate = IF_Gbps(10ULL),
94	},
95	[MLX5E_10GBASE_LR] = {
96		.subtype = IFM_10G_LR,
97		.baudrate = IF_Gbps(10ULL),
98	},
99	[MLX5E_40GBASE_SR4] = {
100		.subtype = IFM_40G_SR4,
101		.baudrate = IF_Gbps(40ULL),
102	},
103	[MLX5E_40GBASE_LR4] = {
104		.subtype = IFM_40G_LR4,
105		.baudrate = IF_Gbps(40ULL),
106	},
107	[MLX5E_100GBASE_CR4] = {
108		.subtype = IFM_100G_CR4,
109		.baudrate = IF_Gbps(100ULL),
110	},
111	[MLX5E_100GBASE_SR4] = {
112		.subtype = IFM_100G_SR4,
113		.baudrate = IF_Gbps(100ULL),
114	},
115	[MLX5E_100GBASE_KR4] = {
116		.subtype = IFM_100G_KR4,
117		.baudrate = IF_Gbps(100ULL),
118	},
119	[MLX5E_100GBASE_LR4] = {
120		.subtype = IFM_100G_LR4,
121		.baudrate = IF_Gbps(100ULL),
122	},
123	[MLX5E_100BASE_TX] = {
124		.subtype = IFM_100_TX,
125		.baudrate = IF_Mbps(100ULL),
126	},
127	[MLX5E_100BASE_T] = {
128		.subtype = IFM_100_T,
129		.baudrate = IF_Mbps(100ULL),
130	},
131	[MLX5E_10GBASE_T] = {
132		.subtype = IFM_10G_T,
133		.baudrate = IF_Gbps(10ULL),
134	},
135	[MLX5E_25GBASE_CR] = {
136		.subtype = IFM_25G_CR,
137		.baudrate = IF_Gbps(25ULL),
138	},
139	[MLX5E_25GBASE_KR] = {
140		.subtype = IFM_25G_KR,
141		.baudrate = IF_Gbps(25ULL),
142	},
143	[MLX5E_25GBASE_SR] = {
144		.subtype = IFM_25G_SR,
145		.baudrate = IF_Gbps(25ULL),
146	},
147	[MLX5E_50GBASE_CR2] = {
148		.subtype = IFM_50G_CR2,
149		.baudrate = IF_Gbps(50ULL),
150	},
151	[MLX5E_50GBASE_KR2] = {
152		.subtype = IFM_50G_KR2,
153		.baudrate = IF_Gbps(50ULL),
154	},
155};
156
157MALLOC_DEFINE(M_MLX5EN, "MLX5EN", "MLX5 Ethernet");
158
159static void
160mlx5e_update_carrier(struct mlx5e_priv *priv)
161{
162	struct mlx5_core_dev *mdev = priv->mdev;
163	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
164	u32 eth_proto_oper;
165	int error;
166	u8 port_state;
167	u8 i;
168
169	port_state = mlx5_query_vport_state(mdev,
170	    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
171
172	if (port_state == VPORT_STATE_UP) {
173		priv->media_status_last |= IFM_ACTIVE;
174	} else {
175		priv->media_status_last &= ~IFM_ACTIVE;
176		priv->media_active_last = IFM_ETHER;
177		if_link_state_change(priv->ifp, LINK_STATE_DOWN);
178		return;
179	}
180
181	error = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN);
182	if (error) {
183		priv->media_active_last = IFM_ETHER;
184		priv->ifp->if_baudrate = 1;
185		if_printf(priv->ifp, "%s: query port ptys failed: 0x%x\n",
186		    __func__, error);
187		return;
188	}
189	eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
190
191	for (i = 0; i != MLX5E_LINK_MODES_NUMBER; i++) {
192		if (mlx5e_mode_table[i].baudrate == 0)
193			continue;
194		if (MLX5E_PROT_MASK(i) & eth_proto_oper) {
195			priv->ifp->if_baudrate =
196			    mlx5e_mode_table[i].baudrate;
197			priv->media_active_last =
198			    mlx5e_mode_table[i].subtype | IFM_ETHER | IFM_FDX;
199		}
200	}
201	if_link_state_change(priv->ifp, LINK_STATE_UP);
202}
203
204static void
205mlx5e_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
206{
207	struct mlx5e_priv *priv = dev->if_softc;
208
209	ifmr->ifm_status = priv->media_status_last;
210	ifmr->ifm_active = priv->media_active_last |
211	    (priv->params.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) |
212	    (priv->params.tx_pauseframe_control ? IFM_ETH_TXPAUSE : 0);
213
214}
215
216static u32
217mlx5e_find_link_mode(u32 subtype)
218{
219	u32 i;
220	u32 link_mode = 0;
221
222	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
223		if (mlx5e_mode_table[i].baudrate == 0)
224			continue;
225		if (mlx5e_mode_table[i].subtype == subtype)
226			link_mode |= MLX5E_PROT_MASK(i);
227	}
228
229	return (link_mode);
230}
231
232static int
233mlx5e_media_change(struct ifnet *dev)
234{
235	struct mlx5e_priv *priv = dev->if_softc;
236	struct mlx5_core_dev *mdev = priv->mdev;
237	u32 eth_proto_cap;
238	u32 link_mode;
239	int was_opened;
240	int locked;
241	int error;
242
243	locked = PRIV_LOCKED(priv);
244	if (!locked)
245		PRIV_LOCK(priv);
246
247	if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) {
248		error = EINVAL;
249		goto done;
250	}
251	link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media));
252
253	/* query supported capabilities */
254	error = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
255	if (error != 0) {
256		if_printf(dev, "Query port media capability failed\n");
257		goto done;
258	}
259	/* check for autoselect */
260	if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO) {
261		link_mode = eth_proto_cap;
262		if (link_mode == 0) {
263			if_printf(dev, "Port media capability is zero\n");
264			error = EINVAL;
265			goto done;
266		}
267	} else {
268		link_mode = link_mode & eth_proto_cap;
269		if (link_mode == 0) {
270			if_printf(dev, "Not supported link mode requested\n");
271			error = EINVAL;
272			goto done;
273		}
274	}
275	/* update pauseframe control bits */
276	priv->params.rx_pauseframe_control =
277	    (priv->media.ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0;
278	priv->params.tx_pauseframe_control =
279	    (priv->media.ifm_media & IFM_ETH_TXPAUSE) ? 1 : 0;
280
281	/* check if device is opened */
282	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
283
284	/* reconfigure the hardware */
285	mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
286	mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN);
287	mlx5_set_port_pause(mdev, 1,
288	    priv->params.rx_pauseframe_control,
289	    priv->params.tx_pauseframe_control);
290	if (was_opened)
291		mlx5_set_port_status(mdev, MLX5_PORT_UP);
292
293done:
294	if (!locked)
295		PRIV_UNLOCK(priv);
296	return (error);
297}
298
299static void
300mlx5e_update_carrier_work(struct work_struct *work)
301{
302	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
303	    update_carrier_work);
304
305	PRIV_LOCK(priv);
306	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
307		mlx5e_update_carrier(priv);
308	PRIV_UNLOCK(priv);
309}
310
311/*
312 * This function reads the physical port counters from the firmware
313 * using a pre-defined layout defined by various MLX5E_PPORT_XXX()
314 * macros. The output is converted from big-endian 64-bit values into
315 * host endian ones and stored in the "priv->stats.pport" structure.
316 */
317static void
318mlx5e_update_pport_counters(struct mlx5e_priv *priv)
319{
320	struct mlx5_core_dev *mdev = priv->mdev;
321	struct mlx5e_pport_stats *s = &priv->stats.pport;
322	struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
323	u32 *in;
324	u32 *out;
325	const u64 *ptr;
326	unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
327	unsigned x;
328	unsigned y;
329
330	/* allocate firmware request structures */
331	in = mlx5_vzalloc(sz);
332	out = mlx5_vzalloc(sz);
333	if (in == NULL || out == NULL)
334		goto free_out;
335
336	/*
337	 * Get pointer to the 64-bit counter set which is located at a
338	 * fixed offset in the output firmware request structure:
339	 */
340	ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set);
341
342	MLX5_SET(ppcnt_reg, in, local_port, 1);
343
344	/* read IEEE802_3 counter group using predefined counter layout */
345	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
346	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
347	for (x = y = 0; x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++)
348		s->arg[y] = be64toh(ptr[x]);
349
350	/* read RFC2819 counter group using predefined counter layout */
351	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
352	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
353	for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++)
354		s->arg[y] = be64toh(ptr[x]);
355	for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM +
356	    MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++)
357		s_debug->arg[y] = be64toh(ptr[x]);
358
359	/* read RFC2863 counter group using predefined counter layout */
360	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
361	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
362	for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++)
363		s_debug->arg[y] = be64toh(ptr[x]);
364
365	/* read physical layer stats counter group using predefined counter layout */
366	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
367	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
368	for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++)
369		s_debug->arg[y] = be64toh(ptr[x]);
370free_out:
371	/* free firmware request structures */
372	kvfree(in);
373	kvfree(out);
374}
375
376/*
377 * This function is called regularly to collect all statistics
378 * counters from the firmware. The values can be viewed through the
379 * sysctl interface. Execution is serialized using the priv's global
380 * configuration lock.
381 */
382static void
383mlx5e_update_stats_work(struct work_struct *work)
384{
385	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
386	    update_stats_work);
387	struct mlx5_core_dev *mdev = priv->mdev;
388	struct mlx5e_vport_stats *s = &priv->stats.vport;
389	struct mlx5e_rq_stats *rq_stats;
390	struct mlx5e_sq_stats *sq_stats;
391	struct buf_ring *sq_br;
392#if (__FreeBSD_version < 1100000)
393	struct ifnet *ifp = priv->ifp;
394#endif
395
396	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
397	u32 *out;
398	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
399	u64 tso_packets = 0;
400	u64 tso_bytes = 0;
401	u64 tx_queue_dropped = 0;
402	u64 tx_defragged = 0;
403	u64 tx_offload_none = 0;
404	u64 lro_packets = 0;
405	u64 lro_bytes = 0;
406	u64 sw_lro_queued = 0;
407	u64 sw_lro_flushed = 0;
408	u64 rx_csum_none = 0;
409	u64 rx_wqe_err = 0;
410	u32 rx_out_of_buffer = 0;
411	int i;
412	int j;
413
414	PRIV_LOCK(priv);
415	out = mlx5_vzalloc(outlen);
416	if (out == NULL)
417		goto free_out;
418	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
419		goto free_out;
420
421	/* Collect firts the SW counters and then HW for consistency */
422	for (i = 0; i < priv->params.num_channels; i++) {
423		struct mlx5e_rq *rq = &priv->channel[i]->rq;
424
425		rq_stats = &priv->channel[i]->rq.stats;
426
427		/* collect stats from LRO */
428		rq_stats->sw_lro_queued = rq->lro.lro_queued;
429		rq_stats->sw_lro_flushed = rq->lro.lro_flushed;
430		sw_lro_queued += rq_stats->sw_lro_queued;
431		sw_lro_flushed += rq_stats->sw_lro_flushed;
432		lro_packets += rq_stats->lro_packets;
433		lro_bytes += rq_stats->lro_bytes;
434		rx_csum_none += rq_stats->csum_none;
435		rx_wqe_err += rq_stats->wqe_err;
436
437		for (j = 0; j < priv->num_tc; j++) {
438			sq_stats = &priv->channel[i]->sq[j].stats;
439			sq_br = priv->channel[i]->sq[j].br;
440
441			tso_packets += sq_stats->tso_packets;
442			tso_bytes += sq_stats->tso_bytes;
443			tx_queue_dropped += sq_stats->dropped;
444			if (sq_br != NULL)
445				tx_queue_dropped += sq_br->br_drops;
446			tx_defragged += sq_stats->defragged;
447			tx_offload_none += sq_stats->csum_offload_none;
448		}
449	}
450
451	/* update counters */
452	s->tso_packets = tso_packets;
453	s->tso_bytes = tso_bytes;
454	s->tx_queue_dropped = tx_queue_dropped;
455	s->tx_defragged = tx_defragged;
456	s->lro_packets = lro_packets;
457	s->lro_bytes = lro_bytes;
458	s->sw_lro_queued = sw_lro_queued;
459	s->sw_lro_flushed = sw_lro_flushed;
460	s->rx_csum_none = rx_csum_none;
461	s->rx_wqe_err = rx_wqe_err;
462
463	/* HW counters */
464	memset(in, 0, sizeof(in));
465
466	MLX5_SET(query_vport_counter_in, in, opcode,
467	    MLX5_CMD_OP_QUERY_VPORT_COUNTER);
468	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
469	MLX5_SET(query_vport_counter_in, in, other_vport, 0);
470
471	memset(out, 0, outlen);
472
473	/* get number of out-of-buffer drops first */
474	if (mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id,
475	    &rx_out_of_buffer))
476		goto free_out;
477
478	/* accumulate difference into a 64-bit counter */
479	s->rx_out_of_buffer += (u64)(u32)(rx_out_of_buffer - s->rx_out_of_buffer_prev);
480	s->rx_out_of_buffer_prev = rx_out_of_buffer;
481
482	/* get port statistics */
483	if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
484		goto free_out;
485
486#define	MLX5_GET_CTR(out, x) \
487	MLX5_GET64(query_vport_counter_out, out, x)
488
489	s->rx_error_packets =
490	    MLX5_GET_CTR(out, received_errors.packets);
491	s->rx_error_bytes =
492	    MLX5_GET_CTR(out, received_errors.octets);
493	s->tx_error_packets =
494	    MLX5_GET_CTR(out, transmit_errors.packets);
495	s->tx_error_bytes =
496	    MLX5_GET_CTR(out, transmit_errors.octets);
497
498	s->rx_unicast_packets =
499	    MLX5_GET_CTR(out, received_eth_unicast.packets);
500	s->rx_unicast_bytes =
501	    MLX5_GET_CTR(out, received_eth_unicast.octets);
502	s->tx_unicast_packets =
503	    MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
504	s->tx_unicast_bytes =
505	    MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
506
507	s->rx_multicast_packets =
508	    MLX5_GET_CTR(out, received_eth_multicast.packets);
509	s->rx_multicast_bytes =
510	    MLX5_GET_CTR(out, received_eth_multicast.octets);
511	s->tx_multicast_packets =
512	    MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
513	s->tx_multicast_bytes =
514	    MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
515
516	s->rx_broadcast_packets =
517	    MLX5_GET_CTR(out, received_eth_broadcast.packets);
518	s->rx_broadcast_bytes =
519	    MLX5_GET_CTR(out, received_eth_broadcast.octets);
520	s->tx_broadcast_packets =
521	    MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
522	s->tx_broadcast_bytes =
523	    MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
524
525	s->rx_packets =
526	    s->rx_unicast_packets +
527	    s->rx_multicast_packets +
528	    s->rx_broadcast_packets -
529	    s->rx_out_of_buffer;
530	s->rx_bytes =
531	    s->rx_unicast_bytes +
532	    s->rx_multicast_bytes +
533	    s->rx_broadcast_bytes;
534	s->tx_packets =
535	    s->tx_unicast_packets +
536	    s->tx_multicast_packets +
537	    s->tx_broadcast_packets;
538	s->tx_bytes =
539	    s->tx_unicast_bytes +
540	    s->tx_multicast_bytes +
541	    s->tx_broadcast_bytes;
542
543	/* Update calculated offload counters */
544	s->tx_csum_offload = s->tx_packets - tx_offload_none;
545	s->rx_csum_good = s->rx_packets - s->rx_csum_none;
546
547	/* Get physical port counters */
548	mlx5e_update_pport_counters(priv);
549
550#if (__FreeBSD_version < 1100000)
551	/* no get_counters interface in fbsd 10 */
552	ifp->if_ipackets = s->rx_packets;
553	ifp->if_ierrors = s->rx_error_packets +
554	    priv->stats.pport.alignment_err +
555	    priv->stats.pport.check_seq_err +
556	    priv->stats.pport.crc_align_errors +
557	    priv->stats.pport.in_range_len_errors +
558	    priv->stats.pport.jabbers +
559	    priv->stats.pport.out_of_range_len +
560	    priv->stats.pport.oversize_pkts +
561	    priv->stats.pport.symbol_err +
562	    priv->stats.pport.too_long_errors +
563	    priv->stats.pport.undersize_pkts +
564	    priv->stats.pport.unsupported_op_rx;
565	ifp->if_iqdrops = s->rx_out_of_buffer +
566	    priv->stats.pport.drop_events;
567	ifp->if_opackets = s->tx_packets;
568	ifp->if_oerrors = s->tx_error_packets;
569	ifp->if_snd.ifq_drops = s->tx_queue_dropped;
570	ifp->if_ibytes = s->rx_bytes;
571	ifp->if_obytes = s->tx_bytes;
572	ifp->if_collisions =
573	    priv->stats.pport.collisions;
574#endif
575
576free_out:
577	kvfree(out);
578
579	/* Update diagnostics, if any */
580	if (priv->params_ethtool.diag_pci_enable ||
581	    priv->params_ethtool.diag_general_enable) {
582		int error = mlx5_core_get_diagnostics_full(mdev,
583		    priv->params_ethtool.diag_pci_enable ? &priv->params_pci : NULL,
584		    priv->params_ethtool.diag_general_enable ? &priv->params_general : NULL);
585		if (error != 0)
586			if_printf(priv->ifp, "Failed reading diagnostics: %d\n", error);
587	}
588	PRIV_UNLOCK(priv);
589}
590
591static void
592mlx5e_update_stats(void *arg)
593{
594	struct mlx5e_priv *priv = arg;
595
596	schedule_work(&priv->update_stats_work);
597
598	callout_reset(&priv->watchdog, hz, &mlx5e_update_stats, priv);
599}
600
601static void
602mlx5e_async_event_sub(struct mlx5e_priv *priv,
603    enum mlx5_dev_event event)
604{
605	switch (event) {
606	case MLX5_DEV_EVENT_PORT_UP:
607	case MLX5_DEV_EVENT_PORT_DOWN:
608		schedule_work(&priv->update_carrier_work);
609		break;
610
611	default:
612		break;
613	}
614}
615
616static void
617mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
618    enum mlx5_dev_event event, unsigned long param)
619{
620	struct mlx5e_priv *priv = vpriv;
621
622	mtx_lock(&priv->async_events_mtx);
623	if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
624		mlx5e_async_event_sub(priv, event);
625	mtx_unlock(&priv->async_events_mtx);
626}
627
628static void
629mlx5e_enable_async_events(struct mlx5e_priv *priv)
630{
631	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
632}
633
634static void
635mlx5e_disable_async_events(struct mlx5e_priv *priv)
636{
637	mtx_lock(&priv->async_events_mtx);
638	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
639	mtx_unlock(&priv->async_events_mtx);
640}
641
642static const char *mlx5e_rq_stats_desc[] = {
643	MLX5E_RQ_STATS(MLX5E_STATS_DESC)
644};
645
646static int
647mlx5e_create_rq(struct mlx5e_channel *c,
648    struct mlx5e_rq_param *param,
649    struct mlx5e_rq *rq)
650{
651	struct mlx5e_priv *priv = c->priv;
652	struct mlx5_core_dev *mdev = priv->mdev;
653	char buffer[16];
654	void *rqc = param->rqc;
655	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
656	int wq_sz;
657	int err;
658	int i;
659	u32 nsegs, wqe_sz;
660
661	err = mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
662	if (err != 0)
663		goto done;
664
665	/* Create DMA descriptor TAG */
666	if ((err = -bus_dma_tag_create(
667	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
668	    1,				/* any alignment */
669	    0,				/* no boundary */
670	    BUS_SPACE_MAXADDR,		/* lowaddr */
671	    BUS_SPACE_MAXADDR,		/* highaddr */
672	    NULL, NULL,			/* filter, filterarg */
673	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsize */
674	    nsegs,			/* nsegments */
675	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsegsize */
676	    0,				/* flags */
677	    NULL, NULL,			/* lockfunc, lockfuncarg */
678	    &rq->dma_tag)))
679		goto done;
680
681	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
682	    &rq->wq_ctrl);
683	if (err)
684		goto err_free_dma_tag;
685
686	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
687
688	err = mlx5e_get_wqe_sz(priv, &rq->wqe_sz, &rq->nsegs);
689	if (err != 0)
690		goto err_rq_wq_destroy;
691
692	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
693	rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
694	for (i = 0; i != wq_sz; i++) {
695		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
696#if (MLX5E_MAX_RX_SEGS == 1)
697		uint32_t byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
698#else
699		int j;
700#endif
701
702		err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map);
703		if (err != 0) {
704			while (i--)
705				bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
706			goto err_rq_mbuf_free;
707		}
708
709		/* set value for constant fields */
710#if (MLX5E_MAX_RX_SEGS == 1)
711		wqe->data[0].lkey = c->mkey_be;
712		wqe->data[0].byte_count = cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
713#else
714		for (j = 0; j < rq->nsegs; j++)
715			wqe->data[j].lkey = c->mkey_be;
716#endif
717	}
718
719	rq->ifp = c->ifp;
720	rq->channel = c;
721	rq->ix = c->ix;
722
723	snprintf(buffer, sizeof(buffer), "rxstat%d", c->ix);
724	mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
725	    buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM,
726	    rq->stats.arg);
727
728#ifdef HAVE_TURBO_LRO
729	if (tcp_tlro_init(&rq->lro, c->ifp, MLX5E_BUDGET_MAX) != 0)
730		rq->lro.mbuf = NULL;
731#else
732	if (tcp_lro_init(&rq->lro))
733		rq->lro.lro_cnt = 0;
734	else
735		rq->lro.ifp = c->ifp;
736#endif
737	return (0);
738
739err_rq_mbuf_free:
740	free(rq->mbuf, M_MLX5EN);
741err_rq_wq_destroy:
742	mlx5_wq_destroy(&rq->wq_ctrl);
743err_free_dma_tag:
744	bus_dma_tag_destroy(rq->dma_tag);
745done:
746	return (err);
747}
748
749static void
750mlx5e_destroy_rq(struct mlx5e_rq *rq)
751{
752	int wq_sz;
753	int i;
754
755	/* destroy all sysctl nodes */
756	sysctl_ctx_free(&rq->stats.ctx);
757
758	/* free leftover LRO packets, if any */
759#ifdef HAVE_TURBO_LRO
760	tcp_tlro_free(&rq->lro);
761#else
762	tcp_lro_free(&rq->lro);
763#endif
764	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
765	for (i = 0; i != wq_sz; i++) {
766		if (rq->mbuf[i].mbuf != NULL) {
767			bus_dmamap_unload(rq->dma_tag,
768			    rq->mbuf[i].dma_map);
769			m_freem(rq->mbuf[i].mbuf);
770		}
771		bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
772	}
773	free(rq->mbuf, M_MLX5EN);
774	mlx5_wq_destroy(&rq->wq_ctrl);
775	bus_dma_tag_destroy(rq->dma_tag);
776}
777
778static int
779mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
780{
781	struct mlx5e_channel *c = rq->channel;
782	struct mlx5e_priv *priv = c->priv;
783	struct mlx5_core_dev *mdev = priv->mdev;
784
785	void *in;
786	void *rqc;
787	void *wq;
788	int inlen;
789	int err;
790
791	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
792	    sizeof(u64) * rq->wq_ctrl.buf.npages;
793	in = mlx5_vzalloc(inlen);
794	if (in == NULL)
795		return (-ENOMEM);
796
797	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
798	wq = MLX5_ADDR_OF(rqc, rqc, wq);
799
800	memcpy(rqc, param->rqc, sizeof(param->rqc));
801
802	MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
803	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
804	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
805	if (priv->counter_set_id >= 0)
806		MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id);
807	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
808	    PAGE_SHIFT);
809	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
810
811	mlx5_fill_page_array(&rq->wq_ctrl.buf,
812	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));
813
814	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
815
816	kvfree(in);
817
818	return (err);
819}
820
821static int
822mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
823{
824	struct mlx5e_channel *c = rq->channel;
825	struct mlx5e_priv *priv = c->priv;
826	struct mlx5_core_dev *mdev = priv->mdev;
827
828	void *in;
829	void *rqc;
830	int inlen;
831	int err;
832
833	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
834	in = mlx5_vzalloc(inlen);
835	if (in == NULL)
836		return (-ENOMEM);
837
838	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
839
840	MLX5_SET(modify_rq_in, in, rqn, rq->rqn);
841	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
842	MLX5_SET(rqc, rqc, state, next_state);
843
844	err = mlx5_core_modify_rq(mdev, in, inlen);
845
846	kvfree(in);
847
848	return (err);
849}
850
851static void
852mlx5e_disable_rq(struct mlx5e_rq *rq)
853{
854	struct mlx5e_channel *c = rq->channel;
855	struct mlx5e_priv *priv = c->priv;
856	struct mlx5_core_dev *mdev = priv->mdev;
857
858	mlx5_core_destroy_rq(mdev, rq->rqn);
859}
860
861static int
862mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
863{
864	struct mlx5e_channel *c = rq->channel;
865	struct mlx5e_priv *priv = c->priv;
866	struct mlx5_wq_ll *wq = &rq->wq;
867	int i;
868
869	for (i = 0; i < 1000; i++) {
870		if (wq->cur_sz >= priv->params.min_rx_wqes)
871			return (0);
872
873		msleep(4);
874	}
875	return (-ETIMEDOUT);
876}
877
878static int
879mlx5e_open_rq(struct mlx5e_channel *c,
880    struct mlx5e_rq_param *param,
881    struct mlx5e_rq *rq)
882{
883	int err;
884
885	err = mlx5e_create_rq(c, param, rq);
886	if (err)
887		return (err);
888
889	err = mlx5e_enable_rq(rq, param);
890	if (err)
891		goto err_destroy_rq;
892
893	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
894	if (err)
895		goto err_disable_rq;
896
897	c->rq.enabled = 1;
898
899	return (0);
900
901err_disable_rq:
902	mlx5e_disable_rq(rq);
903err_destroy_rq:
904	mlx5e_destroy_rq(rq);
905
906	return (err);
907}
908
909static void
910mlx5e_close_rq(struct mlx5e_rq *rq)
911{
912	mtx_lock(&rq->mtx);
913	rq->enabled = 0;
914	callout_stop(&rq->watchdog);
915	mtx_unlock(&rq->mtx);
916
917	callout_drain(&rq->watchdog);
918
919	mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
920}
921
922static void
923mlx5e_close_rq_wait(struct mlx5e_rq *rq)
924{
925	struct mlx5_core_dev *mdev = rq->channel->priv->mdev;
926
927	/* wait till RQ is empty */
928	while (!mlx5_wq_ll_is_empty(&rq->wq) &&
929		(mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)) {
930		msleep(4);
931		rq->cq.mcq.comp(&rq->cq.mcq);
932	}
933
934	mlx5e_disable_rq(rq);
935	mlx5e_destroy_rq(rq);
936}
937
938void
939mlx5e_free_sq_db(struct mlx5e_sq *sq)
940{
941	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
942	int x;
943
944	for (x = 0; x != wq_sz; x++)
945		bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
946	free(sq->mbuf, M_MLX5EN);
947}
948
949int
950mlx5e_alloc_sq_db(struct mlx5e_sq *sq)
951{
952	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
953	int err;
954	int x;
955
956	sq->mbuf = malloc(wq_sz * sizeof(sq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
957
958	/* Create DMA descriptor MAPs */
959	for (x = 0; x != wq_sz; x++) {
960		err = -bus_dmamap_create(sq->dma_tag, 0, &sq->mbuf[x].dma_map);
961		if (err != 0) {
962			while (x--)
963				bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
964			free(sq->mbuf, M_MLX5EN);
965			return (err);
966		}
967	}
968	return (0);
969}
970
971static const char *mlx5e_sq_stats_desc[] = {
972	MLX5E_SQ_STATS(MLX5E_STATS_DESC)
973};
974
975static int
976mlx5e_create_sq(struct mlx5e_channel *c,
977    int tc,
978    struct mlx5e_sq_param *param,
979    struct mlx5e_sq *sq)
980{
981	struct mlx5e_priv *priv = c->priv;
982	struct mlx5_core_dev *mdev = priv->mdev;
983	char buffer[16];
984
985	void *sqc = param->sqc;
986	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
987#ifdef RSS
988	cpuset_t cpu_mask;
989	int cpu_id;
990#endif
991	int err;
992
993	/* Create DMA descriptor TAG */
994	if ((err = -bus_dma_tag_create(
995	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
996	    1,				/* any alignment */
997	    0,				/* no boundary */
998	    BUS_SPACE_MAXADDR,		/* lowaddr */
999	    BUS_SPACE_MAXADDR,		/* highaddr */
1000	    NULL, NULL,			/* filter, filterarg */
1001	    MLX5E_MAX_TX_PAYLOAD_SIZE,	/* maxsize */
1002	    MLX5E_MAX_TX_MBUF_FRAGS,	/* nsegments */
1003	    MLX5E_MAX_TX_MBUF_SIZE,	/* maxsegsize */
1004	    0,				/* flags */
1005	    NULL, NULL,			/* lockfunc, lockfuncarg */
1006	    &sq->dma_tag)))
1007		goto done;
1008
1009	err = mlx5_alloc_map_uar(mdev, &sq->uar);
1010	if (err)
1011		goto err_free_dma_tag;
1012
1013	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
1014	    &sq->wq_ctrl);
1015	if (err)
1016		goto err_unmap_free_uar;
1017
1018	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
1019	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
1020
1021	err = mlx5e_alloc_sq_db(sq);
1022	if (err)
1023		goto err_sq_wq_destroy;
1024
1025	sq->mkey_be = c->mkey_be;
1026	sq->ifp = priv->ifp;
1027	sq->priv = priv;
1028	sq->tc = tc;
1029	sq->max_inline = priv->params.tx_max_inline;
1030	sq->min_inline_mode = priv->params.tx_min_inline_mode;
1031	sq->vlan_inline_cap = MLX5_CAP_ETH(mdev, wqe_vlan_insert);
1032
1033	/* check if we should allocate a second packet buffer */
1034	if (priv->params_ethtool.tx_bufring_disable == 0) {
1035		sq->br = buf_ring_alloc(MLX5E_SQ_TX_QUEUE_SIZE, M_MLX5EN,
1036		    M_WAITOK, &sq->lock);
1037		if (sq->br == NULL) {
1038			if_printf(c->ifp, "%s: Failed allocating sq drbr buffer\n",
1039			    __func__);
1040			err = -ENOMEM;
1041			goto err_free_sq_db;
1042		}
1043
1044		sq->sq_tq = taskqueue_create_fast("mlx5e_que", M_WAITOK,
1045		    taskqueue_thread_enqueue, &sq->sq_tq);
1046		if (sq->sq_tq == NULL) {
1047			if_printf(c->ifp, "%s: Failed allocating taskqueue\n",
1048			    __func__);
1049			err = -ENOMEM;
1050			goto err_free_drbr;
1051		}
1052
1053		TASK_INIT(&sq->sq_task, 0, mlx5e_tx_que, sq);
1054#ifdef RSS
1055		cpu_id = rss_getcpu(c->ix % rss_getnumbuckets());
1056		CPU_SETOF(cpu_id, &cpu_mask);
1057		taskqueue_start_threads_cpuset(&sq->sq_tq, 1, PI_NET, &cpu_mask,
1058		    "%s TX SQ%d.%d CPU%d", c->ifp->if_xname, c->ix, tc, cpu_id);
1059#else
1060		taskqueue_start_threads(&sq->sq_tq, 1, PI_NET,
1061		    "%s TX SQ%d.%d", c->ifp->if_xname, c->ix, tc);
1062#endif
1063	}
1064	snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc);
1065	mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
1066	    buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM,
1067	    sq->stats.arg);
1068
1069	return (0);
1070
1071err_free_drbr:
1072	buf_ring_free(sq->br, M_MLX5EN);
1073err_free_sq_db:
1074	mlx5e_free_sq_db(sq);
1075err_sq_wq_destroy:
1076	mlx5_wq_destroy(&sq->wq_ctrl);
1077
1078err_unmap_free_uar:
1079	mlx5_unmap_free_uar(mdev, &sq->uar);
1080
1081err_free_dma_tag:
1082	bus_dma_tag_destroy(sq->dma_tag);
1083done:
1084	return (err);
1085}
1086
1087static void
1088mlx5e_destroy_sq(struct mlx5e_sq *sq)
1089{
1090	/* destroy all sysctl nodes */
1091	sysctl_ctx_free(&sq->stats.ctx);
1092
1093	mlx5e_free_sq_db(sq);
1094	mlx5_wq_destroy(&sq->wq_ctrl);
1095	mlx5_unmap_free_uar(sq->priv->mdev, &sq->uar);
1096	if (sq->sq_tq != NULL) {
1097		taskqueue_drain(sq->sq_tq, &sq->sq_task);
1098		taskqueue_free(sq->sq_tq);
1099	}
1100	if (sq->br != NULL)
1101		buf_ring_free(sq->br, M_MLX5EN);
1102	bus_dma_tag_destroy(sq->dma_tag);
1103}
1104
1105int
1106mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param,
1107    int tis_num)
1108{
1109	void *in;
1110	void *sqc;
1111	void *wq;
1112	int inlen;
1113	int err;
1114
1115	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
1116	    sizeof(u64) * sq->wq_ctrl.buf.npages;
1117	in = mlx5_vzalloc(inlen);
1118	if (in == NULL)
1119		return (-ENOMEM);
1120
1121	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
1122	wq = MLX5_ADDR_OF(sqc, sqc, wq);
1123
1124	memcpy(sqc, param->sqc, sizeof(param->sqc));
1125
1126	MLX5_SET(sqc, sqc, tis_num_0, tis_num);
1127	MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
1128	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
1129	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
1130	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
1131
1132	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
1133	MLX5_SET(wq, wq, uar_page, sq->uar.index);
1134	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
1135	    PAGE_SHIFT);
1136	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
1137
1138	mlx5_fill_page_array(&sq->wq_ctrl.buf,
1139	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));
1140
1141	err = mlx5_core_create_sq(sq->priv->mdev, in, inlen, &sq->sqn);
1142
1143	kvfree(in);
1144
1145	return (err);
1146}
1147
1148int
1149mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
1150{
1151	void *in;
1152	void *sqc;
1153	int inlen;
1154	int err;
1155
1156	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
1157	in = mlx5_vzalloc(inlen);
1158	if (in == NULL)
1159		return (-ENOMEM);
1160
1161	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
1162
1163	MLX5_SET(modify_sq_in, in, sqn, sq->sqn);
1164	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
1165	MLX5_SET(sqc, sqc, state, next_state);
1166
1167	err = mlx5_core_modify_sq(sq->priv->mdev, in, inlen);
1168
1169	kvfree(in);
1170
1171	return (err);
1172}
1173
1174void
1175mlx5e_disable_sq(struct mlx5e_sq *sq)
1176{
1177
1178	mlx5_core_destroy_sq(sq->priv->mdev, sq->sqn);
1179}
1180
1181static int
1182mlx5e_open_sq(struct mlx5e_channel *c,
1183    int tc,
1184    struct mlx5e_sq_param *param,
1185    struct mlx5e_sq *sq)
1186{
1187	int err;
1188
1189	err = mlx5e_create_sq(c, tc, param, sq);
1190	if (err)
1191		return (err);
1192
1193	err = mlx5e_enable_sq(sq, param, c->priv->tisn[tc]);
1194	if (err)
1195		goto err_destroy_sq;
1196
1197	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
1198	if (err)
1199		goto err_disable_sq;
1200
1201	atomic_store_rel_int(&sq->queue_state, MLX5E_SQ_READY);
1202
1203	return (0);
1204
1205err_disable_sq:
1206	mlx5e_disable_sq(sq);
1207err_destroy_sq:
1208	mlx5e_destroy_sq(sq);
1209
1210	return (err);
1211}
1212
1213static void
1214mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep)
1215{
1216	/* fill up remainder with NOPs */
1217	while (sq->cev_counter != 0) {
1218		while (!mlx5e_sq_has_room_for(sq, 1)) {
1219			if (can_sleep != 0) {
1220				mtx_unlock(&sq->lock);
1221				msleep(4);
1222				mtx_lock(&sq->lock);
1223			} else {
1224				goto done;
1225			}
1226		}
1227		/* send a single NOP */
1228		mlx5e_send_nop(sq, 1);
1229		wmb();
1230	}
1231done:
1232	/* Check if we need to write the doorbell */
1233	if (likely(sq->doorbell.d64 != 0)) {
1234		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
1235		sq->doorbell.d64 = 0;
1236	}
1237}
1238
1239void
1240mlx5e_sq_cev_timeout(void *arg)
1241{
1242	struct mlx5e_sq *sq = arg;
1243
1244	mtx_assert(&sq->lock, MA_OWNED);
1245
1246	/* check next state */
1247	switch (sq->cev_next_state) {
1248	case MLX5E_CEV_STATE_SEND_NOPS:
1249		/* fill TX ring with NOPs, if any */
1250		mlx5e_sq_send_nops_locked(sq, 0);
1251
1252		/* check if completed */
1253		if (sq->cev_counter == 0) {
1254			sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
1255			return;
1256		}
1257		break;
1258	default:
1259		/* send NOPs on next timeout */
1260		sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS;
1261		break;
1262	}
1263
1264	/* restart timer */
1265	callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq);
1266}
1267
1268void
1269mlx5e_drain_sq(struct mlx5e_sq *sq)
1270{
1271	int error;
1272	struct mlx5_core_dev *mdev = sq->priv->mdev;
1273
1274	/*
1275	 * Check if already stopped.
1276	 *
1277	 * NOTE: The "stopped" variable is only written when both the
1278	 * priv's configuration lock and the SQ's lock is locked. It
1279	 * can therefore safely be read when only one of the two locks
1280	 * is locked. This function is always called when the priv's
1281	 * configuration lock is locked.
1282	 */
1283	if (sq->stopped != 0)
1284		return;
1285
1286	mtx_lock(&sq->lock);
1287
1288	/* don't put more packets into the SQ */
1289	sq->stopped = 1;
1290
1291	/* teardown event factor timer, if any */
1292	sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
1293	callout_stop(&sq->cev_callout);
1294
1295	/* send dummy NOPs in order to flush the transmit ring */
1296	mlx5e_sq_send_nops_locked(sq, 1);
1297	mtx_unlock(&sq->lock);
1298
1299	/* make sure it is safe to free the callout */
1300	callout_drain(&sq->cev_callout);
1301
1302	/* wait till SQ is empty or link is down */
1303	mtx_lock(&sq->lock);
1304	while (sq->cc != sq->pc &&
1305	    (sq->priv->media_status_last & IFM_ACTIVE) != 0 &&
1306	    mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
1307		mtx_unlock(&sq->lock);
1308		msleep(1);
1309		sq->cq.mcq.comp(&sq->cq.mcq);
1310		mtx_lock(&sq->lock);
1311	}
1312	mtx_unlock(&sq->lock);
1313
1314	/* error out remaining requests */
1315	error = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
1316	if (error != 0) {
1317		if_printf(sq->ifp,
1318		    "mlx5e_modify_sq() from RDY to ERR failed: %d\n", error);
1319	}
1320
1321	/* wait till SQ is empty */
1322	mtx_lock(&sq->lock);
1323	while (sq->cc != sq->pc &&
1324	    mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
1325		mtx_unlock(&sq->lock);
1326		msleep(1);
1327		sq->cq.mcq.comp(&sq->cq.mcq);
1328		mtx_lock(&sq->lock);
1329	}
1330	mtx_unlock(&sq->lock);
1331}
1332
1333static void
1334mlx5e_close_sq_wait(struct mlx5e_sq *sq)
1335{
1336
1337	mlx5e_drain_sq(sq);
1338	mlx5e_disable_sq(sq);
1339	mlx5e_destroy_sq(sq);
1340}
1341
1342static int
1343mlx5e_create_cq(struct mlx5e_priv *priv,
1344    struct mlx5e_cq_param *param,
1345    struct mlx5e_cq *cq,
1346    mlx5e_cq_comp_t *comp,
1347    int eq_ix)
1348{
1349	struct mlx5_core_dev *mdev = priv->mdev;
1350	struct mlx5_core_cq *mcq = &cq->mcq;
1351	int eqn_not_used;
1352	int irqn;
1353	int err;
1354	u32 i;
1355
1356	param->wq.buf_numa_node = 0;
1357	param->wq.db_numa_node = 0;
1358
1359	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
1360	    &cq->wq_ctrl);
1361	if (err)
1362		return (err);
1363
1364	mlx5_vector2eqn(mdev, eq_ix, &eqn_not_used, &irqn);
1365
1366	mcq->cqe_sz = 64;
1367	mcq->set_ci_db = cq->wq_ctrl.db.db;
1368	mcq->arm_db = cq->wq_ctrl.db.db + 1;
1369	*mcq->set_ci_db = 0;
1370	*mcq->arm_db = 0;
1371	mcq->vector = eq_ix;
1372	mcq->comp = comp;
1373	mcq->event = mlx5e_cq_error_event;
1374	mcq->irqn = irqn;
1375	mcq->uar = &priv->cq_uar;
1376
1377	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
1378		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
1379
1380		cqe->op_own = 0xf1;
1381	}
1382
1383	cq->priv = priv;
1384
1385	return (0);
1386}
1387
1388static void
1389mlx5e_destroy_cq(struct mlx5e_cq *cq)
1390{
1391	mlx5_wq_destroy(&cq->wq_ctrl);
1392}
1393
1394static int
1395mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param, int eq_ix)
1396{
1397	struct mlx5_core_cq *mcq = &cq->mcq;
1398	void *in;
1399	void *cqc;
1400	int inlen;
1401	int irqn_not_used;
1402	int eqn;
1403	int err;
1404
1405	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
1406	    sizeof(u64) * cq->wq_ctrl.buf.npages;
1407	in = mlx5_vzalloc(inlen);
1408	if (in == NULL)
1409		return (-ENOMEM);
1410
1411	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
1412
1413	memcpy(cqc, param->cqc, sizeof(param->cqc));
1414
1415	mlx5_fill_page_array(&cq->wq_ctrl.buf,
1416	    (__be64 *) MLX5_ADDR_OF(create_cq_in, in, pas));
1417
1418	mlx5_vector2eqn(cq->priv->mdev, eq_ix, &eqn, &irqn_not_used);
1419
1420	MLX5_SET(cqc, cqc, c_eqn, eqn);
1421	MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
1422	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
1423	    PAGE_SHIFT);
1424	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
1425
1426	err = mlx5_core_create_cq(cq->priv->mdev, mcq, in, inlen);
1427
1428	kvfree(in);
1429
1430	if (err)
1431		return (err);
1432
1433	mlx5e_cq_arm(cq, MLX5_GET_DOORBELL_LOCK(&cq->priv->doorbell_lock));
1434
1435	return (0);
1436}
1437
1438static void
1439mlx5e_disable_cq(struct mlx5e_cq *cq)
1440{
1441
1442	mlx5_core_destroy_cq(cq->priv->mdev, &cq->mcq);
1443}
1444
1445int
1446mlx5e_open_cq(struct mlx5e_priv *priv,
1447    struct mlx5e_cq_param *param,
1448    struct mlx5e_cq *cq,
1449    mlx5e_cq_comp_t *comp,
1450    int eq_ix)
1451{
1452	int err;
1453
1454	err = mlx5e_create_cq(priv, param, cq, comp, eq_ix);
1455	if (err)
1456		return (err);
1457
1458	err = mlx5e_enable_cq(cq, param, eq_ix);
1459	if (err)
1460		goto err_destroy_cq;
1461
1462	return (0);
1463
1464err_destroy_cq:
1465	mlx5e_destroy_cq(cq);
1466
1467	return (err);
1468}
1469
1470void
1471mlx5e_close_cq(struct mlx5e_cq *cq)
1472{
1473	mlx5e_disable_cq(cq);
1474	mlx5e_destroy_cq(cq);
1475}
1476
1477static int
1478mlx5e_open_tx_cqs(struct mlx5e_channel *c,
1479    struct mlx5e_channel_param *cparam)
1480{
1481	int err;
1482	int tc;
1483
1484	for (tc = 0; tc < c->num_tc; tc++) {
1485		/* open completion queue */
1486		err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[tc].cq,
1487		    &mlx5e_tx_cq_comp, c->ix);
1488		if (err)
1489			goto err_close_tx_cqs;
1490	}
1491	return (0);
1492
1493err_close_tx_cqs:
1494	for (tc--; tc >= 0; tc--)
1495		mlx5e_close_cq(&c->sq[tc].cq);
1496
1497	return (err);
1498}
1499
1500static void
1501mlx5e_close_tx_cqs(struct mlx5e_channel *c)
1502{
1503	int tc;
1504
1505	for (tc = 0; tc < c->num_tc; tc++)
1506		mlx5e_close_cq(&c->sq[tc].cq);
1507}
1508
1509static int
1510mlx5e_open_sqs(struct mlx5e_channel *c,
1511    struct mlx5e_channel_param *cparam)
1512{
1513	int err;
1514	int tc;
1515
1516	for (tc = 0; tc < c->num_tc; tc++) {
1517		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
1518		if (err)
1519			goto err_close_sqs;
1520	}
1521
1522	return (0);
1523
1524err_close_sqs:
1525	for (tc--; tc >= 0; tc--)
1526		mlx5e_close_sq_wait(&c->sq[tc]);
1527
1528	return (err);
1529}
1530
1531static void
1532mlx5e_close_sqs_wait(struct mlx5e_channel *c)
1533{
1534	int tc;
1535
1536	for (tc = 0; tc < c->num_tc; tc++)
1537		mlx5e_close_sq_wait(&c->sq[tc]);
1538}
1539
1540static void
1541mlx5e_chan_mtx_init(struct mlx5e_channel *c)
1542{
1543	int tc;
1544
1545	mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF);
1546
1547	callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0);
1548
1549	for (tc = 0; tc < c->num_tc; tc++) {
1550		struct mlx5e_sq *sq = c->sq + tc;
1551
1552		mtx_init(&sq->lock, "mlx5tx",
1553		    MTX_NETWORK_LOCK " TX", MTX_DEF);
1554		mtx_init(&sq->comp_lock, "mlx5comp",
1555		    MTX_NETWORK_LOCK " TX", MTX_DEF);
1556
1557		callout_init_mtx(&sq->cev_callout, &sq->lock, 0);
1558
1559		sq->cev_factor = c->priv->params_ethtool.tx_completion_fact;
1560
1561		/* ensure the TX completion event factor is not zero */
1562		if (sq->cev_factor == 0)
1563			sq->cev_factor = 1;
1564	}
1565}
1566
1567static void
1568mlx5e_chan_mtx_destroy(struct mlx5e_channel *c)
1569{
1570	int tc;
1571
1572	mtx_destroy(&c->rq.mtx);
1573
1574	for (tc = 0; tc < c->num_tc; tc++) {
1575		mtx_destroy(&c->sq[tc].lock);
1576		mtx_destroy(&c->sq[tc].comp_lock);
1577	}
1578}
1579
1580static int
1581mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1582    struct mlx5e_channel_param *cparam,
1583    struct mlx5e_channel *volatile *cp)
1584{
1585	struct mlx5e_channel *c;
1586	int err;
1587
1588	c = malloc(sizeof(*c), M_MLX5EN, M_WAITOK | M_ZERO);
1589	c->priv = priv;
1590	c->ix = ix;
1591	c->cpu = 0;
1592	c->ifp = priv->ifp;
1593	c->mkey_be = cpu_to_be32(priv->mr.key);
1594	c->num_tc = priv->num_tc;
1595
1596	/* init mutexes */
1597	mlx5e_chan_mtx_init(c);
1598
1599	/* open transmit completion queue */
1600	err = mlx5e_open_tx_cqs(c, cparam);
1601	if (err)
1602		goto err_free;
1603
1604	/* open receive completion queue */
1605	err = mlx5e_open_cq(c->priv, &cparam->rx_cq, &c->rq.cq,
1606	    &mlx5e_rx_cq_comp, c->ix);
1607	if (err)
1608		goto err_close_tx_cqs;
1609
1610	err = mlx5e_open_sqs(c, cparam);
1611	if (err)
1612		goto err_close_rx_cq;
1613
1614	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
1615	if (err)
1616		goto err_close_sqs;
1617
1618	/* store channel pointer */
1619	*cp = c;
1620
1621	/* poll receive queue initially */
1622	c->rq.cq.mcq.comp(&c->rq.cq.mcq);
1623
1624	return (0);
1625
1626err_close_sqs:
1627	mlx5e_close_sqs_wait(c);
1628
1629err_close_rx_cq:
1630	mlx5e_close_cq(&c->rq.cq);
1631
1632err_close_tx_cqs:
1633	mlx5e_close_tx_cqs(c);
1634
1635err_free:
1636	/* destroy mutexes */
1637	mlx5e_chan_mtx_destroy(c);
1638	free(c, M_MLX5EN);
1639	return (err);
1640}
1641
1642static void
1643mlx5e_close_channel(struct mlx5e_channel *volatile *pp)
1644{
1645	struct mlx5e_channel *c = *pp;
1646
1647	/* check if channel is already closed */
1648	if (c == NULL)
1649		return;
1650	mlx5e_close_rq(&c->rq);
1651}
1652
1653static void
1654mlx5e_close_channel_wait(struct mlx5e_channel *volatile *pp)
1655{
1656	struct mlx5e_channel *c = *pp;
1657
1658	/* check if channel is already closed */
1659	if (c == NULL)
1660		return;
1661	/* ensure channel pointer is no longer used */
1662	*pp = NULL;
1663
1664	mlx5e_close_rq_wait(&c->rq);
1665	mlx5e_close_sqs_wait(c);
1666	mlx5e_close_cq(&c->rq.cq);
1667	mlx5e_close_tx_cqs(c);
1668	/* destroy mutexes */
1669	mlx5e_chan_mtx_destroy(c);
1670	free(c, M_MLX5EN);
1671}
1672
1673static int
1674mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs)
1675{
1676	u32 r, n;
1677
1678	r = priv->params.hw_lro_en ? priv->params.lro_wqe_sz :
1679	    MLX5E_SW2MB_MTU(priv->ifp->if_mtu);
1680	if (r > MJUM16BYTES)
1681		return (-ENOMEM);
1682
1683	if (r > MJUM9BYTES)
1684		r = MJUM16BYTES;
1685	else if (r > MJUMPAGESIZE)
1686		r = MJUM9BYTES;
1687	else if (r > MCLBYTES)
1688		r = MJUMPAGESIZE;
1689	else
1690		r = MCLBYTES;
1691
1692	/*
1693	 * n + 1 must be a power of two, because stride size must be.
1694	 * Stride size is 16 * (n + 1), as the first segment is
1695	 * control.
1696	 */
1697	for (n = howmany(r, MLX5E_MAX_RX_BYTES); !powerof2(n + 1); n++)
1698		;
1699
1700	*wqe_sz = r;
1701	*nsegs = n;
1702	return (0);
1703}
1704
1705static void
1706mlx5e_build_rq_param(struct mlx5e_priv *priv,
1707    struct mlx5e_rq_param *param)
1708{
1709	void *rqc = param->rqc;
1710	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
1711	u32 wqe_sz, nsegs;
1712
1713	mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
1714	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
1715	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
1716	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe) +
1717	    nsegs * sizeof(struct mlx5_wqe_data_seg)));
1718	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
1719	MLX5_SET(wq, wq, pd, priv->pdn);
1720
1721	param->wq.buf_numa_node = 0;
1722	param->wq.db_numa_node = 0;
1723	param->wq.linear = 1;
1724}
1725
1726static void
1727mlx5e_build_sq_param(struct mlx5e_priv *priv,
1728    struct mlx5e_sq_param *param)
1729{
1730	void *sqc = param->sqc;
1731	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1732
1733	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
1734	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
1735	MLX5_SET(wq, wq, pd, priv->pdn);
1736
1737	param->wq.buf_numa_node = 0;
1738	param->wq.db_numa_node = 0;
1739	param->wq.linear = 1;
1740}
1741
1742static void
1743mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
1744    struct mlx5e_cq_param *param)
1745{
1746	void *cqc = param->cqc;
1747
1748	MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
1749}
1750
1751static void
1752mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
1753    struct mlx5e_cq_param *param)
1754{
1755	void *cqc = param->cqc;
1756
1757
1758	/*
1759	 * TODO The sysctl to control on/off is a bool value for now, which means
1760	 * we only support CSUM, once HASH is implemnted we'll need to address that.
1761	 */
1762	if (priv->params.cqe_zipping_en) {
1763		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
1764		MLX5_SET(cqc, cqc, cqe_compression_en, 1);
1765	}
1766
1767	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
1768	MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
1769	MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);
1770
1771	switch (priv->params.rx_cq_moderation_mode) {
1772	case 0:
1773		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1774		break;
1775	default:
1776		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
1777			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
1778		else
1779			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1780		break;
1781	}
1782
1783	mlx5e_build_common_cq_param(priv, param);
1784}
1785
1786static void
1787mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
1788    struct mlx5e_cq_param *param)
1789{
1790	void *cqc = param->cqc;
1791
1792	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
1793	MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec);
1794	MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts);
1795
1796	switch (priv->params.tx_cq_moderation_mode) {
1797	case 0:
1798		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1799		break;
1800	default:
1801		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
1802			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
1803		else
1804			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1805		break;
1806	}
1807
1808	mlx5e_build_common_cq_param(priv, param);
1809}
1810
1811static void
1812mlx5e_build_channel_param(struct mlx5e_priv *priv,
1813    struct mlx5e_channel_param *cparam)
1814{
1815	memset(cparam, 0, sizeof(*cparam));
1816
1817	mlx5e_build_rq_param(priv, &cparam->rq);
1818	mlx5e_build_sq_param(priv, &cparam->sq);
1819	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
1820	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
1821}
1822
1823static int
1824mlx5e_open_channels(struct mlx5e_priv *priv)
1825{
1826	struct mlx5e_channel_param cparam;
1827	void *ptr;
1828	int err;
1829	int i;
1830	int j;
1831
1832	priv->channel = malloc(priv->params.num_channels *
1833	    sizeof(struct mlx5e_channel *), M_MLX5EN, M_WAITOK | M_ZERO);
1834
1835	mlx5e_build_channel_param(priv, &cparam);
1836	for (i = 0; i < priv->params.num_channels; i++) {
1837		err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
1838		if (err)
1839			goto err_close_channels;
1840	}
1841
1842	for (j = 0; j < priv->params.num_channels; j++) {
1843		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
1844		if (err)
1845			goto err_close_channels;
1846	}
1847
1848	return (0);
1849
1850err_close_channels:
1851	for (i--; i >= 0; i--) {
1852		mlx5e_close_channel(&priv->channel[i]);
1853		mlx5e_close_channel_wait(&priv->channel[i]);
1854	}
1855
1856	/* remove "volatile" attribute from "channel" pointer */
1857	ptr = __DECONST(void *, priv->channel);
1858	priv->channel = NULL;
1859
1860	free(ptr, M_MLX5EN);
1861
1862	return (err);
1863}
1864
1865static void
1866mlx5e_close_channels(struct mlx5e_priv *priv)
1867{
1868	void *ptr;
1869	int i;
1870
1871	if (priv->channel == NULL)
1872		return;
1873
1874	for (i = 0; i < priv->params.num_channels; i++)
1875		mlx5e_close_channel(&priv->channel[i]);
1876	for (i = 0; i < priv->params.num_channels; i++)
1877		mlx5e_close_channel_wait(&priv->channel[i]);
1878
1879	/* remove "volatile" attribute from "channel" pointer */
1880	ptr = __DECONST(void *, priv->channel);
1881	priv->channel = NULL;
1882
1883	free(ptr, M_MLX5EN);
1884}
1885
1886static int
1887mlx5e_refresh_sq_params(struct mlx5e_priv *priv, struct mlx5e_sq *sq)
1888{
1889
1890	if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
1891		uint8_t cq_mode;
1892
1893		switch (priv->params.tx_cq_moderation_mode) {
1894		case 0:
1895			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
1896			break;
1897		default:
1898			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
1899			break;
1900		}
1901
1902		return (mlx5_core_modify_cq_moderation_mode(priv->mdev, &sq->cq.mcq,
1903		    priv->params.tx_cq_moderation_usec,
1904		    priv->params.tx_cq_moderation_pkts,
1905		    cq_mode));
1906	}
1907
1908	return (mlx5_core_modify_cq_moderation(priv->mdev, &sq->cq.mcq,
1909	    priv->params.tx_cq_moderation_usec,
1910	    priv->params.tx_cq_moderation_pkts));
1911}
1912
1913static int
1914mlx5e_refresh_rq_params(struct mlx5e_priv *priv, struct mlx5e_rq *rq)
1915{
1916
1917	if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
1918		uint8_t cq_mode;
1919		int retval;
1920
1921		switch (priv->params.rx_cq_moderation_mode) {
1922		case 0:
1923			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
1924			break;
1925		default:
1926			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
1927			break;
1928		}
1929
1930		retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq,
1931		    priv->params.rx_cq_moderation_usec,
1932		    priv->params.rx_cq_moderation_pkts,
1933		    cq_mode);
1934
1935		return (retval);
1936	}
1937
1938	return (mlx5_core_modify_cq_moderation(priv->mdev, &rq->cq.mcq,
1939	    priv->params.rx_cq_moderation_usec,
1940	    priv->params.rx_cq_moderation_pkts));
1941}
1942
1943static int
1944mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
1945{
1946	int err;
1947	int i;
1948
1949	if (c == NULL)
1950		return (EINVAL);
1951
1952	err = mlx5e_refresh_rq_params(priv, &c->rq);
1953	if (err)
1954		goto done;
1955
1956	for (i = 0; i != c->num_tc; i++) {
1957		err = mlx5e_refresh_sq_params(priv, &c->sq[i]);
1958		if (err)
1959			goto done;
1960	}
1961done:
1962	return (err);
1963}
1964
1965int
1966mlx5e_refresh_channel_params(struct mlx5e_priv *priv)
1967{
1968	int i;
1969
1970	if (priv->channel == NULL)
1971		return (EINVAL);
1972
1973	for (i = 0; i < priv->params.num_channels; i++) {
1974		int err;
1975
1976		err = mlx5e_refresh_channel_params_sub(priv, priv->channel[i]);
1977		if (err)
1978			return (err);
1979	}
1980	return (0);
1981}
1982
1983static int
1984mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
1985{
1986	struct mlx5_core_dev *mdev = priv->mdev;
1987	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
1988	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
1989
1990	memset(in, 0, sizeof(in));
1991
1992	MLX5_SET(tisc, tisc, prio, tc);
1993	MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
1994
1995	return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]));
1996}
1997
1998static void
1999mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
2000{
2001	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
2002}
2003
2004static int
2005mlx5e_open_tises(struct mlx5e_priv *priv)
2006{
2007	int num_tc = priv->num_tc;
2008	int err;
2009	int tc;
2010
2011	for (tc = 0; tc < num_tc; tc++) {
2012		err = mlx5e_open_tis(priv, tc);
2013		if (err)
2014			goto err_close_tises;
2015	}
2016
2017	return (0);
2018
2019err_close_tises:
2020	for (tc--; tc >= 0; tc--)
2021		mlx5e_close_tis(priv, tc);
2022
2023	return (err);
2024}
2025
2026static void
2027mlx5e_close_tises(struct mlx5e_priv *priv)
2028{
2029	int num_tc = priv->num_tc;
2030	int tc;
2031
2032	for (tc = 0; tc < num_tc; tc++)
2033		mlx5e_close_tis(priv, tc);
2034}
2035
2036static int
2037mlx5e_open_rqt(struct mlx5e_priv *priv)
2038{
2039	struct mlx5_core_dev *mdev = priv->mdev;
2040	u32 *in;
2041	u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
2042	void *rqtc;
2043	int inlen;
2044	int err;
2045	int sz;
2046	int i;
2047
2048	sz = 1 << priv->params.rx_hash_log_tbl_sz;
2049
2050	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
2051	in = mlx5_vzalloc(inlen);
2052	if (in == NULL)
2053		return (-ENOMEM);
2054	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
2055
2056	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2057	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
2058
2059	for (i = 0; i < sz; i++) {
2060		int ix = i;
2061#ifdef RSS
2062		ix = rss_get_indirection_to_bucket(ix);
2063#endif
2064		/* ensure we don't overflow */
2065		ix %= priv->params.num_channels;
2066
2067		/* apply receive side scaling stride, if any */
2068		ix -= ix % (int)priv->params.channels_rsss;
2069
2070		MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
2071	}
2072
2073	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
2074
2075	memset(out, 0, sizeof(out));
2076	err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
2077	if (!err)
2078		priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
2079
2080	kvfree(in);
2081
2082	return (err);
2083}
2084
2085static void
2086mlx5e_close_rqt(struct mlx5e_priv *priv)
2087{
2088	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
2089	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
2090
2091	memset(in, 0, sizeof(in));
2092
2093	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
2094	MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
2095
2096	mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
2097	    sizeof(out));
2098}
2099
2100static void
2101mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
2102{
2103	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2104	__be32 *hkey;
2105
2106	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
2107
2108#define	ROUGH_MAX_L2_L3_HDR_SZ 256
2109
2110#define	MLX5_HASH_IP     (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2111			  MLX5_HASH_FIELD_SEL_DST_IP)
2112
2113#define	MLX5_HASH_ALL    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2114			  MLX5_HASH_FIELD_SEL_DST_IP   |\
2115			  MLX5_HASH_FIELD_SEL_L4_SPORT |\
2116			  MLX5_HASH_FIELD_SEL_L4_DPORT)
2117
2118#define	MLX5_HASH_IP_IPSEC_SPI	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
2119				 MLX5_HASH_FIELD_SEL_DST_IP   |\
2120				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2121
2122	if (priv->params.hw_lro_en) {
2123		MLX5_SET(tirc, tirc, lro_enable_mask,
2124		    MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2125		    MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
2126		MLX5_SET(tirc, tirc, lro_max_msg_sz,
2127		    (priv->params.lro_wqe_sz -
2128		    ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
2129		/* TODO: add the option to choose timer value dynamically */
2130		MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
2131		    MLX5_CAP_ETH(priv->mdev,
2132		    lro_timer_supported_periods[2]));
2133	}
2134
2135	/* setup parameters for hashing TIR type, if any */
2136	switch (tt) {
2137	case MLX5E_TT_ANY:
2138		MLX5_SET(tirc, tirc, disp_type,
2139		    MLX5_TIRC_DISP_TYPE_DIRECT);
2140		MLX5_SET(tirc, tirc, inline_rqn,
2141		    priv->channel[0]->rq.rqn);
2142		break;
2143	default:
2144		MLX5_SET(tirc, tirc, disp_type,
2145		    MLX5_TIRC_DISP_TYPE_INDIRECT);
2146		MLX5_SET(tirc, tirc, indirect_table,
2147		    priv->rqtn);
2148		MLX5_SET(tirc, tirc, rx_hash_fn,
2149		    MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
2150		hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
2151#ifdef RSS
2152		/*
2153		 * The FreeBSD RSS implementation does currently not
2154		 * support symmetric Toeplitz hashes:
2155		 */
2156		MLX5_SET(tirc, tirc, rx_hash_symmetric, 0);
2157		rss_getkey((uint8_t *)hkey);
2158#else
2159		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
2160		hkey[0] = cpu_to_be32(0xD181C62C);
2161		hkey[1] = cpu_to_be32(0xF7F4DB5B);
2162		hkey[2] = cpu_to_be32(0x1983A2FC);
2163		hkey[3] = cpu_to_be32(0x943E1ADB);
2164		hkey[4] = cpu_to_be32(0xD9389E6B);
2165		hkey[5] = cpu_to_be32(0xD1039C2C);
2166		hkey[6] = cpu_to_be32(0xA74499AD);
2167		hkey[7] = cpu_to_be32(0x593D56D9);
2168		hkey[8] = cpu_to_be32(0xF3253C06);
2169		hkey[9] = cpu_to_be32(0x2ADC1FFC);
2170#endif
2171		break;
2172	}
2173
2174	switch (tt) {
2175	case MLX5E_TT_IPV4_TCP:
2176		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2177		    MLX5_L3_PROT_TYPE_IPV4);
2178		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2179		    MLX5_L4_PROT_TYPE_TCP);
2180#ifdef RSS
2181		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) {
2182			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2183			    MLX5_HASH_IP);
2184		} else
2185#endif
2186		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2187		    MLX5_HASH_ALL);
2188		break;
2189
2190	case MLX5E_TT_IPV6_TCP:
2191		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2192		    MLX5_L3_PROT_TYPE_IPV6);
2193		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2194		    MLX5_L4_PROT_TYPE_TCP);
2195#ifdef RSS
2196		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) {
2197			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2198			    MLX5_HASH_IP);
2199		} else
2200#endif
2201		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2202		    MLX5_HASH_ALL);
2203		break;
2204
2205	case MLX5E_TT_IPV4_UDP:
2206		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2207		    MLX5_L3_PROT_TYPE_IPV4);
2208		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2209		    MLX5_L4_PROT_TYPE_UDP);
2210#ifdef RSS
2211		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) {
2212			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2213			    MLX5_HASH_IP);
2214		} else
2215#endif
2216		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2217		    MLX5_HASH_ALL);
2218		break;
2219
2220	case MLX5E_TT_IPV6_UDP:
2221		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2222		    MLX5_L3_PROT_TYPE_IPV6);
2223		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2224		    MLX5_L4_PROT_TYPE_UDP);
2225#ifdef RSS
2226		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) {
2227			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2228			    MLX5_HASH_IP);
2229		} else
2230#endif
2231		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2232		    MLX5_HASH_ALL);
2233		break;
2234
2235	case MLX5E_TT_IPV4_IPSEC_AH:
2236		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2237		    MLX5_L3_PROT_TYPE_IPV4);
2238		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2239		    MLX5_HASH_IP_IPSEC_SPI);
2240		break;
2241
2242	case MLX5E_TT_IPV6_IPSEC_AH:
2243		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2244		    MLX5_L3_PROT_TYPE_IPV6);
2245		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2246		    MLX5_HASH_IP_IPSEC_SPI);
2247		break;
2248
2249	case MLX5E_TT_IPV4_IPSEC_ESP:
2250		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2251		    MLX5_L3_PROT_TYPE_IPV4);
2252		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2253		    MLX5_HASH_IP_IPSEC_SPI);
2254		break;
2255
2256	case MLX5E_TT_IPV6_IPSEC_ESP:
2257		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2258		    MLX5_L3_PROT_TYPE_IPV6);
2259		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2260		    MLX5_HASH_IP_IPSEC_SPI);
2261		break;
2262
2263	case MLX5E_TT_IPV4:
2264		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2265		    MLX5_L3_PROT_TYPE_IPV4);
2266		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2267		    MLX5_HASH_IP);
2268		break;
2269
2270	case MLX5E_TT_IPV6:
2271		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2272		    MLX5_L3_PROT_TYPE_IPV6);
2273		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2274		    MLX5_HASH_IP);
2275		break;
2276
2277	default:
2278		break;
2279	}
2280}
2281
2282static int
2283mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
2284{
2285	struct mlx5_core_dev *mdev = priv->mdev;
2286	u32 *in;
2287	void *tirc;
2288	int inlen;
2289	int err;
2290
2291	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2292	in = mlx5_vzalloc(inlen);
2293	if (in == NULL)
2294		return (-ENOMEM);
2295	tirc = MLX5_ADDR_OF(create_tir_in, in, tir_context);
2296
2297	mlx5e_build_tir_ctx(priv, tirc, tt);
2298
2299	err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
2300
2301	kvfree(in);
2302
2303	return (err);
2304}
2305
2306static void
2307mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
2308{
2309	mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
2310}
2311
2312static int
2313mlx5e_open_tirs(struct mlx5e_priv *priv)
2314{
2315	int err;
2316	int i;
2317
2318	for (i = 0; i < MLX5E_NUM_TT; i++) {
2319		err = mlx5e_open_tir(priv, i);
2320		if (err)
2321			goto err_close_tirs;
2322	}
2323
2324	return (0);
2325
2326err_close_tirs:
2327	for (i--; i >= 0; i--)
2328		mlx5e_close_tir(priv, i);
2329
2330	return (err);
2331}
2332
2333static void
2334mlx5e_close_tirs(struct mlx5e_priv *priv)
2335{
2336	int i;
2337
2338	for (i = 0; i < MLX5E_NUM_TT; i++)
2339		mlx5e_close_tir(priv, i);
2340}
2341
2342/*
2343 * SW MTU does not include headers,
2344 * HW MTU includes all headers and checksums.
2345 */
2346static int
2347mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu)
2348{
2349	struct mlx5e_priv *priv = ifp->if_softc;
2350	struct mlx5_core_dev *mdev = priv->mdev;
2351	int hw_mtu;
2352	int err;
2353
2354	hw_mtu = MLX5E_SW2HW_MTU(sw_mtu);
2355
2356	err = mlx5_set_port_mtu(mdev, hw_mtu);
2357	if (err) {
2358		if_printf(ifp, "%s: mlx5_set_port_mtu failed setting %d, err=%d\n",
2359		    __func__, sw_mtu, err);
2360		return (err);
2361	}
2362
2363	/* Update vport context MTU */
2364	err = mlx5_set_vport_mtu(mdev, hw_mtu);
2365	if (err) {
2366		if_printf(ifp, "%s: Failed updating vport context with MTU size, err=%d\n",
2367		    __func__, err);
2368	}
2369
2370	ifp->if_mtu = sw_mtu;
2371
2372	err = mlx5_query_vport_mtu(mdev, &hw_mtu);
2373	if (err || !hw_mtu) {
2374		/* fallback to port oper mtu */
2375		err = mlx5_query_port_oper_mtu(mdev, &hw_mtu);
2376	}
2377	if (err) {
2378		if_printf(ifp, "Query port MTU, after setting new "
2379		    "MTU value, failed\n");
2380		return (err);
2381	} else if (MLX5E_HW2SW_MTU(hw_mtu) < sw_mtu) {
2382		err = -E2BIG,
2383		if_printf(ifp, "Port MTU %d is smaller than "
2384                    "ifp mtu %d\n", hw_mtu, sw_mtu);
2385	} else if (MLX5E_HW2SW_MTU(hw_mtu) > sw_mtu) {
2386		err = -EINVAL;
2387                if_printf(ifp, "Port MTU %d is bigger than "
2388                    "ifp mtu %d\n", hw_mtu, sw_mtu);
2389	}
2390	priv->params_ethtool.hw_mtu = hw_mtu;
2391
2392	return (err);
2393}
2394
2395int
2396mlx5e_open_locked(struct ifnet *ifp)
2397{
2398	struct mlx5e_priv *priv = ifp->if_softc;
2399	int err;
2400	u16 set_id;
2401
2402	/* check if already opened */
2403	if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
2404		return (0);
2405
2406#ifdef RSS
2407	if (rss_getnumbuckets() > priv->params.num_channels) {
2408		if_printf(ifp, "NOTE: There are more RSS buckets(%u) than "
2409		    "channels(%u) available\n", rss_getnumbuckets(),
2410		    priv->params.num_channels);
2411	}
2412#endif
2413	err = mlx5e_open_tises(priv);
2414	if (err) {
2415		if_printf(ifp, "%s: mlx5e_open_tises failed, %d\n",
2416		    __func__, err);
2417		return (err);
2418	}
2419	err = mlx5_vport_alloc_q_counter(priv->mdev,
2420	    MLX5_INTERFACE_PROTOCOL_ETH, &set_id);
2421	if (err) {
2422		if_printf(priv->ifp,
2423		    "%s: mlx5_vport_alloc_q_counter failed: %d\n",
2424		    __func__, err);
2425		goto err_close_tises;
2426	}
2427	/* store counter set ID */
2428	priv->counter_set_id = set_id;
2429
2430	err = mlx5e_open_channels(priv);
2431	if (err) {
2432		if_printf(ifp, "%s: mlx5e_open_channels failed, %d\n",
2433		    __func__, err);
2434		goto err_dalloc_q_counter;
2435	}
2436	err = mlx5e_open_rqt(priv);
2437	if (err) {
2438		if_printf(ifp, "%s: mlx5e_open_rqt failed, %d\n",
2439		    __func__, err);
2440		goto err_close_channels;
2441	}
2442	err = mlx5e_open_tirs(priv);
2443	if (err) {
2444		if_printf(ifp, "%s: mlx5e_open_tir failed, %d\n",
2445		    __func__, err);
2446		goto err_close_rqls;
2447	}
2448	err = mlx5e_open_flow_table(priv);
2449	if (err) {
2450		if_printf(ifp, "%s: mlx5e_open_flow_table failed, %d\n",
2451		    __func__, err);
2452		goto err_close_tirs;
2453	}
2454	err = mlx5e_add_all_vlan_rules(priv);
2455	if (err) {
2456		if_printf(ifp, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
2457		    __func__, err);
2458		goto err_close_flow_table;
2459	}
2460	set_bit(MLX5E_STATE_OPENED, &priv->state);
2461
2462	mlx5e_update_carrier(priv);
2463	mlx5e_set_rx_mode_core(priv);
2464
2465	return (0);
2466
2467err_close_flow_table:
2468	mlx5e_close_flow_table(priv);
2469
2470err_close_tirs:
2471	mlx5e_close_tirs(priv);
2472
2473err_close_rqls:
2474	mlx5e_close_rqt(priv);
2475
2476err_close_channels:
2477	mlx5e_close_channels(priv);
2478
2479err_dalloc_q_counter:
2480	mlx5_vport_dealloc_q_counter(priv->mdev,
2481	    MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
2482
2483err_close_tises:
2484	mlx5e_close_tises(priv);
2485
2486	return (err);
2487}
2488
2489static void
2490mlx5e_open(void *arg)
2491{
2492	struct mlx5e_priv *priv = arg;
2493
2494	PRIV_LOCK(priv);
2495	if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP))
2496		if_printf(priv->ifp,
2497		    "%s: Setting port status to up failed\n",
2498		    __func__);
2499
2500	mlx5e_open_locked(priv->ifp);
2501	priv->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2502	PRIV_UNLOCK(priv);
2503}
2504
2505int
2506mlx5e_close_locked(struct ifnet *ifp)
2507{
2508	struct mlx5e_priv *priv = ifp->if_softc;
2509
2510	/* check if already closed */
2511	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
2512		return (0);
2513
2514	clear_bit(MLX5E_STATE_OPENED, &priv->state);
2515
2516	mlx5e_set_rx_mode_core(priv);
2517	mlx5e_del_all_vlan_rules(priv);
2518	if_link_state_change(priv->ifp, LINK_STATE_DOWN);
2519	mlx5e_close_flow_table(priv);
2520	mlx5e_close_tirs(priv);
2521	mlx5e_close_rqt(priv);
2522	mlx5e_close_channels(priv);
2523	mlx5_vport_dealloc_q_counter(priv->mdev,
2524	    MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
2525	mlx5e_close_tises(priv);
2526
2527	return (0);
2528}
2529
2530#if (__FreeBSD_version >= 1100000)
2531static uint64_t
2532mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt)
2533{
2534	struct mlx5e_priv *priv = ifp->if_softc;
2535	u64 retval;
2536
2537	/* PRIV_LOCK(priv); XXX not allowed */
2538	switch (cnt) {
2539	case IFCOUNTER_IPACKETS:
2540		retval = priv->stats.vport.rx_packets;
2541		break;
2542	case IFCOUNTER_IERRORS:
2543		retval = priv->stats.vport.rx_error_packets +
2544		    priv->stats.pport.alignment_err +
2545		    priv->stats.pport.check_seq_err +
2546		    priv->stats.pport.crc_align_errors +
2547		    priv->stats.pport.in_range_len_errors +
2548		    priv->stats.pport.jabbers +
2549		    priv->stats.pport.out_of_range_len +
2550		    priv->stats.pport.oversize_pkts +
2551		    priv->stats.pport.symbol_err +
2552		    priv->stats.pport.too_long_errors +
2553		    priv->stats.pport.undersize_pkts +
2554		    priv->stats.pport.unsupported_op_rx;
2555		break;
2556	case IFCOUNTER_IQDROPS:
2557		retval = priv->stats.vport.rx_out_of_buffer +
2558		    priv->stats.pport.drop_events;
2559		break;
2560	case IFCOUNTER_OPACKETS:
2561		retval = priv->stats.vport.tx_packets;
2562		break;
2563	case IFCOUNTER_OERRORS:
2564		retval = priv->stats.vport.tx_error_packets;
2565		break;
2566	case IFCOUNTER_IBYTES:
2567		retval = priv->stats.vport.rx_bytes;
2568		break;
2569	case IFCOUNTER_OBYTES:
2570		retval = priv->stats.vport.tx_bytes;
2571		break;
2572	case IFCOUNTER_IMCASTS:
2573		retval = priv->stats.vport.rx_multicast_packets;
2574		break;
2575	case IFCOUNTER_OMCASTS:
2576		retval = priv->stats.vport.tx_multicast_packets;
2577		break;
2578	case IFCOUNTER_OQDROPS:
2579		retval = priv->stats.vport.tx_queue_dropped;
2580		break;
2581	case IFCOUNTER_COLLISIONS:
2582		retval = priv->stats.pport.collisions;
2583		break;
2584	default:
2585		retval = if_get_counter_default(ifp, cnt);
2586		break;
2587	}
2588	/* PRIV_UNLOCK(priv); XXX not allowed */
2589	return (retval);
2590}
2591#endif
2592
2593static void
2594mlx5e_set_rx_mode(struct ifnet *ifp)
2595{
2596	struct mlx5e_priv *priv = ifp->if_softc;
2597
2598	schedule_work(&priv->set_rx_mode_work);
2599}
2600
2601static int
2602mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2603{
2604	struct mlx5e_priv *priv;
2605	struct ifreq *ifr;
2606	struct ifi2creq i2c;
2607	int error = 0;
2608	int mask = 0;
2609	int size_read = 0;
2610	int module_num;
2611	int max_mtu;
2612	uint8_t read_addr;
2613
2614	priv = ifp->if_softc;
2615
2616	/* check if detaching */
2617	if (priv == NULL || priv->gone != 0)
2618		return (ENXIO);
2619
2620	switch (command) {
2621	case SIOCSIFMTU:
2622		ifr = (struct ifreq *)data;
2623
2624		PRIV_LOCK(priv);
2625		mlx5_query_port_max_mtu(priv->mdev, &max_mtu);
2626
2627		if (ifr->ifr_mtu >= MLX5E_MTU_MIN &&
2628		    ifr->ifr_mtu <= MIN(MLX5E_MTU_MAX, max_mtu)) {
2629			int was_opened;
2630
2631			was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2632			if (was_opened)
2633				mlx5e_close_locked(ifp);
2634
2635			/* set new MTU */
2636			mlx5e_set_dev_port_mtu(ifp, ifr->ifr_mtu);
2637
2638			if (was_opened)
2639				mlx5e_open_locked(ifp);
2640		} else {
2641			error = EINVAL;
2642			if_printf(ifp, "Invalid MTU value. Min val: %d, Max val: %d\n",
2643			    MLX5E_MTU_MIN, MIN(MLX5E_MTU_MAX, max_mtu));
2644		}
2645		PRIV_UNLOCK(priv);
2646		break;
2647	case SIOCSIFFLAGS:
2648		if ((ifp->if_flags & IFF_UP) &&
2649		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2650			mlx5e_set_rx_mode(ifp);
2651			break;
2652		}
2653		PRIV_LOCK(priv);
2654		if (ifp->if_flags & IFF_UP) {
2655			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2656				if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
2657					mlx5e_open_locked(ifp);
2658				ifp->if_drv_flags |= IFF_DRV_RUNNING;
2659				mlx5_set_port_status(priv->mdev, MLX5_PORT_UP);
2660			}
2661		} else {
2662			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2663				mlx5_set_port_status(priv->mdev,
2664				    MLX5_PORT_DOWN);
2665				if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
2666					mlx5e_close_locked(ifp);
2667				mlx5e_update_carrier(priv);
2668				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2669			}
2670		}
2671		PRIV_UNLOCK(priv);
2672		break;
2673	case SIOCADDMULTI:
2674	case SIOCDELMULTI:
2675		mlx5e_set_rx_mode(ifp);
2676		break;
2677	case SIOCSIFMEDIA:
2678	case SIOCGIFMEDIA:
2679	case SIOCGIFXMEDIA:
2680		ifr = (struct ifreq *)data;
2681		error = ifmedia_ioctl(ifp, ifr, &priv->media, command);
2682		break;
2683	case SIOCSIFCAP:
2684		ifr = (struct ifreq *)data;
2685		PRIV_LOCK(priv);
2686		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2687
2688		if (mask & IFCAP_TXCSUM) {
2689			ifp->if_capenable ^= IFCAP_TXCSUM;
2690			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2691
2692			if (IFCAP_TSO4 & ifp->if_capenable &&
2693			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
2694				ifp->if_capenable &= ~IFCAP_TSO4;
2695				ifp->if_hwassist &= ~CSUM_IP_TSO;
2696				if_printf(ifp,
2697				    "tso4 disabled due to -txcsum.\n");
2698			}
2699		}
2700		if (mask & IFCAP_TXCSUM_IPV6) {
2701			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
2702			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2703
2704			if (IFCAP_TSO6 & ifp->if_capenable &&
2705			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2706				ifp->if_capenable &= ~IFCAP_TSO6;
2707				ifp->if_hwassist &= ~CSUM_IP6_TSO;
2708				if_printf(ifp,
2709				    "tso6 disabled due to -txcsum6.\n");
2710			}
2711		}
2712		if (mask & IFCAP_RXCSUM)
2713			ifp->if_capenable ^= IFCAP_RXCSUM;
2714		if (mask & IFCAP_RXCSUM_IPV6)
2715			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
2716		if (mask & IFCAP_TSO4) {
2717			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
2718			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
2719				if_printf(ifp, "enable txcsum first.\n");
2720				error = EAGAIN;
2721				goto out;
2722			}
2723			ifp->if_capenable ^= IFCAP_TSO4;
2724			ifp->if_hwassist ^= CSUM_IP_TSO;
2725		}
2726		if (mask & IFCAP_TSO6) {
2727			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
2728			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2729				if_printf(ifp, "enable txcsum6 first.\n");
2730				error = EAGAIN;
2731				goto out;
2732			}
2733			ifp->if_capenable ^= IFCAP_TSO6;
2734			ifp->if_hwassist ^= CSUM_IP6_TSO;
2735		}
2736		if (mask & IFCAP_VLAN_HWFILTER) {
2737			if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2738				mlx5e_disable_vlan_filter(priv);
2739			else
2740				mlx5e_enable_vlan_filter(priv);
2741
2742			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
2743		}
2744		if (mask & IFCAP_VLAN_HWTAGGING)
2745			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2746		if (mask & IFCAP_WOL_MAGIC)
2747			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2748
2749		VLAN_CAPABILITIES(ifp);
2750		/* turn off LRO means also turn of HW LRO - if it's on */
2751		if (mask & IFCAP_LRO) {
2752			int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2753			bool need_restart = false;
2754
2755			ifp->if_capenable ^= IFCAP_LRO;
2756
2757			/* figure out if updating HW LRO is needed */
2758			if (!(ifp->if_capenable & IFCAP_LRO)) {
2759				if (priv->params.hw_lro_en) {
2760					priv->params.hw_lro_en = false;
2761					need_restart = true;
2762				}
2763			} else {
2764				if (priv->params.hw_lro_en == false &&
2765				    priv->params_ethtool.hw_lro != 0) {
2766					priv->params.hw_lro_en = true;
2767					need_restart = true;
2768				}
2769			}
2770			if (was_opened && need_restart) {
2771				mlx5e_close_locked(ifp);
2772				mlx5e_open_locked(ifp);
2773			}
2774		}
2775out:
2776		PRIV_UNLOCK(priv);
2777		break;
2778
2779	case SIOCGI2C:
2780		ifr = (struct ifreq *)data;
2781
2782		/*
2783		 * Copy from the user-space address ifr_data to the
2784		 * kernel-space address i2c
2785		 */
2786		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
2787		if (error)
2788			break;
2789
2790		if (i2c.len > sizeof(i2c.data)) {
2791			error = EINVAL;
2792			break;
2793		}
2794
2795		PRIV_LOCK(priv);
2796		/* Get module_num which is required for the query_eeprom */
2797		error = mlx5_query_module_num(priv->mdev, &module_num);
2798		if (error) {
2799			if_printf(ifp, "Query module num failed, eeprom "
2800			    "reading is not supported\n");
2801			error = EINVAL;
2802			goto err_i2c;
2803		}
2804		/* Check if module is present before doing an access */
2805		if (mlx5_query_module_status(priv->mdev, module_num) !=
2806		    MLX5_MODULE_STATUS_PLUGGED) {
2807			error = EINVAL;
2808			goto err_i2c;
2809		}
2810		/*
2811		 * Currently 0XA0 and 0xA2 are the only addresses permitted.
2812		 * The internal conversion is as follows:
2813		 */
2814		if (i2c.dev_addr == 0xA0)
2815			read_addr = MLX5E_I2C_ADDR_LOW;
2816		else if (i2c.dev_addr == 0xA2)
2817			read_addr = MLX5E_I2C_ADDR_HIGH;
2818		else {
2819			if_printf(ifp, "Query eeprom failed, "
2820			    "Invalid Address: %X\n", i2c.dev_addr);
2821			error = EINVAL;
2822			goto err_i2c;
2823		}
2824		error = mlx5_query_eeprom(priv->mdev,
2825		    read_addr, MLX5E_EEPROM_LOW_PAGE,
2826		    (uint32_t)i2c.offset, (uint32_t)i2c.len, module_num,
2827		    (uint32_t *)i2c.data, &size_read);
2828		if (error) {
2829			if_printf(ifp, "Query eeprom failed, eeprom "
2830			    "reading is not supported\n");
2831			error = EINVAL;
2832			goto err_i2c;
2833		}
2834
2835		if (i2c.len > MLX5_EEPROM_MAX_BYTES) {
2836			error = mlx5_query_eeprom(priv->mdev,
2837			    read_addr, MLX5E_EEPROM_LOW_PAGE,
2838			    (uint32_t)(i2c.offset + size_read),
2839			    (uint32_t)(i2c.len - size_read), module_num,
2840			    (uint32_t *)(i2c.data + size_read), &size_read);
2841		}
2842		if (error) {
2843			if_printf(ifp, "Query eeprom failed, eeprom "
2844			    "reading is not supported\n");
2845			error = EINVAL;
2846			goto err_i2c;
2847		}
2848
2849		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
2850err_i2c:
2851		PRIV_UNLOCK(priv);
2852		break;
2853
2854	default:
2855		error = ether_ioctl(ifp, command, data);
2856		break;
2857	}
2858	return (error);
2859}
2860
2861static int
2862mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
2863{
2864	/*
2865	 * TODO: uncoment once FW really sets all these bits if
2866	 * (!mdev->caps.eth.rss_ind_tbl_cap || !mdev->caps.eth.csum_cap ||
2867	 * !mdev->caps.eth.max_lso_cap || !mdev->caps.eth.vlan_cap ||
2868	 * !(mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD)) return
2869	 * -ENOTSUPP;
2870	 */
2871
2872	/* TODO: add more must-to-have features */
2873
2874	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
2875		return (-ENODEV);
2876
2877	return (0);
2878}
2879
2880static u16
2881mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
2882{
2883	const int min_size = ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN;
2884	const int max_size = MLX5E_MAX_TX_INLINE;
2885	const int bf_buf_size =
2886	    ((1U << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2U) -
2887	    (sizeof(struct mlx5e_tx_wqe) - 2);
2888
2889	/* verify against driver limits */
2890	if (bf_buf_size > max_size)
2891		return (max_size);
2892	else if (bf_buf_size < min_size)
2893		return (min_size);
2894	else
2895		return (bf_buf_size);
2896}
2897
2898static void
2899mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev,
2900    struct mlx5e_priv *priv,
2901    int num_comp_vectors)
2902{
2903	/*
2904	 * TODO: Consider link speed for setting "log_sq_size",
2905	 * "log_rq_size" and "cq_moderation_xxx":
2906	 */
2907	priv->params.log_sq_size =
2908	    MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
2909	priv->params.log_rq_size =
2910	    MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
2911	priv->params.rx_cq_moderation_usec =
2912	    MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
2913	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE :
2914	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
2915	priv->params.rx_cq_moderation_mode =
2916	    MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 1 : 0;
2917	priv->params.rx_cq_moderation_pkts =
2918	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
2919	priv->params.tx_cq_moderation_usec =
2920	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
2921	priv->params.tx_cq_moderation_pkts =
2922	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
2923	priv->params.min_rx_wqes =
2924	    MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
2925	priv->params.rx_hash_log_tbl_sz =
2926	    (order_base_2(num_comp_vectors) >
2927	    MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
2928	    order_base_2(num_comp_vectors) :
2929	    MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
2930	priv->params.num_tc = 1;
2931	priv->params.default_vlan_prio = 0;
2932	priv->counter_set_id = -1;
2933	priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
2934	mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
2935
2936	/*
2937	 * hw lro is currently defaulted to off. when it won't anymore we
2938	 * will consider the HW capability: "!!MLX5_CAP_ETH(mdev, lro_cap)"
2939	 */
2940	priv->params.hw_lro_en = false;
2941	priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
2942
2943	priv->params.cqe_zipping_en = !!MLX5_CAP_GEN(mdev, cqe_compression);
2944
2945	priv->mdev = mdev;
2946	priv->params.num_channels = num_comp_vectors;
2947	priv->params.channels_rsss = 1;
2948	priv->order_base_2_num_channels = order_base_2(num_comp_vectors);
2949	priv->queue_mapping_channel_mask =
2950	    roundup_pow_of_two(num_comp_vectors) - 1;
2951	priv->num_tc = priv->params.num_tc;
2952	priv->default_vlan_prio = priv->params.default_vlan_prio;
2953
2954	INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
2955	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
2956	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
2957}
2958
2959static int
2960mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
2961    struct mlx5_core_mr *mr)
2962{
2963	struct ifnet *ifp = priv->ifp;
2964	struct mlx5_core_dev *mdev = priv->mdev;
2965	struct mlx5_create_mkey_mbox_in *in;
2966	int err;
2967
2968	in = mlx5_vzalloc(sizeof(*in));
2969	if (in == NULL) {
2970		if_printf(ifp, "%s: failed to allocate inbox\n", __func__);
2971		return (-ENOMEM);
2972	}
2973	in->seg.flags = MLX5_PERM_LOCAL_WRITE |
2974	    MLX5_PERM_LOCAL_READ |
2975	    MLX5_ACCESS_MODE_PA;
2976	in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
2977	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
2978
2979	err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
2980	    NULL);
2981	if (err)
2982		if_printf(ifp, "%s: mlx5_core_create_mkey failed, %d\n",
2983		    __func__, err);
2984
2985	kvfree(in);
2986
2987	return (err);
2988}
2989
2990static const char *mlx5e_vport_stats_desc[] = {
2991	MLX5E_VPORT_STATS(MLX5E_STATS_DESC)
2992};
2993
2994static const char *mlx5e_pport_stats_desc[] = {
2995	MLX5E_PPORT_STATS(MLX5E_STATS_DESC)
2996};
2997
2998static void
2999mlx5e_priv_mtx_init(struct mlx5e_priv *priv)
3000{
3001	mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF);
3002	sx_init(&priv->state_lock, "mlx5state");
3003	callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0);
3004	MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
3005}
3006
3007static void
3008mlx5e_priv_mtx_destroy(struct mlx5e_priv *priv)
3009{
3010	mtx_destroy(&priv->async_events_mtx);
3011	sx_destroy(&priv->state_lock);
3012}
3013
3014static int
3015sysctl_firmware(SYSCTL_HANDLER_ARGS)
3016{
3017	/*
3018	 * %d.%d%.d the string format.
3019	 * fw_rev_{maj,min,sub} return u16, 2^16 = 65536.
3020	 * We need at most 5 chars to store that.
3021	 * It also has: two "." and NULL at the end, which means we need 18
3022	 * (5*3 + 3) chars at most.
3023	 */
3024	char fw[18];
3025	struct mlx5e_priv *priv = arg1;
3026	int error;
3027
3028	snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev),
3029	    fw_rev_sub(priv->mdev));
3030	error = sysctl_handle_string(oidp, fw, sizeof(fw), req);
3031	return (error);
3032}
3033
3034u8
3035mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev)
3036{
3037	u8 min_inline_mode;
3038
3039	min_inline_mode = MLX5_INLINE_MODE_L2;
3040	mlx5_query_min_inline(mdev, &min_inline_mode);
3041	if (min_inline_mode == MLX5_INLINE_MODE_NONE &&
3042	    !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
3043		min_inline_mode = MLX5_INLINE_MODE_L2;
3044
3045	return (min_inline_mode);
3046}
3047
3048static void
3049mlx5e_add_hw_stats(struct mlx5e_priv *priv)
3050{
3051	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
3052	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, priv, 0,
3053	    sysctl_firmware, "A", "HCA firmware version");
3054
3055	SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
3056	    OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0,
3057	    "Board ID");
3058}
3059
3060static void
3061mlx5e_setup_pauseframes(struct mlx5e_priv *priv)
3062{
3063#if (__FreeBSD_version < 1100000)
3064	char path[64];
3065
3066#endif
3067	/* Only receiving pauseframes is enabled by default */
3068	priv->params.tx_pauseframe_control = 0;
3069	priv->params.rx_pauseframe_control = 1;
3070
3071#if (__FreeBSD_version < 1100000)
3072	/* compute path for sysctl */
3073	snprintf(path, sizeof(path), "dev.mce.%d.tx_pauseframe_control",
3074	    device_get_unit(priv->mdev->pdev->dev.bsddev));
3075
3076	/* try to fetch tunable, if any */
3077	TUNABLE_INT_FETCH(path, &priv->params.tx_pauseframe_control);
3078
3079	/* compute path for sysctl */
3080	snprintf(path, sizeof(path), "dev.mce.%d.rx_pauseframe_control",
3081	    device_get_unit(priv->mdev->pdev->dev.bsddev));
3082
3083	/* try to fetch tunable, if any */
3084	TUNABLE_INT_FETCH(path, &priv->params.rx_pauseframe_control);
3085#endif
3086
3087	/* register pausframe SYSCTLs */
3088	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3089	    OID_AUTO, "tx_pauseframe_control", CTLFLAG_RDTUN,
3090	    &priv->params.tx_pauseframe_control, 0,
3091	    "Set to enable TX pause frames. Clear to disable.");
3092
3093	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3094	    OID_AUTO, "rx_pauseframe_control", CTLFLAG_RDTUN,
3095	    &priv->params.rx_pauseframe_control, 0,
3096	    "Set to enable RX pause frames. Clear to disable.");
3097
3098	/* range check */
3099	priv->params.tx_pauseframe_control =
3100	    priv->params.tx_pauseframe_control ? 1 : 0;
3101	priv->params.rx_pauseframe_control =
3102	    priv->params.rx_pauseframe_control ? 1 : 0;
3103
3104	/* update firmware */
3105	mlx5_set_port_pause(priv->mdev, 1,
3106	    priv->params.rx_pauseframe_control,
3107	    priv->params.tx_pauseframe_control);
3108}
3109
3110static void *
3111mlx5e_create_ifp(struct mlx5_core_dev *mdev)
3112{
3113	static volatile int mlx5_en_unit;
3114	struct ifnet *ifp;
3115	struct mlx5e_priv *priv;
3116	u8 dev_addr[ETHER_ADDR_LEN] __aligned(4);
3117	struct sysctl_oid_list *child;
3118	int ncv = mdev->priv.eq_table.num_comp_vectors;
3119	char unit[16];
3120	int err;
3121	int i;
3122	u32 eth_proto_cap;
3123
3124	if (mlx5e_check_required_hca_cap(mdev)) {
3125		mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n");
3126		return (NULL);
3127	}
3128	priv = malloc(sizeof(*priv), M_MLX5EN, M_WAITOK | M_ZERO);
3129	mlx5e_priv_mtx_init(priv);
3130
3131	ifp = priv->ifp = if_alloc(IFT_ETHER);
3132	if (ifp == NULL) {
3133		mlx5_core_err(mdev, "if_alloc() failed\n");
3134		goto err_free_priv;
3135	}
3136	ifp->if_softc = priv;
3137	if_initname(ifp, "mce", atomic_fetchadd_int(&mlx5_en_unit, 1));
3138	ifp->if_mtu = ETHERMTU;
3139	ifp->if_init = mlx5e_open;
3140	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3141	ifp->if_ioctl = mlx5e_ioctl;
3142	ifp->if_transmit = mlx5e_xmit;
3143	ifp->if_qflush = if_qflush;
3144#if (__FreeBSD_version >= 1100000)
3145	ifp->if_get_counter = mlx5e_get_counter;
3146#endif
3147	ifp->if_snd.ifq_maxlen = ifqmaxlen;
3148	/*
3149         * Set driver features
3150         */
3151	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
3152	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
3153	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
3154	ifp->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
3155	ifp->if_capabilities |= IFCAP_LRO;
3156	ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO;
3157	ifp->if_capabilities |= IFCAP_HWSTATS;
3158
3159	/* set TSO limits so that we don't have to drop TX packets */
3160	ifp->if_hw_tsomax = MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
3161	ifp->if_hw_tsomaxsegcount = MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */;
3162	ifp->if_hw_tsomaxsegsize = MLX5E_MAX_TX_MBUF_SIZE;
3163
3164	ifp->if_capenable = ifp->if_capabilities;
3165	ifp->if_hwassist = 0;
3166	if (ifp->if_capenable & IFCAP_TSO)
3167		ifp->if_hwassist |= CSUM_TSO;
3168	if (ifp->if_capenable & IFCAP_TXCSUM)
3169		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
3170	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
3171		ifp->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
3172
3173	/* ifnet sysctl tree */
3174	sysctl_ctx_init(&priv->sysctl_ctx);
3175	priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev),
3176	    OID_AUTO, ifp->if_dname, CTLFLAG_RD, 0, "MLX5 ethernet - interface name");
3177	if (priv->sysctl_ifnet == NULL) {
3178		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
3179		goto err_free_sysctl;
3180	}
3181	snprintf(unit, sizeof(unit), "%d", ifp->if_dunit);
3182	priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3183	    OID_AUTO, unit, CTLFLAG_RD, 0, "MLX5 ethernet - interface unit");
3184	if (priv->sysctl_ifnet == NULL) {
3185		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
3186		goto err_free_sysctl;
3187	}
3188
3189	/* HW sysctl tree */
3190	child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev));
3191	priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child,
3192	    OID_AUTO, "hw", CTLFLAG_RD, 0, "MLX5 ethernet dev hw");
3193	if (priv->sysctl_hw == NULL) {
3194		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
3195		goto err_free_sysctl;
3196	}
3197	mlx5e_build_ifp_priv(mdev, priv, ncv);
3198	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
3199	if (err) {
3200		if_printf(ifp, "%s: mlx5_alloc_map_uar failed, %d\n",
3201		    __func__, err);
3202		goto err_free_sysctl;
3203	}
3204	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
3205	if (err) {
3206		if_printf(ifp, "%s: mlx5_core_alloc_pd failed, %d\n",
3207		    __func__, err);
3208		goto err_unmap_free_uar;
3209	}
3210	err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
3211	if (err) {
3212		if_printf(ifp, "%s: mlx5_alloc_transport_domain failed, %d\n",
3213		    __func__, err);
3214		goto err_dealloc_pd;
3215	}
3216	err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
3217	if (err) {
3218		if_printf(ifp, "%s: mlx5e_create_mkey failed, %d\n",
3219		    __func__, err);
3220		goto err_dealloc_transport_domain;
3221	}
3222	mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr);
3223
3224	/* check if we should generate a random MAC address */
3225	if (MLX5_CAP_GEN(priv->mdev, vport_group_manager) == 0 &&
3226	    is_zero_ether_addr(dev_addr)) {
3227		random_ether_addr(dev_addr);
3228		if_printf(ifp, "Assigned random MAC address\n");
3229	}
3230
3231	/* set default MTU */
3232	mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu);
3233
3234	/* Set desc */
3235	device_set_desc(mdev->pdev->dev.bsddev, mlx5e_version);
3236
3237	/* Set default media status */
3238	priv->media_status_last = IFM_AVALID;
3239	priv->media_active_last = IFM_ETHER | IFM_AUTO |
3240	    IFM_ETH_RXPAUSE | IFM_FDX;
3241
3242	/* setup default pauseframes configuration */
3243	mlx5e_setup_pauseframes(priv);
3244
3245	err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
3246	if (err) {
3247		eth_proto_cap = 0;
3248		if_printf(ifp, "%s: Query port media capability failed, %d\n",
3249		    __func__, err);
3250	}
3251
3252	/* Setup supported medias */
3253	ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
3254	    mlx5e_media_change, mlx5e_media_status);
3255
3256	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
3257		if (mlx5e_mode_table[i].baudrate == 0)
3258			continue;
3259		if (MLX5E_PROT_MASK(i) & eth_proto_cap) {
3260			ifmedia_add(&priv->media,
3261			    mlx5e_mode_table[i].subtype |
3262			    IFM_ETHER, 0, NULL);
3263			ifmedia_add(&priv->media,
3264			    mlx5e_mode_table[i].subtype |
3265			    IFM_ETHER | IFM_FDX |
3266			    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
3267		}
3268	}
3269
3270	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
3271	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
3272	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
3273
3274	/* Set autoselect by default */
3275	ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
3276	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
3277	ether_ifattach(ifp, dev_addr);
3278
3279	/* Register for VLAN events */
3280	priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
3281	    mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
3282	priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
3283	    mlx5e_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);
3284
3285	/* Link is down by default */
3286	if_link_state_change(ifp, LINK_STATE_DOWN);
3287
3288	mlx5e_enable_async_events(priv);
3289
3290	mlx5e_add_hw_stats(priv);
3291
3292	mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3293	    "vstats", mlx5e_vport_stats_desc, MLX5E_VPORT_STATS_NUM,
3294	    priv->stats.vport.arg);
3295
3296	mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3297	    "pstats", mlx5e_pport_stats_desc, MLX5E_PPORT_STATS_NUM,
3298	    priv->stats.pport.arg);
3299
3300	mlx5e_create_ethtool(priv);
3301
3302	mtx_lock(&priv->async_events_mtx);
3303	mlx5e_update_stats(priv);
3304	mtx_unlock(&priv->async_events_mtx);
3305
3306	return (priv);
3307
3308err_dealloc_transport_domain:
3309	mlx5_dealloc_transport_domain(mdev, priv->tdn);
3310
3311err_dealloc_pd:
3312	mlx5_core_dealloc_pd(mdev, priv->pdn);
3313
3314err_unmap_free_uar:
3315	mlx5_unmap_free_uar(mdev, &priv->cq_uar);
3316
3317err_free_sysctl:
3318	sysctl_ctx_free(&priv->sysctl_ctx);
3319
3320	if_free(ifp);
3321
3322err_free_priv:
3323	mlx5e_priv_mtx_destroy(priv);
3324	free(priv, M_MLX5EN);
3325	return (NULL);
3326}
3327
3328static void
3329mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
3330{
3331	struct mlx5e_priv *priv = vpriv;
3332	struct ifnet *ifp = priv->ifp;
3333
3334	/* don't allow more IOCTLs */
3335	priv->gone = 1;
3336
3337	/*
3338	 * Clear the device description to avoid use after free,
3339	 * because the bsddev is not destroyed when this module is
3340	 * unloaded:
3341	 */
3342	device_set_desc(mdev->pdev->dev.bsddev, NULL);
3343
3344	/* XXX wait a bit to allow IOCTL handlers to complete */
3345	pause("W", hz);
3346
3347	/* stop watchdog timer */
3348	callout_drain(&priv->watchdog);
3349
3350	if (priv->vlan_attach != NULL)
3351		EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
3352	if (priv->vlan_detach != NULL)
3353		EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);
3354
3355	/* make sure device gets closed */
3356	PRIV_LOCK(priv);
3357	mlx5e_close_locked(ifp);
3358	PRIV_UNLOCK(priv);
3359
3360	/* unregister device */
3361	ifmedia_removeall(&priv->media);
3362	ether_ifdetach(ifp);
3363	if_free(ifp);
3364
3365	/* destroy all remaining sysctl nodes */
3366	if (priv->sysctl_debug)
3367		sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
3368	sysctl_ctx_free(&priv->stats.vport.ctx);
3369	sysctl_ctx_free(&priv->stats.pport.ctx);
3370	sysctl_ctx_free(&priv->sysctl_ctx);
3371
3372	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
3373	mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
3374	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
3375	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
3376	mlx5e_disable_async_events(priv);
3377	flush_scheduled_work();
3378	mlx5e_priv_mtx_destroy(priv);
3379	free(priv, M_MLX5EN);
3380}
3381
3382static void *
3383mlx5e_get_ifp(void *vpriv)
3384{
3385	struct mlx5e_priv *priv = vpriv;
3386
3387	return (priv->ifp);
3388}
3389
3390static struct mlx5_interface mlx5e_interface = {
3391	.add = mlx5e_create_ifp,
3392	.remove = mlx5e_destroy_ifp,
3393	.event = mlx5e_async_event,
3394	.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
3395	.get_dev = mlx5e_get_ifp,
3396};
3397
3398void
3399mlx5e_init(void)
3400{
3401	mlx5_register_interface(&mlx5e_interface);
3402}
3403
3404void
3405mlx5e_cleanup(void)
3406{
3407	mlx5_unregister_interface(&mlx5e_interface);
3408}
3409
3410module_init_order(mlx5e_init, SI_ORDER_THIRD);
3411module_exit_order(mlx5e_cleanup, SI_ORDER_THIRD);
3412
3413#if (__FreeBSD_version >= 1100000)
3414MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1);
3415#endif
3416MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1);
3417MODULE_VERSION(mlx5en, 1);
3418