mlx5_en_main.c revision 353240
1/*-
2 * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_en/mlx5_en_main.c 353240 2019-10-07 09:46:33Z hselasky $
26 */
27
28#include "en.h"
29
30#include <sys/sockio.h>
31#include <machine/atomic.h>
32
33#ifndef ETH_DRIVER_VERSION
34#define	ETH_DRIVER_VERSION	"3.5.1"
35#endif
36#define DRIVER_RELDATE	"April 2019"
37
38static const char mlx5e_version[] = "mlx5en: Mellanox Ethernet driver "
39	ETH_DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
40
41static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs);
42
43struct mlx5e_channel_param {
44	struct mlx5e_rq_param rq;
45	struct mlx5e_sq_param sq;
46	struct mlx5e_cq_param rx_cq;
47	struct mlx5e_cq_param tx_cq;
48};
49
50struct media {
51	u32	subtype;
52	u64	baudrate;
53};
54
55static const struct media mlx5e_mode_table[MLX5E_LINK_SPEEDS_NUMBER][MLX5E_LINK_MODES_NUMBER] = {
56
57	[MLX5E_1000BASE_CX_SGMII][MLX5E_SGMII] = {
58		.subtype = IFM_1000_CX_SGMII,
59		.baudrate = IF_Mbps(1000ULL),
60	},
61	[MLX5E_1000BASE_KX][MLX5E_KX] = {
62		.subtype = IFM_1000_KX,
63		.baudrate = IF_Mbps(1000ULL),
64	},
65	[MLX5E_10GBASE_CX4][MLX5E_CX4] = {
66		.subtype = IFM_10G_CX4,
67		.baudrate = IF_Gbps(10ULL),
68	},
69	[MLX5E_10GBASE_KX4][MLX5E_KX4] = {
70		.subtype = IFM_10G_KX4,
71		.baudrate = IF_Gbps(10ULL),
72	},
73	[MLX5E_10GBASE_KR][MLX5E_KR] = {
74		.subtype = IFM_10G_KR,
75		.baudrate = IF_Gbps(10ULL),
76	},
77	[MLX5E_20GBASE_KR2][MLX5E_KR2] = {
78		.subtype = IFM_20G_KR2,
79		.baudrate = IF_Gbps(20ULL),
80	},
81	[MLX5E_40GBASE_CR4][MLX5E_CR4] = {
82		.subtype = IFM_40G_CR4,
83		.baudrate = IF_Gbps(40ULL),
84	},
85	[MLX5E_40GBASE_KR4][MLX5E_KR4] = {
86		.subtype = IFM_40G_KR4,
87		.baudrate = IF_Gbps(40ULL),
88	},
89	[MLX5E_56GBASE_R4][MLX5E_R] = {
90		.subtype = IFM_56G_R4,
91		.baudrate = IF_Gbps(56ULL),
92	},
93	[MLX5E_10GBASE_CR][MLX5E_CR1] = {
94		.subtype = IFM_10G_CR1,
95		.baudrate = IF_Gbps(10ULL),
96	},
97	[MLX5E_10GBASE_SR][MLX5E_SR] = {
98		.subtype = IFM_10G_SR,
99		.baudrate = IF_Gbps(10ULL),
100	},
101	[MLX5E_10GBASE_ER_LR][MLX5E_ER] = {
102		.subtype = IFM_10G_ER,
103		.baudrate = IF_Gbps(10ULL),
104	},
105	[MLX5E_10GBASE_ER_LR][MLX5E_LR] = {
106		.subtype = IFM_10G_LR,
107		.baudrate = IF_Gbps(10ULL),
108	},
109	[MLX5E_40GBASE_SR4][MLX5E_SR4] = {
110		.subtype = IFM_40G_SR4,
111		.baudrate = IF_Gbps(40ULL),
112	},
113	[MLX5E_40GBASE_LR4_ER4][MLX5E_LR4] = {
114		.subtype = IFM_40G_LR4,
115		.baudrate = IF_Gbps(40ULL),
116	},
117	[MLX5E_40GBASE_LR4_ER4][MLX5E_ER4] = {
118		.subtype = IFM_40G_ER4,
119		.baudrate = IF_Gbps(40ULL),
120	},
121	[MLX5E_100GBASE_CR4][MLX5E_CR4] = {
122		.subtype = IFM_100G_CR4,
123		.baudrate = IF_Gbps(100ULL),
124	},
125	[MLX5E_100GBASE_SR4][MLX5E_SR4] = {
126		.subtype = IFM_100G_SR4,
127		.baudrate = IF_Gbps(100ULL),
128	},
129	[MLX5E_100GBASE_KR4][MLX5E_KR4] = {
130		.subtype = IFM_100G_KR4,
131		.baudrate = IF_Gbps(100ULL),
132	},
133	[MLX5E_100GBASE_LR4][MLX5E_LR4] = {
134		.subtype = IFM_100G_LR4,
135		.baudrate = IF_Gbps(100ULL),
136	},
137	[MLX5E_100BASE_TX][MLX5E_TX] = {
138		.subtype = IFM_100_TX,
139		.baudrate = IF_Mbps(100ULL),
140	},
141	[MLX5E_1000BASE_T][MLX5E_T] = {
142		.subtype = IFM_1000_T,
143		.baudrate = IF_Mbps(1000ULL),
144	},
145	[MLX5E_10GBASE_T][MLX5E_T] = {
146		.subtype = IFM_10G_T,
147		.baudrate = IF_Gbps(10ULL),
148	},
149	[MLX5E_25GBASE_CR][MLX5E_CR] = {
150		.subtype = IFM_25G_CR,
151		.baudrate = IF_Gbps(25ULL),
152	},
153	[MLX5E_25GBASE_KR][MLX5E_KR] = {
154		.subtype = IFM_25G_KR,
155		.baudrate = IF_Gbps(25ULL),
156	},
157	[MLX5E_25GBASE_SR][MLX5E_SR] = {
158		.subtype = IFM_25G_SR,
159		.baudrate = IF_Gbps(25ULL),
160	},
161	[MLX5E_50GBASE_CR2][MLX5E_CR2] = {
162		.subtype = IFM_50G_CR2,
163		.baudrate = IF_Gbps(50ULL),
164	},
165	[MLX5E_50GBASE_KR2][MLX5E_KR2] = {
166		.subtype = IFM_50G_KR2,
167		.baudrate = IF_Gbps(50ULL),
168	},
169};
170
171static const struct media mlx5e_ext_mode_table[MLX5E_EXT_LINK_SPEEDS_NUMBER][MLX5E_LINK_MODES_NUMBER] = {
172	[MLX5E_SGMII_100M][MLX5E_SGMII] = {
173		.subtype = IFM_100_SGMII,
174		.baudrate = IF_Mbps(100),
175	},
176	[MLX5E_1000BASE_X_SGMII][MLX5E_KX] = {
177		.subtype = IFM_1000_KX,
178		.baudrate = IF_Mbps(1000),
179	},
180	[MLX5E_1000BASE_X_SGMII][MLX5E_CX_SGMII] = {
181		.subtype = IFM_1000_CX_SGMII,
182		.baudrate = IF_Mbps(1000),
183	},
184	[MLX5E_1000BASE_X_SGMII][MLX5E_CX] = {
185		.subtype = IFM_1000_CX,
186		.baudrate = IF_Mbps(1000),
187	},
188	[MLX5E_1000BASE_X_SGMII][MLX5E_LX] = {
189		.subtype = IFM_1000_LX,
190		.baudrate = IF_Mbps(1000),
191	},
192	[MLX5E_1000BASE_X_SGMII][MLX5E_SX] = {
193		.subtype = IFM_1000_SX,
194		.baudrate = IF_Mbps(1000),
195	},
196	[MLX5E_1000BASE_X_SGMII][MLX5E_T] = {
197		.subtype = IFM_1000_T,
198		.baudrate = IF_Mbps(1000),
199	},
200	[MLX5E_5GBASE_R][MLX5E_T] = {
201		.subtype = IFM_5000_T,
202		.baudrate = IF_Mbps(5000),
203	},
204	[MLX5E_5GBASE_R][MLX5E_KR] = {
205		.subtype = IFM_5000_KR,
206		.baudrate = IF_Mbps(5000),
207	},
208	[MLX5E_5GBASE_R][MLX5E_KR1] = {
209		.subtype = IFM_5000_KR1,
210		.baudrate = IF_Mbps(5000),
211	},
212	[MLX5E_5GBASE_R][MLX5E_KR_S] = {
213		.subtype = IFM_5000_KR_S,
214		.baudrate = IF_Mbps(5000),
215	},
216	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_ER] = {
217		.subtype = IFM_10G_ER,
218		.baudrate = IF_Gbps(10ULL),
219	},
220	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_KR] = {
221		.subtype = IFM_10G_KR,
222		.baudrate = IF_Gbps(10ULL),
223	},
224	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_LR] = {
225		.subtype = IFM_10G_LR,
226		.baudrate = IF_Gbps(10ULL),
227	},
228	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_SR] = {
229		.subtype = IFM_10G_SR,
230		.baudrate = IF_Gbps(10ULL),
231	},
232	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_T] = {
233		.subtype = IFM_10G_T,
234		.baudrate = IF_Gbps(10ULL),
235	},
236	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_AOC] = {
237		.subtype = IFM_10G_AOC,
238		.baudrate = IF_Gbps(10ULL),
239	},
240	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_CR1] = {
241		.subtype = IFM_10G_CR1,
242		.baudrate = IF_Gbps(10ULL),
243	},
244	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_CR4] = {
245		.subtype = IFM_40G_CR4,
246		.baudrate = IF_Gbps(40ULL),
247	},
248	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_KR4] = {
249		.subtype = IFM_40G_KR4,
250		.baudrate = IF_Gbps(40ULL),
251	},
252	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_LR4] = {
253		.subtype = IFM_40G_LR4,
254		.baudrate = IF_Gbps(40ULL),
255	},
256	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_SR4] = {
257		.subtype = IFM_40G_SR4,
258		.baudrate = IF_Gbps(40ULL),
259	},
260	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_ER4] = {
261		.subtype = IFM_40G_ER4,
262		.baudrate = IF_Gbps(40ULL),
263	},
264
265	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR] = {
266		.subtype = IFM_25G_CR,
267		.baudrate = IF_Gbps(25ULL),
268	},
269	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR] = {
270		.subtype = IFM_25G_KR,
271		.baudrate = IF_Gbps(25ULL),
272	},
273	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_SR] = {
274		.subtype = IFM_25G_SR,
275		.baudrate = IF_Gbps(25ULL),
276	},
277	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_ACC] = {
278		.subtype = IFM_25G_ACC,
279		.baudrate = IF_Gbps(25ULL),
280	},
281	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_AOC] = {
282		.subtype = IFM_25G_AOC,
283		.baudrate = IF_Gbps(25ULL),
284	},
285	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR1] = {
286		.subtype = IFM_25G_CR1,
287		.baudrate = IF_Gbps(25ULL),
288	},
289	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR_S] = {
290		.subtype = IFM_25G_CR_S,
291		.baudrate = IF_Gbps(25ULL),
292	},
293	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR1] = {
294		.subtype = IFM_5000_KR1,
295		.baudrate = IF_Gbps(25ULL),
296	},
297	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR_S] = {
298		.subtype = IFM_25G_KR_S,
299		.baudrate = IF_Gbps(25ULL),
300	},
301	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_LR] = {
302		.subtype = IFM_25G_LR,
303		.baudrate = IF_Gbps(25ULL),
304	},
305	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_T] = {
306		.subtype = IFM_25G_T,
307		.baudrate = IF_Gbps(25ULL),
308	},
309	[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_CR2] = {
310		.subtype = IFM_50G_CR2,
311		.baudrate = IF_Gbps(50ULL),
312	},
313	[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_KR2] = {
314		.subtype = IFM_50G_KR2,
315		.baudrate = IF_Gbps(50ULL),
316	},
317	[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_SR2] = {
318		.subtype = IFM_50G_SR2,
319		.baudrate = IF_Gbps(50ULL),
320	},
321	[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_LR2] = {
322		.subtype = IFM_50G_LR2,
323		.baudrate = IF_Gbps(50ULL),
324	},
325	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_LR] = {
326		.subtype = IFM_50G_LR,
327		.baudrate = IF_Gbps(50ULL),
328	},
329	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_SR] = {
330		.subtype = IFM_50G_SR,
331		.baudrate = IF_Gbps(50ULL),
332	},
333	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_CP] = {
334		.subtype = IFM_50G_CP,
335		.baudrate = IF_Gbps(50ULL),
336	},
337	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_FR] = {
338		.subtype = IFM_50G_FR,
339		.baudrate = IF_Gbps(50ULL),
340	},
341	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_KR_PAM4] = {
342		.subtype = IFM_50G_KR_PAM4,
343		.baudrate = IF_Gbps(50ULL),
344	},
345	[MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_CR4] = {
346		.subtype = IFM_100G_CR4,
347		.baudrate = IF_Gbps(100ULL),
348	},
349	[MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_KR4] = {
350		.subtype = IFM_100G_KR4,
351		.baudrate = IF_Gbps(100ULL),
352	},
353	[MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_LR4] = {
354		.subtype = IFM_100G_LR4,
355		.baudrate = IF_Gbps(100ULL),
356	},
357	[MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_SR4] = {
358		.subtype = IFM_100G_SR4,
359		.baudrate = IF_Gbps(100ULL),
360	},
361	[MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_SR2] = {
362		.subtype = IFM_100G_SR2,
363		.baudrate = IF_Gbps(100ULL),
364	},
365	[MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_CP2] = {
366		.subtype = IFM_100G_CP2,
367		.baudrate = IF_Gbps(100ULL),
368	},
369	[MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_KR2_PAM4] = {
370		.subtype = IFM_100G_KR2_PAM4,
371		.baudrate = IF_Gbps(100ULL),
372	},
373	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_DR4] = {
374		.subtype = IFM_200G_DR4,
375		.baudrate = IF_Gbps(200ULL),
376	},
377	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_LR4] = {
378		.subtype = IFM_200G_LR4,
379		.baudrate = IF_Gbps(200ULL),
380	},
381	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_SR4] = {
382		.subtype = IFM_200G_SR4,
383		.baudrate = IF_Gbps(200ULL),
384	},
385	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_FR4] = {
386		.subtype = IFM_200G_FR4,
387		.baudrate = IF_Gbps(200ULL),
388	},
389	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_CR4_PAM4] = {
390		.subtype = IFM_200G_CR4_PAM4,
391		.baudrate = IF_Gbps(200ULL),
392	},
393	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_KR4_PAM4] = {
394		.subtype = IFM_200G_KR4_PAM4,
395		.baudrate = IF_Gbps(200ULL),
396	},
397};
398
399MALLOC_DEFINE(M_MLX5EN, "MLX5EN", "MLX5 Ethernet");
400
401static void
402mlx5e_update_carrier(struct mlx5e_priv *priv)
403{
404	struct mlx5_core_dev *mdev = priv->mdev;
405	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
406	u32 eth_proto_oper;
407	int error;
408	u8 port_state;
409	u8 is_er_type;
410	u8 i, j;
411	bool ext;
412	struct media media_entry = {};
413
414	port_state = mlx5_query_vport_state(mdev,
415	    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
416
417	if (port_state == VPORT_STATE_UP) {
418		priv->media_status_last |= IFM_ACTIVE;
419	} else {
420		priv->media_status_last &= ~IFM_ACTIVE;
421		priv->media_active_last = IFM_ETHER;
422		if_link_state_change(priv->ifp, LINK_STATE_DOWN);
423		return;
424	}
425
426	error = mlx5_query_port_ptys(mdev, out, sizeof(out),
427	    MLX5_PTYS_EN, 1);
428	if (error) {
429		priv->media_active_last = IFM_ETHER;
430		priv->ifp->if_baudrate = 1;
431		mlx5_en_err(priv->ifp, "query port ptys failed: 0x%x\n",
432		    error);
433		return;
434	}
435
436	ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
437	eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
438	    eth_proto_oper);
439
440	i = ilog2(eth_proto_oper);
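	/*
	 * eth_proto_oper normally has a single protocol bit set; ilog2()
	 * turns the (highest) set bit into a row index for the mode
	 * tables, and the loop below picks the first populated column
	 * entry for that speed.
	 */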
441
442	for (j = 0; j != MLX5E_LINK_MODES_NUMBER; j++) {
443		media_entry = ext ? mlx5e_ext_mode_table[i][j] :
444		    mlx5e_mode_table[i][j];
445		if (media_entry.baudrate != 0)
446			break;
447	}
448
449	if (media_entry.subtype == 0) {
450		mlx5_en_err(priv->ifp,
451		    "Could not find operational media subtype\n");
452		return;
453	}
454
455	switch (media_entry.subtype) {
456	case IFM_10G_ER:
457		error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type);
458		if (error != 0) {
459			mlx5_en_err(priv->ifp,
460			    "query port pddr failed: %d\n", error);
461		}
462		if (error != 0 || is_er_type == 0)
463			media_entry.subtype = IFM_10G_LR;
464		break;
465	case IFM_40G_LR4:
466		error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type);
467		if (error != 0) {
468			mlx5_en_err(priv->ifp,
469			    "query port pddr failed: %d\n", error);
470		}
471		if (error == 0 && is_er_type != 0)
472			media_entry.subtype = IFM_40G_ER4;
473		break;
474	}
475	priv->media_active_last = media_entry.subtype | IFM_ETHER | IFM_FDX;
476	priv->ifp->if_baudrate = media_entry.baudrate;
477
478	if_link_state_change(priv->ifp, LINK_STATE_UP);
479}
480
481static void
482mlx5e_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
483{
484	struct mlx5e_priv *priv = dev->if_softc;
485
486	ifmr->ifm_status = priv->media_status_last;
487	ifmr->ifm_active = priv->media_active_last |
488	    (priv->params.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) |
489	    (priv->params.tx_pauseframe_control ? IFM_ETH_TXPAUSE : 0);
490
491}
492
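/*
 * Translate an ifmedia subtype into a PTYS protocol bitmask by scanning
 * the mode table (or the extended mode table). IFM_10G_LR and
 * IFM_40G_ER4 are first folded into their ER/LR row counterparts,
 * apparently mirroring the cable-range fixups in mlx5e_update_carrier().
 */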
493static u32
494mlx5e_find_link_mode(u32 subtype, bool ext)
495{
496	u32 i;
497	u32 j;
498	u32 link_mode = 0;
499	u32 speeds_num = 0;
500	struct media media_entry = {};
501
502	switch (subtype) {
503	case IFM_10G_LR:
504		subtype = IFM_10G_ER;
505		break;
506	case IFM_40G_ER4:
507		subtype = IFM_40G_LR4;
508		break;
509	}
510
511	speeds_num = ext ? MLX5E_EXT_LINK_SPEEDS_NUMBER :
512	    MLX5E_LINK_SPEEDS_NUMBER;
513
514	for (i = 0; i != speeds_num; i++) {
515		for (j = 0; j < MLX5E_LINK_MODES_NUMBER; j++) {
516			media_entry = ext ? mlx5e_ext_mode_table[i][j] :
517			    mlx5e_mode_table[i][j];
518			if (media_entry.baudrate == 0)
519				continue;
520			if (media_entry.subtype == subtype) {
521				link_mode |= MLX5E_PROT_MASK(i);
522			}
523		}
524	}
525
526	return (link_mode);
527}
528
529static int
530mlx5e_set_port_pause_and_pfc(struct mlx5e_priv *priv)
531{
532	return (mlx5_set_port_pause_and_pfc(priv->mdev, 1,
533	    priv->params.rx_pauseframe_control,
534	    priv->params.tx_pauseframe_control,
535	    priv->params.rx_priority_flow_control,
536	    priv->params.tx_priority_flow_control));
537}
538
539static int
540mlx5e_set_port_pfc(struct mlx5e_priv *priv)
541{
542	int error;
543
544	if (priv->gone != 0) {
545		error = -ENXIO;
546	} else if (priv->params.rx_pauseframe_control ||
547	    priv->params.tx_pauseframe_control) {
548		mlx5_en_err(priv->ifp,
549		    "Global pauseframes must be disabled before enabling PFC.\n");
550		error = -EINVAL;
551	} else {
552		error = mlx5e_set_port_pause_and_pfc(priv);
553	}
554	return (error);
555}
556
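/*
 * ifmedia change callback: translate the requested media word into a
 * PTYS protocol mask, intersect it with the port capabilities (or use
 * the full capability mask for autoselect), update the pauseframe
 * settings and reprogram the port, which is briefly taken down.
 */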
557static int
558mlx5e_media_change(struct ifnet *dev)
559{
560	struct mlx5e_priv *priv = dev->if_softc;
561	struct mlx5_core_dev *mdev = priv->mdev;
562	u32 eth_proto_cap;
563	u32 link_mode;
564	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
565	int was_opened;
566	int locked;
567	int error;
568	bool ext;
569
570	locked = PRIV_LOCKED(priv);
571	if (!locked)
572		PRIV_LOCK(priv);
573
574	if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) {
575		error = EINVAL;
576		goto done;
577	}
578
579	error = mlx5_query_port_ptys(mdev, out, sizeof(out),
580	    MLX5_PTYS_EN, 1);
581	if (error != 0) {
582		mlx5_en_err(dev, "Query port media capability failed\n");
583		goto done;
584	}
585
586	ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
587	link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media), ext);
588
589	/* query supported capabilities */
590	eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
591	    eth_proto_capability);
592
593	/* check for autoselect */
594	if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO) {
595		link_mode = eth_proto_cap;
596		if (link_mode == 0) {
597			mlx5_en_err(dev, "Port media capability is zero\n");
598			error = EINVAL;
599			goto done;
600		}
601	} else {
602		link_mode = link_mode & eth_proto_cap;
603		if (link_mode == 0) {
604			mlx5_en_err(dev, "Not supported link mode requested\n");
605			error = EINVAL;
606			goto done;
607		}
608	}
609	if (priv->media.ifm_media & (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
610		/* check if PFC is enabled */
611		if (priv->params.rx_priority_flow_control ||
612		    priv->params.tx_priority_flow_control) {
613			mlx5_en_err(dev, "PFC must be disabled before enabling global pauseframes.\n");
614			error = EINVAL;
615			goto done;
616		}
617	}
618	/* update pauseframe control bits */
619	priv->params.rx_pauseframe_control =
620	    (priv->media.ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0;
621	priv->params.tx_pauseframe_control =
622	    (priv->media.ifm_media & IFM_ETH_TXPAUSE) ? 1 : 0;
623
624	/* check if device is opened */
625	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
626
627	/* reconfigure the hardware */
628	mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
629	mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN, ext);
630	error = -mlx5e_set_port_pause_and_pfc(priv);
631	if (was_opened)
632		mlx5_set_port_status(mdev, MLX5_PORT_UP);
633
634done:
635	if (!locked)
636		PRIV_UNLOCK(priv);
637	return (error);
638}
639
640static void
641mlx5e_update_carrier_work(struct work_struct *work)
642{
643	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
644	    update_carrier_work);
645
646	PRIV_LOCK(priv);
647	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
648		mlx5e_update_carrier(priv);
649	PRIV_UNLOCK(priv);
650}
651
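/*
 * Expansion callbacks for the MLX5E_PCIE_*_COUNTERS_*() list macros used
 * below: for a counter field "c" inside counter set layout "f", each
 * list entry expands to roughly
 *	s_debug->c = MLX5_GET64(mpcnt_reg, out, counter_set.f.c);
 * (or MLX5_GET() for the 32-bit variant).
 */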
652#define	MLX5E_PCIE_PERF_GET_64(a,b,c,d,e,f)    \
653	s_debug->c = MLX5_GET64(mpcnt_reg, out, counter_set.f.c);
654
655#define	MLX5E_PCIE_PERF_GET_32(a,b,c,d,e,f)    \
656	s_debug->c = MLX5_GET(mpcnt_reg, out, counter_set.f.c);
657
658static void
659mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
660{
661	struct mlx5_core_dev *mdev = priv->mdev;
662	struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
663	const unsigned sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
664	void *out;
665	void *in;
666	int err;
667
668	/* allocate firmware request structures */
669	in = mlx5_vzalloc(sz);
670	out = mlx5_vzalloc(sz);
671	if (in == NULL || out == NULL)
672		goto free_out;
673
674	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
675	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
676	if (err != 0)
677		goto free_out;
678
679	MLX5E_PCIE_PERFORMANCE_COUNTERS_64(MLX5E_PCIE_PERF_GET_64)
680	MLX5E_PCIE_PERFORMANCE_COUNTERS_32(MLX5E_PCIE_PERF_GET_32)
681
682	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP);
683	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
684	if (err != 0)
685		goto free_out;
686
687	MLX5E_PCIE_TIMERS_AND_STATES_COUNTERS_32(MLX5E_PCIE_PERF_GET_32)
688
689	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_LANE_COUNTERS_GROUP);
690	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
691	if (err != 0)
692		goto free_out;
693
694	MLX5E_PCIE_LANE_COUNTERS_32(MLX5E_PCIE_PERF_GET_32)
695
696free_out:
697	/* free firmware request structures */
698	kvfree(in);
699	kvfree(out);
700}
701
702/*
703 * This function reads the physical port counters from the firmware
704 * using a predefined layout given by the various MLX5E_PPORT_XXX()
705 * macros. The output is converted from big-endian 64-bit values into
706 * host-endian ones and stored in the "priv->stats.pport" structure.
707 */
708static void
709mlx5e_update_pport_counters(struct mlx5e_priv *priv)
710{
711	struct mlx5_core_dev *mdev = priv->mdev;
712	struct mlx5e_pport_stats *s = &priv->stats.pport;
713	struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
714	u32 *in;
715	u32 *out;
716	const u64 *ptr;
717	unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
718	unsigned x;
719	unsigned y;
720	unsigned z;
721
722	/* allocate firmware request structures */
723	in = mlx5_vzalloc(sz);
724	out = mlx5_vzalloc(sz);
725	if (in == NULL || out == NULL)
726		goto free_out;
727
728	/*
729	 * Get pointer to the 64-bit counter set which is located at a
730	 * fixed offset in the output firmware request structure:
731	 */
732	ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set);
733
734	MLX5_SET(ppcnt_reg, in, local_port, 1);
735
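	/*
	 * The destination index "y" is carried across the counter groups
	 * below (restarting once when switching from the "pport" array to
	 * the "port_stats_debug" array), so the order of the register
	 * reads must match the layout of those argument arrays.
	 */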
736	/* read IEEE802_3 counter group using predefined counter layout */
737	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
738	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
739	for (x = 0, y = MLX5E_PPORT_PER_PRIO_STATS_NUM;
740	     x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++)
741		s->arg[y] = be64toh(ptr[x]);
742
743	/* read RFC2819 counter group using predefined counter layout */
744	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
745	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
746	for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++)
747		s->arg[y] = be64toh(ptr[x]);
748
749	for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM +
750	    MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++)
751		s_debug->arg[y] = be64toh(ptr[x]);
752
753	/* read RFC2863 counter group using predefined counter layout */
754	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
755	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
756	for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++)
757		s_debug->arg[y] = be64toh(ptr[x]);
758
759	/* read physical layer stats counter group using predefined counter layout */
760	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
761	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
762	for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++)
763		s_debug->arg[y] = be64toh(ptr[x]);
764
765	/* read Extended Ethernet counter group using predefined counter layout */
766	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
767	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
768	for (x = 0; x != MLX5E_PPORT_ETHERNET_EXTENDED_STATS_DEBUG_NUM; x++, y++)
769		s_debug->arg[y] = be64toh(ptr[x]);
770
771	/* read Extended Statistical Group */
772	if (MLX5_CAP_GEN(mdev, pcam_reg) &&
773	    MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) &&
774	    MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters)) {
775		/* read Extended Statistical counter group using predefined counter layout */
776		MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
777		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
778
779		for (x = 0; x != MLX5E_PPORT_STATISTICAL_DEBUG_NUM; x++, y++)
780			s_debug->arg[y] = be64toh(ptr[x]);
781	}
782
783	/* read PCIE counters */
784	mlx5e_update_pcie_counters(priv);
785
786	/* read per-priority counters */
787	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
788
789	/* iterate all the priorities */
790	for (y = z = 0; z != MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO; z++) {
791		MLX5_SET(ppcnt_reg, in, prio_tc, z);
792		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
793
794		/* read per priority stats counter group using predefined counter layout */
795		for (x = 0; x != (MLX5E_PPORT_PER_PRIO_STATS_NUM /
796		    MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO); x++, y++)
797			s->arg[y] = be64toh(ptr[x]);
798	}
799
800free_out:
801	/* free firmware request structures */
802	kvfree(in);
803	kvfree(out);
804}
805
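/*
 * Read the VNIC environment counters using the QUERY_VNIC_ENV firmware
 * command. Only the receive steering discard counter is collected here,
 * and only when the capability is advertised by the firmware.
 */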
806static void
807mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
808{
809	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
810	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
811
812	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
813		return;
814
815	MLX5_SET(query_vnic_env_in, in, opcode,
816	    MLX5_CMD_OP_QUERY_VNIC_ENV);
817	MLX5_SET(query_vnic_env_in, in, op_mod, 0);
818	MLX5_SET(query_vnic_env_in, in, other_vport, 0);
819
820	if (mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)) != 0)
821		return;
822
823	priv->stats.vport.rx_steer_missed_packets =
824	    MLX5_GET64(query_vnic_env_out, out,
825	    vport_env.nic_receive_steering_discard);
826}
827
828/*
829 * This function is called regularly to collect all statistics
830 * counters from the firmware. The values can be viewed through the
831 * sysctl interface. Execution is serialized using the priv's global
832 * configuration lock.
833 */
834static void
835mlx5e_update_stats_locked(struct mlx5e_priv *priv)
836{
837	struct mlx5_core_dev *mdev = priv->mdev;
838	struct mlx5e_vport_stats *s = &priv->stats.vport;
839	struct mlx5e_sq_stats *sq_stats;
840	struct buf_ring *sq_br;
841#if (__FreeBSD_version < 1100000)
842	struct ifnet *ifp = priv->ifp;
843#endif
844
845	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
846	u32 *out;
847	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
848	u64 tso_packets = 0;
849	u64 tso_bytes = 0;
850	u64 tx_queue_dropped = 0;
851	u64 tx_defragged = 0;
852	u64 tx_offload_none = 0;
853	u64 lro_packets = 0;
854	u64 lro_bytes = 0;
855	u64 sw_lro_queued = 0;
856	u64 sw_lro_flushed = 0;
857	u64 rx_csum_none = 0;
858	u64 rx_wqe_err = 0;
859	u64 rx_packets = 0;
860	u64 rx_bytes = 0;
861	u32 rx_out_of_buffer = 0;
862	int i;
863	int j;
864
865	out = mlx5_vzalloc(outlen);
866	if (out == NULL)
867		goto free_out;
868
869	/* Collect the SW counters first and then the HW counters for consistency */
870	for (i = 0; i < priv->params.num_channels; i++) {
871		struct mlx5e_channel *pch = priv->channel + i;
872		struct mlx5e_rq *rq = &pch->rq;
873		struct mlx5e_rq_stats *rq_stats = &pch->rq.stats;
874
875		/* collect stats from LRO */
876		rq_stats->sw_lro_queued = rq->lro.lro_queued;
877		rq_stats->sw_lro_flushed = rq->lro.lro_flushed;
878		sw_lro_queued += rq_stats->sw_lro_queued;
879		sw_lro_flushed += rq_stats->sw_lro_flushed;
880		lro_packets += rq_stats->lro_packets;
881		lro_bytes += rq_stats->lro_bytes;
882		rx_csum_none += rq_stats->csum_none;
883		rx_wqe_err += rq_stats->wqe_err;
884		rx_packets += rq_stats->packets;
885		rx_bytes += rq_stats->bytes;
886
887		for (j = 0; j < priv->num_tc; j++) {
888			sq_stats = &pch->sq[j].stats;
889			sq_br = pch->sq[j].br;
890
891			tso_packets += sq_stats->tso_packets;
892			tso_bytes += sq_stats->tso_bytes;
893			tx_queue_dropped += sq_stats->dropped;
894			if (sq_br != NULL)
895				tx_queue_dropped += sq_br->br_drops;
896			tx_defragged += sq_stats->defragged;
897			tx_offload_none += sq_stats->csum_offload_none;
898		}
899	}
900
901	/* update counters */
902	s->tso_packets = tso_packets;
903	s->tso_bytes = tso_bytes;
904	s->tx_queue_dropped = tx_queue_dropped;
905	s->tx_defragged = tx_defragged;
906	s->lro_packets = lro_packets;
907	s->lro_bytes = lro_bytes;
908	s->sw_lro_queued = sw_lro_queued;
909	s->sw_lro_flushed = sw_lro_flushed;
910	s->rx_csum_none = rx_csum_none;
911	s->rx_wqe_err = rx_wqe_err;
912	s->rx_packets = rx_packets;
913	s->rx_bytes = rx_bytes;
914
915	mlx5e_grp_vnic_env_update_stats(priv);
916
917	/* HW counters */
918	memset(in, 0, sizeof(in));
919
920	MLX5_SET(query_vport_counter_in, in, opcode,
921	    MLX5_CMD_OP_QUERY_VPORT_COUNTER);
922	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
923	MLX5_SET(query_vport_counter_in, in, other_vport, 0);
924
925	memset(out, 0, outlen);
926
927	/* get number of out-of-buffer drops first */
928	if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0 &&
929	    mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id,
930	    &rx_out_of_buffer) == 0) {
931		s->rx_out_of_buffer = rx_out_of_buffer;
932	}
933
934	/* get port statistics */
935	if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen) == 0) {
936#define	MLX5_GET_CTR(out, x) \
937	MLX5_GET64(query_vport_counter_out, out, x)
938
939		s->rx_error_packets =
940		    MLX5_GET_CTR(out, received_errors.packets);
941		s->rx_error_bytes =
942		    MLX5_GET_CTR(out, received_errors.octets);
943		s->tx_error_packets =
944		    MLX5_GET_CTR(out, transmit_errors.packets);
945		s->tx_error_bytes =
946		    MLX5_GET_CTR(out, transmit_errors.octets);
947
948		s->rx_unicast_packets =
949		    MLX5_GET_CTR(out, received_eth_unicast.packets);
950		s->rx_unicast_bytes =
951		    MLX5_GET_CTR(out, received_eth_unicast.octets);
952		s->tx_unicast_packets =
953		    MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
954		s->tx_unicast_bytes =
955		    MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
956
957		s->rx_multicast_packets =
958		    MLX5_GET_CTR(out, received_eth_multicast.packets);
959		s->rx_multicast_bytes =
960		    MLX5_GET_CTR(out, received_eth_multicast.octets);
961		s->tx_multicast_packets =
962		    MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
963		s->tx_multicast_bytes =
964		    MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
965
966		s->rx_broadcast_packets =
967		    MLX5_GET_CTR(out, received_eth_broadcast.packets);
968		s->rx_broadcast_bytes =
969		    MLX5_GET_CTR(out, received_eth_broadcast.octets);
970		s->tx_broadcast_packets =
971		    MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
972		s->tx_broadcast_bytes =
973		    MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
974
975		s->tx_packets = s->tx_unicast_packets +
976		    s->tx_multicast_packets + s->tx_broadcast_packets;
977		s->tx_bytes = s->tx_unicast_bytes + s->tx_multicast_bytes +
978		    s->tx_broadcast_bytes;
979
980		/* Update calculated offload counters */
981		s->tx_csum_offload = s->tx_packets - tx_offload_none;
982		s->rx_csum_good = s->rx_packets - s->rx_csum_none;
983	}
984
985	/* Get physical port counters */
986	mlx5e_update_pport_counters(priv);
987
988	s->tx_jumbo_packets =
989	    priv->stats.port_stats_debug.tx_stat_p1519to2047octets +
990	    priv->stats.port_stats_debug.tx_stat_p2048to4095octets +
991	    priv->stats.port_stats_debug.tx_stat_p4096to8191octets +
992	    priv->stats.port_stats_debug.tx_stat_p8192to10239octets;
993
994#if (__FreeBSD_version < 1100000)
995	/* no get_counters interface in fbsd 10 */
996	ifp->if_ipackets = s->rx_packets;
997	ifp->if_ierrors = priv->stats.pport.in_range_len_errors +
998	    priv->stats.pport.out_of_range_len +
999	    priv->stats.pport.too_long_errors +
1000	    priv->stats.pport.check_seq_err +
1001	    priv->stats.pport.alignment_err;
1002	ifp->if_iqdrops = s->rx_out_of_buffer;
1003	ifp->if_opackets = s->tx_packets;
1004	ifp->if_oerrors = priv->stats.port_stats_debug.out_discards;
1005	ifp->if_snd.ifq_drops = s->tx_queue_dropped;
1006	ifp->if_ibytes = s->rx_bytes;
1007	ifp->if_obytes = s->tx_bytes;
1008	ifp->if_collisions =
1009	    priv->stats.pport.collisions;
1010#endif
1011
1012free_out:
1013	kvfree(out);
1014
1015	/* Update diagnostics, if any */
1016	if (priv->params_ethtool.diag_pci_enable ||
1017	    priv->params_ethtool.diag_general_enable) {
1018		int error = mlx5_core_get_diagnostics_full(mdev,
1019		    priv->params_ethtool.diag_pci_enable ? &priv->params_pci : NULL,
1020		    priv->params_ethtool.diag_general_enable ? &priv->params_general : NULL);
1021		if (error != 0)
1022			mlx5_en_err(priv->ifp,
1023			    "Failed reading diagnostics: %d\n", error);
1024	}
1025}
1026
1027static void
1028mlx5e_update_stats_work(struct work_struct *work)
1029{
1030	struct mlx5e_priv *priv;
1031
1032	priv = container_of(work, struct mlx5e_priv, update_stats_work);
1033	PRIV_LOCK(priv);
1034	if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
1035		mlx5e_update_stats_locked(priv);
1036	PRIV_UNLOCK(priv);
1037}
1038
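/*
 * Statistics watchdog callout: defer the actual update to the priv's
 * workqueue and re-arm itself once per second. The work item takes the
 * configuration lock before touching the counters.
 */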
1039static void
1040mlx5e_update_stats(void *arg)
1041{
1042	struct mlx5e_priv *priv = arg;
1043
1044	queue_work(priv->wq, &priv->update_stats_work);
1045
1046	callout_reset(&priv->watchdog, hz, &mlx5e_update_stats, priv);
1047}
1048
1049static void
1050mlx5e_async_event_sub(struct mlx5e_priv *priv,
1051    enum mlx5_dev_event event)
1052{
1053	switch (event) {
1054	case MLX5_DEV_EVENT_PORT_UP:
1055	case MLX5_DEV_EVENT_PORT_DOWN:
1056		queue_work(priv->wq, &priv->update_carrier_work);
1057		break;
1058
1059	default:
1060		break;
1061	}
1062}
1063
1064static void
1065mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
1066    enum mlx5_dev_event event, unsigned long param)
1067{
1068	struct mlx5e_priv *priv = vpriv;
1069
1070	mtx_lock(&priv->async_events_mtx);
1071	if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
1072		mlx5e_async_event_sub(priv, event);
1073	mtx_unlock(&priv->async_events_mtx);
1074}
1075
1076static void
1077mlx5e_enable_async_events(struct mlx5e_priv *priv)
1078{
1079	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
1080}
1081
1082static void
1083mlx5e_disable_async_events(struct mlx5e_priv *priv)
1084{
1085	mtx_lock(&priv->async_events_mtx);
1086	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
1087	mtx_unlock(&priv->async_events_mtx);
1088}
1089
1090static const char *mlx5e_rq_stats_desc[] = {
1091	MLX5E_RQ_STATS(MLX5E_STATS_DESC)
1092};
1093
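/*
 * Software setup of a receive queue: create a DMA tag sized for up to
 * "nsegs" scatter segments per WQE, create the linked-list work queue,
 * initialize LRO, allocate one mbuf slot and DMA map per WQE, preset the
 * constant lkey fields and select the dynamic interrupt moderation (DIM)
 * mode from the RX completion queue parameters.
 */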
1094static int
1095mlx5e_create_rq(struct mlx5e_channel *c,
1096    struct mlx5e_rq_param *param,
1097    struct mlx5e_rq *rq)
1098{
1099	struct mlx5e_priv *priv = c->priv;
1100	struct mlx5_core_dev *mdev = priv->mdev;
1101	char buffer[16];
1102	void *rqc = param->rqc;
1103	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
1104	int wq_sz;
1105	int err;
1106	int i;
1107	u32 nsegs, wqe_sz;
1108
1109	err = mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
1110	if (err != 0)
1111		goto done;
1112
1113	/* Create DMA descriptor TAG */
1114	if ((err = -bus_dma_tag_create(
1115	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
1116	    1,				/* any alignment */
1117	    0,				/* no boundary */
1118	    BUS_SPACE_MAXADDR,		/* lowaddr */
1119	    BUS_SPACE_MAXADDR,		/* highaddr */
1120	    NULL, NULL,			/* filter, filterarg */
1121	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsize */
1122	    nsegs,			/* nsegments */
1123	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsegsize */
1124	    0,				/* flags */
1125	    NULL, NULL,			/* lockfunc, lockfuncarg */
1126	    &rq->dma_tag)))
1127		goto done;
1128
1129	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
1130	    &rq->wq_ctrl);
1131	if (err)
1132		goto err_free_dma_tag;
1133
1134	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
1135
1136	err = mlx5e_get_wqe_sz(priv, &rq->wqe_sz, &rq->nsegs);
1137	if (err != 0)
1138		goto err_rq_wq_destroy;
1139
1140	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
1141
1142	err = -tcp_lro_init_args(&rq->lro, c->ifp, TCP_LRO_ENTRIES, wq_sz);
1143	if (err)
1144		goto err_rq_wq_destroy;
1145
1146	rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
1147	for (i = 0; i != wq_sz; i++) {
1148		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
1149		int j;
1150
1151		err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map);
1152		if (err != 0) {
1153			while (i--)
1154				bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
1155			goto err_rq_mbuf_free;
1156		}
1157
1158		/* set value for constant fields */
1159		for (j = 0; j < rq->nsegs; j++)
1160			wqe->data[j].lkey = c->mkey_be;
1161	}
1162
1163	INIT_WORK(&rq->dim.work, mlx5e_dim_work);
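	/*
	 * RX completion moderation modes below 2 leave dynamic interrupt
	 * moderation (DIM) disabled; the higher modes enable it, with the
	 * period mode taken from the RX completion queue configuration.
	 */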
1164	if (priv->params.rx_cq_moderation_mode < 2) {
1165		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
1166	} else {
1167		void *cqc = container_of(param,
1168		    struct mlx5e_channel_param, rq)->rx_cq.cqc;
1169
1170		switch (MLX5_GET(cqc, cqc, cq_period_mode)) {
1171		case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
1172			rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
1173			break;
1174		case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
1175			rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
1176			break;
1177		default:
1178			rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
1179			break;
1180		}
1181	}
1182
1183	rq->ifp = c->ifp;
1184	rq->channel = c;
1185	rq->ix = c->ix;
1186
1187	snprintf(buffer, sizeof(buffer), "rxstat%d", c->ix);
1188	mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
1189	    buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM,
1190	    rq->stats.arg);
1191	return (0);
1192
1193err_rq_mbuf_free:
1194	free(rq->mbuf, M_MLX5EN);
1195	tcp_lro_free(&rq->lro);
1196err_rq_wq_destroy:
1197	mlx5_wq_destroy(&rq->wq_ctrl);
1198err_free_dma_tag:
1199	bus_dma_tag_destroy(rq->dma_tag);
1200done:
1201	return (err);
1202}
1203
1204static void
1205mlx5e_destroy_rq(struct mlx5e_rq *rq)
1206{
1207	int wq_sz;
1208	int i;
1209
1210	/* destroy all sysctl nodes */
1211	sysctl_ctx_free(&rq->stats.ctx);
1212
1213	/* free leftover LRO packets, if any */
1214	tcp_lro_free(&rq->lro);
1215
1216	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
1217	for (i = 0; i != wq_sz; i++) {
1218		if (rq->mbuf[i].mbuf != NULL) {
1219			bus_dmamap_unload(rq->dma_tag, rq->mbuf[i].dma_map);
1220			m_freem(rq->mbuf[i].mbuf);
1221		}
1222		bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
1223	}
1224	free(rq->mbuf, M_MLX5EN);
1225	mlx5_wq_destroy(&rq->wq_ctrl);
1226	bus_dma_tag_destroy(rq->dma_tag);
1227}
1228
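/*
 * Firmware setup of a receive queue: copy the RQC template from the
 * channel parameters, bind the RQ to its completion queue and counter
 * set, and hand over the work queue pages and doorbell address. The RQ
 * is created in the RST state and moved to RDY by mlx5e_modify_rq().
 */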
1229static int
1230mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
1231{
1232	struct mlx5e_channel *c = rq->channel;
1233	struct mlx5e_priv *priv = c->priv;
1234	struct mlx5_core_dev *mdev = priv->mdev;
1235
1236	void *in;
1237	void *rqc;
1238	void *wq;
1239	int inlen;
1240	int err;
1241
1242	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
1243	    sizeof(u64) * rq->wq_ctrl.buf.npages;
1244	in = mlx5_vzalloc(inlen);
1245	if (in == NULL)
1246		return (-ENOMEM);
1247
1248	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
1249	wq = MLX5_ADDR_OF(rqc, rqc, wq);
1250
1251	memcpy(rqc, param->rqc, sizeof(param->rqc));
1252
1253	MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
1254	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
1255	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
1256	if (priv->counter_set_id >= 0)
1257		MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id);
1258	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
1259	    PAGE_SHIFT);
1260	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
1261
1262	mlx5_fill_page_array(&rq->wq_ctrl.buf,
1263	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));
1264
1265	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
1266
1267	kvfree(in);
1268
1269	return (err);
1270}
1271
1272static int
1273mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
1274{
1275	struct mlx5e_channel *c = rq->channel;
1276	struct mlx5e_priv *priv = c->priv;
1277	struct mlx5_core_dev *mdev = priv->mdev;
1278
1279	void *in;
1280	void *rqc;
1281	int inlen;
1282	int err;
1283
1284	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
1285	in = mlx5_vzalloc(inlen);
1286	if (in == NULL)
1287		return (-ENOMEM);
1288
1289	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
1290
1291	MLX5_SET(modify_rq_in, in, rqn, rq->rqn);
1292	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
1293	MLX5_SET(rqc, rqc, state, next_state);
1294
1295	err = mlx5_core_modify_rq(mdev, in, inlen);
1296
1297	kvfree(in);
1298
1299	return (err);
1300}
1301
1302static void
1303mlx5e_disable_rq(struct mlx5e_rq *rq)
1304{
1305	struct mlx5e_channel *c = rq->channel;
1306	struct mlx5e_priv *priv = c->priv;
1307	struct mlx5_core_dev *mdev = priv->mdev;
1308
1309	mlx5_core_destroy_rq(mdev, rq->rqn);
1310}
1311
1312static int
1313mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
1314{
1315	struct mlx5e_channel *c = rq->channel;
1316	struct mlx5e_priv *priv = c->priv;
1317	struct mlx5_wq_ll *wq = &rq->wq;
1318	int i;
1319
1320	for (i = 0; i < 1000; i++) {
1321		if (wq->cur_sz >= priv->params.min_rx_wqes)
1322			return (0);
1323
1324		msleep(4);
1325	}
1326	return (-ETIMEDOUT);
1327}
1328
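/*
 * Bring up a receive queue: software creation, firmware creation in the
 * RST state, then a transition from RST to RDY.
 */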
1329static int
1330mlx5e_open_rq(struct mlx5e_channel *c,
1331    struct mlx5e_rq_param *param,
1332    struct mlx5e_rq *rq)
1333{
1334	int err;
1335
1336	err = mlx5e_create_rq(c, param, rq);
1337	if (err)
1338		return (err);
1339
1340	err = mlx5e_enable_rq(rq, param);
1341	if (err)
1342		goto err_destroy_rq;
1343
1344	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
1345	if (err)
1346		goto err_disable_rq;
1347
1348	c->rq.enabled = 1;
1349
1350	return (0);
1351
1352err_disable_rq:
1353	mlx5e_disable_rq(rq);
1354err_destroy_rq:
1355	mlx5e_destroy_rq(rq);
1356
1357	return (err);
1358}
1359
1360static void
1361mlx5e_close_rq(struct mlx5e_rq *rq)
1362{
1363	mtx_lock(&rq->mtx);
1364	rq->enabled = 0;
1365	callout_stop(&rq->watchdog);
1366	mtx_unlock(&rq->mtx);
1367
1368	callout_drain(&rq->watchdog);
1369
1370	mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
1371}
1372
1373static void
1374mlx5e_close_rq_wait(struct mlx5e_rq *rq)
1375{
1376
1377	mlx5e_disable_rq(rq);
1378	mlx5e_close_cq(&rq->cq);
1379	cancel_work_sync(&rq->dim.work);
1380	mlx5e_destroy_rq(rq);
1381}
1382
1383void
1384mlx5e_free_sq_db(struct mlx5e_sq *sq)
1385{
1386	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1387	int x;
1388
1389	for (x = 0; x != wq_sz; x++) {
1390		if (sq->mbuf[x].mbuf != NULL) {
1391			bus_dmamap_unload(sq->dma_tag, sq->mbuf[x].dma_map);
1392			m_freem(sq->mbuf[x].mbuf);
1393		}
1394		bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
1395	}
1396	free(sq->mbuf, M_MLX5EN);
1397}
1398
1399int
1400mlx5e_alloc_sq_db(struct mlx5e_sq *sq)
1401{
1402	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1403	int err;
1404	int x;
1405
1406	sq->mbuf = malloc(wq_sz * sizeof(sq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
1407
1408	/* Create DMA descriptor MAPs */
1409	for (x = 0; x != wq_sz; x++) {
1410		err = -bus_dmamap_create(sq->dma_tag, 0, &sq->mbuf[x].dma_map);
1411		if (err != 0) {
1412			while (x--)
1413				bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
1414			free(sq->mbuf, M_MLX5EN);
1415			return (err);
1416		}
1417	}
1418	return (0);
1419}
1420
1421static const char *mlx5e_sq_stats_desc[] = {
1422	MLX5E_SQ_STATS(MLX5E_STATS_DESC)
1423};
1424
1425void
1426mlx5e_update_sq_inline(struct mlx5e_sq *sq)
1427{
1428	sq->max_inline = sq->priv->params.tx_max_inline;
1429	sq->min_inline_mode = sq->priv->params.tx_min_inline_mode;
1430
1431	/*
1432	 * Check if the trust state is DSCP or if the inline mode is NONE,
1433	 * which indicates ConnectX-5 or newer hardware.
1434	 */
1435	if (sq->priv->params_ethtool.trust_state != MLX5_QPTS_TRUST_PCP ||
1436	    sq->min_inline_mode == MLX5_INLINE_MODE_NONE) {
1437		if (MLX5_CAP_ETH(sq->priv->mdev, wqe_vlan_insert))
1438			sq->min_insert_caps = MLX5E_INSERT_VLAN | MLX5E_INSERT_NON_VLAN;
1439		else
1440			sq->min_insert_caps = MLX5E_INSERT_NON_VLAN;
1441	} else {
1442		sq->min_insert_caps = 0;
1443	}
1444}
1445
1446static void
1447mlx5e_refresh_sq_inline_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
1448{
1449	int i;
1450
1451	for (i = 0; i != c->num_tc; i++) {
1452		mtx_lock(&c->sq[i].lock);
1453		mlx5e_update_sq_inline(&c->sq[i]);
1454		mtx_unlock(&c->sq[i].lock);
1455	}
1456}
1457
1458void
1459mlx5e_refresh_sq_inline(struct mlx5e_priv *priv)
1460{
1461	int i;
1462
1463	/* check if channels are closed */
1464	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
1465		return;
1466
1467	for (i = 0; i < priv->params.num_channels; i++)
1468		mlx5e_refresh_sq_inline_sub(priv, &priv->channel[i]);
1469}
1470
1471static int
1472mlx5e_create_sq(struct mlx5e_channel *c,
1473    int tc,
1474    struct mlx5e_sq_param *param,
1475    struct mlx5e_sq *sq)
1476{
1477	struct mlx5e_priv *priv = c->priv;
1478	struct mlx5_core_dev *mdev = priv->mdev;
1479	char buffer[16];
1480	void *sqc = param->sqc;
1481	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
1482	int err;
1483
1484	/* Create DMA descriptor TAG */
1485	if ((err = -bus_dma_tag_create(
1486	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
1487	    1,				/* any alignment */
1488	    0,				/* no boundary */
1489	    BUS_SPACE_MAXADDR,		/* lowaddr */
1490	    BUS_SPACE_MAXADDR,		/* highaddr */
1491	    NULL, NULL,			/* filter, filterarg */
1492	    MLX5E_MAX_TX_PAYLOAD_SIZE,	/* maxsize */
1493	    MLX5E_MAX_TX_MBUF_FRAGS,	/* nsegments */
1494	    MLX5E_MAX_TX_MBUF_SIZE,	/* maxsegsize */
1495	    0,				/* flags */
1496	    NULL, NULL,			/* lockfunc, lockfuncarg */
1497	    &sq->dma_tag)))
1498		goto done;
1499
1500	err = mlx5_alloc_map_uar(mdev, &sq->uar);
1501	if (err)
1502		goto err_free_dma_tag;
1503
1504	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
1505	    &sq->wq_ctrl);
1506	if (err)
1507		goto err_unmap_free_uar;
1508
1509	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
1510	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
1511
1512	err = mlx5e_alloc_sq_db(sq);
1513	if (err)
1514		goto err_sq_wq_destroy;
1515
1516	sq->mkey_be = c->mkey_be;
1517	sq->ifp = priv->ifp;
1518	sq->priv = priv;
1519	sq->tc = tc;
1520
1521	mlx5e_update_sq_inline(sq);
1522
1523	snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc);
1524	mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
1525	    buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM,
1526	    sq->stats.arg);
1527
1528	return (0);
1529
1530err_sq_wq_destroy:
1531	mlx5_wq_destroy(&sq->wq_ctrl);
1532
1533err_unmap_free_uar:
1534	mlx5_unmap_free_uar(mdev, &sq->uar);
1535
1536err_free_dma_tag:
1537	bus_dma_tag_destroy(sq->dma_tag);
1538done:
1539	return (err);
1540}
1541
1542static void
1543mlx5e_destroy_sq(struct mlx5e_sq *sq)
1544{
1545	/* destroy all sysctl nodes */
1546	sysctl_ctx_free(&sq->stats.ctx);
1547
1548	mlx5e_free_sq_db(sq);
1549	mlx5_wq_destroy(&sq->wq_ctrl);
1550	mlx5_unmap_free_uar(sq->priv->mdev, &sq->uar);
1551	bus_dma_tag_destroy(sq->dma_tag);
1552}
1553
1554int
1555mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param,
1556    int tis_num)
1557{
1558	void *in;
1559	void *sqc;
1560	void *wq;
1561	int inlen;
1562	int err;
1563
1564	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
1565	    sizeof(u64) * sq->wq_ctrl.buf.npages;
1566	in = mlx5_vzalloc(inlen);
1567	if (in == NULL)
1568		return (-ENOMEM);
1569
1570	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
1571	wq = MLX5_ADDR_OF(sqc, sqc, wq);
1572
1573	memcpy(sqc, param->sqc, sizeof(param->sqc));
1574
1575	MLX5_SET(sqc, sqc, tis_num_0, tis_num);
1576	MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
1577	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
1578	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
1579	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
1580
1581	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
1582	MLX5_SET(wq, wq, uar_page, sq->uar.index);
1583	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
1584	    PAGE_SHIFT);
1585	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
1586
1587	mlx5_fill_page_array(&sq->wq_ctrl.buf,
1588	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));
1589
1590	err = mlx5_core_create_sq(sq->priv->mdev, in, inlen, &sq->sqn);
1591
1592	kvfree(in);
1593
1594	return (err);
1595}
1596
1597int
1598mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
1599{
1600	void *in;
1601	void *sqc;
1602	int inlen;
1603	int err;
1604
1605	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
1606	in = mlx5_vzalloc(inlen);
1607	if (in == NULL)
1608		return (-ENOMEM);
1609
1610	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
1611
1612	MLX5_SET(modify_sq_in, in, sqn, sq->sqn);
1613	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
1614	MLX5_SET(sqc, sqc, state, next_state);
1615
1616	err = mlx5_core_modify_sq(sq->priv->mdev, in, inlen);
1617
1618	kvfree(in);
1619
1620	return (err);
1621}
1622
1623void
1624mlx5e_disable_sq(struct mlx5e_sq *sq)
1625{
1626
1627	mlx5_core_destroy_sq(sq->priv->mdev, sq->sqn);
1628}
1629
1630static int
1631mlx5e_open_sq(struct mlx5e_channel *c,
1632    int tc,
1633    struct mlx5e_sq_param *param,
1634    struct mlx5e_sq *sq)
1635{
1636	int err;
1637
1638	err = mlx5e_create_sq(c, tc, param, sq);
1639	if (err)
1640		return (err);
1641
1642	err = mlx5e_enable_sq(sq, param, c->priv->tisn[tc]);
1643	if (err)
1644		goto err_destroy_sq;
1645
1646	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
1647	if (err)
1648		goto err_disable_sq;
1649
1650	WRITE_ONCE(sq->running, 1);
1651
1652	return (0);
1653
1654err_disable_sq:
1655	mlx5e_disable_sq(sq);
1656err_destroy_sq:
1657	mlx5e_destroy_sq(sq);
1658
1659	return (err);
1660}
1661
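/*
 * The TX completion event factor batches completion events. Filling the
 * remainder of the current batch with NOPs forces the hardware to report
 * the outstanding completions; this is used by the completion event
 * timer below and when draining an SQ.
 */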
1662static void
1663mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep)
1664{
1665	/* fill up remainder with NOPs */
1666	while (sq->cev_counter != 0) {
1667		while (!mlx5e_sq_has_room_for(sq, 1)) {
1668			if (can_sleep != 0) {
1669				mtx_unlock(&sq->lock);
1670				msleep(4);
1671				mtx_lock(&sq->lock);
1672			} else {
1673				goto done;
1674			}
1675		}
1676		/* send a single NOP */
1677		mlx5e_send_nop(sq, 1);
1678		atomic_thread_fence_rel();
1679	}
1680done:
1681	/* Check if we need to write the doorbell */
1682	if (likely(sq->doorbell.d64 != 0)) {
1683		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
1684		sq->doorbell.d64 = 0;
1685	}
1686}
1687
1688void
1689mlx5e_sq_cev_timeout(void *arg)
1690{
1691	struct mlx5e_sq *sq = arg;
1692
1693	mtx_assert(&sq->lock, MA_OWNED);
1694
1695	/* check next state */
1696	switch (sq->cev_next_state) {
1697	case MLX5E_CEV_STATE_SEND_NOPS:
1698		/* fill TX ring with NOPs, if any */
1699		mlx5e_sq_send_nops_locked(sq, 0);
1700
1701		/* check if completed */
1702		if (sq->cev_counter == 0) {
1703			sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
1704			return;
1705		}
1706		break;
1707	default:
1708		/* send NOPs on next timeout */
1709		sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS;
1710		break;
1711	}
1712
1713	/* restart timer */
1714	callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq);
1715}
1716
1717void
1718mlx5e_drain_sq(struct mlx5e_sq *sq)
1719{
1720	int error;
1721	struct mlx5_core_dev *mdev = sq->priv->mdev;
1722
1723	/*
1724	 * Check if already stopped.
1725	 *
1726	 * NOTE: Serialization of this function is managed by the
1727	 * caller, either by holding the priv's state lock or, when rate
1728	 * limit support is in use, by having a single thread manage drain
1729	 * and resume of SQs. The "running" variable can therefore safely
1730	 * be read without any locks.
1731	 */
1732	if (READ_ONCE(sq->running) == 0)
1733		return;
1734
1735	/* don't put more packets into the SQ */
1736	WRITE_ONCE(sq->running, 0);
1737
1738	/* serialize access to DMA rings */
1739	mtx_lock(&sq->lock);
1740
1741	/* teardown event factor timer, if any */
1742	sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
1743	callout_stop(&sq->cev_callout);
1744
1745	/* send dummy NOPs in order to flush the transmit ring */
1746	mlx5e_sq_send_nops_locked(sq, 1);
1747	mtx_unlock(&sq->lock);
1748
1749	/* make sure it is safe to free the callout */
1750	callout_drain(&sq->cev_callout);
1751
1752	/* wait till SQ is empty or link is down */
1753	mtx_lock(&sq->lock);
1754	while (sq->cc != sq->pc &&
1755	    (sq->priv->media_status_last & IFM_ACTIVE) != 0 &&
1756	    mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
1757		mtx_unlock(&sq->lock);
1758		msleep(1);
1759		sq->cq.mcq.comp(&sq->cq.mcq);
1760		mtx_lock(&sq->lock);
1761	}
1762	mtx_unlock(&sq->lock);
1763
1764	/* error out remaining requests */
1765	error = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
1766	if (error != 0) {
1767		mlx5_en_err(sq->ifp,
1768		    "mlx5e_modify_sq() from RDY to ERR failed: %d\n", error);
1769	}
1770
1771	/* wait till SQ is empty */
1772	mtx_lock(&sq->lock);
1773	while (sq->cc != sq->pc &&
1774	       mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
1775		mtx_unlock(&sq->lock);
1776		msleep(1);
1777		sq->cq.mcq.comp(&sq->cq.mcq);
1778		mtx_lock(&sq->lock);
1779	}
1780	mtx_unlock(&sq->lock);
1781}
1782
1783static void
1784mlx5e_close_sq_wait(struct mlx5e_sq *sq)
1785{
1786
1787	mlx5e_drain_sq(sq);
1788	mlx5e_disable_sq(sq);
1789	mlx5e_destroy_sq(sq);
1790}
1791
1792static int
1793mlx5e_create_cq(struct mlx5e_priv *priv,
1794    struct mlx5e_cq_param *param,
1795    struct mlx5e_cq *cq,
1796    mlx5e_cq_comp_t *comp,
1797    int eq_ix)
1798{
1799	struct mlx5_core_dev *mdev = priv->mdev;
1800	struct mlx5_core_cq *mcq = &cq->mcq;
1801	int eqn_not_used;
1802	int irqn;
1803	int err;
1804	u32 i;
1805
1806	param->wq.buf_numa_node = 0;
1807	param->wq.db_numa_node = 0;
1808
1809	err = mlx5_vector2eqn(mdev, eq_ix, &eqn_not_used, &irqn);
1810	if (err)
1811		return (err);
1812
1813	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
1814	    &cq->wq_ctrl);
1815	if (err)
1816		return (err);
1817
1818	mcq->cqe_sz = 64;
1819	mcq->set_ci_db = cq->wq_ctrl.db.db;
1820	mcq->arm_db = cq->wq_ctrl.db.db + 1;
1821	*mcq->set_ci_db = 0;
1822	*mcq->arm_db = 0;
1823	mcq->vector = eq_ix;
1824	mcq->comp = comp;
1825	mcq->event = mlx5e_cq_error_event;
1826	mcq->irqn = irqn;
1827	mcq->uar = &priv->cq_uar;
1828
1829	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
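	/*
	 * Initialize the ownership/opcode byte of every CQE to an invalid
	 * value so that entries are not consumed before the hardware has
	 * written them.
	 */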
1830		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
1831
1832		cqe->op_own = 0xf1;
1833	}
1834
1835	cq->priv = priv;
1836
1837	return (0);
1838}
1839
1840static void
1841mlx5e_destroy_cq(struct mlx5e_cq *cq)
1842{
1843	mlx5_wq_destroy(&cq->wq_ctrl);
1844}
1845
1846static int
1847mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param, int eq_ix)
1848{
1849	struct mlx5_core_cq *mcq = &cq->mcq;
1850	void *in;
1851	void *cqc;
1852	int inlen;
1853	int irqn_not_used;
1854	int eqn;
1855	int err;
1856
1857	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
1858	    sizeof(u64) * cq->wq_ctrl.buf.npages;
1859	in = mlx5_vzalloc(inlen);
1860	if (in == NULL)
1861		return (-ENOMEM);
1862
1863	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
1864
1865	memcpy(cqc, param->cqc, sizeof(param->cqc));
1866
1867	mlx5_fill_page_array(&cq->wq_ctrl.buf,
1868	    (__be64 *) MLX5_ADDR_OF(create_cq_in, in, pas));
1869
1870	mlx5_vector2eqn(cq->priv->mdev, eq_ix, &eqn, &irqn_not_used);
1871
1872	MLX5_SET(cqc, cqc, c_eqn, eqn);
1873	MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
1874	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
1875	    PAGE_SHIFT);
1876	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
1877
1878	err = mlx5_core_create_cq(cq->priv->mdev, mcq, in, inlen);
1879
1880	kvfree(in);
1881
1882	if (err)
1883		return (err);
1884
1885	mlx5e_cq_arm(cq, MLX5_GET_DOORBELL_LOCK(&cq->priv->doorbell_lock));
1886
1887	return (0);
1888}
1889
1890static void
1891mlx5e_disable_cq(struct mlx5e_cq *cq)
1892{
1893
1894	mlx5_core_destroy_cq(cq->priv->mdev, &cq->mcq);
1895}
1896
1897int
1898mlx5e_open_cq(struct mlx5e_priv *priv,
1899    struct mlx5e_cq_param *param,
1900    struct mlx5e_cq *cq,
1901    mlx5e_cq_comp_t *comp,
1902    int eq_ix)
1903{
1904	int err;
1905
1906	err = mlx5e_create_cq(priv, param, cq, comp, eq_ix);
1907	if (err)
1908		return (err);
1909
1910	err = mlx5e_enable_cq(cq, param, eq_ix);
1911	if (err)
1912		goto err_destroy_cq;
1913
1914	return (0);
1915
1916err_destroy_cq:
1917	mlx5e_destroy_cq(cq);
1918
1919	return (err);
1920}
1921
1922void
1923mlx5e_close_cq(struct mlx5e_cq *cq)
1924{
1925	mlx5e_disable_cq(cq);
1926	mlx5e_destroy_cq(cq);
1927}
1928
1929static int
1930mlx5e_open_tx_cqs(struct mlx5e_channel *c,
1931    struct mlx5e_channel_param *cparam)
1932{
1933	int err;
1934	int tc;
1935
1936	for (tc = 0; tc < c->num_tc; tc++) {
1937		/* open completion queue */
1938		err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[tc].cq,
1939		    &mlx5e_tx_cq_comp, c->ix);
1940		if (err)
1941			goto err_close_tx_cqs;
1942	}
1943	return (0);
1944
1945err_close_tx_cqs:
1946	for (tc--; tc >= 0; tc--)
1947		mlx5e_close_cq(&c->sq[tc].cq);
1948
1949	return (err);
1950}
1951
1952static void
1953mlx5e_close_tx_cqs(struct mlx5e_channel *c)
1954{
1955	int tc;
1956
1957	for (tc = 0; tc < c->num_tc; tc++)
1958		mlx5e_close_cq(&c->sq[tc].cq);
1959}
1960
1961static int
1962mlx5e_open_sqs(struct mlx5e_channel *c,
1963    struct mlx5e_channel_param *cparam)
1964{
1965	int err;
1966	int tc;
1967
1968	for (tc = 0; tc < c->num_tc; tc++) {
1969		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
1970		if (err)
1971			goto err_close_sqs;
1972	}
1973
1974	return (0);
1975
1976err_close_sqs:
1977	for (tc--; tc >= 0; tc--)
1978		mlx5e_close_sq_wait(&c->sq[tc]);
1979
1980	return (err);
1981}
1982
1983static void
1984mlx5e_close_sqs_wait(struct mlx5e_channel *c)
1985{
1986	int tc;
1987
1988	for (tc = 0; tc < c->num_tc; tc++)
1989		mlx5e_close_sq_wait(&c->sq[tc]);
1990}
1991
1992static void
1993mlx5e_chan_mtx_init(struct mlx5e_channel *c)
1994{
1995	int tc;
1996
1997	mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF);
1998
1999	callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0);
2000
2001	for (tc = 0; tc < c->num_tc; tc++) {
2002		struct mlx5e_sq *sq = c->sq + tc;
2003
2004		mtx_init(&sq->lock, "mlx5tx",
2005		    MTX_NETWORK_LOCK " TX", MTX_DEF);
2006		mtx_init(&sq->comp_lock, "mlx5comp",
2007		    MTX_NETWORK_LOCK " TX", MTX_DEF);
2008
2009		callout_init_mtx(&sq->cev_callout, &sq->lock, 0);
2010
2011		sq->cev_factor = c->priv->params_ethtool.tx_completion_fact;
2012
2013		/* ensure the TX completion event factor is not zero */
2014		if (sq->cev_factor == 0)
2015			sq->cev_factor = 1;
2016	}
2017}
2018
2019static void
2020mlx5e_chan_mtx_destroy(struct mlx5e_channel *c)
2021{
2022	int tc;
2023
2024	mtx_destroy(&c->rq.mtx);
2025
2026	for (tc = 0; tc < c->num_tc; tc++) {
2027		mtx_destroy(&c->sq[tc].lock);
2028		mtx_destroy(&c->sq[tc].comp_lock);
2029	}
2030}
2031
2032static int
2033mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
2034    struct mlx5e_channel_param *cparam,
2035    struct mlx5e_channel *c)
2036{
2037	int err;
2038
2039	memset(c, 0, sizeof(*c));
2040
2041	c->priv = priv;
2042	c->ix = ix;
2043	c->ifp = priv->ifp;
2044	c->mkey_be = cpu_to_be32(priv->mr.key);
2045	c->num_tc = priv->num_tc;
2046
2047	/* init mutexes */
2048	mlx5e_chan_mtx_init(c);
2049
2050	/* open transmit completion queue */
2051	err = mlx5e_open_tx_cqs(c, cparam);
2052	if (err)
2053		goto err_free;
2054
2055	/* open receive completion queue */
2056	err = mlx5e_open_cq(c->priv, &cparam->rx_cq, &c->rq.cq,
2057	    &mlx5e_rx_cq_comp, c->ix);
2058	if (err)
2059		goto err_close_tx_cqs;
2060
2061	err = mlx5e_open_sqs(c, cparam);
2062	if (err)
2063		goto err_close_rx_cq;
2064
2065	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
2066	if (err)
2067		goto err_close_sqs;
2068
2069	/* poll receive queue initially */
2070	c->rq.cq.mcq.comp(&c->rq.cq.mcq);
2071
2072	return (0);
2073
2074err_close_sqs:
2075	mlx5e_close_sqs_wait(c);
2076
2077err_close_rx_cq:
2078	mlx5e_close_cq(&c->rq.cq);
2079
2080err_close_tx_cqs:
2081	mlx5e_close_tx_cqs(c);
2082
2083err_free:
2084	/* destroy mutexes */
2085	mlx5e_chan_mtx_destroy(c);
2086	return (err);
2087}
2088
2089static void
2090mlx5e_close_channel(struct mlx5e_channel *c)
2091{
2092	mlx5e_close_rq(&c->rq);
2093}
2094
2095static void
2096mlx5e_close_channel_wait(struct mlx5e_channel *c)
2097{
2098	mlx5e_close_rq_wait(&c->rq);
2099	mlx5e_close_sqs_wait(c);
2100	mlx5e_close_tx_cqs(c);
2101	/* destroy mutexes */
2102	mlx5e_chan_mtx_destroy(c);
2103}
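/*
 * The channel teardown above is split in two so that mlx5e_close_channels()
 * can first ask every receive queue to stop and only then wait for each of
 * them, letting the per-channel shutdowns overlap instead of running
 * strictly one after another.
 */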
2104
2105static int
2106mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs)
2107{
2108	u32 r, n;
2109
2110	r = priv->params.hw_lro_en ? priv->params.lro_wqe_sz :
2111	    MLX5E_SW2MB_MTU(priv->ifp->if_mtu);
2112	if (r > MJUM16BYTES)
2113		return (-ENOMEM);
2114
2115	if (r > MJUM9BYTES)
2116		r = MJUM16BYTES;
2117	else if (r > MJUMPAGESIZE)
2118		r = MJUM9BYTES;
2119	else if (r > MCLBYTES)
2120		r = MJUMPAGESIZE;
2121	else
2122		r = MCLBYTES;
2123
2124	/*
2125	 * The stride size must be a power of two and equals 16 * (n + 1),
2126	 * because the first segment is the control segment. Hence n + 1
2127	 * must be a power of two as well.
2128	 */
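	/*
	 * Worked example (assuming a standard 1500 byte MTU and HW LRO
	 * disabled): "r" rounds up to MCLBYTES and the loop below then
	 * increments "n" until n + 1 is a power of two, which keeps the
	 * resulting stride of 16 * (n + 1) bytes a power of two as well.
	 */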
2129	for (n = howmany(r, MLX5E_MAX_RX_BYTES); !powerof2(n + 1); n++)
2130		;
2131
2132	if (n > MLX5E_MAX_BUSDMA_RX_SEGS)
2133		return (-ENOMEM);
2134
2135	*wqe_sz = r;
2136	*nsegs = n;
2137	return (0);
2138}
2139
2140static void
2141mlx5e_build_rq_param(struct mlx5e_priv *priv,
2142    struct mlx5e_rq_param *param)
2143{
2144	void *rqc = param->rqc;
2145	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
2146	u32 wqe_sz, nsegs;
2147
2148	mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
2149	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
2150	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
2151	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe) +
2152	    nsegs * sizeof(struct mlx5_wqe_data_seg)));
2153	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
2154	MLX5_SET(wq, wq, pd, priv->pdn);
2155
2156	param->wq.buf_numa_node = 0;
2157	param->wq.db_numa_node = 0;
2158	param->wq.linear = 1;
2159}
2160
2161static void
2162mlx5e_build_sq_param(struct mlx5e_priv *priv,
2163    struct mlx5e_sq_param *param)
2164{
2165	void *sqc = param->sqc;
2166	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2167
2168	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
2169	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
2170	MLX5_SET(wq, wq, pd, priv->pdn);
2171
2172	param->wq.buf_numa_node = 0;
2173	param->wq.db_numa_node = 0;
2174	param->wq.linear = 1;
2175}
2176
2177static void
2178mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
2179    struct mlx5e_cq_param *param)
2180{
2181	void *cqc = param->cqc;
2182
2183	MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
2184}
2185
2186static void
2187mlx5e_get_default_profile(struct mlx5e_priv *priv, int mode, struct net_dim_cq_moder *ptr)
2188{
2189
2190	*ptr = net_dim_get_profile(mode, MLX5E_DIM_DEFAULT_PROFILE);
2191
2192	/* apply LRO restrictions */
2193	if (priv->params.hw_lro_en &&
2194	    ptr->pkts > MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO) {
2195		ptr->pkts = MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO;
2196	}
2197}
2198
2199static void
2200mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
2201    struct mlx5e_cq_param *param)
2202{
2203	struct net_dim_cq_moder curr;
2204	void *cqc = param->cqc;
2205
2206	/*
2207	 * We use MLX5_CQE_FORMAT_HASH because the RX hash mini CQE
2208	 * format is more beneficial for the FreeBSD use case.
2209	 *
2210	 * Adding support for MLX5_CQE_FORMAT_CSUM will require changes
2211	 * in mlx5e_decompress_cqe.
2212	 */
2213	if (priv->params.cqe_zipping_en) {
2214		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_HASH);
2215		MLX5_SET(cqc, cqc, cqe_compression_en, 1);
2216	}
2217
2218	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
2219
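	/*
	 * The RX completion moderation modes handled below are, as far as
	 * can be read from this switch: 0 - static moderation counted from
	 * the EQE, 1 - static moderation counted from the CQE when the
	 * hardware supports it, 2 - dynamic (net_dim) moderation based on
	 * the EQE, and 3 - dynamic (net_dim) moderation based on the CQE
	 * when supported.
	 */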
2220	switch (priv->params.rx_cq_moderation_mode) {
2221	case 0:
2222		MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
2223		MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);
2224		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
2225		break;
2226	case 1:
2227		MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
2228		MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);
2229		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
2230			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
2231		else
2232			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
2233		break;
2234	case 2:
2235		mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE, &curr);
2236		MLX5_SET(cqc, cqc, cq_period, curr.usec);
2237		MLX5_SET(cqc, cqc, cq_max_count, curr.pkts);
2238		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
2239		break;
2240	case 3:
2241		mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE, &curr);
2242		MLX5_SET(cqc, cqc, cq_period, curr.usec);
2243		MLX5_SET(cqc, cqc, cq_max_count, curr.pkts);
2244		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
2245			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
2246		else
2247			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
2248		break;
2249	default:
2250		break;
2251	}
2252
2253	mlx5e_dim_build_cq_param(priv, param);
2254
2255	mlx5e_build_common_cq_param(priv, param);
2256}
2257
2258static void
2259mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
2260    struct mlx5e_cq_param *param)
2261{
2262	void *cqc = param->cqc;
2263
2264	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
2265	MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec);
2266	MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts);
2267
2268	switch (priv->params.tx_cq_moderation_mode) {
2269	case 0:
2270		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
2271		break;
2272	default:
2273		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
2274			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
2275		else
2276			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
2277		break;
2278	}
2279
2280	mlx5e_build_common_cq_param(priv, param);
2281}
2282
2283static void
2284mlx5e_build_channel_param(struct mlx5e_priv *priv,
2285    struct mlx5e_channel_param *cparam)
2286{
2287	memset(cparam, 0, sizeof(*cparam));
2288
2289	mlx5e_build_rq_param(priv, &cparam->rq);
2290	mlx5e_build_sq_param(priv, &cparam->sq);
2291	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
2292	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
2293}
2294
2295static int
2296mlx5e_open_channels(struct mlx5e_priv *priv)
2297{
2298	struct mlx5e_channel_param cparam;
2299	int err;
2300	int i;
2301	int j;
2302
2303	mlx5e_build_channel_param(priv, &cparam);
2304	for (i = 0; i < priv->params.num_channels; i++) {
2305		err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
2306		if (err)
2307			goto err_close_channels;
2308	}
2309
2310	for (j = 0; j < priv->params.num_channels; j++) {
2311		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j].rq);
2312		if (err)
2313			goto err_close_channels;
2314	}
2315
2316	return (0);
2317
2318err_close_channels:
2319	while (i--) {
2320		mlx5e_close_channel(&priv->channel[i]);
2321		mlx5e_close_channel_wait(&priv->channel[i]);
2322	}
2323	return (err);
2324}
2325
2326static void
2327mlx5e_close_channels(struct mlx5e_priv *priv)
2328{
2329	int i;
2330
2331	for (i = 0; i < priv->params.num_channels; i++)
2332		mlx5e_close_channel(&priv->channel[i]);
2333	for (i = 0; i < priv->params.num_channels; i++)
2334		mlx5e_close_channel_wait(&priv->channel[i]);
2335}
2336
2337static int
2338mlx5e_refresh_sq_params(struct mlx5e_priv *priv, struct mlx5e_sq *sq)
2339{
2340
2341	if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
2342		uint8_t cq_mode;
2343
2344		switch (priv->params.tx_cq_moderation_mode) {
2345		case 0:
2346		case 2:
2347			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
2348			break;
2349		default:
2350			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
2351			break;
2352		}
2353
2354		return (mlx5_core_modify_cq_moderation_mode(priv->mdev, &sq->cq.mcq,
2355		    priv->params.tx_cq_moderation_usec,
2356		    priv->params.tx_cq_moderation_pkts,
2357		    cq_mode));
2358	}
2359
2360	return (mlx5_core_modify_cq_moderation(priv->mdev, &sq->cq.mcq,
2361	    priv->params.tx_cq_moderation_usec,
2362	    priv->params.tx_cq_moderation_pkts));
2363}
2364
2365static int
2366mlx5e_refresh_rq_params(struct mlx5e_priv *priv, struct mlx5e_rq *rq)
2367{
2368
2369	if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
2370		uint8_t cq_mode;
2371		uint8_t dim_mode;
2372		int retval;
2373
2374		switch (priv->params.rx_cq_moderation_mode) {
2375		case 0:
2376		case 2:
2377			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
2378			dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
2379			break;
2380		default:
2381			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
2382			dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
2383			break;
2384		}
2385
2386		/* tear down dynamic interrupt moderation */
2387		mtx_lock(&rq->mtx);
2388		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
2389		mtx_unlock(&rq->mtx);
2390
2391		/* wait for dynamic interrupt moderation work task, if any */
2392		cancel_work_sync(&rq->dim.work);
2393
2394		if (priv->params.rx_cq_moderation_mode >= 2) {
2395			struct net_dim_cq_moder curr;
2396
2397			mlx5e_get_default_profile(priv, dim_mode, &curr);
2398
2399			retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq,
2400			    curr.usec, curr.pkts, cq_mode);
2401
2402			/* set dynamic interrupt moderation mode and zero defaults */
2403			mtx_lock(&rq->mtx);
2404			rq->dim.mode = dim_mode;
2405			rq->dim.state = 0;
2406			rq->dim.profile_ix = MLX5E_DIM_DEFAULT_PROFILE;
2407			mtx_unlock(&rq->mtx);
2408		} else {
2409			retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq,
2410			    priv->params.rx_cq_moderation_usec,
2411			    priv->params.rx_cq_moderation_pkts,
2412			    cq_mode);
2413		}
2414		return (retval);
2415	}
2416
2417	return (mlx5_core_modify_cq_moderation(priv->mdev, &rq->cq.mcq,
2418	    priv->params.rx_cq_moderation_usec,
2419	    priv->params.rx_cq_moderation_pkts));
2420}
2421
2422static int
2423mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
2424{
2425	int err;
2426	int i;
2427
2428	err = mlx5e_refresh_rq_params(priv, &c->rq);
2429	if (err)
2430		goto done;
2431
2432	for (i = 0; i != c->num_tc; i++) {
2433		err = mlx5e_refresh_sq_params(priv, &c->sq[i]);
2434		if (err)
2435			goto done;
2436	}
2437done:
2438	return (err);
2439}
2440
2441int
2442mlx5e_refresh_channel_params(struct mlx5e_priv *priv)
2443{
2444	int i;
2445
2446	/* check if channels are closed */
2447	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
2448		return (EINVAL);
2449
2450	for (i = 0; i < priv->params.num_channels; i++) {
2451		int err;
2452
2453		err = mlx5e_refresh_channel_params_sub(priv, &priv->channel[i]);
2454		if (err)
2455			return (err);
2456	}
2457	return (0);
2458}
2459
2460static int
2461mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
2462{
2463	struct mlx5_core_dev *mdev = priv->mdev;
2464	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
2465	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
2466
2467	memset(in, 0, sizeof(in));
2468
2469	MLX5_SET(tisc, tisc, prio, tc);
2470	MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
2471
2472	return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]));
2473}
2474
2475static void
2476mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
2477{
2478	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
2479}
2480
2481static int
2482mlx5e_open_tises(struct mlx5e_priv *priv)
2483{
2484	int num_tc = priv->num_tc;
2485	int err;
2486	int tc;
2487
2488	for (tc = 0; tc < num_tc; tc++) {
2489		err = mlx5e_open_tis(priv, tc);
2490		if (err)
2491			goto err_close_tises;
2492	}
2493
2494	return (0);
2495
2496err_close_tises:
2497	for (tc--; tc >= 0; tc--)
2498		mlx5e_close_tis(priv, tc);
2499
2500	return (err);
2501}
2502
2503static void
2504mlx5e_close_tises(struct mlx5e_priv *priv)
2505{
2506	int num_tc = priv->num_tc;
2507	int tc;
2508
2509	for (tc = 0; tc < num_tc; tc++)
2510		mlx5e_close_tis(priv, tc);
2511}
2512
2513static int
2514mlx5e_open_rqt(struct mlx5e_priv *priv)
2515{
2516	struct mlx5_core_dev *mdev = priv->mdev;
2517	u32 *in;
2518	u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
2519	void *rqtc;
2520	int inlen;
2521	int err;
2522	int sz;
2523	int i;
2524
2525	sz = 1 << priv->params.rx_hash_log_tbl_sz;
2526
2527	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
2528	in = mlx5_vzalloc(inlen);
2529	if (in == NULL)
2530		return (-ENOMEM);
2531	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
2532
2533	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2534	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
2535
2536	for (i = 0; i < sz; i++) {
2537		int ix = i;
2538#ifdef RSS
2539		ix = rss_get_indirection_to_bucket(ix);
2540#endif
2541		/* ensure we don't overflow */
2542		ix %= priv->params.num_channels;
2543
2544		/* apply receive side scaling stride, if any */
2545		ix -= ix % (int)priv->params.channels_rsss;
2546
2547		MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix].rq.rqn);
2548	}
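	/*
	 * Illustrative example of the mapping above (values assumed, not
	 * from the source): with 4 channels and channels_rsss == 2 the
	 * table entries 0,1,2,3,... map to the RQs of channels 0,0,2,2,...;
	 * the modulo keeps the index in range and the stride groups
	 * adjacent entries onto the same channel.
	 */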
2549
2550	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
2551
2552	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
2553	if (!err)
2554		priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
2555
2556	kvfree(in);
2557
2558	return (err);
2559}
2560
2561static void
2562mlx5e_close_rqt(struct mlx5e_priv *priv)
2563{
2564	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0};
2565	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0};
2566
2567	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
2568	MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
2569
2570	mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
2571}
2572
2573static void
2574mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
2575{
2576	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2577	__be32 *hkey;
2578
2579	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
2580
2581#define	ROUGH_MAX_L2_L3_HDR_SZ 256
2582
2583#define	MLX5_HASH_IP     (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2584			  MLX5_HASH_FIELD_SEL_DST_IP)
2585
2586#define	MLX5_HASH_ALL    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2587			  MLX5_HASH_FIELD_SEL_DST_IP   |\
2588			  MLX5_HASH_FIELD_SEL_L4_SPORT |\
2589			  MLX5_HASH_FIELD_SEL_L4_DPORT)
2590
2591#define	MLX5_HASH_IP_IPSEC_SPI	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
2592				 MLX5_HASH_FIELD_SEL_DST_IP   |\
2593				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2594
2595	if (priv->params.hw_lro_en) {
2596		MLX5_SET(tirc, tirc, lro_enable_mask,
2597		    MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2598		    MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
2599		MLX5_SET(tirc, tirc, lro_max_msg_sz,
2600		    (priv->params.lro_wqe_sz -
2601		    ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
2602		/* TODO: add the option to choose timer value dynamically */
2603		MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
2604		    MLX5_CAP_ETH(priv->mdev,
2605		    lro_timer_supported_periods[2]));
2606	}
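	/*
	 * The ">> 8" above suggests lro_max_msg_sz is programmed in units
	 * of 256 bytes; for instance a 64KB LRO WQE would yield
	 * (65536 - 256) / 256 = 255. This is inferred from the code, not
	 * from the PRM.
	 */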
2607
2608	/* setup parameters for hashing TIR type, if any */
2609	switch (tt) {
2610	case MLX5E_TT_ANY:
2611		MLX5_SET(tirc, tirc, disp_type,
2612		    MLX5_TIRC_DISP_TYPE_DIRECT);
2613		MLX5_SET(tirc, tirc, inline_rqn,
2614		    priv->channel[0].rq.rqn);
2615		break;
2616	default:
2617		MLX5_SET(tirc, tirc, disp_type,
2618		    MLX5_TIRC_DISP_TYPE_INDIRECT);
2619		MLX5_SET(tirc, tirc, indirect_table,
2620		    priv->rqtn);
2621		MLX5_SET(tirc, tirc, rx_hash_fn,
2622		    MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
2623		hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
2624#ifdef RSS
2625		/*
2626		 * The FreeBSD RSS implementation currently does not
2627		 * support symmetric Toeplitz hashes:
2628		 */
2629		MLX5_SET(tirc, tirc, rx_hash_symmetric, 0);
2630		rss_getkey((uint8_t *)hkey);
2631#else
2632		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
2633		hkey[0] = cpu_to_be32(0xD181C62C);
2634		hkey[1] = cpu_to_be32(0xF7F4DB5B);
2635		hkey[2] = cpu_to_be32(0x1983A2FC);
2636		hkey[3] = cpu_to_be32(0x943E1ADB);
2637		hkey[4] = cpu_to_be32(0xD9389E6B);
2638		hkey[5] = cpu_to_be32(0xD1039C2C);
2639		hkey[6] = cpu_to_be32(0xA74499AD);
2640		hkey[7] = cpu_to_be32(0x593D56D9);
2641		hkey[8] = cpu_to_be32(0xF3253C06);
2642		hkey[9] = cpu_to_be32(0x2ADC1FFC);
2643#endif
2644		break;
2645	}
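	/*
	 * In the non-RSS build, the static key above is presumably just a
	 * default Toeplitz hash key; with rx_hash_symmetric set, both
	 * directions of a flow should hash to the same receive queue.
	 */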
2646
2647	switch (tt) {
2648	case MLX5E_TT_IPV4_TCP:
2649		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2650		    MLX5_L3_PROT_TYPE_IPV4);
2651		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2652		    MLX5_L4_PROT_TYPE_TCP);
2653#ifdef RSS
2654		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) {
2655			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2656			    MLX5_HASH_IP);
2657		} else
2658#endif
2659		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2660		    MLX5_HASH_ALL);
2661		break;
2662
2663	case MLX5E_TT_IPV6_TCP:
2664		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2665		    MLX5_L3_PROT_TYPE_IPV6);
2666		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2667		    MLX5_L4_PROT_TYPE_TCP);
2668#ifdef RSS
2669		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) {
2670			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2671			    MLX5_HASH_IP);
2672		} else
2673#endif
2674		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2675		    MLX5_HASH_ALL);
2676		break;
2677
2678	case MLX5E_TT_IPV4_UDP:
2679		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2680		    MLX5_L3_PROT_TYPE_IPV4);
2681		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2682		    MLX5_L4_PROT_TYPE_UDP);
2683#ifdef RSS
2684		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) {
2685			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2686			    MLX5_HASH_IP);
2687		} else
2688#endif
2689		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2690		    MLX5_HASH_ALL);
2691		break;
2692
2693	case MLX5E_TT_IPV6_UDP:
2694		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2695		    MLX5_L3_PROT_TYPE_IPV6);
2696		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2697		    MLX5_L4_PROT_TYPE_UDP);
2698#ifdef RSS
2699		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) {
2700			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2701			    MLX5_HASH_IP);
2702		} else
2703#endif
2704		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2705		    MLX5_HASH_ALL);
2706		break;
2707
2708	case MLX5E_TT_IPV4_IPSEC_AH:
2709		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2710		    MLX5_L3_PROT_TYPE_IPV4);
2711		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2712		    MLX5_HASH_IP_IPSEC_SPI);
2713		break;
2714
2715	case MLX5E_TT_IPV6_IPSEC_AH:
2716		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2717		    MLX5_L3_PROT_TYPE_IPV6);
2718		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2719		    MLX5_HASH_IP_IPSEC_SPI);
2720		break;
2721
2722	case MLX5E_TT_IPV4_IPSEC_ESP:
2723		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2724		    MLX5_L3_PROT_TYPE_IPV4);
2725		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2726		    MLX5_HASH_IP_IPSEC_SPI);
2727		break;
2728
2729	case MLX5E_TT_IPV6_IPSEC_ESP:
2730		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2731		    MLX5_L3_PROT_TYPE_IPV6);
2732		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2733		    MLX5_HASH_IP_IPSEC_SPI);
2734		break;
2735
2736	case MLX5E_TT_IPV4:
2737		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2738		    MLX5_L3_PROT_TYPE_IPV4);
2739		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2740		    MLX5_HASH_IP);
2741		break;
2742
2743	case MLX5E_TT_IPV6:
2744		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2745		    MLX5_L3_PROT_TYPE_IPV6);
2746		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2747		    MLX5_HASH_IP);
2748		break;
2749
2750	default:
2751		break;
2752	}
2753}
2754
2755static int
2756mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
2757{
2758	struct mlx5_core_dev *mdev = priv->mdev;
2759	u32 *in;
2760	void *tirc;
2761	int inlen;
2762	int err;
2763
2764	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2765	in = mlx5_vzalloc(inlen);
2766	if (in == NULL)
2767		return (-ENOMEM);
2768	tirc = MLX5_ADDR_OF(create_tir_in, in, tir_context);
2769
2770	mlx5e_build_tir_ctx(priv, tirc, tt);
2771
2772	err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
2773
2774	kvfree(in);
2775
2776	return (err);
2777}
2778
2779static void
2780mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
2781{
2782	mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
2783}
2784
2785static int
2786mlx5e_open_tirs(struct mlx5e_priv *priv)
2787{
2788	int err;
2789	int i;
2790
2791	for (i = 0; i < MLX5E_NUM_TT; i++) {
2792		err = mlx5e_open_tir(priv, i);
2793		if (err)
2794			goto err_close_tirs;
2795	}
2796
2797	return (0);
2798
2799err_close_tirs:
2800	for (i--; i >= 0; i--)
2801		mlx5e_close_tir(priv, i);
2802
2803	return (err);
2804}
2805
2806static void
2807mlx5e_close_tirs(struct mlx5e_priv *priv)
2808{
2809	int i;
2810
2811	for (i = 0; i < MLX5E_NUM_TT; i++)
2812		mlx5e_close_tir(priv, i);
2813}
2814
2815/*
2816 * The SW MTU does not include headers, while the
2817 * HW MTU includes all headers and checksums.
2818 */
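/*
 * For example, MLX5E_SW2HW_MTU(1500) presumably adds the Ethernet framing
 * overhead (header, any VLAN tag and FCS) on top of the 1500 byte payload
 * MTU; the exact overhead is defined by the macro in en.h.
 */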
2819static int
2820mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu)
2821{
2822	struct mlx5e_priv *priv = ifp->if_softc;
2823	struct mlx5_core_dev *mdev = priv->mdev;
2824	int hw_mtu;
2825	int err;
2826
2827	hw_mtu = MLX5E_SW2HW_MTU(sw_mtu);
2828
2829	err = mlx5_set_port_mtu(mdev, hw_mtu);
2830	if (err) {
2831		mlx5_en_err(ifp, "mlx5_set_port_mtu failed setting %d, err=%d\n",
2832		    sw_mtu, err);
2833		return (err);
2834	}
2835
2836	/* Update vport context MTU */
2837	err = mlx5_set_vport_mtu(mdev, hw_mtu);
2838	if (err) {
2839		mlx5_en_err(ifp,
2840		    "Failed updating vport context with MTU size, err=%d\n",
2841		    err);
2842	}
2843
2844	ifp->if_mtu = sw_mtu;
2845
2846	err = mlx5_query_vport_mtu(mdev, &hw_mtu);
2847	if (err || !hw_mtu) {
2848		/* fallback to port oper mtu */
2849		err = mlx5_query_port_oper_mtu(mdev, &hw_mtu);
2850	}
2851	if (err) {
2852		mlx5_en_err(ifp,
2853		    "Query port MTU, after setting new MTU value, failed\n");
2854		return (err);
2855	} else if (MLX5E_HW2SW_MTU(hw_mtu) < sw_mtu) {
2856		err = -E2BIG;
2857		mlx5_en_err(ifp,
2858		    "Port MTU %d is smaller than ifp mtu %d\n",
2859		    hw_mtu, sw_mtu);
2860	} else if (MLX5E_HW2SW_MTU(hw_mtu) > sw_mtu) {
2861		err = -EINVAL;
2862		mlx5_en_err(ifp,
2863		    "Port MTU %d is bigger than ifp mtu %d\n",
2864		    hw_mtu, sw_mtu);
2865	}
2866	priv->params_ethtool.hw_mtu = hw_mtu;
2867
2868	return (err);
2869}
2870
2871int
2872mlx5e_open_locked(struct ifnet *ifp)
2873{
2874	struct mlx5e_priv *priv = ifp->if_softc;
2875	int err;
2876	u16 set_id;
2877
2878	/* check if already opened */
2879	if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
2880		return (0);
2881
2882#ifdef RSS
2883	if (rss_getnumbuckets() > priv->params.num_channels) {
2884		mlx5_en_info(ifp,
2885		    "NOTE: There are more RSS buckets(%u) than channels(%u) available\n",
2886		    rss_getnumbuckets(), priv->params.num_channels);
2887	}
2888#endif
2889	err = mlx5e_open_tises(priv);
2890	if (err) {
2891		mlx5_en_err(ifp, "mlx5e_open_tises failed, %d\n", err);
2892		return (err);
2893	}
2894	err = mlx5_vport_alloc_q_counter(priv->mdev,
2895	    MLX5_INTERFACE_PROTOCOL_ETH, &set_id);
2896	if (err) {
2897		mlx5_en_err(priv->ifp,
2898		    "mlx5_vport_alloc_q_counter failed: %d\n", err);
2899		goto err_close_tises;
2900	}
2901	/* store counter set ID */
2902	priv->counter_set_id = set_id;
2903
2904	err = mlx5e_open_channels(priv);
2905	if (err) {
2906		mlx5_en_err(ifp,
2907		    "mlx5e_open_channels failed, %d\n", err);
2908		goto err_dalloc_q_counter;
2909	}
2910	err = mlx5e_open_rqt(priv);
2911	if (err) {
2912		mlx5_en_err(ifp, "mlx5e_open_rqt failed, %d\n", err);
2913		goto err_close_channels;
2914	}
2915	err = mlx5e_open_tirs(priv);
2916	if (err) {
2917		mlx5_en_err(ifp, "mlx5e_open_tirs failed, %d\n", err);
2918		goto err_close_rqls;
2919	}
2920	err = mlx5e_open_flow_table(priv);
2921	if (err) {
2922		mlx5_en_err(ifp,
2923		    "mlx5e_open_flow_table failed, %d\n", err);
2924		goto err_close_tirs;
2925	}
2926	err = mlx5e_add_all_vlan_rules(priv);
2927	if (err) {
2928		mlx5_en_err(ifp,
2929		    "mlx5e_add_all_vlan_rules failed, %d\n", err);
2930		goto err_close_flow_table;
2931	}
2932	set_bit(MLX5E_STATE_OPENED, &priv->state);
2933
2934	mlx5e_update_carrier(priv);
2935	mlx5e_set_rx_mode_core(priv);
2936
2937	return (0);
2938
2939err_close_flow_table:
2940	mlx5e_close_flow_table(priv);
2941
2942err_close_tirs:
2943	mlx5e_close_tirs(priv);
2944
2945err_close_rqls:
2946	mlx5e_close_rqt(priv);
2947
2948err_close_channels:
2949	mlx5e_close_channels(priv);
2950
2951err_dalloc_q_counter:
2952	mlx5_vport_dealloc_q_counter(priv->mdev,
2953	    MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
2954
2955err_close_tises:
2956	mlx5e_close_tises(priv);
2957
2958	return (err);
2959}
2960
2961static void
2962mlx5e_open(void *arg)
2963{
2964	struct mlx5e_priv *priv = arg;
2965
2966	PRIV_LOCK(priv);
2967	if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP))
2968		mlx5_en_err(priv->ifp,
2969		    "Setting port status to up failed\n");
2970
2971	mlx5e_open_locked(priv->ifp);
2972	priv->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2973	PRIV_UNLOCK(priv);
2974}
2975
2976int
2977mlx5e_close_locked(struct ifnet *ifp)
2978{
2979	struct mlx5e_priv *priv = ifp->if_softc;
2980
2981	/* check if already closed */
2982	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
2983		return (0);
2984
2985	clear_bit(MLX5E_STATE_OPENED, &priv->state);
2986
2987	mlx5e_set_rx_mode_core(priv);
2988	mlx5e_del_all_vlan_rules(priv);
2989	if_link_state_change(priv->ifp, LINK_STATE_DOWN);
2990	mlx5e_close_flow_table(priv);
2991	mlx5e_close_tirs(priv);
2992	mlx5e_close_rqt(priv);
2993	mlx5e_close_channels(priv);
2994	mlx5_vport_dealloc_q_counter(priv->mdev,
2995	    MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
2996	mlx5e_close_tises(priv);
2997
2998	return (0);
2999}
3000
3001#if (__FreeBSD_version >= 1100000)
3002static uint64_t
3003mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt)
3004{
3005	struct mlx5e_priv *priv = ifp->if_softc;
3006	u64 retval;
3007
3008	/* PRIV_LOCK(priv); XXX not allowed */
3009	switch (cnt) {
3010	case IFCOUNTER_IPACKETS:
3011		retval = priv->stats.vport.rx_packets;
3012		break;
3013	case IFCOUNTER_IERRORS:
3014		retval = priv->stats.pport.in_range_len_errors +
3015		    priv->stats.pport.out_of_range_len +
3016		    priv->stats.pport.too_long_errors +
3017		    priv->stats.pport.check_seq_err +
3018		    priv->stats.pport.alignment_err;
3019		break;
3020	case IFCOUNTER_IQDROPS:
3021		retval = priv->stats.vport.rx_out_of_buffer;
3022		break;
3023	case IFCOUNTER_OPACKETS:
3024		retval = priv->stats.vport.tx_packets;
3025		break;
3026	case IFCOUNTER_OERRORS:
3027		retval = priv->stats.port_stats_debug.out_discards;
3028		break;
3029	case IFCOUNTER_IBYTES:
3030		retval = priv->stats.vport.rx_bytes;
3031		break;
3032	case IFCOUNTER_OBYTES:
3033		retval = priv->stats.vport.tx_bytes;
3034		break;
3035	case IFCOUNTER_IMCASTS:
3036		retval = priv->stats.vport.rx_multicast_packets;
3037		break;
3038	case IFCOUNTER_OMCASTS:
3039		retval = priv->stats.vport.tx_multicast_packets;
3040		break;
3041	case IFCOUNTER_OQDROPS:
3042		retval = priv->stats.vport.tx_queue_dropped;
3043		break;
3044	case IFCOUNTER_COLLISIONS:
3045		retval = priv->stats.pport.collisions;
3046		break;
3047	default:
3048		retval = if_get_counter_default(ifp, cnt);
3049		break;
3050	}
3051	/* PRIV_UNLOCK(priv); XXX not allowed */
3052	return (retval);
3053}
3054#endif
3055
3056static void
3057mlx5e_set_rx_mode(struct ifnet *ifp)
3058{
3059	struct mlx5e_priv *priv = ifp->if_softc;
3060
3061	queue_work(priv->wq, &priv->set_rx_mode_work);
3062}
3063
3064static int
3065mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3066{
3067	struct mlx5e_priv *priv;
3068	struct ifreq *ifr;
3069	struct ifi2creq i2c;
3070	int error = 0;
3071	int mask = 0;
3072	int size_read = 0;
3073	int module_status;
3074	int module_num;
3075	int max_mtu;
3076	uint8_t read_addr;
3077
3078	priv = ifp->if_softc;
3079
3080	/* check if detaching */
3081	if (priv == NULL || priv->gone != 0)
3082		return (ENXIO);
3083
3084	switch (command) {
3085	case SIOCSIFMTU:
3086		ifr = (struct ifreq *)data;
3087
3088		PRIV_LOCK(priv);
3089		mlx5_query_port_max_mtu(priv->mdev, &max_mtu);
3090
3091		if (ifr->ifr_mtu >= MLX5E_MTU_MIN &&
3092		    ifr->ifr_mtu <= MIN(MLX5E_MTU_MAX, max_mtu)) {
3093			int was_opened;
3094
3095			was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
3096			if (was_opened)
3097				mlx5e_close_locked(ifp);
3098
3099			/* set new MTU */
3100			mlx5e_set_dev_port_mtu(ifp, ifr->ifr_mtu);
3101
3102			if (was_opened)
3103				mlx5e_open_locked(ifp);
3104		} else {
3105			error = EINVAL;
3106			mlx5_en_err(ifp,
3107			    "Invalid MTU value. Min val: %d, Max val: %d\n",
3108			    MLX5E_MTU_MIN, MIN(MLX5E_MTU_MAX, max_mtu));
3109		}
3110		PRIV_UNLOCK(priv);
3111		break;
3112	case SIOCSIFFLAGS:
3113		if ((ifp->if_flags & IFF_UP) &&
3114		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3115			mlx5e_set_rx_mode(ifp);
3116			break;
3117		}
3118		PRIV_LOCK(priv);
3119		if (ifp->if_flags & IFF_UP) {
3120			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3121				if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
3122					mlx5e_open_locked(ifp);
3123				ifp->if_drv_flags |= IFF_DRV_RUNNING;
3124				mlx5_set_port_status(priv->mdev, MLX5_PORT_UP);
3125			}
3126		} else {
3127			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3128				mlx5_set_port_status(priv->mdev,
3129				    MLX5_PORT_DOWN);
3130				if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
3131					mlx5e_close_locked(ifp);
3132				mlx5e_update_carrier(priv);
3133				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3134			}
3135		}
3136		PRIV_UNLOCK(priv);
3137		break;
3138	case SIOCADDMULTI:
3139	case SIOCDELMULTI:
3140		mlx5e_set_rx_mode(ifp);
3141		break;
3142	case SIOCSIFMEDIA:
3143	case SIOCGIFMEDIA:
3144	case SIOCGIFXMEDIA:
3145		ifr = (struct ifreq *)data;
3146		error = ifmedia_ioctl(ifp, ifr, &priv->media, command);
3147		break;
3148	case SIOCSIFCAP:
3149		ifr = (struct ifreq *)data;
3150		PRIV_LOCK(priv);
3151		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3152
3153		if (mask & IFCAP_TXCSUM) {
3154			ifp->if_capenable ^= IFCAP_TXCSUM;
3155			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
3156
3157			if (IFCAP_TSO4 & ifp->if_capenable &&
3158			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
3159				ifp->if_capenable &= ~IFCAP_TSO4;
3160				ifp->if_hwassist &= ~CSUM_IP_TSO;
3161				mlx5_en_err(ifp,
3162				    "tso4 disabled due to -txcsum.\n");
3163			}
3164		}
3165		if (mask & IFCAP_TXCSUM_IPV6) {
3166			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
3167			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
3168
3169			if (IFCAP_TSO6 & ifp->if_capenable &&
3170			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
3171				ifp->if_capenable &= ~IFCAP_TSO6;
3172				ifp->if_hwassist &= ~CSUM_IP6_TSO;
3173				mlx5_en_err(ifp,
3174				    "tso6 disabled due to -txcsum6.\n");
3175			}
3176		}
3177		if (mask & IFCAP_RXCSUM)
3178			ifp->if_capenable ^= IFCAP_RXCSUM;
3179		if (mask & IFCAP_RXCSUM_IPV6)
3180			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
3181		if (mask & IFCAP_TSO4) {
3182			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
3183			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
3184				mlx5_en_err(ifp, "enable txcsum first.\n");
3185				error = EAGAIN;
3186				goto out;
3187			}
3188			ifp->if_capenable ^= IFCAP_TSO4;
3189			ifp->if_hwassist ^= CSUM_IP_TSO;
3190		}
3191		if (mask & IFCAP_TSO6) {
3192			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
3193			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
3194				mlx5_en_err(ifp, "enable txcsum6 first.\n");
3195				error = EAGAIN;
3196				goto out;
3197			}
3198			ifp->if_capenable ^= IFCAP_TSO6;
3199			ifp->if_hwassist ^= CSUM_IP6_TSO;
3200		}
3201		if (mask & IFCAP_VLAN_HWFILTER) {
3202			if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3203				mlx5e_disable_vlan_filter(priv);
3204			else
3205				mlx5e_enable_vlan_filter(priv);
3206
3207			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
3208		}
3209		if (mask & IFCAP_VLAN_HWTAGGING)
3210			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
3211		if (mask & IFCAP_WOL_MAGIC)
3212			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
3213
3214		VLAN_CAPABILITIES(ifp);
3215		/* turning off LRO also means turning off HW LRO, if it's enabled */
3216		if (mask & IFCAP_LRO) {
3217			int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
3218			bool need_restart = false;
3219
3220			ifp->if_capenable ^= IFCAP_LRO;
3221
3222			/* figure out if updating HW LRO is needed */
3223			if (!(ifp->if_capenable & IFCAP_LRO)) {
3224				if (priv->params.hw_lro_en) {
3225					priv->params.hw_lro_en = false;
3226					need_restart = true;
3227				}
3228			} else {
3229				if (priv->params.hw_lro_en == false &&
3230				    priv->params_ethtool.hw_lro != 0) {
3231					priv->params.hw_lro_en = true;
3232					need_restart = true;
3233				}
3234			}
3235			if (was_opened && need_restart) {
3236				mlx5e_close_locked(ifp);
3237				mlx5e_open_locked(ifp);
3238			}
3239		}
3240out:
3241		PRIV_UNLOCK(priv);
3242		break;
3243
3244	case SIOCGI2C:
3245		ifr = (struct ifreq *)data;
3246
3247		/*
3248		 * Copy from the user-space address ifr_data to the
3249		 * kernel-space address i2c
3250		 */
3251		error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
3252		if (error)
3253			break;
3254
3255		if (i2c.len > sizeof(i2c.data)) {
3256			error = EINVAL;
3257			break;
3258		}
3259
3260		PRIV_LOCK(priv);
3261		/* Get module_num, which is required for the eeprom query */
3262		error = mlx5_query_module_num(priv->mdev, &module_num);
3263		if (error) {
3264			mlx5_en_err(ifp,
3265			    "Query module num failed, eeprom reading is not supported\n");
3266			error = EINVAL;
3267			goto err_i2c;
3268		}
3269		/* Check if module is present before doing an access */
3270		module_status = mlx5_query_module_status(priv->mdev, module_num);
3271		if (module_status != MLX5_MODULE_STATUS_PLUGGED_ENABLED) {
3272			error = EINVAL;
3273			goto err_i2c;
3274		}
3275		/*
3276		 * Currently 0xA0 and 0xA2 are the only addresses permitted.
3277		 * The internal conversion is as follows:
3278		 */
3279		if (i2c.dev_addr == 0xA0)
3280			read_addr = MLX5_I2C_ADDR_LOW;
3281		else if (i2c.dev_addr == 0xA2)
3282			read_addr = MLX5_I2C_ADDR_HIGH;
3283		else {
3284			mlx5_en_err(ifp,
3285			    "Query eeprom failed, Invalid Address: %X\n",
3286			    i2c.dev_addr);
3287			error = EINVAL;
3288			goto err_i2c;
3289		}
3290		error = mlx5_query_eeprom(priv->mdev,
3291		    read_addr, MLX5_EEPROM_LOW_PAGE,
3292		    (uint32_t)i2c.offset, (uint32_t)i2c.len, module_num,
3293		    (uint32_t *)i2c.data, &size_read);
3294		if (error) {
3295			mlx5_en_err(ifp,
3296			    "Query eeprom failed, eeprom reading is not supported\n");
3297			error = EINVAL;
3298			goto err_i2c;
3299		}
3300
3301		if (i2c.len > MLX5_EEPROM_MAX_BYTES) {
3302			error = mlx5_query_eeprom(priv->mdev,
3303			    read_addr, MLX5_EEPROM_LOW_PAGE,
3304			    (uint32_t)(i2c.offset + size_read),
3305			    (uint32_t)(i2c.len - size_read), module_num,
3306			    (uint32_t *)(i2c.data + size_read), &size_read);
3307		}
3308		if (error) {
3309			mlx5_en_err(ifp,
3310			    "Query eeprom failed, eeprom reading is not supported\n");
3311			error = EINVAL;
3312			goto err_i2c;
3313		}
3314
3315		error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
3316err_i2c:
3317		PRIV_UNLOCK(priv);
3318		break;
3319
3320	default:
3321		error = ether_ioctl(ifp, command, data);
3322		break;
3323	}
3324	return (error);
3325}
3326
3327static int
3328mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
3329{
3330	/*
3331	 * TODO: uncomment once FW really sets all these bits if
3332	 * (!mdev->caps.eth.rss_ind_tbl_cap || !mdev->caps.eth.csum_cap ||
3333	 * !mdev->caps.eth.max_lso_cap || !mdev->caps.eth.vlan_cap ||
3334	 * !(mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD)) return
3335	 * -ENOTSUPP;
3336	 */
3337
3338	/* TODO: add more must-have features */
3339
3340	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
3341		return (-ENODEV);
3342
3343	return (0);
3344}
3345
3346static u16
3347mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
3348{
3349	uint32_t bf_buf_size = (1U << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2U;
3350
3351	bf_buf_size -= sizeof(struct mlx5e_tx_wqe) - 2;
3352
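	/*
	 * Rough reading of the math above: half of the blue flame register
	 * (1 << log_bf_reg_size, split in two) appears to be usable per
	 * doorbell, and the non-inline part of the TX WQE is subtracted
	 * from it; the exact "- 2" offset is kept as-is and not explained
	 * further here.
	 */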
3353	/* verify against driver hardware limit */
3354	if (bf_buf_size > MLX5E_MAX_TX_INLINE)
3355		bf_buf_size = MLX5E_MAX_TX_INLINE;
3356
3357	return (bf_buf_size);
3358}
3359
3360static int
3361mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev,
3362    struct mlx5e_priv *priv,
3363    int num_comp_vectors)
3364{
3365	int err;
3366
3367	/*
3368	 * TODO: Consider link speed for setting "log_sq_size",
3369	 * "log_rq_size" and "cq_moderation_xxx":
3370	 */
3371	priv->params.log_sq_size =
3372	    MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
3373	priv->params.log_rq_size =
3374	    MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
3375	priv->params.rx_cq_moderation_usec =
3376	    MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
3377	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE :
3378	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
3379	priv->params.rx_cq_moderation_mode =
3380	    MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 1 : 0;
3381	priv->params.rx_cq_moderation_pkts =
3382	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
3383	priv->params.tx_cq_moderation_usec =
3384	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
3385	priv->params.tx_cq_moderation_pkts =
3386	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
3387	priv->params.min_rx_wqes =
3388	    MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
3389	priv->params.rx_hash_log_tbl_sz =
3390	    (order_base_2(num_comp_vectors) >
3391	    MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
3392	    order_base_2(num_comp_vectors) :
3393	    MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
3394	priv->params.num_tc = 1;
3395	priv->params.default_vlan_prio = 0;
3396	priv->counter_set_id = -1;
3397	priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
3398
3399	err = mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
3400	if (err)
3401		return (err);
3402
3403	/*
3404	 * HW LRO is currently off by default. Once it defaults to on, the
3405	 * HW capability "!!MLX5_CAP_ETH(mdev, lro_cap)" should be considered.
3406	 */
3407	priv->params.hw_lro_en = false;
3408	priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
3409
3410	/*
3411	 * CQE zipping is currently off by default. Once it defaults to
3412	 * on, the following HW capability should be considered:
3413	 * "!!MLX5_CAP_GEN(mdev, cqe_compression)"
3414	 */
3415	priv->params.cqe_zipping_en = false;
3416
3417	priv->mdev = mdev;
3418	priv->params.num_channels = num_comp_vectors;
3419	priv->params.channels_rsss = 1;
3420	priv->order_base_2_num_channels = order_base_2(num_comp_vectors);
3421	priv->queue_mapping_channel_mask =
3422	    roundup_pow_of_two(num_comp_vectors) - 1;
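	/*
	 * Illustrative example (values assumed): with 6 completion vectors
	 * roundup_pow_of_two(6) is 8, so the queue mapping mask becomes 7,
	 * while order_base_2(6) evaluates to 3.
	 */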
3423	priv->num_tc = priv->params.num_tc;
3424	priv->default_vlan_prio = priv->params.default_vlan_prio;
3425
3426	INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
3427	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
3428	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
3429
3430	return (0);
3431}
3432
3433static int
3434mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
3435		  struct mlx5_core_mr *mkey)
3436{
3437	struct ifnet *ifp = priv->ifp;
3438	struct mlx5_core_dev *mdev = priv->mdev;
3439	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
3440	void *mkc;
3441	u32 *in;
3442	int err;
3443
3444	in = mlx5_vzalloc(inlen);
3445	if (in == NULL) {
3446		mlx5_en_err(ifp, "failed to allocate inbox\n");
3447		return (-ENOMEM);
3448	}
3449
3450	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
3451	MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA);
3452	MLX5_SET(mkc, mkc, lw, 1);
3453	MLX5_SET(mkc, mkc, lr, 1);
3454
3455	MLX5_SET(mkc, mkc, pd, pdn);
3456	MLX5_SET(mkc, mkc, length64, 1);
3457	MLX5_SET(mkc, mkc, qpn, 0xffffff);
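	/*
	 * The fields above appear to describe a physical-address (PA)
	 * memory key covering the whole address space (length64) with
	 * local read/write access and not tied to any particular QP
	 * (qpn 0xffffff), which lets the RX/TX rings DMA mbufs directly.
	 */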
3458
3459	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
3460	if (err)
3461		mlx5_en_err(ifp, "mlx5_core_create_mkey failed, %d\n",
3462		    err);
3463
3464	kvfree(in);
3465	return (err);
3466}
3467
3468static const char *mlx5e_vport_stats_desc[] = {
3469	MLX5E_VPORT_STATS(MLX5E_STATS_DESC)
3470};
3471
3472static const char *mlx5e_pport_stats_desc[] = {
3473	MLX5E_PPORT_STATS(MLX5E_STATS_DESC)
3474};
3475
3476static void
3477mlx5e_priv_mtx_init(struct mlx5e_priv *priv)
3478{
3479	mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF);
3480	sx_init(&priv->state_lock, "mlx5state");
3481	callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0);
3482	MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
3483}
3484
3485static void
3486mlx5e_priv_mtx_destroy(struct mlx5e_priv *priv)
3487{
3488	mtx_destroy(&priv->async_events_mtx);
3489	sx_destroy(&priv->state_lock);
3490}
3491
3492static int
3493sysctl_firmware(SYSCTL_HANDLER_ARGS)
3494{
3495	/*
3496	 * The string format is "%d.%d.%d".
3497	 * fw_rev_{maj,min,sub} return u16 values, and 2^16 = 65536 needs
3498	 * at most 5 characters to print.
3499	 * Add the two "." separators and the terminating NUL, so at most
3500	 * 18 (5*3 + 3) characters are needed.
3501	 */
3502	char fw[18];
3503	struct mlx5e_priv *priv = arg1;
3504	int error;
3505
3506	snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev),
3507	    fw_rev_sub(priv->mdev));
3508	error = sysctl_handle_string(oidp, fw, sizeof(fw), req);
3509	return (error);
3510}
3511
3512static void
3513mlx5e_disable_tx_dma(struct mlx5e_channel *ch)
3514{
3515	int i;
3516
3517	for (i = 0; i < ch->num_tc; i++)
3518		mlx5e_drain_sq(&ch->sq[i]);
3519}
3520
3521static void
3522mlx5e_reset_sq_doorbell_record(struct mlx5e_sq *sq)
3523{
3524
3525	sq->doorbell.d32[0] = cpu_to_be32(MLX5_OPCODE_NOP);
3526	sq->doorbell.d32[1] = cpu_to_be32(sq->sqn << 8);
3527	mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
3528	sq->doorbell.d64 = 0;
3529}
3530
3531void
3532mlx5e_resume_sq(struct mlx5e_sq *sq)
3533{
3534	int err;
3535
3536	/* check if already enabled */
3537	if (READ_ONCE(sq->running) != 0)
3538		return;
3539
3540	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_ERR,
3541	    MLX5_SQC_STATE_RST);
3542	if (err != 0) {
3543		mlx5_en_err(sq->ifp,
3544		    "mlx5e_modify_sq() from ERR to RST failed: %d\n", err);
3545	}
3546
3547	sq->cc = 0;
3548	sq->pc = 0;
3549
3550	/* reset doorbell prior to moving from RST to RDY */
3551	mlx5e_reset_sq_doorbell_record(sq);
3552
3553	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST,
3554	    MLX5_SQC_STATE_RDY);
3555	if (err != 0) {
3556		mlx5_en_err(sq->ifp,
3557		    "mlx5e_modify_sq() from RST to RDY failed: %d\n", err);
3558	}
3559
3560	sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
3561	WRITE_ONCE(sq->running, 1);
3562}
3563
3564static void
3565mlx5e_enable_tx_dma(struct mlx5e_channel *ch)
3566{
3567	int i;
3568
3569	for (i = 0; i < ch->num_tc; i++)
3570		mlx5e_resume_sq(&ch->sq[i]);
3571}
3572
3573static void
3574mlx5e_disable_rx_dma(struct mlx5e_channel *ch)
3575{
3576	struct mlx5e_rq *rq = &ch->rq;
3577	int err;
3578
3579	mtx_lock(&rq->mtx);
3580	rq->enabled = 0;
3581	callout_stop(&rq->watchdog);
3582	mtx_unlock(&rq->mtx);
3583
3584	callout_drain(&rq->watchdog);
3585
3586	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
3587	if (err != 0) {
3588		mlx5_en_err(rq->ifp,
3589		    "mlx5e_modify_rq() from RDY to RST failed: %d\n", err);
3590	}
3591
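	/*
	 * Drain the receive queue by invoking the CQ completion handler
	 * directly until all posted WQEs have completed; the 1 ms sleep
	 * merely paces the polling (a reading of the loop below, not a
	 * documented requirement).
	 */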
3592	while (!mlx5_wq_ll_is_empty(&rq->wq)) {
3593		msleep(1);
3594		rq->cq.mcq.comp(&rq->cq.mcq);
3595	}
3596
3597	/*
3598	 * Transitioning into the RST state allows the FW to track fewer
3599	 * ERR-state queues, thus reducing the receive queue flushing time.
3600	 */
3601	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_ERR, MLX5_RQC_STATE_RST);
3602	if (err != 0) {
3603		mlx5_en_err(rq->ifp,
3604		    "mlx5e_modify_rq() from ERR to RST failed: %d\n", err);
3605	}
3606}
3607
3608static void
3609mlx5e_enable_rx_dma(struct mlx5e_channel *ch)
3610{
3611	struct mlx5e_rq *rq = &ch->rq;
3612	int err;
3613
3614	rq->wq.wqe_ctr = 0;
3615	mlx5_wq_ll_update_db_record(&rq->wq);
3616	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
3617	if (err != 0) {
3618		mlx5_en_err(rq->ifp,
3619		    "mlx5e_modify_rq() from RST to RDY failed: %d\n", err);
3620	}
3621
3622	rq->enabled = 1;
3623
3624	rq->cq.mcq.comp(&rq->cq.mcq);
3625}
3626
3627void
3628mlx5e_modify_tx_dma(struct mlx5e_priv *priv, uint8_t value)
3629{
3630	int i;
3631
3632	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
3633		return;
3634
3635	for (i = 0; i < priv->params.num_channels; i++) {
3636		if (value)
3637			mlx5e_disable_tx_dma(&priv->channel[i]);
3638		else
3639			mlx5e_enable_tx_dma(&priv->channel[i]);
3640	}
3641}
3642
3643void
3644mlx5e_modify_rx_dma(struct mlx5e_priv *priv, uint8_t value)
3645{
3646	int i;
3647
3648	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
3649		return;
3650
3651	for (i = 0; i < priv->params.num_channels; i++) {
3652		if (value)
3653			mlx5e_disable_rx_dma(&priv->channel[i]);
3654		else
3655			mlx5e_enable_rx_dma(&priv->channel[i]);
3656	}
3657}
3658
3659static void
3660mlx5e_add_hw_stats(struct mlx5e_priv *priv)
3661{
3662	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
3663	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, priv, 0,
3664	    sysctl_firmware, "A", "HCA firmware version");
3665
3666	SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
3667	    OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0,
3668	    "Board ID");
3669}
3670
3671static int
3672mlx5e_sysctl_tx_priority_flow_control(SYSCTL_HANDLER_ARGS)
3673{
3674	struct mlx5e_priv *priv = arg1;
3675	uint8_t temp[MLX5E_MAX_PRIORITY];
3676	uint32_t tx_pfc;
3677	int err;
3678	int i;
3679
3680	PRIV_LOCK(priv);
3681
3682	tx_pfc = priv->params.tx_priority_flow_control;
3683
3684	for (i = 0; i != MLX5E_MAX_PRIORITY; i++)
3685		temp[i] = (tx_pfc >> i) & 1;
3686
3687	err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY);
3688	if (err || !req->newptr)
3689		goto done;
3690	err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY);
3691	if (err)
3692		goto done;
3693
3694	priv->params.tx_priority_flow_control = 0;
3695
3696	/* range check input value */
3697	for (i = 0; i != MLX5E_MAX_PRIORITY; i++) {
3698		if (temp[i] > 1) {
3699			err = ERANGE;
3700			goto done;
3701		}
3702		priv->params.tx_priority_flow_control |= (temp[i] << i);
3703	}
3704
3705	/* check if update is required */
3706	if (tx_pfc != priv->params.tx_priority_flow_control)
3707		err = -mlx5e_set_port_pfc(priv);
3708done:
3709	if (err != 0)
3710		priv->params.tx_priority_flow_control = tx_pfc;
3711	PRIV_UNLOCK(priv);
3712
3713	return (err);
3714}
3715
3716static int
3717mlx5e_sysctl_rx_priority_flow_control(SYSCTL_HANDLER_ARGS)
3718{
3719	struct mlx5e_priv *priv = arg1;
3720	uint8_t temp[MLX5E_MAX_PRIORITY];
3721	uint32_t rx_pfc;
3722	int err;
3723	int i;
3724
3725	PRIV_LOCK(priv);
3726
3727	rx_pfc = priv->params.rx_priority_flow_control;
3728
3729	for (i = 0; i != MLX5E_MAX_PRIORITY; i++)
3730		temp[i] = (rx_pfc >> i) & 1;
3731
3732	err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY);
3733	if (err || !req->newptr)
3734		goto done;
3735	err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY);
3736	if (err)
3737		goto done;
3738
3739	priv->params.rx_priority_flow_control = 0;
3740
3741	/* range check input value */
3742	for (i = 0; i != MLX5E_MAX_PRIORITY; i++) {
3743		if (temp[i] > 1) {
3744			err = ERANGE;
3745			goto done;
3746		}
3747		priv->params.rx_priority_flow_control |= (temp[i] << i);
3748	}
3749
3750	/* check if update is required */
3751	if (rx_pfc != priv->params.rx_priority_flow_control) {
3752		err = -mlx5e_set_port_pfc(priv);
3753		if (err == 0)
3754			err = mlx5e_update_buf_lossy(priv);
3755	}
3756done:
3757	if (err != 0)
3758		priv->params.rx_priority_flow_control = rx_pfc;
3759	PRIV_UNLOCK(priv);
3760
3761	return (err);
3762}
3763
3764static void
3765mlx5e_setup_pauseframes(struct mlx5e_priv *priv)
3766{
3767#if (__FreeBSD_version < 1100000)
3768	char path[96];
3769#endif
3770	int error;
3771
3772	/* enable pauseframes by default */
3773	priv->params.tx_pauseframe_control = 1;
3774	priv->params.rx_pauseframe_control = 1;
3775
3776	/* disable ports flow control, PFC, by default */
3777	priv->params.tx_priority_flow_control = 0;
3778	priv->params.rx_priority_flow_control = 0;
3779
3780#if (__FreeBSD_version < 1100000)
3781	/* compute path for sysctl */
3782	snprintf(path, sizeof(path), "dev.mce.%d.tx_pauseframe_control",
3783	    device_get_unit(priv->mdev->pdev->dev.bsddev));
3784
3785	/* try to fetch tunable, if any */
3786	TUNABLE_INT_FETCH(path, &priv->params.tx_pauseframe_control);
3787
3788	/* compute path for sysctl */
3789	snprintf(path, sizeof(path), "dev.mce.%d.rx_pauseframe_control",
3790	    device_get_unit(priv->mdev->pdev->dev.bsddev));
3791
3792	/* try to fetch tunable, if any */
3793	TUNABLE_INT_FETCH(path, &priv->params.rx_pauseframe_control);
3794#endif
3795
3796	/* register pauseframe SYSCTLs */
3797	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3798	    OID_AUTO, "tx_pauseframe_control", CTLFLAG_RDTUN,
3799	    &priv->params.tx_pauseframe_control, 0,
3800	    "Set to enable TX pause frames. Clear to disable.");
3801
3802	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3803	    OID_AUTO, "rx_pauseframe_control", CTLFLAG_RDTUN,
3804	    &priv->params.rx_pauseframe_control, 0,
3805	    "Set to enable RX pause frames. Clear to disable.");
3806
3807	/* register priority flow control, PFC, SYSCTLs */
3808	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3809	    OID_AUTO, "tx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN |
3810	    CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_tx_priority_flow_control, "CU",
3811	    "Set to enable TX ports flow control frames for priorities 0..7. Clear to disable.");
3812
3813	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3814	    OID_AUTO, "rx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN |
3815	    CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_rx_priority_flow_control, "CU",
3816	    "Set to enable RX ports flow control frames for priorities 0..7. Clear to disable.");
3817
3818	PRIV_LOCK(priv);
3819
3820	/* range check */
3821	priv->params.tx_pauseframe_control =
3822	    priv->params.tx_pauseframe_control ? 1 : 0;
3823	priv->params.rx_pauseframe_control =
3824	    priv->params.rx_pauseframe_control ? 1 : 0;
3825
3826	/* update firmware */
3827	error = mlx5e_set_port_pause_and_pfc(priv);
3828	if (error == -EINVAL) {
3829		mlx5_en_err(priv->ifp,
3830		    "Global pauseframes must be disabled before enabling PFC.\n");
3831		priv->params.rx_priority_flow_control = 0;
3832		priv->params.tx_priority_flow_control = 0;
3833
3834		/* update firmware */
3835		(void) mlx5e_set_port_pause_and_pfc(priv);
3836	}
3837	PRIV_UNLOCK(priv);
3838}
3839
3840static void *
3841mlx5e_create_ifp(struct mlx5_core_dev *mdev)
3842{
3843	struct ifnet *ifp;
3844	struct mlx5e_priv *priv;
3845	u8 dev_addr[ETHER_ADDR_LEN] __aligned(4);
3846	u8 connector_type;
3847	struct sysctl_oid_list *child;
3848	int ncv = mdev->priv.eq_table.num_comp_vectors;
3849	char unit[16];
3850	int err;
3851	int i, j;
3852	u32 eth_proto_cap;
3853	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
3854	bool ext = false;
3855	u32 speeds_num;
3856	struct media media_entry = {};
3857
3858	if (mlx5e_check_required_hca_cap(mdev)) {
3859		mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n");
3860		return (NULL);
3861	}
3862	/*
3863	 * Try to allocate the priv and make room for worst-case
3864	 * number of channel structures:
3865	 */
3866	priv = malloc(sizeof(*priv) +
3867	    (sizeof(priv->channel[0]) * mdev->priv.eq_table.num_comp_vectors),
3868	    M_MLX5EN, M_WAITOK | M_ZERO);
3869	mlx5e_priv_mtx_init(priv);
3870
3871	ifp = priv->ifp = if_alloc(IFT_ETHER);
3872	if (ifp == NULL) {
3873		mlx5_core_err(mdev, "if_alloc() failed\n");
3874		goto err_free_priv;
3875	}
3876	ifp->if_softc = priv;
3877	if_initname(ifp, "mce", device_get_unit(mdev->pdev->dev.bsddev));
3878	ifp->if_mtu = ETHERMTU;
3879	ifp->if_init = mlx5e_open;
3880	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3881	ifp->if_ioctl = mlx5e_ioctl;
3882	ifp->if_transmit = mlx5e_xmit;
3883	ifp->if_qflush = if_qflush;
3884#if (__FreeBSD_version >= 1100000)
3885	ifp->if_get_counter = mlx5e_get_counter;
3886#endif
3887	ifp->if_snd.ifq_maxlen = ifqmaxlen;
3888	/*
3889	 * Set driver features
3890	 */
3891	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
3892	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
3893	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
3894	ifp->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
3895	ifp->if_capabilities |= IFCAP_LRO;
3896	ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO;
3897	ifp->if_capabilities |= IFCAP_HWSTATS;
3898
3899	/* set TSO limits so that we don't have to drop TX packets */
3900	ifp->if_hw_tsomax = MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
3901	ifp->if_hw_tsomaxsegcount = MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */;
3902	ifp->if_hw_tsomaxsegsize = MLX5E_MAX_TX_MBUF_SIZE;
3903
3904	ifp->if_capenable = ifp->if_capabilities;
3905	ifp->if_hwassist = 0;
3906	if (ifp->if_capenable & IFCAP_TSO)
3907		ifp->if_hwassist |= CSUM_TSO;
3908	if (ifp->if_capenable & IFCAP_TXCSUM)
3909		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
3910	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
3911		ifp->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
3912
3913	/* ifnet sysctl tree */
3914	sysctl_ctx_init(&priv->sysctl_ctx);
3915	priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev),
3916	    OID_AUTO, ifp->if_dname, CTLFLAG_RD, 0, "MLX5 ethernet - interface name");
3917	if (priv->sysctl_ifnet == NULL) {
3918		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
3919		goto err_free_sysctl;
3920	}
3921	snprintf(unit, sizeof(unit), "%d", ifp->if_dunit);
3922	priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3923	    OID_AUTO, unit, CTLFLAG_RD, 0, "MLX5 ethernet - interface unit");
3924	if (priv->sysctl_ifnet == NULL) {
3925		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
3926		goto err_free_sysctl;
3927	}
3928
3929	/* HW sysctl tree */
3930	child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev));
3931	priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child,
3932	    OID_AUTO, "hw", CTLFLAG_RD, 0, "MLX5 ethernet dev hw");
3933	if (priv->sysctl_hw == NULL) {
3934		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
3935		goto err_free_sysctl;
3936	}
3937
3938	err = mlx5e_build_ifp_priv(mdev, priv, ncv);
3939	if (err) {
3940		mlx5_core_err(mdev, "mlx5e_build_ifp_priv() failed (%d)\n", err);
3941		goto err_free_sysctl;
3942	}
3943
3944	/* reuse mlx5core's watchdog workqueue */
3945	priv->wq = mdev->priv.health.wq_watchdog;
3946
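	/*
	 * Allocate the device resources shared by all queues: a UAR for
	 * doorbells, a protection domain, a transport domain and a
	 * memory key.
	 */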
3947	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
3948	if (err) {
3949		mlx5_en_err(ifp, "mlx5_alloc_map_uar failed, %d\n", err);
3950		goto err_free_wq;
3951	}
3952	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
3953	if (err) {
3954		mlx5_en_err(ifp, "mlx5_core_alloc_pd failed, %d\n", err);
3955		goto err_unmap_free_uar;
3956	}
3957	err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
3958	if (err) {
3959		mlx5_en_err(ifp,
3960		    "mlx5_alloc_transport_domain failed, %d\n", err);
3961		goto err_dealloc_pd;
3962	}
3963	err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
3964	if (err) {
3965		mlx5_en_err(ifp, "mlx5e_create_mkey failed, %d\n", err);
3966		goto err_dealloc_transport_domain;
3967	}
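	/* Read the hardware MAC address from the NIC vport context. */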
3968	mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr);
3969
3970	/* check if we should generate a random MAC address */
3971	if (MLX5_CAP_GEN(priv->mdev, vport_group_manager) == 0 &&
3972	    is_zero_ether_addr(dev_addr)) {
3973		random_ether_addr(dev_addr);
3974		mlx5_en_err(ifp, "Assigned random MAC address\n");
3975	}
3976
3977	/* set default MTU */
3978	mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu);
3979
3980	/* Set default media status */
3981	priv->media_status_last = IFM_AVALID;
3982	priv->media_active_last = IFM_ETHER | IFM_AUTO |
3983	    IFM_ETH_RXPAUSE | IFM_FDX;
3984
3985	/* set up the default pauseframes configuration */
3986	mlx5e_setup_pauseframes(priv);
3987
3988	/* Set up the supported media types. */
3989	/* XXX: Is it safe to proceed if the PTYS query fails? */
3990	if (!mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1)) {
3991		ext = MLX5_CAP_PCAM_FEATURE(mdev,
3992		    ptys_extended_ethernet);
3993		eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
3994		    eth_proto_capability);
3995		if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type))
3996			connector_type = MLX5_GET(ptys_reg, out,
3997			    connector_type);
3998	} else {
3999		eth_proto_cap = 0;
4000		mlx5_en_err(ifp, "Query port media capability failed\n");
4001	}
4002
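	/* Register the ifmedia change and status callbacks. */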
4003	ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
4004	    mlx5e_media_change, mlx5e_media_status);
4005
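	/*
	 * For every protocol reported in eth_proto_cap, add one plain
	 * ifmedia entry and one entry with full-duplex and pause flags.
	 */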
4006	speeds_num = ext ? MLX5E_EXT_LINK_SPEEDS_NUMBER : MLX5E_LINK_SPEEDS_NUMBER;
4007	for (i = 0; i != speeds_num; i++) {
4008		for (j = 0; j < MLX5E_LINK_MODES_NUMBER ; ++j) {
4009			media_entry = ext ? mlx5e_ext_mode_table[i][j] :
4010			    mlx5e_mode_table[i][j];
4011			if (media_entry.baudrate == 0)
4012				continue;
4013			if (MLX5E_PROT_MASK(i) & eth_proto_cap) {
4014				ifmedia_add(&priv->media,
4015				    media_entry.subtype |
4016				    IFM_ETHER, 0, NULL);
4017				ifmedia_add(&priv->media,
4018				    media_entry.subtype |
4019				    IFM_ETHER | IFM_FDX |
4020				    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
4021			}
4022		}
4023	}
4024
4025	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
4026	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
4027	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
4028
4029	/* Set autoselect by default */
4030	ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
4031	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
4032	ether_ifattach(ifp, dev_addr);
4033
4034	/* Register for VLAN events */
4035	priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
4036	    mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
4037	priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
4038	    mlx5e_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);
4039
4040	/* Link is down by default */
4041	if_link_state_change(ifp, LINK_STATE_DOWN);
4042
4043	mlx5e_enable_async_events(priv);
4044
4045	mlx5e_add_hw_stats(priv);
4046
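	/* Export the vport and physical port counters via sysctl. */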
4047	mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
4048	    "vstats", mlx5e_vport_stats_desc, MLX5E_VPORT_STATS_NUM,
4049	    priv->stats.vport.arg);
4050
4051	mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
4052	    "pstats", mlx5e_pport_stats_desc, MLX5E_PPORT_STATS_NUM,
4053	    priv->stats.pport.arg);
4054
4055	mlx5e_create_ethtool(priv);
4056
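	/* Run an initial statistics update under the async events lock. */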
4057	mtx_lock(&priv->async_events_mtx);
4058	mlx5e_update_stats(priv);
4059	mtx_unlock(&priv->async_events_mtx);
4060
4061	return (priv);
4062
4063err_dealloc_transport_domain:
4064	mlx5_dealloc_transport_domain(mdev, priv->tdn);
4065
4066err_dealloc_pd:
4067	mlx5_core_dealloc_pd(mdev, priv->pdn);
4068
4069err_unmap_free_uar:
4070	mlx5_unmap_free_uar(mdev, &priv->cq_uar);
4071
4072err_free_wq:
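	/* The workqueue is borrowed from mlx5_core; only flush it. */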
4073	flush_workqueue(priv->wq);
4074
4075err_free_sysctl:
4076	sysctl_ctx_free(&priv->sysctl_ctx);
4077	if (priv->sysctl_debug)
4078		sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
4079	if_free(ifp);
4080
4081err_free_priv:
4082	mlx5e_priv_mtx_destroy(priv);
4083	free(priv, M_MLX5EN);
4084	return (NULL);
4085}
4086
4087static void
4088mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
4089{
4090	struct mlx5e_priv *priv = vpriv;
4091	struct ifnet *ifp = priv->ifp;
4092
4093	/* don't allow more IOCTLs */
4094	priv->gone = 1;
4095
4096	/* XXX wait a bit to allow IOCTL handlers to complete */
4097	pause("W", hz);
4098
4099	/* stop watchdog timer */
4100	callout_drain(&priv->watchdog);
4101
4102	if (priv->vlan_attach != NULL)
4103		EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
4104	if (priv->vlan_detach != NULL)
4105		EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);
4106
4107	/* make sure device gets closed */
4108	PRIV_LOCK(priv);
4109	mlx5e_close_locked(ifp);
4110	PRIV_UNLOCK(priv);
4111
4112	/* unregister device */
4113	ifmedia_removeall(&priv->media);
4114	ether_ifdetach(ifp);
4115	if_free(ifp);
4116
4117	/* destroy all remaining sysctl nodes */
4118	sysctl_ctx_free(&priv->stats.vport.ctx);
4119	sysctl_ctx_free(&priv->stats.pport.ctx);
4120	if (priv->sysctl_debug)
4121		sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
4122	sysctl_ctx_free(&priv->sysctl_ctx);
4123
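	/* Release device resources in the reverse order of allocation. */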
4124	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
4125	mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
4126	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
4127	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
4128	mlx5e_disable_async_events(priv);
4129	flush_workqueue(priv->wq);
4130	mlx5e_priv_mtx_destroy(priv);
4131	free(priv, M_MLX5EN);
4132}
4133
4134static void *
4135mlx5e_get_ifp(void *vpriv)
4136{
4137	struct mlx5e_priv *priv = vpriv;
4138
4139	return (priv->ifp);
4140}
4141
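/*
 * Registration glue for the mlx5 core bus: these callbacks are invoked
 * when an Ethernet-capable mlx5 device is added or removed.
 */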
4142static struct mlx5_interface mlx5e_interface = {
4143	.add = mlx5e_create_ifp,
4144	.remove = mlx5e_destroy_ifp,
4145	.event = mlx5e_async_event,
4146	.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
4147	.get_dev = mlx5e_get_ifp,
4148};
4149
4150void
4151mlx5e_init(void)
4152{
4153	mlx5_register_interface(&mlx5e_interface);
4154}
4155
4156void
4157mlx5e_cleanup(void)
4158{
4159	mlx5_unregister_interface(&mlx5e_interface);
4160}
4161
4162static void
4163mlx5e_show_version(void __unused *arg)
4164{
4165
4166	printf("%s", mlx5e_version);
4167}
4168SYSINIT(mlx5e_show_version, SI_SUB_DRIVERS, SI_ORDER_ANY, mlx5e_show_version, NULL);
4169
4170module_init_order(mlx5e_init, SI_ORDER_THIRD);
4171module_exit_order(mlx5e_cleanup, SI_ORDER_THIRD);
4172
4173#if (__FreeBSD_version >= 1100000)
4174MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1);
4175#endif
4176MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1);
4177MODULE_VERSION(mlx5en, 1);
4178