mlx5_en_main.c revision 353248
/*-
 * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_en/mlx5_en_main.c 353248 2019-10-07 09:57:14Z hselasky $
 */

#include "en.h"

#include <sys/sockio.h>
#include <machine/atomic.h>

#ifndef ETH_DRIVER_VERSION
#define	ETH_DRIVER_VERSION	"3.5.1"
#endif
#define	DRIVER_RELDATE	"April 2019"

static const char mlx5e_version[] = "mlx5en: Mellanox Ethernet driver "
	ETH_DRIVER_VERSION " (" DRIVER_RELDATE ")\n";

static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs);

struct mlx5e_channel_param {
	struct mlx5e_rq_param rq;
	struct mlx5e_sq_param sq;
	struct mlx5e_cq_param rx_cq;
	struct mlx5e_cq_param tx_cq;
};

struct media {
	u32	subtype;
	u64	baudrate;
};

static const struct media mlx5e_mode_table[MLX5E_LINK_SPEEDS_NUMBER][MLX5E_LINK_MODES_NUMBER] = {

	[MLX5E_1000BASE_CX_SGMII][MLX5E_SGMII] = {
		.subtype = IFM_1000_CX_SGMII,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_1000BASE_KX][MLX5E_KX] = {
		.subtype = IFM_1000_KX,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_10GBASE_CX4][MLX5E_CX4] = {
		.subtype = IFM_10G_CX4,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_KX4][MLX5E_KX4] = {
		.subtype = IFM_10G_KX4,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_KR][MLX5E_KR] = {
		.subtype = IFM_10G_KR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_20GBASE_KR2][MLX5E_KR2] = {
		.subtype = IFM_20G_KR2,
		.baudrate = IF_Gbps(20ULL),
	},
	[MLX5E_40GBASE_CR4][MLX5E_CR4] = {
		.subtype = IFM_40G_CR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_KR4][MLX5E_KR4] = {
		.subtype = IFM_40G_KR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_56GBASE_R4][MLX5E_R] = {
		.subtype = IFM_56G_R4,
		.baudrate = IF_Gbps(56ULL),
	},
	[MLX5E_10GBASE_CR][MLX5E_CR1] = {
		.subtype = IFM_10G_CR1,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_SR][MLX5E_SR] = {
		.subtype = IFM_10G_SR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_ER_LR][MLX5E_ER] = {
		.subtype = IFM_10G_ER,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_ER_LR][MLX5E_LR] = {
		.subtype = IFM_10G_LR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_40GBASE_SR4][MLX5E_SR4] = {
		.subtype = IFM_40G_SR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_LR4_ER4][MLX5E_LR4] = {
		.subtype = IFM_40G_LR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_LR4_ER4][MLX5E_ER4] = {
		.subtype = IFM_40G_ER4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_100GBASE_CR4][MLX5E_CR4] = {
		.subtype = IFM_100G_CR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_SR4][MLX5E_SR4] = {
		.subtype = IFM_100G_SR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_KR4][MLX5E_KR4] = {
		.subtype = IFM_100G_KR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_LR4][MLX5E_LR4] = {
		.subtype = IFM_100G_LR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100BASE_TX][MLX5E_TX] = {
		.subtype = IFM_100_TX,
		.baudrate = IF_Mbps(100ULL),
	},
	[MLX5E_1000BASE_T][MLX5E_T] = {
		.subtype = IFM_1000_T,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_10GBASE_T][MLX5E_T] = {
		.subtype = IFM_10G_T,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_25GBASE_CR][MLX5E_CR] = {
		.subtype = IFM_25G_CR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GBASE_KR][MLX5E_KR] = {
		.subtype = IFM_25G_KR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GBASE_SR][MLX5E_SR] = {
		.subtype = IFM_25G_SR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_50GBASE_CR2][MLX5E_CR2] = {
		.subtype = IFM_50G_CR2,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GBASE_KR2][MLX5E_KR2] = {
		.subtype = IFM_50G_KR2,
		.baudrate = IF_Gbps(50ULL),
	},
};

static const struct media mlx5e_ext_mode_table[MLX5E_EXT_LINK_SPEEDS_NUMBER][MLX5E_LINK_MODES_NUMBER] = {
	[MLX5E_SGMII_100M][MLX5E_SGMII] = {
		.subtype = IFM_100_SGMII,
		.baudrate = IF_Mbps(100),
	},
	[MLX5E_1000BASE_X_SGMII][MLX5E_KX] = {
		.subtype = IFM_1000_KX,
		.baudrate = IF_Mbps(1000),
	},
	[MLX5E_1000BASE_X_SGMII][MLX5E_CX_SGMII] = {
		.subtype = IFM_1000_CX_SGMII,
		.baudrate = IF_Mbps(1000),
	},
	[MLX5E_1000BASE_X_SGMII][MLX5E_CX] = {
		.subtype = IFM_1000_CX,
		.baudrate = IF_Mbps(1000),
	},
	[MLX5E_1000BASE_X_SGMII][MLX5E_LX] = {
		.subtype = IFM_1000_LX,
		.baudrate = IF_Mbps(1000),
	},
	[MLX5E_1000BASE_X_SGMII][MLX5E_SX] = {
		.subtype = IFM_1000_SX,
		.baudrate = IF_Mbps(1000),
	},
	[MLX5E_1000BASE_X_SGMII][MLX5E_T] = {
		.subtype = IFM_1000_T,
		.baudrate = IF_Mbps(1000),
	},
	[MLX5E_5GBASE_R][MLX5E_T] = {
		.subtype = IFM_5000_T,
		.baudrate = IF_Mbps(5000),
	},
	[MLX5E_5GBASE_R][MLX5E_KR] = {
		.subtype = IFM_5000_KR,
		.baudrate = IF_Mbps(5000),
	},
	[MLX5E_5GBASE_R][MLX5E_KR1] = {
		.subtype = IFM_5000_KR1,
		.baudrate = IF_Mbps(5000),
	},
	[MLX5E_5GBASE_R][MLX5E_KR_S] = {
		.subtype = IFM_5000_KR_S,
		.baudrate = IF_Mbps(5000),
	},
	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_ER] = {
		.subtype = IFM_10G_ER,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_KR] = {
		.subtype = IFM_10G_KR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_LR] = {
		.subtype = IFM_10G_LR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_SR] = {
		.subtype = IFM_10G_SR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_T] = {
		.subtype = IFM_10G_T,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_AOC] = {
		.subtype = IFM_10G_AOC,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_CR1] = {
		.subtype = IFM_10G_CR1,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_CR4] = {
		.subtype = IFM_40G_CR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_KR4] = {
		.subtype = IFM_40G_KR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_LR4] = {
		.subtype = IFM_40G_LR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_SR4] = {
		.subtype = IFM_40G_SR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_ER4] = {
		.subtype = IFM_40G_ER4,
		.baudrate = IF_Gbps(40ULL),
	},

	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR] = {
		.subtype = IFM_25G_CR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR] = {
		.subtype = IFM_25G_KR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_SR] = {
		.subtype = IFM_25G_SR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_ACC] = {
		.subtype = IFM_25G_ACC,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_AOC] = {
		.subtype = IFM_25G_AOC,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR1] = {
		.subtype = IFM_25G_CR1,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR_S] = {
		.subtype = IFM_25G_CR_S,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR1] = {
		.subtype = IFM_25G_KR1,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR_S] = {
		.subtype = IFM_25G_KR_S,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_LR] = {
		.subtype = IFM_25G_LR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_T] = {
		.subtype = IFM_25G_T,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_CR2] = {
		.subtype = IFM_50G_CR2,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_KR2] = {
		.subtype = IFM_50G_KR2,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_SR2] = {
		.subtype = IFM_50G_SR2,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_LR2] = {
		.subtype = IFM_50G_LR2,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_LR] = {
		.subtype = IFM_50G_LR,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_SR] = {
		.subtype = IFM_50G_SR,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_CP] = {
		.subtype = IFM_50G_CP,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_FR] = {
		.subtype = IFM_50G_FR,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_KR_PAM4] = {
		.subtype = IFM_50G_KR_PAM4,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_CR4] = {
		.subtype = IFM_100G_CR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_KR4] = {
		.subtype = IFM_100G_KR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_LR4] = {
		.subtype = IFM_100G_LR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_SR4] = {
		.subtype = IFM_100G_SR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_SR2] = {
		.subtype = IFM_100G_SR2,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_CP2] = {
		.subtype = IFM_100G_CP2,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_KR2_PAM4] = {
		.subtype = IFM_100G_KR2_PAM4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_DR4] = {
		.subtype = IFM_200G_DR4,
		.baudrate = IF_Gbps(200ULL),
	},
	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_LR4] = {
		.subtype = IFM_200G_LR4,
		.baudrate = IF_Gbps(200ULL),
	},
	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_SR4] = {
		.subtype = IFM_200G_SR4,
		.baudrate = IF_Gbps(200ULL),
	},
	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_FR4] = {
		.subtype = IFM_200G_FR4,
		.baudrate = IF_Gbps(200ULL),
	},
	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_CR4_PAM4] = {
		.subtype = IFM_200G_CR4_PAM4,
		.baudrate = IF_Gbps(200ULL),
	},
	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_KR4_PAM4] = {
		.subtype = IFM_200G_KR4_PAM4,
		.baudrate = IF_Gbps(200ULL),
	},
};

MALLOC_DEFINE(M_MLX5EN, "MLX5EN", "MLX5 Ethernet");
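/*
 * Illustrative sketch (not part of the original driver): how the two
 * tables above are indexed. The PTYS "eth_proto_oper" field reports
 * the operational protocol as a bitmask with a single bit set, so
 * ilog2() recovers the speed row and the columns are scanned for the
 * first populated cable/module entry, exactly as mlx5e_update_carrier()
 * below does:
 */
#if 0
	eth_proto_oper = MLX5E_PROT_MASK(MLX5E_25GBASE_CR);
	i = ilog2(eth_proto_oper);	/* row index: MLX5E_25GBASE_CR */
	for (j = 0; j != MLX5E_LINK_MODES_NUMBER; j++)
		if (mlx5e_mode_table[i][j].baudrate != 0)
			break;		/* finds IFM_25G_CR at 25 Gb/s */
#endif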
static void
mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
	u32 eth_proto_oper;
	int error;
	u8 port_state;
	u8 is_er_type;
	u8 i, j;
	bool ext;
	struct media media_entry = {};

	port_state = mlx5_query_vport_state(mdev,
	    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

	if (port_state == VPORT_STATE_UP) {
		priv->media_status_last |= IFM_ACTIVE;
	} else {
		priv->media_status_last &= ~IFM_ACTIVE;
		priv->media_active_last = IFM_ETHER;
		if_link_state_change(priv->ifp, LINK_STATE_DOWN);
		return;
	}

	error = mlx5_query_port_ptys(mdev, out, sizeof(out),
	    MLX5_PTYS_EN, 1);
	if (error) {
		priv->media_active_last = IFM_ETHER;
		priv->ifp->if_baudrate = 1;
		mlx5_en_err(priv->ifp, "query port ptys failed: 0x%x\n",
		    error);
		return;
	}

	ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
	eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
	    eth_proto_oper);

	i = ilog2(eth_proto_oper);

	for (j = 0; j != MLX5E_LINK_MODES_NUMBER; j++) {
		media_entry = ext ? mlx5e_ext_mode_table[i][j] :
		    mlx5e_mode_table[i][j];
		if (media_entry.baudrate != 0)
			break;
	}

	if (media_entry.subtype == 0) {
		mlx5_en_err(priv->ifp,
		    "Could not find operational media subtype\n");
		return;
	}

	switch (media_entry.subtype) {
	case IFM_10G_ER:
		error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type);
		if (error != 0) {
			mlx5_en_err(priv->ifp,
			    "query port pddr failed: %d\n", error);
		}
		if (error != 0 || is_er_type == 0)
			media_entry.subtype = IFM_10G_LR;
		break;
	case IFM_40G_LR4:
		error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type);
		if (error != 0) {
			mlx5_en_err(priv->ifp,
			    "query port pddr failed: %d\n", error);
		}
		if (error == 0 && is_er_type != 0)
			media_entry.subtype = IFM_40G_ER4;
		break;
	}
	priv->media_active_last = media_entry.subtype | IFM_ETHER | IFM_FDX;
	priv->ifp->if_baudrate = media_entry.baudrate;

	if_link_state_change(priv->ifp, LINK_STATE_UP);
}

static void
mlx5e_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
{
	struct mlx5e_priv *priv = dev->if_softc;

	ifmr->ifm_status = priv->media_status_last;
	ifmr->ifm_active = priv->media_active_last |
	    (priv->params.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) |
	    (priv->params.tx_pauseframe_control ? IFM_ETH_TXPAUSE : 0);
}
static u32
mlx5e_find_link_mode(u32 subtype, bool ext)
{
	u32 i;
	u32 j;
	u32 link_mode = 0;
	u32 speeds_num = 0;
	struct media media_entry = {};

	switch (subtype) {
	case IFM_10G_LR:
		subtype = IFM_10G_ER;
		break;
	case IFM_40G_ER4:
		subtype = IFM_40G_LR4;
		break;
	}

	speeds_num = ext ? MLX5E_EXT_LINK_SPEEDS_NUMBER :
	    MLX5E_LINK_SPEEDS_NUMBER;

	for (i = 0; i != speeds_num; i++) {
		for (j = 0; j < MLX5E_LINK_MODES_NUMBER; ++j) {
			media_entry = ext ? mlx5e_ext_mode_table[i][j] :
			    mlx5e_mode_table[i][j];
			if (media_entry.baudrate == 0)
				continue;
			if (media_entry.subtype == subtype) {
				link_mode |= MLX5E_PROT_MASK(i);
			}
		}
	}

	return (link_mode);
}

static int
mlx5e_set_port_pause_and_pfc(struct mlx5e_priv *priv)
{
	return (mlx5_set_port_pause_and_pfc(priv->mdev, 1,
	    priv->params.rx_pauseframe_control,
	    priv->params.tx_pauseframe_control,
	    priv->params.rx_priority_flow_control,
	    priv->params.tx_priority_flow_control));
}

static int
mlx5e_set_port_pfc(struct mlx5e_priv *priv)
{
	int error;

	if (priv->gone != 0) {
		error = -ENXIO;
	} else if (priv->params.rx_pauseframe_control ||
	    priv->params.tx_pauseframe_control) {
		mlx5_en_err(priv->ifp,
		    "Global pauseframes must be disabled before enabling PFC.\n");
		error = -EINVAL;
	} else {
		error = mlx5e_set_port_pause_and_pfc(priv);
	}
	return (error);
}
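/*
 * Illustrative sketch (not part of the original driver): the reverse
 * lookup used by mlx5e_media_change() below. One ifmedia subtype may
 * appear in several speed rows, hence the bitmask return value:
 */
#if 0
	link_mode = mlx5e_find_link_mode(IFM_25G_CR, false);
	/* link_mode now has MLX5E_PROT_MASK(MLX5E_25GBASE_CR) set; it
	 * is then intersected with eth_proto_capability before use. */
#endif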
static int
mlx5e_media_change(struct ifnet *dev)
{
	struct mlx5e_priv *priv = dev->if_softc;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 eth_proto_cap;
	u32 link_mode;
	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
	int was_opened;
	int locked;
	int error;
	bool ext;

	locked = PRIV_LOCKED(priv);
	if (!locked)
		PRIV_LOCK(priv);

	if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) {
		error = EINVAL;
		goto done;
	}

	error = mlx5_query_port_ptys(mdev, out, sizeof(out),
	    MLX5_PTYS_EN, 1);
	if (error != 0) {
		mlx5_en_err(dev, "Query port media capability failed\n");
		goto done;
	}

	ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
	link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media), ext);

	/* query supported capabilities */
	eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
	    eth_proto_capability);

	/* check for autoselect */
	if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO) {
		link_mode = eth_proto_cap;
		if (link_mode == 0) {
			mlx5_en_err(dev, "Port media capability is zero\n");
			error = EINVAL;
			goto done;
		}
	} else {
		link_mode = link_mode & eth_proto_cap;
		if (link_mode == 0) {
			mlx5_en_err(dev, "Not supported link mode requested\n");
			error = EINVAL;
			goto done;
		}
	}
	if (priv->media.ifm_media & (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
		/* check if PFC is enabled */
		if (priv->params.rx_priority_flow_control ||
		    priv->params.tx_priority_flow_control) {
			mlx5_en_err(dev, "PFC must be disabled before enabling global pauseframes.\n");
			error = EINVAL;
			goto done;
		}
	}
	/* update pauseframe control bits */
	priv->params.rx_pauseframe_control =
	    (priv->media.ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0;
	priv->params.tx_pauseframe_control =
	    (priv->media.ifm_media & IFM_ETH_TXPAUSE) ? 1 : 0;

	/* check if device is opened */
	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);

	/* reconfigure the hardware */
	mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
	mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN, ext);
	error = -mlx5e_set_port_pause_and_pfc(priv);
	if (was_opened)
		mlx5_set_port_status(mdev, MLX5_PORT_UP);

done:
	if (!locked)
		PRIV_UNLOCK(priv);
	return (error);
}

static void
mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
	    update_carrier_work);

	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_update_carrier(priv);
	PRIV_UNLOCK(priv);
}

#define	MLX5E_PCIE_PERF_GET_64(a,b,c,d,e,f)    \
	s_debug->c = MLX5_GET64(mpcnt_reg, out, counter_set.f.c);

#define	MLX5E_PCIE_PERF_GET_32(a,b,c,d,e,f)    \
	s_debug->c = MLX5_GET(mpcnt_reg, out, counter_set.f.c);

static void
mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
	const unsigned sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;
	void *in;
	int err;

	/* allocate firmware request structures */
	in = mlx5_vzalloc(sz);
	out = mlx5_vzalloc(sz);
	if (in == NULL || out == NULL)
		goto free_out;

	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
	if (err != 0)
		goto free_out;

	MLX5E_PCIE_PERFORMANCE_COUNTERS_64(MLX5E_PCIE_PERF_GET_64)
	MLX5E_PCIE_PERFORMANCE_COUNTERS_32(MLX5E_PCIE_PERF_GET_32)

	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP);
	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
	if (err != 0)
		goto free_out;

	MLX5E_PCIE_TIMERS_AND_STATES_COUNTERS_32(MLX5E_PCIE_PERF_GET_32)

	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_LANE_COUNTERS_GROUP);
	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
	if (err != 0)
		goto free_out;

	MLX5E_PCIE_LANE_COUNTERS_32(MLX5E_PCIE_PERF_GET_32)

free_out:
	/* free firmware request structures */
	kvfree(in);
	kvfree(out);
}
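/*
 * Note on the MLX5E_PCIE_PERF_GET_64()/_32() macros used above: they
 * are X-macro callbacks invoked once per counter listed in the
 * MLX5E_PCIE_*_COUNTERS_*() lists. A hypothetical counter "foo" in a
 * hypothetical counter set "bar" would expand to (illustrative only):
 */
#if 0
	s_debug->foo = MLX5_GET64(mpcnt_reg, out, counter_set.bar.foo);
#endif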
/*
 * This function reads the physical port counters from the firmware
 * using a pre-defined layout defined by various MLX5E_PPORT_XXX()
 * macros. The output is converted from big-endian 64-bit values into
 * host endian ones and stored in the "priv->stats.pport" structure.
 */
static void
mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_pport_stats *s = &priv->stats.pport;
	struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
	u32 *in;
	u32 *out;
	const u64 *ptr;
	unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	unsigned x;
	unsigned y;
	unsigned z;

	/* allocate firmware request structures */
	in = mlx5_vzalloc(sz);
	out = mlx5_vzalloc(sz);
	if (in == NULL || out == NULL)
		goto free_out;

	/*
	 * Get pointer to the 64-bit counter set which is located at a
	 * fixed offset in the output firmware request structure:
	 */
	ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set);

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	/* read IEEE802_3 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0, y = MLX5E_PPORT_PER_PRIO_STATS_NUM;
	    x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++)
		s->arg[y] = be64toh(ptr[x]);

	/* read RFC2819 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++)
		s->arg[y] = be64toh(ptr[x]);

	for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM +
	    MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read RFC2863 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read physical layer stats counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read Extended Ethernet counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_ETHERNET_EXTENDED_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read Extended Statistical Group */
	if (MLX5_CAP_GEN(mdev, pcam_reg) &&
	    MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) &&
	    MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters)) {
		/* read Extended Statistical counter group using predefined counter layout */
		MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

		for (x = 0; x != MLX5E_PPORT_STATISTICAL_DEBUG_NUM; x++, y++)
			s_debug->arg[y] = be64toh(ptr[x]);
	}

	/* read PCIE counters */
	mlx5e_update_pcie_counters(priv);

	/* read per-priority counters */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);

	/* iterate all the priorities */
	for (y = z = 0; z != MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO; z++) {
		MLX5_SET(ppcnt_reg, in, prio_tc, z);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

		/* read per priority stats counter group using predefined counter layout */
		for (x = 0; x != (MLX5E_PPORT_PER_PRIO_STATS_NUM /
		    MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO); x++, y++)
			s->arg[y] = be64toh(ptr[x]);
	}

free_out:
	/* free firmware request structures */
	kvfree(in);
	kvfree(out);
}
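/*
 * Layout note: "s->arg[]" is one flat array shared by all counter
 * groups read above. The per-priority counters occupy the first
 * MLX5E_PPORT_PER_PRIO_STATS_NUM slots and are filled last by the
 * prio_tc loop, which is why the IEEE802.3 loop starts writing at
 * "y = MLX5E_PPORT_PER_PRIO_STATS_NUM".
 */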
static void
mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
{
	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};

	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode,
	    MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, op_mod, 0);
	MLX5_SET(query_vnic_env_in, in, other_vport, 0);

	if (mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)) != 0)
		return;

	priv->stats.vport.rx_steer_missed_packets =
	    MLX5_GET64(query_vnic_env_out, out,
	    vport_env.nic_receive_steering_discard);
}

/*
 * This function is called regularly to collect all statistics
 * counters from the firmware. The values can be viewed through the
 * sysctl interface. Execution is serialized using the priv's global
 * configuration lock.
 */
static void
mlx5e_update_stats_locked(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_vport_stats *s = &priv->stats.vport;
	struct mlx5e_sq_stats *sq_stats;
	struct buf_ring *sq_br;
#if (__FreeBSD_version < 1100000)
	struct ifnet *ifp = priv->ifp;
#endif

	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u64 tso_packets = 0;
	u64 tso_bytes = 0;
	u64 tx_queue_dropped = 0;
	u64 tx_defragged = 0;
	u64 tx_offload_none = 0;
	u64 lro_packets = 0;
	u64 lro_bytes = 0;
	u64 sw_lro_queued = 0;
	u64 sw_lro_flushed = 0;
	u64 rx_csum_none = 0;
	u64 rx_wqe_err = 0;
	u64 rx_packets = 0;
	u64 rx_bytes = 0;
	u32 rx_out_of_buffer = 0;
	int error;
	int i;
	int j;

	out = mlx5_vzalloc(outlen);
	if (out == NULL)
		goto free_out;

	/* Collect first the SW counters and then HW for consistency */
	for (i = 0; i < priv->params.num_channels; i++) {
		struct mlx5e_channel *pch = priv->channel + i;
		struct mlx5e_rq *rq = &pch->rq;
		struct mlx5e_rq_stats *rq_stats = &pch->rq.stats;

		/* collect stats from LRO */
		rq_stats->sw_lro_queued = rq->lro.lro_queued;
		rq_stats->sw_lro_flushed = rq->lro.lro_flushed;
		sw_lro_queued += rq_stats->sw_lro_queued;
		sw_lro_flushed += rq_stats->sw_lro_flushed;
		lro_packets += rq_stats->lro_packets;
		lro_bytes += rq_stats->lro_bytes;
		rx_csum_none += rq_stats->csum_none;
		rx_wqe_err += rq_stats->wqe_err;
		rx_packets += rq_stats->packets;
		rx_bytes += rq_stats->bytes;

		for (j = 0; j < priv->num_tc; j++) {
			sq_stats = &pch->sq[j].stats;
			sq_br = pch->sq[j].br;

			tso_packets += sq_stats->tso_packets;
			tso_bytes += sq_stats->tso_bytes;
			tx_queue_dropped += sq_stats->dropped;
			if (sq_br != NULL)
				tx_queue_dropped += sq_br->br_drops;
			tx_defragged += sq_stats->defragged;
			tx_offload_none += sq_stats->csum_offload_none;
		}
	}

	/* update counters */
	s->tso_packets = tso_packets;
	s->tso_bytes = tso_bytes;
	s->tx_queue_dropped = tx_queue_dropped;
	s->tx_defragged = tx_defragged;
	s->lro_packets = lro_packets;
	s->lro_bytes = lro_bytes;
	s->sw_lro_queued = sw_lro_queued;
	s->sw_lro_flushed = sw_lro_flushed;
	s->rx_csum_none = rx_csum_none;
	s->rx_wqe_err = rx_wqe_err;
	s->rx_packets = rx_packets;
	s->rx_bytes = rx_bytes;
	mlx5e_grp_vnic_env_update_stats(priv);

	/* HW counters */
	memset(in, 0, sizeof(in));

	MLX5_SET(query_vport_counter_in, in, opcode,
	    MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	memset(out, 0, outlen);

	/* get number of out-of-buffer drops first */
	if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0 &&
	    mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id,
	    &rx_out_of_buffer) == 0) {
		s->rx_out_of_buffer = rx_out_of_buffer;
	}

	/* get port statistics */
	if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen) == 0) {
#define	MLX5_GET_CTR(out, x) \
	MLX5_GET64(query_vport_counter_out, out, x)

		s->rx_error_packets =
		    MLX5_GET_CTR(out, received_errors.packets);
		s->rx_error_bytes =
		    MLX5_GET_CTR(out, received_errors.octets);
		s->tx_error_packets =
		    MLX5_GET_CTR(out, transmit_errors.packets);
		s->tx_error_bytes =
		    MLX5_GET_CTR(out, transmit_errors.octets);

		s->rx_unicast_packets =
		    MLX5_GET_CTR(out, received_eth_unicast.packets);
		s->rx_unicast_bytes =
		    MLX5_GET_CTR(out, received_eth_unicast.octets);
		s->tx_unicast_packets =
		    MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
		s->tx_unicast_bytes =
		    MLX5_GET_CTR(out, transmitted_eth_unicast.octets);

		s->rx_multicast_packets =
		    MLX5_GET_CTR(out, received_eth_multicast.packets);
		s->rx_multicast_bytes =
		    MLX5_GET_CTR(out, received_eth_multicast.octets);
		s->tx_multicast_packets =
		    MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
		s->tx_multicast_bytes =
		    MLX5_GET_CTR(out, transmitted_eth_multicast.octets);

		s->rx_broadcast_packets =
		    MLX5_GET_CTR(out, received_eth_broadcast.packets);
		s->rx_broadcast_bytes =
		    MLX5_GET_CTR(out, received_eth_broadcast.octets);
		s->tx_broadcast_packets =
		    MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
		s->tx_broadcast_bytes =
		    MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

		s->tx_packets = s->tx_unicast_packets +
		    s->tx_multicast_packets + s->tx_broadcast_packets;
		s->tx_bytes = s->tx_unicast_bytes + s->tx_multicast_bytes +
		    s->tx_broadcast_bytes;

		/* Update calculated offload counters */
		s->tx_csum_offload = s->tx_packets - tx_offload_none;
		s->rx_csum_good = s->rx_packets - s->rx_csum_none;
	}

	/* Get physical port counters */
	mlx5e_update_pport_counters(priv);

	s->tx_jumbo_packets =
	    priv->stats.port_stats_debug.tx_stat_p1519to2047octets +
	    priv->stats.port_stats_debug.tx_stat_p2048to4095octets +
	    priv->stats.port_stats_debug.tx_stat_p4096to8191octets +
	    priv->stats.port_stats_debug.tx_stat_p8192to10239octets;

#if (__FreeBSD_version < 1100000)
	/* no get_counters interface in fbsd 10 */
	ifp->if_ipackets = s->rx_packets;
	ifp->if_ierrors = priv->stats.pport.in_range_len_errors +
	    priv->stats.pport.out_of_range_len +
	    priv->stats.pport.too_long_errors +
	    priv->stats.pport.check_seq_err +
	    priv->stats.pport.alignment_err;
	ifp->if_iqdrops = s->rx_out_of_buffer;
	ifp->if_opackets = s->tx_packets;
	ifp->if_oerrors = priv->stats.port_stats_debug.out_discards;
	ifp->if_snd.ifq_drops = s->tx_queue_dropped;
	ifp->if_ibytes = s->rx_bytes;
	ifp->if_obytes = s->tx_bytes;
	ifp->if_collisions =
	    priv->stats.pport.collisions;
#endif

free_out:
	kvfree(out);

	/* Update diagnostics, if any */
	if (priv->params_ethtool.diag_pci_enable ||
	    priv->params_ethtool.diag_general_enable) {
		error = mlx5_core_get_diagnostics_full(mdev,
		    priv->params_ethtool.diag_pci_enable ? &priv->params_pci : NULL,
		    priv->params_ethtool.diag_general_enable ? &priv->params_general : NULL);
		if (error != 0)
			mlx5_en_err(priv->ifp,
			    "Failed reading diagnostics: %d\n", error);
	}

	/* Update FEC, if any */
	error = mlx5e_fec_update(priv);
	if (error != 0 && error != EOPNOTSUPP) {
		mlx5_en_err(priv->ifp,
		    "Updating FEC failed: %d\n", error);
	}
}

static void
mlx5e_update_stats_work(struct work_struct *work)
{
	struct mlx5e_priv *priv;

	priv = container_of(work, struct mlx5e_priv, update_stats_work);
	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
		mlx5e_update_stats_locked(priv);
	PRIV_UNLOCK(priv);
}
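/*
 * Note: mlx5e_update_stats() below runs from a callout and only
 * queues the work item; the actual firmware queries happen in
 * mlx5e_update_stats_work() above under PRIV_LOCK(). Presumably this
 * indirection exists because taking the priv's configuration lock may
 * sleep, which is not permitted in callout context.
 */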
static void
mlx5e_update_stats(void *arg)
{
	struct mlx5e_priv *priv = arg;

	queue_work(priv->wq, &priv->update_stats_work);

	callout_reset(&priv->watchdog, hz, &mlx5e_update_stats, priv);
}

static void
mlx5e_async_event_sub(struct mlx5e_priv *priv,
    enum mlx5_dev_event event)
{
	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;

	default:
		break;
	}
}

static void
mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
    enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	mtx_lock(&priv->async_events_mtx);
	if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
		mlx5e_async_event_sub(priv, event);
	mtx_unlock(&priv->async_events_mtx);
}

static void
mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}

static void
mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	mtx_lock(&priv->async_events_mtx);
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
	mtx_unlock(&priv->async_events_mtx);
}

static const char *mlx5e_rq_stats_desc[] = {
	MLX5E_RQ_STATS(MLX5E_STATS_DESC)
};

static int
mlx5e_create_rq(struct mlx5e_channel *c,
    struct mlx5e_rq_param *param,
    struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	char buffer[16];
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int wq_sz;
	int err;
	int i;
	u32 nsegs, wqe_sz;

	err = mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
	if (err != 0)
		goto done;

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsize */
	    nsegs,			/* nsegments */
	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &rq->dma_tag)))
		goto done;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
	    &rq->wq_ctrl);
	if (err)
		goto err_free_dma_tag;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	err = mlx5e_get_wqe_sz(priv, &rq->wqe_sz, &rq->nsegs);
	if (err != 0)
		goto err_rq_wq_destroy;

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);

	err = -tcp_lro_init_args(&rq->lro, c->ifp, TCP_LRO_ENTRIES, wq_sz);
	if (err)
		goto err_rq_wq_destroy;

	rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
	for (i = 0; i != wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
		int j;

		err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map);
		if (err != 0) {
			while (i--)
				bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
			goto err_rq_mbuf_free;
		}

		/* set value for constant fields */
		for (j = 0; j < rq->nsegs; j++)
			wqe->data[j].lkey = cpu_to_be32(priv->mr.key);
	}

	INIT_WORK(&rq->dim.work, mlx5e_dim_work);
	if (priv->params.rx_cq_moderation_mode < 2) {
		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
	} else {
		void *cqc = container_of(param,
		    struct mlx5e_channel_param, rq)->rx_cq.cqc;

		switch (MLX5_GET(cqc, cqc, cq_period_mode)) {
		case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
			rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
			break;
		case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
			rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
			break;
		default:
			rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
			break;
		}
	}

	rq->ifp = c->ifp;
	rq->channel = c;
	rq->ix = c->ix;

	snprintf(buffer, sizeof(buffer), "rxstat%d", c->ix);
	mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM,
	    rq->stats.arg);
	return (0);

err_rq_mbuf_free:
	free(rq->mbuf, M_MLX5EN);
	tcp_lro_free(&rq->lro);
err_rq_wq_destroy:
	mlx5_wq_destroy(&rq->wq_ctrl);
err_free_dma_tag:
	bus_dma_tag_destroy(rq->dma_tag);
done:
	return (err);
}
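/*
 * Error-unwind note for mlx5e_create_rq() above (and for the other
 * constructors in this file): resources are torn down in reverse
 * order of creation via the goto ladder at the end of the function,
 * and the inner "while (i--)" loop unwinds the partially created DMA
 * maps before taking that ladder.
 */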
static void
mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	int wq_sz;
	int i;

	/* destroy all sysctl nodes */
	sysctl_ctx_free(&rq->stats.ctx);

	/* free leftover LRO packets, if any */
	tcp_lro_free(&rq->lro);

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	for (i = 0; i != wq_sz; i++) {
		if (rq->mbuf[i].mbuf != NULL) {
			bus_dmamap_unload(rq->dma_tag, rq->mbuf[i].dma_map);
			m_freem(rq->mbuf[i].mbuf);
		}
		bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
	}
	free(rq->mbuf, M_MLX5EN);
	mlx5_wq_destroy(&rq->wq_ctrl);
	bus_dma_tag_destroy(rq->dma_tag);
}

static int
mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
	    sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
	if (priv->counter_set_id >= 0)
		MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id);
	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
	    PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return (err);
}

static int
mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rqn, rq->rqn);
	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, in, inlen);

	kvfree(in);

	return (err);
}

static void
mlx5e_disable_rq(struct mlx5e_rq *rq)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_rq(mdev, rq->rqn);
}

static int
mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_wq_ll *wq = &rq->wq;
	int i;

	for (i = 0; i < 1000; i++) {
		if (wq->cur_sz >= priv->params.min_rx_wqes)
			return (0);

		msleep(4);
	}
	return (-ETIMEDOUT);
}

static int
mlx5e_open_rq(struct mlx5e_channel *c,
    struct mlx5e_rq_param *param,
    struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_create_rq(c, param, rq);
	if (err)
		return (err);

	err = mlx5e_enable_rq(rq, param);
	if (err)
		goto err_destroy_rq;

	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_disable_rq;

	c->rq.enabled = 1;

	return (0);

err_disable_rq:
	mlx5e_disable_rq(rq);
err_destroy_rq:
	mlx5e_destroy_rq(rq);

	return (err);
}

static void
mlx5e_close_rq(struct mlx5e_rq *rq)
{
	mtx_lock(&rq->mtx);
	rq->enabled = 0;
	callout_stop(&rq->watchdog);
	mtx_unlock(&rq->mtx);

	callout_drain(&rq->watchdog);

	mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
}

static void
mlx5e_close_rq_wait(struct mlx5e_rq *rq)
{

	mlx5e_disable_rq(rq);
	mlx5e_close_cq(&rq->cq);
	cancel_work_sync(&rq->dim.work);
	mlx5e_destroy_rq(rq);
}

void
mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int x;

	for (x = 0; x != wq_sz; x++) {
		if (sq->mbuf[x].mbuf != NULL) {
			bus_dmamap_unload(sq->dma_tag, sq->mbuf[x].dma_map);
			m_freem(sq->mbuf[x].mbuf);
		}
		bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
	}
	free(sq->mbuf, M_MLX5EN);
}
int
mlx5e_alloc_sq_db(struct mlx5e_sq *sq)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int err;
	int x;

	sq->mbuf = malloc(wq_sz * sizeof(sq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);

	/* Create DMA descriptor MAPs */
	for (x = 0; x != wq_sz; x++) {
		err = -bus_dmamap_create(sq->dma_tag, 0, &sq->mbuf[x].dma_map);
		if (err != 0) {
			while (x--)
				bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
			free(sq->mbuf, M_MLX5EN);
			return (err);
		}
	}
	return (0);
}

static const char *mlx5e_sq_stats_desc[] = {
	MLX5E_SQ_STATS(MLX5E_STATS_DESC)
};

void
mlx5e_update_sq_inline(struct mlx5e_sq *sq)
{
	sq->max_inline = sq->priv->params.tx_max_inline;
	sq->min_inline_mode = sq->priv->params.tx_min_inline_mode;

	/*
	 * Check if trust state is DSCP or if inline mode is NONE which
	 * indicates CX-5 or newer hardware.
	 */
	if (sq->priv->params_ethtool.trust_state != MLX5_QPTS_TRUST_PCP ||
	    sq->min_inline_mode == MLX5_INLINE_MODE_NONE) {
		if (MLX5_CAP_ETH(sq->priv->mdev, wqe_vlan_insert))
			sq->min_insert_caps = MLX5E_INSERT_VLAN | MLX5E_INSERT_NON_VLAN;
		else
			sq->min_insert_caps = MLX5E_INSERT_NON_VLAN;
	} else {
		sq->min_insert_caps = 0;
	}
}
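/*
 * Note on mlx5e_update_sq_inline() above: "min_insert_caps" describes
 * which packet classes may rely on hardware header insertion instead
 * of inlining. Presumably, when the trust state is PCP and the
 * hardware requires inlining (inline mode != NONE), the transmit path
 * must copy the Ethernet/VLAN headers into the WQE itself so the
 * hardware can observe the VLAN priority bits, hence no insertion
 * capabilities are advertised in that case.
 */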
static void
mlx5e_refresh_sq_inline_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
{
	int i;

	for (i = 0; i != c->num_tc; i++) {
		mtx_lock(&c->sq[i].lock);
		mlx5e_update_sq_inline(&c->sq[i]);
		mtx_unlock(&c->sq[i].lock);
	}
}

void
mlx5e_refresh_sq_inline(struct mlx5e_priv *priv)
{
	int i;

	/* check if channels are closed */
	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
		return;

	for (i = 0; i < priv->params.num_channels; i++)
		mlx5e_refresh_sq_inline_sub(priv, &priv->channel[i]);
}

static int
mlx5e_create_sq(struct mlx5e_channel *c,
    int tc,
    struct mlx5e_sq_param *param,
    struct mlx5e_sq *sq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	char buffer[16];
	void *sqc = param->sqc;
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
	int err;

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MLX5E_MAX_TX_PAYLOAD_SIZE,	/* maxsize */
	    MLX5E_MAX_TX_MBUF_FRAGS,	/* nsegments */
	    MLX5E_MAX_TX_MBUF_SIZE,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sq->dma_tag)))
		goto done;

	err = mlx5_alloc_map_uar(mdev, &sq->uar);
	if (err)
		goto err_free_dma_tag;

	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
	    &sq->wq_ctrl);
	if (err)
		goto err_unmap_free_uar;

	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	err = mlx5e_alloc_sq_db(sq);
	if (err)
		goto err_sq_wq_destroy;

	sq->mkey_be = cpu_to_be32(priv->mr.key);
	sq->ifp = priv->ifp;
	sq->priv = priv;
	sq->tc = tc;

	mlx5e_update_sq_inline(sq);

	snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc);
	mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM,
	    sq->stats.arg);

	return (0);

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &sq->uar);

err_free_dma_tag:
	bus_dma_tag_destroy(sq->dma_tag);
done:
	return (err);
}

static void
mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
	/* destroy all sysctl nodes */
	sysctl_ctx_free(&sq->stats.ctx);

	mlx5e_free_sq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
	mlx5_unmap_free_uar(sq->priv->mdev, &sq->uar);
	bus_dma_tag_destroy(sq->dma_tag);
}
int
mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param,
    int tis_num)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
	    sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));

	MLX5_SET(sqc, sqc, tis_num_0, tis_num);
	MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, sq->uar.index);
	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
	    PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(sq->priv->mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return (err);
}

int
mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
{
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sqn, sq->sqn);
	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
	MLX5_SET(sqc, sqc, state, next_state);

	err = mlx5_core_modify_sq(sq->priv->mdev, in, inlen);

	kvfree(in);

	return (err);
}

void
mlx5e_disable_sq(struct mlx5e_sq *sq)
{

	mlx5_core_destroy_sq(sq->priv->mdev, sq->sqn);
}

static int
mlx5e_open_sq(struct mlx5e_channel *c,
    int tc,
    struct mlx5e_sq_param *param,
    struct mlx5e_sq *sq)
{
	int err;

	err = mlx5e_create_sq(c, tc, param, sq);
	if (err)
		return (err);

	err = mlx5e_enable_sq(sq, param, c->priv->tisn[tc]);
	if (err)
		goto err_destroy_sq;

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
	if (err)
		goto err_disable_sq;

	WRITE_ONCE(sq->running, 1);

	return (0);

err_disable_sq:
	mlx5e_disable_sq(sq);
err_destroy_sq:
	mlx5e_destroy_sq(sq);

	return (err);
}
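/*
 * Note on the NOP machinery below: TX completion events are moderated
 * by "cev_factor", i.e. a completion event is requested only once per
 * N transmitted WQEs. When transmission stops in the middle of such
 * an interval, the ring is topped up with NOP WQEs until the pending
 * interval completes, so that all outstanding mbufs are eventually
 * reclaimed.
 */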
static void
mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep)
{
	/* fill up remainder with NOPs */
	while (sq->cev_counter != 0) {
		while (!mlx5e_sq_has_room_for(sq, 1)) {
			if (can_sleep != 0) {
				mtx_unlock(&sq->lock);
				msleep(4);
				mtx_lock(&sq->lock);
			} else {
				goto done;
			}
		}
		/* send a single NOP */
		mlx5e_send_nop(sq, 1);
		atomic_thread_fence_rel();
	}
done:
	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}
}

void
mlx5e_sq_cev_timeout(void *arg)
{
	struct mlx5e_sq *sq = arg;

	mtx_assert(&sq->lock, MA_OWNED);

	/* check next state */
	switch (sq->cev_next_state) {
	case MLX5E_CEV_STATE_SEND_NOPS:
		/* fill TX ring with NOPs, if any */
		mlx5e_sq_send_nops_locked(sq, 0);

		/* check if completed */
		if (sq->cev_counter == 0) {
			sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
			return;
		}
		break;
	default:
		/* send NOPs on next timeout */
		sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS;
		break;
	}

	/* restart timer */
	callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq);
}

void
mlx5e_drain_sq(struct mlx5e_sq *sq)
{
	int error;
	struct mlx5_core_dev *mdev = sq->priv->mdev;

	/*
	 * Check if already stopped.
	 *
	 * NOTE: Serialization of this function is managed by the
	 * caller ensuring the priv's state lock is locked or in case
	 * of rate limit support, a single thread manages drain and
	 * resume of SQs. The "running" variable can therefore safely
	 * be read without any locks.
	 */
	if (READ_ONCE(sq->running) == 0)
		return;

	/* don't put more packets into the SQ */
	WRITE_ONCE(sq->running, 0);

	/* serialize access to DMA rings */
	mtx_lock(&sq->lock);

	/* teardown event factor timer, if any */
	sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	callout_stop(&sq->cev_callout);

	/* send dummy NOPs in order to flush the transmit ring */
	mlx5e_sq_send_nops_locked(sq, 1);
	mtx_unlock(&sq->lock);

	/* make sure it is safe to free the callout */
	callout_drain(&sq->cev_callout);

	/* wait till SQ is empty or link is down */
	mtx_lock(&sq->lock);
	while (sq->cc != sq->pc &&
	    (sq->priv->media_status_last & IFM_ACTIVE) != 0 &&
	    mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mtx_unlock(&sq->lock);
		msleep(1);
		sq->cq.mcq.comp(&sq->cq.mcq);
		mtx_lock(&sq->lock);
	}
	mtx_unlock(&sq->lock);

	/* error out remaining requests */
	error = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
	if (error != 0) {
		mlx5_en_err(sq->ifp,
		    "mlx5e_modify_sq() from RDY to ERR failed: %d\n", error);
	}

	/* wait till SQ is empty */
	mtx_lock(&sq->lock);
	while (sq->cc != sq->pc &&
	    mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mtx_unlock(&sq->lock);
		msleep(1);
		sq->cq.mcq.comp(&sq->cq.mcq);
		mtx_lock(&sq->lock);
	}
	mtx_unlock(&sq->lock);
}

static void
mlx5e_close_sq_wait(struct mlx5e_sq *sq)
{

	mlx5e_drain_sq(sq);
	mlx5e_disable_sq(sq);
	mlx5e_destroy_sq(sq);
}
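/*
 * Drain sequence summary for mlx5e_drain_sq() above: clearing
 * "running" stops new traffic, NOPs flush the WQEs already in the
 * ring, the RDY -> ERR transition makes firmware error out anything
 * that cannot complete (e.g. while the link is down), and polling
 * sq->cq.mcq.comp() reaps completions until cc == pc.
 */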
static int
mlx5e_create_cq(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param,
    struct mlx5e_cq *cq,
    mlx5e_cq_comp_t *comp,
    int eq_ix)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	int irqn;
	int err;
	u32 i;

	param->wq.buf_numa_node = 0;
	param->wq.db_numa_node = 0;

	err = mlx5_vector2eqn(mdev, eq_ix, &eqn_not_used, &irqn);
	if (err)
		return (err);

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
	    &cq->wq_ctrl);
	if (err)
		return (err);

	mcq->cqe_sz = 64;
	mcq->set_ci_db = cq->wq_ctrl.db.db;
	mcq->arm_db = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db = 0;
	mcq->vector = eq_ix;
	mcq->comp = comp;
	mcq->event = mlx5e_cq_error_event;
	mcq->irqn = irqn;
	mcq->uar = &priv->cq_uar;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->priv = priv;

	return (0);
}

static void
mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static int
mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param, int eq_ix)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	void *in;
	void *cqc;
	int inlen;
	int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
	    sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_array(&cq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(cq->priv->mdev, eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
	    PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(cq->priv->mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return (err);

	mlx5e_cq_arm(cq, MLX5_GET_DOORBELL_LOCK(&cq->priv->doorbell_lock));

	return (0);
}

static void
mlx5e_disable_cq(struct mlx5e_cq *cq)
{

	mlx5_core_destroy_cq(cq->priv->mdev, &cq->mcq);
}

int
mlx5e_open_cq(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param,
    struct mlx5e_cq *cq,
    mlx5e_cq_comp_t *comp,
    int eq_ix)
{
	int err;

	err = mlx5e_create_cq(priv, param, cq, comp, eq_ix);
	if (err)
		return (err);

	err = mlx5e_enable_cq(cq, param, eq_ix);
	if (err)
		goto err_destroy_cq;

	return (0);

err_destroy_cq:
	mlx5e_destroy_cq(cq);

	return (err);
}

void
mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_disable_cq(cq);
	mlx5e_destroy_cq(cq);
}

static int
mlx5e_open_tx_cqs(struct mlx5e_channel *c,
    struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		/* open completion queue */
		err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[tc].cq,
		    &mlx5e_tx_cq_comp, c->ix);
		if (err)
			goto err_close_tx_cqs;
	}
	return (0);

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return (err);
}

static void
mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}
static int
mlx5e_open_sqs(struct mlx5e_channel *c,
    struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return (0);

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_sq_wait(&c->sq[tc]);

	return (err);
}

static void
mlx5e_close_sqs_wait(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_sq_wait(&c->sq[tc]);
}

static void
mlx5e_chan_mtx_init(struct mlx5e_channel *c)
{
	int tc;

	mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF);

	callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0);

	for (tc = 0; tc < c->num_tc; tc++) {
		struct mlx5e_sq *sq = c->sq + tc;

		mtx_init(&sq->lock, "mlx5tx",
		    MTX_NETWORK_LOCK " TX", MTX_DEF);
		mtx_init(&sq->comp_lock, "mlx5comp",
		    MTX_NETWORK_LOCK " TX", MTX_DEF);

		callout_init_mtx(&sq->cev_callout, &sq->lock, 0);

		sq->cev_factor = c->priv->params_ethtool.tx_completion_fact;

		/* ensure the TX completion event factor is not zero */
		if (sq->cev_factor == 0)
			sq->cev_factor = 1;
	}
}

static void
mlx5e_chan_mtx_destroy(struct mlx5e_channel *c)
{
	int tc;

	mtx_destroy(&c->rq.mtx);

	for (tc = 0; tc < c->num_tc; tc++) {
		mtx_destroy(&c->sq[tc].lock);
		mtx_destroy(&c->sq[tc].comp_lock);
	}
}

static int
mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
    struct mlx5e_channel_param *cparam,
    struct mlx5e_channel *c)
{
	int err;

	memset(c, 0, sizeof(*c));

	c->priv = priv;
	c->ix = ix;
	c->ifp = priv->ifp;
	c->num_tc = priv->num_tc;

	/* init mutexes */
	mlx5e_chan_mtx_init(c);

	/* open transmit completion queue */
	err = mlx5e_open_tx_cqs(c, cparam);
	if (err)
		goto err_free;

	/* open receive completion queue */
	err = mlx5e_open_cq(c->priv, &cparam->rx_cq, &c->rq.cq,
	    &mlx5e_rx_cq_comp, c->ix);
	if (err)
		goto err_close_tx_cqs;

	err = mlx5e_open_sqs(c, cparam);
	if (err)
		goto err_close_rx_cq;

	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
	if (err)
		goto err_close_sqs;

	/* poll receive queue initially */
	c->rq.cq.mcq.comp(&c->rq.cq.mcq);

	return (0);

err_close_sqs:
	mlx5e_close_sqs_wait(c);

err_close_rx_cq:
	mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_free:
	/* destroy mutexes */
	mlx5e_chan_mtx_destroy(c);
	return (err);
}

static void
mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_rq(&c->rq);
}

static void
mlx5e_close_channel_wait(struct mlx5e_channel *c)
{
	mlx5e_close_rq_wait(&c->rq);
	mlx5e_close_sqs_wait(c);
	mlx5e_close_tx_cqs(c);
	/* destroy mutexes */
	mlx5e_chan_mtx_destroy(c);
}
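/*
 * Teardown ordering note: mlx5e_close_channel() only stops the RQ;
 * mlx5e_close_channel_wait() above then completes the teardown once
 * outstanding completions have settled: RQ, SQs, TX CQs and finally
 * the mutexes, essentially the reverse of the creation order in
 * mlx5e_open_channel().
 */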
2135 */ 2136 for (n = howmany(r, MLX5E_MAX_RX_BYTES); !powerof2(n + 1); n++) 2137 ; 2138 2139 if (n > MLX5E_MAX_BUSDMA_RX_SEGS) 2140 return (-ENOMEM); 2141 2142 *wqe_sz = r; 2143 *nsegs = n; 2144 return (0); 2145} 2146 2147static void 2148mlx5e_build_rq_param(struct mlx5e_priv *priv, 2149 struct mlx5e_rq_param *param) 2150{ 2151 void *rqc = param->rqc; 2152 void *wq = MLX5_ADDR_OF(rqc, rqc, wq); 2153 u32 wqe_sz, nsegs; 2154 2155 mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs); 2156 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); 2157 MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); 2158 MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe) + 2159 nsegs * sizeof(struct mlx5_wqe_data_seg))); 2160 MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); 2161 MLX5_SET(wq, wq, pd, priv->pdn); 2162 2163 param->wq.buf_numa_node = 0; 2164 param->wq.db_numa_node = 0; 2165 param->wq.linear = 1; 2166} 2167 2168static void 2169mlx5e_build_sq_param(struct mlx5e_priv *priv, 2170 struct mlx5e_sq_param *param) 2171{ 2172 void *sqc = param->sqc; 2173 void *wq = MLX5_ADDR_OF(sqc, sqc, wq); 2174 2175 MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size); 2176 MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); 2177 MLX5_SET(wq, wq, pd, priv->pdn); 2178 2179 param->wq.buf_numa_node = 0; 2180 param->wq.db_numa_node = 0; 2181 param->wq.linear = 1; 2182} 2183 2184static void 2185mlx5e_build_common_cq_param(struct mlx5e_priv *priv, 2186 struct mlx5e_cq_param *param) 2187{ 2188 void *cqc = param->cqc; 2189 2190 MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index); 2191} 2192 2193static void 2194mlx5e_get_default_profile(struct mlx5e_priv *priv, int mode, struct net_dim_cq_moder *ptr) 2195{ 2196 2197 *ptr = net_dim_get_profile(mode, MLX5E_DIM_DEFAULT_PROFILE); 2198 2199 /* apply LRO restrictions */ 2200 if (priv->params.hw_lro_en && 2201 ptr->pkts > MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO) { 2202 ptr->pkts = MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO; 2203 } 2204} 2205 2206static void 2207mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, 2208 struct mlx5e_cq_param *param) 2209{ 2210 struct net_dim_cq_moder curr; 2211 void *cqc = param->cqc; 2212 2213 /* 2214 * We use MLX5_CQE_FORMAT_HASH because the RX hash mini CQE 2215 * format is more beneficial for the FreeBSD use case. 2216 * 2217 * Adding support for MLX5_CQE_FORMAT_CSUM will require changes 2218 * in mlx5e_decompress_cqe.
2219 */ 2220 if (priv->params.cqe_zipping_en) { 2221 MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_HASH); 2222 MLX5_SET(cqc, cqc, cqe_compression_en, 1); 2223 } 2224 2225 MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size); 2226 2227 switch (priv->params.rx_cq_moderation_mode) { 2228 case 0: 2229 MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec); 2230 MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts); 2231 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2232 break; 2233 case 1: 2234 MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec); 2235 MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts); 2236 if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) 2237 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 2238 else 2239 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2240 break; 2241 case 2: 2242 mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE, &curr); 2243 MLX5_SET(cqc, cqc, cq_period, curr.usec); 2244 MLX5_SET(cqc, cqc, cq_max_count, curr.pkts); 2245 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2246 break; 2247 case 3: 2248 mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE, &curr); 2249 MLX5_SET(cqc, cqc, cq_period, curr.usec); 2250 MLX5_SET(cqc, cqc, cq_max_count, curr.pkts); 2251 if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) 2252 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 2253 else 2254 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2255 break; 2256 default: 2257 break; 2258 } 2259 2260 mlx5e_dim_build_cq_param(priv, param); 2261 2262 mlx5e_build_common_cq_param(priv, param); 2263} 2264 2265static void 2266mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, 2267 struct mlx5e_cq_param *param) 2268{ 2269 void *cqc = param->cqc; 2270 2271 MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size); 2272 MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec); 2273 MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts); 2274 2275 switch (priv->params.tx_cq_moderation_mode) { 2276 case 0: 2277 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2278 break; 2279 default: 2280 if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) 2281 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 2282 else 2283 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2284 break; 2285 } 2286 2287 mlx5e_build_common_cq_param(priv, param); 2288} 2289 2290static void 2291mlx5e_build_channel_param(struct mlx5e_priv *priv, 2292 struct mlx5e_channel_param *cparam) 2293{ 2294 memset(cparam, 0, sizeof(*cparam)); 2295 2296 mlx5e_build_rq_param(priv, &cparam->rq); 2297 mlx5e_build_sq_param(priv, &cparam->sq); 2298 mlx5e_build_rx_cq_param(priv, &cparam->rx_cq); 2299 mlx5e_build_tx_cq_param(priv, &cparam->tx_cq); 2300} 2301 2302static int 2303mlx5e_open_channels(struct mlx5e_priv *priv) 2304{ 2305 struct mlx5e_channel_param cparam; 2306 int err; 2307 int i; 2308 int j; 2309 2310 mlx5e_build_channel_param(priv, &cparam); 2311 for (i = 0; i < priv->params.num_channels; i++) { 2312 err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]); 2313 if (err) 2314 goto err_close_channels; 2315 } 2316 2317 for (j = 0; j < priv->params.num_channels; j++) { 2318 err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j].rq); 2319 if (err) 2320 goto err_close_channels; 2321 } 
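	/* at this point every channel is open and its receive queue has been filled to the configured minimum of RX WQEs */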
2322 2323 return (0); 2324 2325err_close_channels: 2326 while (i--) { 2327 mlx5e_close_channel(&priv->channel[i]); 2328 mlx5e_close_channel_wait(&priv->channel[i]); 2329 } 2330 return (err); 2331} 2332 2333static void 2334mlx5e_close_channels(struct mlx5e_priv *priv) 2335{ 2336 int i; 2337 2338 for (i = 0; i < priv->params.num_channels; i++) 2339 mlx5e_close_channel(&priv->channel[i]); 2340 for (i = 0; i < priv->params.num_channels; i++) 2341 mlx5e_close_channel_wait(&priv->channel[i]); 2342} 2343 2344static int 2345mlx5e_refresh_sq_params(struct mlx5e_priv *priv, struct mlx5e_sq *sq) 2346{ 2347 2348 if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) { 2349 uint8_t cq_mode; 2350 2351 switch (priv->params.tx_cq_moderation_mode) { 2352 case 0: 2353 case 2: 2354 cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; 2355 break; 2356 default: 2357 cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE; 2358 break; 2359 } 2360 2361 return (mlx5_core_modify_cq_moderation_mode(priv->mdev, &sq->cq.mcq, 2362 priv->params.tx_cq_moderation_usec, 2363 priv->params.tx_cq_moderation_pkts, 2364 cq_mode)); 2365 } 2366 2367 return (mlx5_core_modify_cq_moderation(priv->mdev, &sq->cq.mcq, 2368 priv->params.tx_cq_moderation_usec, 2369 priv->params.tx_cq_moderation_pkts)); 2370} 2371 2372static int 2373mlx5e_refresh_rq_params(struct mlx5e_priv *priv, struct mlx5e_rq *rq) 2374{ 2375 2376 if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) { 2377 uint8_t cq_mode; 2378 uint8_t dim_mode; 2379 int retval; 2380 2381 switch (priv->params.rx_cq_moderation_mode) { 2382 case 0: 2383 case 2: 2384 cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; 2385 dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; 2386 break; 2387 default: 2388 cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE; 2389 dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE; 2390 break; 2391 } 2392 2393 /* tear down dynamic interrupt moderation */ 2394 mtx_lock(&rq->mtx); 2395 rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED; 2396 mtx_unlock(&rq->mtx); 2397 2398 /* wait for dynamic interrupt moderation work task, if any */ 2399 cancel_work_sync(&rq->dim.work); 2400 2401 if (priv->params.rx_cq_moderation_mode >= 2) { 2402 struct net_dim_cq_moder curr; 2403 2404 mlx5e_get_default_profile(priv, dim_mode, &curr); 2405 2406 retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq, 2407 curr.usec, curr.pkts, cq_mode); 2408 2409 /* set dynamic interrupt moderation mode and zero defaults */ 2410 mtx_lock(&rq->mtx); 2411 rq->dim.mode = dim_mode; 2412 rq->dim.state = 0; 2413 rq->dim.profile_ix = MLX5E_DIM_DEFAULT_PROFILE; 2414 mtx_unlock(&rq->mtx); 2415 } else { 2416 retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq, 2417 priv->params.rx_cq_moderation_usec, 2418 priv->params.rx_cq_moderation_pkts, 2419 cq_mode); 2420 } 2421 return (retval); 2422 } 2423 2424 return (mlx5_core_modify_cq_moderation(priv->mdev, &rq->cq.mcq, 2425 priv->params.rx_cq_moderation_usec, 2426 priv->params.rx_cq_moderation_pkts)); 2427} 2428 2429static int 2430mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c) 2431{ 2432 int err; 2433 int i; 2434 2435 err = mlx5e_refresh_rq_params(priv, &c->rq); 2436 if (err) 2437 goto done; 2438 2439 for (i = 0; i != c->num_tc; i++) { 2440 err = mlx5e_refresh_sq_params(priv, &c->sq[i]); 2441 if (err) 2442 goto done; 2443 } 2444done: 2445 return (err); 2446} 2447 2448int 2449mlx5e_refresh_channel_params(struct mlx5e_priv *priv) 2450{ 2451 int i; 2452 2453 /* check if channels are closed */ 2454 if (test_bit(MLX5E_STATE_OPENED, 
&priv->state) == 0) 2455 return (EINVAL); 2456 2457 for (i = 0; i < priv->params.num_channels; i++) { 2458 int err; 2459 2460 err = mlx5e_refresh_channel_params_sub(priv, &priv->channel[i]); 2461 if (err) 2462 return (err); 2463 } 2464 return (0); 2465} 2466 2467static int 2468mlx5e_open_tis(struct mlx5e_priv *priv, int tc) 2469{ 2470 struct mlx5_core_dev *mdev = priv->mdev; 2471 u32 in[MLX5_ST_SZ_DW(create_tis_in)]; 2472 void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); 2473 2474 memset(in, 0, sizeof(in)); 2475 2476 MLX5_SET(tisc, tisc, prio, tc); 2477 MLX5_SET(tisc, tisc, transport_domain, priv->tdn); 2478 2479 return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc])); 2480} 2481 2482static void 2483mlx5e_close_tis(struct mlx5e_priv *priv, int tc) 2484{ 2485 mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]); 2486} 2487 2488static int 2489mlx5e_open_tises(struct mlx5e_priv *priv) 2490{ 2491 int num_tc = priv->num_tc; 2492 int err; 2493 int tc; 2494 2495 for (tc = 0; tc < num_tc; tc++) { 2496 err = mlx5e_open_tis(priv, tc); 2497 if (err) 2498 goto err_close_tises; 2499 } 2500 2501 return (0); 2502 2503err_close_tises: 2504 for (tc--; tc >= 0; tc--) 2505 mlx5e_close_tis(priv, tc); 2506 2507 return (err); 2508} 2509 2510static void 2511mlx5e_close_tises(struct mlx5e_priv *priv) 2512{ 2513 int num_tc = priv->num_tc; 2514 int tc; 2515 2516 for (tc = 0; tc < num_tc; tc++) 2517 mlx5e_close_tis(priv, tc); 2518} 2519 2520static int 2521mlx5e_open_rqt(struct mlx5e_priv *priv) 2522{ 2523 struct mlx5_core_dev *mdev = priv->mdev; 2524 u32 *in; 2525 u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0}; 2526 void *rqtc; 2527 int inlen; 2528 int err; 2529 int sz; 2530 int i; 2531 2532 sz = 1 << priv->params.rx_hash_log_tbl_sz; 2533 2534 inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; 2535 in = mlx5_vzalloc(inlen); 2536 if (in == NULL) 2537 return (-ENOMEM); 2538 rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); 2539 2540 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); 2541 MLX5_SET(rqtc, rqtc, rqt_max_size, sz); 2542 2543 for (i = 0; i < sz; i++) { 2544 int ix = i; 2545#ifdef RSS 2546 ix = rss_get_indirection_to_bucket(ix); 2547#endif 2548 /* ensure we don't overflow */ 2549 ix %= priv->params.num_channels; 2550 2551 /* apply receive side scaling stride, if any */ 2552 ix -= ix % (int)priv->params.channels_rsss; 2553 2554 MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix].rq.rqn); 2555 } 2556 2557 MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT); 2558 2559 err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); 2560 if (!err) 2561 priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn); 2562 2563 kvfree(in); 2564 2565 return (err); 2566} 2567 2568static void 2569mlx5e_close_rqt(struct mlx5e_priv *priv) 2570{ 2571 u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0}; 2572 u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0}; 2573 2574 MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT); 2575 MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn); 2576 2577 mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)); 2578} 2579 2580static void 2581mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt) 2582{ 2583 void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); 2584 __be32 *hkey; 2585 2586 MLX5_SET(tirc, tirc, transport_domain, priv->tdn); 2587 2588#define ROUGH_MAX_L2_L3_HDR_SZ 256 2589 2590#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ 2591 MLX5_HASH_FIELD_SEL_DST_IP) 2592 2593#define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\ 2594 MLX5_HASH_FIELD_SEL_DST_IP |\ 2595 
MLX5_HASH_FIELD_SEL_L4_SPORT |\ 2596 MLX5_HASH_FIELD_SEL_L4_DPORT) 2597 2598#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\ 2599 MLX5_HASH_FIELD_SEL_DST_IP |\ 2600 MLX5_HASH_FIELD_SEL_IPSEC_SPI) 2601 2602 if (priv->params.hw_lro_en) { 2603 MLX5_SET(tirc, tirc, lro_enable_mask, 2604 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | 2605 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); 2606 MLX5_SET(tirc, tirc, lro_max_msg_sz, 2607 (priv->params.lro_wqe_sz - 2608 ROUGH_MAX_L2_L3_HDR_SZ) >> 8); 2609 /* TODO: add the option to choose timer value dynamically */ 2610 MLX5_SET(tirc, tirc, lro_timeout_period_usecs, 2611 MLX5_CAP_ETH(priv->mdev, 2612 lro_timer_supported_periods[2])); 2613 } 2614 2615 /* setup parameters for hashing TIR type, if any */ 2616 switch (tt) { 2617 case MLX5E_TT_ANY: 2618 MLX5_SET(tirc, tirc, disp_type, 2619 MLX5_TIRC_DISP_TYPE_DIRECT); 2620 MLX5_SET(tirc, tirc, inline_rqn, 2621 priv->channel[0].rq.rqn); 2622 break; 2623 default: 2624 MLX5_SET(tirc, tirc, disp_type, 2625 MLX5_TIRC_DISP_TYPE_INDIRECT); 2626 MLX5_SET(tirc, tirc, indirect_table, 2627 priv->rqtn); 2628 MLX5_SET(tirc, tirc, rx_hash_fn, 2629 MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ); 2630 hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key); 2631#ifdef RSS 2632 /* 2633 * The FreeBSD RSS implementation currently does not 2634 * support symmetric Toeplitz hashes: 2635 */ 2636 MLX5_SET(tirc, tirc, rx_hash_symmetric, 0); 2637 rss_getkey((uint8_t *)hkey); 2638#else 2639 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); 2640 hkey[0] = cpu_to_be32(0xD181C62C); 2641 hkey[1] = cpu_to_be32(0xF7F4DB5B); 2642 hkey[2] = cpu_to_be32(0x1983A2FC); 2643 hkey[3] = cpu_to_be32(0x943E1ADB); 2644 hkey[4] = cpu_to_be32(0xD9389E6B); 2645 hkey[5] = cpu_to_be32(0xD1039C2C); 2646 hkey[6] = cpu_to_be32(0xA74499AD); 2647 hkey[7] = cpu_to_be32(0x593D56D9); 2648 hkey[8] = cpu_to_be32(0xF3253C06); 2649 hkey[9] = cpu_to_be32(0x2ADC1FFC); 2650#endif 2651 break; 2652 } 2653 2654 switch (tt) { 2655 case MLX5E_TT_IPV4_TCP: 2656 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2657 MLX5_L3_PROT_TYPE_IPV4); 2658 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 2659 MLX5_L4_PROT_TYPE_TCP); 2660#ifdef RSS 2661 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) { 2662 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2663 MLX5_HASH_IP); 2664 } else 2665#endif 2666 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2667 MLX5_HASH_ALL); 2668 break; 2669 2670 case MLX5E_TT_IPV6_TCP: 2671 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2672 MLX5_L3_PROT_TYPE_IPV6); 2673 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 2674 MLX5_L4_PROT_TYPE_TCP); 2675#ifdef RSS 2676 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) { 2677 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2678 MLX5_HASH_IP); 2679 } else 2680#endif 2681 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2682 MLX5_HASH_ALL); 2683 break; 2684 2685 case MLX5E_TT_IPV4_UDP: 2686 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2687 MLX5_L3_PROT_TYPE_IPV4); 2688 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 2689 MLX5_L4_PROT_TYPE_UDP); 2690#ifdef RSS 2691 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) { 2692 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2693 MLX5_HASH_IP); 2694 } else 2695#endif 2696 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2697 MLX5_HASH_ALL); 2698 break; 2699 2700 case MLX5E_TT_IPV6_UDP: 2701 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2702 MLX5_L3_PROT_TYPE_IPV6); 2703 MLX5_SET(rx_hash_field_select, hfso,
l4_prot_type, 2704 MLX5_L4_PROT_TYPE_UDP); 2705#ifdef RSS 2706 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) { 2707 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2708 MLX5_HASH_IP); 2709 } else 2710#endif 2711 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2712 MLX5_HASH_ALL); 2713 break; 2714 2715 case MLX5E_TT_IPV4_IPSEC_AH: 2716 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2717 MLX5_L3_PROT_TYPE_IPV4); 2718 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2719 MLX5_HASH_IP_IPSEC_SPI); 2720 break; 2721 2722 case MLX5E_TT_IPV6_IPSEC_AH: 2723 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2724 MLX5_L3_PROT_TYPE_IPV6); 2725 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2726 MLX5_HASH_IP_IPSEC_SPI); 2727 break; 2728 2729 case MLX5E_TT_IPV4_IPSEC_ESP: 2730 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2731 MLX5_L3_PROT_TYPE_IPV4); 2732 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2733 MLX5_HASH_IP_IPSEC_SPI); 2734 break; 2735 2736 case MLX5E_TT_IPV6_IPSEC_ESP: 2737 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2738 MLX5_L3_PROT_TYPE_IPV6); 2739 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2740 MLX5_HASH_IP_IPSEC_SPI); 2741 break; 2742 2743 case MLX5E_TT_IPV4: 2744 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2745 MLX5_L3_PROT_TYPE_IPV4); 2746 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2747 MLX5_HASH_IP); 2748 break; 2749 2750 case MLX5E_TT_IPV6: 2751 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2752 MLX5_L3_PROT_TYPE_IPV6); 2753 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2754 MLX5_HASH_IP); 2755 break; 2756 2757 default: 2758 break; 2759 } 2760} 2761 2762static int 2763mlx5e_open_tir(struct mlx5e_priv *priv, int tt) 2764{ 2765 struct mlx5_core_dev *mdev = priv->mdev; 2766 u32 *in; 2767 void *tirc; 2768 int inlen; 2769 int err; 2770 2771 inlen = MLX5_ST_SZ_BYTES(create_tir_in); 2772 in = mlx5_vzalloc(inlen); 2773 if (in == NULL) 2774 return (-ENOMEM); 2775 tirc = MLX5_ADDR_OF(create_tir_in, in, tir_context); 2776 2777 mlx5e_build_tir_ctx(priv, tirc, tt); 2778 2779 err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]); 2780 2781 kvfree(in); 2782 2783 return (err); 2784} 2785 2786static void 2787mlx5e_close_tir(struct mlx5e_priv *priv, int tt) 2788{ 2789 mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]); 2790} 2791 2792static int 2793mlx5e_open_tirs(struct mlx5e_priv *priv) 2794{ 2795 int err; 2796 int i; 2797 2798 for (i = 0; i < MLX5E_NUM_TT; i++) { 2799 err = mlx5e_open_tir(priv, i); 2800 if (err) 2801 goto err_close_tirs; 2802 } 2803 2804 return (0); 2805 2806err_close_tirs: 2807 for (i--; i >= 0; i--) 2808 mlx5e_close_tir(priv, i); 2809 2810 return (err); 2811} 2812 2813static void 2814mlx5e_close_tirs(struct mlx5e_priv *priv) 2815{ 2816 int i; 2817 2818 for (i = 0; i < MLX5E_NUM_TT; i++) 2819 mlx5e_close_tir(priv, i); 2820} 2821 2822/* 2823 * SW MTU does not include headers, 2824 * HW MTU includes all headers and checksums. 
2825 */ 2826static int 2827mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu) 2828{ 2829 struct mlx5e_priv *priv = ifp->if_softc; 2830 struct mlx5_core_dev *mdev = priv->mdev; 2831 int hw_mtu; 2832 int err; 2833 2834 hw_mtu = MLX5E_SW2HW_MTU(sw_mtu); 2835 2836 err = mlx5_set_port_mtu(mdev, hw_mtu); 2837 if (err) { 2838 mlx5_en_err(ifp, "mlx5_set_port_mtu failed setting %d, err=%d\n", 2839 sw_mtu, err); 2840 return (err); 2841 } 2842 2843 /* Update vport context MTU */ 2844 err = mlx5_set_vport_mtu(mdev, hw_mtu); 2845 if (err) { 2846 mlx5_en_err(ifp, 2847 "Failed updating vport context with MTU size, err=%d\n", 2848 err); 2849 } 2850 2851 ifp->if_mtu = sw_mtu; 2852 2853 err = mlx5_query_vport_mtu(mdev, &hw_mtu); 2854 if (err || !hw_mtu) { 2855 /* fallback to port oper mtu */ 2856 err = mlx5_query_port_oper_mtu(mdev, &hw_mtu); 2857 } 2858 if (err) { 2859 mlx5_en_err(ifp, 2860 "Querying port MTU after setting the new MTU value failed\n"); 2861 return (err); 2862 } else if (MLX5E_HW2SW_MTU(hw_mtu) < sw_mtu) { 2863 err = -E2BIG; 2864 mlx5_en_err(ifp, 2865 "Port MTU %d is smaller than ifp mtu %d\n", 2866 hw_mtu, sw_mtu); 2867 } else if (MLX5E_HW2SW_MTU(hw_mtu) > sw_mtu) { 2868 err = -EINVAL; 2869 mlx5_en_err(ifp, 2870 "Port MTU %d is bigger than ifp mtu %d\n", 2871 hw_mtu, sw_mtu); 2872 } 2873 priv->params_ethtool.hw_mtu = hw_mtu; 2874 2875 return (err); 2876} 2877 2878int 2879mlx5e_open_locked(struct ifnet *ifp) 2880{ 2881 struct mlx5e_priv *priv = ifp->if_softc; 2882 int err; 2883 u16 set_id; 2884 2885 /* check if already opened */ 2886 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0) 2887 return (0); 2888 2889#ifdef RSS 2890 if (rss_getnumbuckets() > priv->params.num_channels) { 2891 mlx5_en_info(ifp, 2892 "NOTE: There are more RSS buckets(%u) than channels(%u) available\n", 2893 rss_getnumbuckets(), priv->params.num_channels); 2894 } 2895#endif 2896 err = mlx5e_open_tises(priv); 2897 if (err) { 2898 mlx5_en_err(ifp, "mlx5e_open_tises failed, %d\n", err); 2899 return (err); 2900 } 2901 err = mlx5_vport_alloc_q_counter(priv->mdev, 2902 MLX5_INTERFACE_PROTOCOL_ETH, &set_id); 2903 if (err) { 2904 mlx5_en_err(priv->ifp, 2905 "mlx5_vport_alloc_q_counter failed: %d\n", err); 2906 goto err_close_tises; 2907 } 2908 /* store counter set ID */ 2909 priv->counter_set_id = set_id; 2910 2911 err = mlx5e_open_channels(priv); 2912 if (err) { 2913 mlx5_en_err(ifp, 2914 "mlx5e_open_channels failed, %d\n", err); 2915 goto err_dealloc_q_counter; 2916 } 2917 err = mlx5e_open_rqt(priv); 2918 if (err) { 2919 mlx5_en_err(ifp, "mlx5e_open_rqt failed, %d\n", err); 2920 goto err_close_channels; 2921 } 2922 err = mlx5e_open_tirs(priv); 2923 if (err) { 2924 mlx5_en_err(ifp, "mlx5e_open_tirs failed, %d\n", err); 2925 goto err_close_rqt; 2926 } 2927 err = mlx5e_open_flow_table(priv); 2928 if (err) { 2929 mlx5_en_err(ifp, 2930 "mlx5e_open_flow_table failed, %d\n", err); 2931 goto err_close_tirs; 2932 } 2933 err = mlx5e_add_all_vlan_rules(priv); 2934 if (err) { 2935 mlx5_en_err(ifp, 2936 "mlx5e_add_all_vlan_rules failed, %d\n", err); 2937 goto err_close_flow_table; 2938 } 2939 set_bit(MLX5E_STATE_OPENED, &priv->state); 2940 2941 mlx5e_update_carrier(priv); 2942 mlx5e_set_rx_mode_core(priv); 2943 2944 return (0); 2945 2946err_close_flow_table: 2947 mlx5e_close_flow_table(priv); 2948 2949err_close_tirs: 2950 mlx5e_close_tirs(priv); 2951 2952err_close_rqt: 2953 mlx5e_close_rqt(priv); 2954 2955err_close_channels: 2956 mlx5e_close_channels(priv); 2957 2958err_dealloc_q_counter: 2959 mlx5_vport_dealloc_q_counter(priv->mdev, 2960
MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id); 2961 2962err_close_tises: 2963 mlx5e_close_tises(priv); 2964 2965 return (err); 2966} 2967 2968static void 2969mlx5e_open(void *arg) 2970{ 2971 struct mlx5e_priv *priv = arg; 2972 2973 PRIV_LOCK(priv); 2974 if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP)) 2975 mlx5_en_err(priv->ifp, 2976 "Setting port status to up failed\n"); 2977 2978 mlx5e_open_locked(priv->ifp); 2979 priv->ifp->if_drv_flags |= IFF_DRV_RUNNING; 2980 PRIV_UNLOCK(priv); 2981} 2982 2983int 2984mlx5e_close_locked(struct ifnet *ifp) 2985{ 2986 struct mlx5e_priv *priv = ifp->if_softc; 2987 2988 /* check if already closed */ 2989 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 2990 return (0); 2991 2992 clear_bit(MLX5E_STATE_OPENED, &priv->state); 2993 2994 mlx5e_set_rx_mode_core(priv); 2995 mlx5e_del_all_vlan_rules(priv); 2996 if_link_state_change(priv->ifp, LINK_STATE_DOWN); 2997 mlx5e_close_flow_table(priv); 2998 mlx5e_close_tirs(priv); 2999 mlx5e_close_rqt(priv); 3000 mlx5e_close_channels(priv); 3001 mlx5_vport_dealloc_q_counter(priv->mdev, 3002 MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id); 3003 mlx5e_close_tises(priv); 3004 3005 return (0); 3006} 3007 3008#if (__FreeBSD_version >= 1100000) 3009static uint64_t 3010mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt) 3011{ 3012 struct mlx5e_priv *priv = ifp->if_softc; 3013 u64 retval; 3014 3015 /* PRIV_LOCK(priv); XXX not allowed */ 3016 switch (cnt) { 3017 case IFCOUNTER_IPACKETS: 3018 retval = priv->stats.vport.rx_packets; 3019 break; 3020 case IFCOUNTER_IERRORS: 3021 retval = priv->stats.pport.in_range_len_errors + 3022 priv->stats.pport.out_of_range_len + 3023 priv->stats.pport.too_long_errors + 3024 priv->stats.pport.check_seq_err + 3025 priv->stats.pport.alignment_err; 3026 break; 3027 case IFCOUNTER_IQDROPS: 3028 retval = priv->stats.vport.rx_out_of_buffer; 3029 break; 3030 case IFCOUNTER_OPACKETS: 3031 retval = priv->stats.vport.tx_packets; 3032 break; 3033 case IFCOUNTER_OERRORS: 3034 retval = priv->stats.port_stats_debug.out_discards; 3035 break; 3036 case IFCOUNTER_IBYTES: 3037 retval = priv->stats.vport.rx_bytes; 3038 break; 3039 case IFCOUNTER_OBYTES: 3040 retval = priv->stats.vport.tx_bytes; 3041 break; 3042 case IFCOUNTER_IMCASTS: 3043 retval = priv->stats.vport.rx_multicast_packets; 3044 break; 3045 case IFCOUNTER_OMCASTS: 3046 retval = priv->stats.vport.tx_multicast_packets; 3047 break; 3048 case IFCOUNTER_OQDROPS: 3049 retval = priv->stats.vport.tx_queue_dropped; 3050 break; 3051 case IFCOUNTER_COLLISIONS: 3052 retval = priv->stats.pport.collisions; 3053 break; 3054 default: 3055 retval = if_get_counter_default(ifp, cnt); 3056 break; 3057 } 3058 /* PRIV_UNLOCK(priv); XXX not allowed */ 3059 return (retval); 3060} 3061#endif 3062 3063static void 3064mlx5e_set_rx_mode(struct ifnet *ifp) 3065{ 3066 struct mlx5e_priv *priv = ifp->if_softc; 3067 3068 queue_work(priv->wq, &priv->set_rx_mode_work); 3069} 3070 3071static int 3072mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 3073{ 3074 struct mlx5e_priv *priv; 3075 struct ifreq *ifr; 3076 struct ifi2creq i2c; 3077 int error = 0; 3078 int mask = 0; 3079 int size_read = 0; 3080 int module_status; 3081 int module_num; 3082 int max_mtu; 3083 uint8_t read_addr; 3084 3085 priv = ifp->if_softc; 3086 3087 /* check if detaching */ 3088 if (priv == NULL || priv->gone != 0) 3089 return (ENXIO); 3090 3091 switch (command) { 3092 case SIOCSIFMTU: 3093 ifr = (struct ifreq *)data; 3094 3095 PRIV_LOCK(priv); 3096 mlx5_query_port_max_mtu(priv->mdev, 
&max_mtu); 3097 3098 if (ifr->ifr_mtu >= MLX5E_MTU_MIN && 3099 ifr->ifr_mtu <= MIN(MLX5E_MTU_MAX, max_mtu)) { 3100 int was_opened; 3101 3102 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); 3103 if (was_opened) 3104 mlx5e_close_locked(ifp); 3105 3106 /* set new MTU */ 3107 mlx5e_set_dev_port_mtu(ifp, ifr->ifr_mtu); 3108 3109 if (was_opened) 3110 mlx5e_open_locked(ifp); 3111 } else { 3112 error = EINVAL; 3113 mlx5_en_err(ifp, 3114 "Invalid MTU value. Min val: %d, Max val: %d\n", 3115 MLX5E_MTU_MIN, MIN(MLX5E_MTU_MAX, max_mtu)); 3116 } 3117 PRIV_UNLOCK(priv); 3118 break; 3119 case SIOCSIFFLAGS: 3120 if ((ifp->if_flags & IFF_UP) && 3121 (ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3122 mlx5e_set_rx_mode(ifp); 3123 break; 3124 } 3125 PRIV_LOCK(priv); 3126 if (ifp->if_flags & IFF_UP) { 3127 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 3128 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 3129 mlx5e_open_locked(ifp); 3130 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3131 mlx5_set_port_status(priv->mdev, MLX5_PORT_UP); 3132 } 3133 } else { 3134 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 3135 mlx5_set_port_status(priv->mdev, 3136 MLX5_PORT_DOWN); 3137 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0) 3138 mlx5e_close_locked(ifp); 3139 mlx5e_update_carrier(priv); 3140 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3141 } 3142 } 3143 PRIV_UNLOCK(priv); 3144 break; 3145 case SIOCADDMULTI: 3146 case SIOCDELMULTI: 3147 mlx5e_set_rx_mode(ifp); 3148 break; 3149 case SIOCSIFMEDIA: 3150 case SIOCGIFMEDIA: 3151 case SIOCGIFXMEDIA: 3152 ifr = (struct ifreq *)data; 3153 error = ifmedia_ioctl(ifp, ifr, &priv->media, command); 3154 break; 3155 case SIOCSIFCAP: 3156 ifr = (struct ifreq *)data; 3157 PRIV_LOCK(priv); 3158 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3159 3160 if (mask & IFCAP_TXCSUM) { 3161 ifp->if_capenable ^= IFCAP_TXCSUM; 3162 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); 3163 3164 if (IFCAP_TSO4 & ifp->if_capenable && 3165 !(IFCAP_TXCSUM & ifp->if_capenable)) { 3166 ifp->if_capenable &= ~IFCAP_TSO4; 3167 ifp->if_hwassist &= ~CSUM_IP_TSO; 3168 mlx5_en_err(ifp, 3169 "tso4 disabled due to -txcsum.\n"); 3170 } 3171 } 3172 if (mask & IFCAP_TXCSUM_IPV6) { 3173 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; 3174 ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 3175 3176 if (IFCAP_TSO6 & ifp->if_capenable && 3177 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { 3178 ifp->if_capenable &= ~IFCAP_TSO6; 3179 ifp->if_hwassist &= ~CSUM_IP6_TSO; 3180 mlx5_en_err(ifp, 3181 "tso6 disabled due to -txcsum6.\n"); 3182 } 3183 } 3184 if (mask & IFCAP_RXCSUM) 3185 ifp->if_capenable ^= IFCAP_RXCSUM; 3186 if (mask & IFCAP_RXCSUM_IPV6) 3187 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; 3188 if (mask & IFCAP_TSO4) { 3189 if (!(IFCAP_TSO4 & ifp->if_capenable) && 3190 !(IFCAP_TXCSUM & ifp->if_capenable)) { 3191 mlx5_en_err(ifp, "enable txcsum first.\n"); 3192 error = EAGAIN; 3193 goto out; 3194 } 3195 ifp->if_capenable ^= IFCAP_TSO4; 3196 ifp->if_hwassist ^= CSUM_IP_TSO; 3197 } 3198 if (mask & IFCAP_TSO6) { 3199 if (!(IFCAP_TSO6 & ifp->if_capenable) && 3200 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { 3201 mlx5_en_err(ifp, "enable txcsum6 first.\n"); 3202 error = EAGAIN; 3203 goto out; 3204 } 3205 ifp->if_capenable ^= IFCAP_TSO6; 3206 ifp->if_hwassist ^= CSUM_IP6_TSO; 3207 } 3208 if (mask & IFCAP_VLAN_HWFILTER) { 3209 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) 3210 mlx5e_disable_vlan_filter(priv); 3211 else 3212 mlx5e_enable_vlan_filter(priv); 3213 3214 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; 3215 } 3216 if (mask & 
IFCAP_VLAN_HWTAGGING) 3217 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 3218 if (mask & IFCAP_WOL_MAGIC) 3219 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 3220 3221 VLAN_CAPABILITIES(ifp); 3222 /* turning off LRO also means turning off HW LRO, if it is enabled */ 3223 if (mask & IFCAP_LRO) { 3224 int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); 3225 bool need_restart = false; 3226 3227 ifp->if_capenable ^= IFCAP_LRO; 3228 3229 /* figure out if updating HW LRO is needed */ 3230 if (!(ifp->if_capenable & IFCAP_LRO)) { 3231 if (priv->params.hw_lro_en) { 3232 priv->params.hw_lro_en = false; 3233 need_restart = true; 3234 } 3235 } else { 3236 if (priv->params.hw_lro_en == false && 3237 priv->params_ethtool.hw_lro != 0) { 3238 priv->params.hw_lro_en = true; 3239 need_restart = true; 3240 } 3241 } 3242 if (was_opened && need_restart) { 3243 mlx5e_close_locked(ifp); 3244 mlx5e_open_locked(ifp); 3245 } 3246 } 3247out: 3248 PRIV_UNLOCK(priv); 3249 break; 3250 3251 case SIOCGI2C: 3252 ifr = (struct ifreq *)data; 3253 3254 /* 3255 * Copy from the user-space address ifr_data to the 3256 * kernel-space address i2c 3257 */ 3258 error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 3259 if (error) 3260 break; 3261 3262 if (i2c.len > sizeof(i2c.data)) { 3263 error = EINVAL; 3264 break; 3265 } 3266 3267 PRIV_LOCK(priv); 3268 /* Get module_num which is required for the query_eeprom */ 3269 error = mlx5_query_module_num(priv->mdev, &module_num); 3270 if (error) { 3271 mlx5_en_err(ifp, 3272 "Query module num failed, eeprom reading is not supported\n"); 3273 error = EINVAL; 3274 goto err_i2c; 3275 } 3276 /* Check if module is present before doing an access */ 3277 module_status = mlx5_query_module_status(priv->mdev, module_num); 3278 if (module_status != MLX5_MODULE_STATUS_PLUGGED_ENABLED) { 3279 error = EINVAL; 3280 goto err_i2c; 3281 } 3282 /* 3283 * Currently 0xA0 and 0xA2 are the only addresses permitted.
3284 * The internal conversion is as follows: 3285 */ 3286 if (i2c.dev_addr == 0xA0) 3287 read_addr = MLX5_I2C_ADDR_LOW; 3288 else if (i2c.dev_addr == 0xA2) 3289 read_addr = MLX5_I2C_ADDR_HIGH; 3290 else { 3291 mlx5_en_err(ifp, 3292 "Query eeprom failed, Invalid Address: %X\n", 3293 i2c.dev_addr); 3294 error = EINVAL; 3295 goto err_i2c; 3296 } 3297 error = mlx5_query_eeprom(priv->mdev, 3298 read_addr, MLX5_EEPROM_LOW_PAGE, 3299 (uint32_t)i2c.offset, (uint32_t)i2c.len, module_num, 3300 (uint32_t *)i2c.data, &size_read); 3301 if (error) { 3302 mlx5_en_err(ifp, 3303 "Query eeprom failed, eeprom reading is not supported\n"); 3304 error = EINVAL; 3305 goto err_i2c; 3306 } 3307 3308 if (i2c.len > MLX5_EEPROM_MAX_BYTES) { 3309 error = mlx5_query_eeprom(priv->mdev, 3310 read_addr, MLX5_EEPROM_LOW_PAGE, 3311 (uint32_t)(i2c.offset + size_read), 3312 (uint32_t)(i2c.len - size_read), module_num, 3313 (uint32_t *)(i2c.data + size_read), &size_read); 3314 } 3315 if (error) { 3316 mlx5_en_err(ifp, 3317 "Query eeprom failed, eeprom reading is not supported\n"); 3318 error = EINVAL; 3319 goto err_i2c; 3320 } 3321 3322 error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); 3323err_i2c: 3324 PRIV_UNLOCK(priv); 3325 break; 3326 3327 default: 3328 error = ether_ioctl(ifp, command, data); 3329 break; 3330 } 3331 return (error); 3332} 3333 3334static int 3335mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) 3336{ 3337 /* 3338 * TODO: uncomment once FW really sets all these bits if 3339 * (!mdev->caps.eth.rss_ind_tbl_cap || !mdev->caps.eth.csum_cap || 3340 * !mdev->caps.eth.max_lso_cap || !mdev->caps.eth.vlan_cap || 3341 * !(mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD)) return 3342 * -ENOTSUPP; 3343 */ 3344 3345 /* TODO: add more must-have features */ 3346 3347 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 3348 return (-ENODEV); 3349 3350 return (0); 3351} 3352 3353static u16 3354mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev) 3355{ 3356 uint32_t bf_buf_size = (1U << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2U; 3357 3358 bf_buf_size -= sizeof(struct mlx5e_tx_wqe) - 2; 3359 3360 /* verify against driver hardware limit */ 3361 if (bf_buf_size > MLX5E_MAX_TX_INLINE) 3362 bf_buf_size = MLX5E_MAX_TX_INLINE; 3363 3364 return (bf_buf_size); 3365} 3366 3367static int 3368mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev, 3369 struct mlx5e_priv *priv, 3370 int num_comp_vectors) 3371{ 3372 int err; 3373 3374 /* 3375 * TODO: Consider link speed for setting "log_sq_size", 3376 * "log_rq_size" and "cq_moderation_xxx": 3377 */ 3378 priv->params.log_sq_size = 3379 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; 3380 priv->params.log_rq_size = 3381 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; 3382 priv->params.rx_cq_moderation_usec = 3383 MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 3384 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE : 3385 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; 3386 priv->params.rx_cq_moderation_mode = 3387 MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 1 : 0; 3388 priv->params.rx_cq_moderation_pkts = 3389 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; 3390 priv->params.tx_cq_moderation_usec = 3391 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; 3392 priv->params.tx_cq_moderation_pkts = 3393 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; 3394 priv->params.min_rx_wqes = 3395 MLX5E_PARAMS_DEFAULT_MIN_RX_WQES; 3396 priv->params.rx_hash_log_tbl_sz = 3397 (order_base_2(num_comp_vectors) > 3398 MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
3399 order_base_2(num_comp_vectors) : 3400 MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ; 3401 priv->params.num_tc = 1; 3402 priv->params.default_vlan_prio = 0; 3403 priv->counter_set_id = -1; 3404 priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); 3405 3406 err = mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode); 3407 if (err) 3408 return (err); 3409 3410 /* 3411 * HW LRO currently defaults to off. When that changes, we 3412 * will consider the HW capability: "!!MLX5_CAP_ETH(mdev, lro_cap)" 3413 */ 3414 priv->params.hw_lro_en = false; 3415 priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; 3416 3417 /* 3418 * CQE zipping currently defaults to off. When that changes, 3419 * we will consider the HW capability: 3420 * "!!MLX5_CAP_GEN(mdev, cqe_compression)" 3421 */ 3422 priv->params.cqe_zipping_en = false; 3423 3424 priv->mdev = mdev; 3425 priv->params.num_channels = num_comp_vectors; 3426 priv->params.channels_rsss = 1; 3427 priv->order_base_2_num_channels = order_base_2(num_comp_vectors); 3428 priv->queue_mapping_channel_mask = 3429 roundup_pow_of_two(num_comp_vectors) - 1; 3430 priv->num_tc = priv->params.num_tc; 3431 priv->default_vlan_prio = priv->params.default_vlan_prio; 3432 3433 INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work); 3434 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); 3435 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); 3436 3437 return (0); 3438} 3439 3440static int 3441mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn, 3442 struct mlx5_core_mr *mkey) 3443{ 3444 struct ifnet *ifp = priv->ifp; 3445 struct mlx5_core_dev *mdev = priv->mdev; 3446 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 3447 void *mkc; 3448 u32 *in; 3449 int err; 3450 3451 in = mlx5_vzalloc(inlen); 3452 if (in == NULL) { 3453 mlx5_en_err(ifp, "failed to allocate inbox\n"); 3454 return (-ENOMEM); 3455 } 3456 3457 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 3458 MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA); 3459 MLX5_SET(mkc, mkc, lw, 1); 3460 MLX5_SET(mkc, mkc, lr, 1); 3461 3462 MLX5_SET(mkc, mkc, pd, pdn); 3463 MLX5_SET(mkc, mkc, length64, 1); 3464 MLX5_SET(mkc, mkc, qpn, 0xffffff); 3465 3466 err = mlx5_core_create_mkey(mdev, mkey, in, inlen); 3467 if (err) 3468 mlx5_en_err(ifp, "mlx5_core_create_mkey failed, %d\n", 3469 err); 3470 3471 kvfree(in); 3472 return (err); 3473} 3474 3475static const char *mlx5e_vport_stats_desc[] = { 3476 MLX5E_VPORT_STATS(MLX5E_STATS_DESC) 3477}; 3478 3479static const char *mlx5e_pport_stats_desc[] = { 3480 MLX5E_PPORT_STATS(MLX5E_STATS_DESC) 3481}; 3482 3483static void 3484mlx5e_priv_mtx_init(struct mlx5e_priv *priv) 3485{ 3486 mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF); 3487 sx_init(&priv->state_lock, "mlx5state"); 3488 callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0); 3489 MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock); 3490} 3491 3492static void 3493mlx5e_priv_mtx_destroy(struct mlx5e_priv *priv) 3494{ 3495 mtx_destroy(&priv->async_events_mtx); 3496 sx_destroy(&priv->state_lock); 3497} 3498 3499static int 3500sysctl_firmware(SYSCTL_HANDLER_ARGS) 3501{ 3502 /* 3503 * %d.%d.%d is the string format. 3504 * fw_rev_{maj,min,sub} return u16, 2^16 = 65536. 3505 * We need at most 5 chars to store that. 3506 * It also has two "." separators and a terminating NUL, which means we need 18 3507 * (5*3 + 3) chars at most.
3508 */ 3509 char fw[18]; 3510 struct mlx5e_priv *priv = arg1; 3511 int error; 3512 3513 snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev), 3514 fw_rev_sub(priv->mdev)); 3515 error = sysctl_handle_string(oidp, fw, sizeof(fw), req); 3516 return (error); 3517} 3518 3519static void 3520mlx5e_disable_tx_dma(struct mlx5e_channel *ch) 3521{ 3522 int i; 3523 3524 for (i = 0; i < ch->num_tc; i++) 3525 mlx5e_drain_sq(&ch->sq[i]); 3526} 3527 3528static void 3529mlx5e_reset_sq_doorbell_record(struct mlx5e_sq *sq) 3530{ 3531 3532 sq->doorbell.d32[0] = cpu_to_be32(MLX5_OPCODE_NOP); 3533 sq->doorbell.d32[1] = cpu_to_be32(sq->sqn << 8); 3534 mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0); 3535 sq->doorbell.d64 = 0; 3536} 3537 3538void 3539mlx5e_resume_sq(struct mlx5e_sq *sq) 3540{ 3541 int err; 3542 3543 /* check if already enabled */ 3544 if (READ_ONCE(sq->running) != 0) 3545 return; 3546 3547 err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_ERR, 3548 MLX5_SQC_STATE_RST); 3549 if (err != 0) { 3550 mlx5_en_err(sq->ifp, 3551 "mlx5e_modify_sq() from ERR to RST failed: %d\n", err); 3552 } 3553 3554 sq->cc = 0; 3555 sq->pc = 0; 3556 3557 /* reset doorbell prior to moving from RST to RDY */ 3558 mlx5e_reset_sq_doorbell_record(sq); 3559 3560 err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, 3561 MLX5_SQC_STATE_RDY); 3562 if (err != 0) { 3563 mlx5_en_err(sq->ifp, 3564 "mlx5e_modify_sq() from RST to RDY failed: %d\n", err); 3565 } 3566 3567 sq->cev_next_state = MLX5E_CEV_STATE_INITIAL; 3568 WRITE_ONCE(sq->running, 1); 3569} 3570 3571static void 3572mlx5e_enable_tx_dma(struct mlx5e_channel *ch) 3573{ 3574 int i; 3575 3576 for (i = 0; i < ch->num_tc; i++) 3577 mlx5e_resume_sq(&ch->sq[i]); 3578} 3579 3580static void 3581mlx5e_disable_rx_dma(struct mlx5e_channel *ch) 3582{ 3583 struct mlx5e_rq *rq = &ch->rq; 3584 int err; 3585 3586 mtx_lock(&rq->mtx); 3587 rq->enabled = 0; 3588 callout_stop(&rq->watchdog); 3589 mtx_unlock(&rq->mtx); 3590 3591 callout_drain(&rq->watchdog); 3592 3593 err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); 3594 if (err != 0) { 3595 mlx5_en_err(rq->ifp, 3596 "mlx5e_modify_rq() from RDY to ERR failed: %d\n", err); 3597 } 3598 3599 while (!mlx5_wq_ll_is_empty(&rq->wq)) { 3600 msleep(1); 3601 rq->cq.mcq.comp(&rq->cq.mcq); 3602 } 3603 3604 /* 3605 * Transitioning into the RST state allows the FW to track fewer ERR state queues, 3606 * thus reducing the receive queue flushing time. 3607 */ 3608 err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_ERR, MLX5_RQC_STATE_RST); 3609 if (err != 0) { 3610 mlx5_en_err(rq->ifp, 3611 "mlx5e_modify_rq() from ERR to RST failed: %d\n", err); 3612 } 3613} 3614 3615static void 3616mlx5e_enable_rx_dma(struct mlx5e_channel *ch) 3617{ 3618 struct mlx5e_rq *rq = &ch->rq; 3619 int err; 3620 3621 rq->wq.wqe_ctr = 0; 3622 mlx5_wq_ll_update_db_record(&rq->wq); 3623 err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); 3624 if (err != 0) { 3625 mlx5_en_err(rq->ifp, 3626 "mlx5e_modify_rq() from RST to RDY failed: %d\n", err); 3627 } 3628 3629 rq->enabled = 1; 3630 3631 rq->cq.mcq.comp(&rq->cq.mcq); 3632} 3633 3634void 3635mlx5e_modify_tx_dma(struct mlx5e_priv *priv, uint8_t value) 3636{ 3637 int i; 3638 3639 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 3640 return; 3641 3642 for (i = 0; i < priv->params.num_channels; i++) { 3643 if (value) 3644 mlx5e_disable_tx_dma(&priv->channel[i]); 3645 else 3646 mlx5e_enable_tx_dma(&priv->channel[i]); 3647 } 3648} 3649 3650void 3651mlx5e_modify_rx_dma(struct mlx5e_priv *priv, uint8_t value) 3652{
3653 int i; 3654 3655 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 3656 return; 3657 3658 for (i = 0; i < priv->params.num_channels; i++) { 3659 if (value) 3660 mlx5e_disable_rx_dma(&priv->channel[i]); 3661 else 3662 mlx5e_enable_rx_dma(&priv->channel[i]); 3663 } 3664} 3665 3666static void 3667mlx5e_add_hw_stats(struct mlx5e_priv *priv) 3668{ 3669 SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw), 3670 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, priv, 0, 3671 sysctl_firmware, "A", "HCA firmware version"); 3672 3673 SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw), 3674 OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0, 3675 "Board ID"); 3676} 3677 3678static int 3679mlx5e_sysctl_tx_priority_flow_control(SYSCTL_HANDLER_ARGS) 3680{ 3681 struct mlx5e_priv *priv = arg1; 3682 uint8_t temp[MLX5E_MAX_PRIORITY]; 3683 uint32_t tx_pfc; 3684 int err; 3685 int i; 3686 3687 PRIV_LOCK(priv); 3688 3689 tx_pfc = priv->params.tx_priority_flow_control; 3690 3691 for (i = 0; i != MLX5E_MAX_PRIORITY; i++) 3692 temp[i] = (tx_pfc >> i) & 1; 3693 3694 err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY); 3695 if (err || !req->newptr) 3696 goto done; 3697 err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY); 3698 if (err) 3699 goto done; 3700 3701 priv->params.tx_priority_flow_control = 0; 3702 3703 /* range check input value */ 3704 for (i = 0; i != MLX5E_MAX_PRIORITY; i++) { 3705 if (temp[i] > 1) { 3706 err = ERANGE; 3707 goto done; 3708 } 3709 priv->params.tx_priority_flow_control |= (temp[i] << i); 3710 } 3711 3712 /* check if update is required */ 3713 if (tx_pfc != priv->params.tx_priority_flow_control) 3714 err = -mlx5e_set_port_pfc(priv); 3715done: 3716 if (err != 0) 3717 priv->params.tx_priority_flow_control = tx_pfc; 3718 PRIV_UNLOCK(priv); 3719 3720 return (err); 3721} 3722 3723static int 3724mlx5e_sysctl_rx_priority_flow_control(SYSCTL_HANDLER_ARGS) 3725{ 3726 struct mlx5e_priv *priv = arg1; 3727 uint8_t temp[MLX5E_MAX_PRIORITY]; 3728 uint32_t rx_pfc; 3729 int err; 3730 int i; 3731 3732 PRIV_LOCK(priv); 3733 3734 rx_pfc = priv->params.rx_priority_flow_control; 3735 3736 for (i = 0; i != MLX5E_MAX_PRIORITY; i++) 3737 temp[i] = (rx_pfc >> i) & 1; 3738 3739 err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY); 3740 if (err || !req->newptr) 3741 goto done; 3742 err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY); 3743 if (err) 3744 goto done; 3745 3746 priv->params.rx_priority_flow_control = 0; 3747 3748 /* range check input value */ 3749 for (i = 0; i != MLX5E_MAX_PRIORITY; i++) { 3750 if (temp[i] > 1) { 3751 err = ERANGE; 3752 goto done; 3753 } 3754 priv->params.rx_priority_flow_control |= (temp[i] << i); 3755 } 3756 3757 /* check if update is required */ 3758 if (rx_pfc != priv->params.rx_priority_flow_control) { 3759 err = -mlx5e_set_port_pfc(priv); 3760 if (err == 0) 3761 err = mlx5e_update_buf_lossy(priv); 3762 } 3763done: 3764 if (err != 0) 3765 priv->params.rx_priority_flow_control = rx_pfc; 3766 PRIV_UNLOCK(priv); 3767 3768 return (err); 3769} 3770 3771static void 3772mlx5e_setup_pauseframes(struct mlx5e_priv *priv) 3773{ 3774#if (__FreeBSD_version < 1100000) 3775 char path[96]; 3776#endif 3777 int error; 3778 3779 /* enable pauseframes by default */ 3780 priv->params.tx_pauseframe_control = 1; 3781 priv->params.rx_pauseframe_control = 1; 3782 3783 /* disable ports flow control, PFC, by default */ 3784 priv->params.tx_priority_flow_control = 0; 3785 priv->params.rx_priority_flow_control = 0; 3786 3787#if (__FreeBSD_version < 1100000) 3788 /* compute path
for sysctl */ 3789 snprintf(path, sizeof(path), "dev.mce.%d.tx_pauseframe_control", 3790 device_get_unit(priv->mdev->pdev->dev.bsddev)); 3791 3792 /* try to fetch tunable, if any */ 3793 TUNABLE_INT_FETCH(path, &priv->params.tx_pauseframe_control); 3794 3795 /* compute path for sysctl */ 3796 snprintf(path, sizeof(path), "dev.mce.%d.rx_pauseframe_control", 3797 device_get_unit(priv->mdev->pdev->dev.bsddev)); 3798 3799 /* try to fetch tunable, if any */ 3800 TUNABLE_INT_FETCH(path, &priv->params.rx_pauseframe_control); 3801#endif 3802 3803 /* register pauseframe SYSCTLs */ 3804 SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3805 OID_AUTO, "tx_pauseframe_control", CTLFLAG_RDTUN, 3806 &priv->params.tx_pauseframe_control, 0, 3807 "Set to enable TX pause frames. Clear to disable."); 3808 3809 SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3810 OID_AUTO, "rx_pauseframe_control", CTLFLAG_RDTUN, 3811 &priv->params.rx_pauseframe_control, 0, 3812 "Set to enable RX pause frames. Clear to disable."); 3813 3814 /* register priority flow control, PFC, SYSCTLs */ 3815 SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3816 OID_AUTO, "tx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN | 3817 CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_tx_priority_flow_control, "CU", 3818 "Set to enable TX ports flow control frames for priorities 0..7. Clear to disable."); 3819 3820 SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3821 OID_AUTO, "rx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN | 3822 CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_rx_priority_flow_control, "CU", 3823 "Set to enable RX ports flow control frames for priorities 0..7. Clear to disable."); 3824 3825 PRIV_LOCK(priv); 3826 3827 /* range check */ 3828 priv->params.tx_pauseframe_control = 3829 priv->params.tx_pauseframe_control ? 1 : 0; 3830 priv->params.rx_pauseframe_control = 3831 priv->params.rx_pauseframe_control ? 
1 : 0; 3832 3833 /* update firmware */ 3834 error = mlx5e_set_port_pause_and_pfc(priv); 3835 if (error == -EINVAL) { 3836 mlx5_en_err(priv->ifp, 3837 "Global pauseframes must be disabled before enabling PFC.\n"); 3838 priv->params.rx_priority_flow_control = 0; 3839 priv->params.tx_priority_flow_control = 0; 3840 3841 /* update firmware */ 3842 (void) mlx5e_set_port_pause_and_pfc(priv); 3843 } 3844 PRIV_UNLOCK(priv); 3845} 3846 3847static void * 3848mlx5e_create_ifp(struct mlx5_core_dev *mdev) 3849{ 3850 struct ifnet *ifp; 3851 struct mlx5e_priv *priv; 3852 u8 dev_addr[ETHER_ADDR_LEN] __aligned(4); 3853 u8 connector_type; 3854 struct sysctl_oid_list *child; 3855 int ncv = mdev->priv.eq_table.num_comp_vectors; 3856 char unit[16]; 3857 int err; 3858 int i,j; 3859 u32 eth_proto_cap; 3860 u32 out[MLX5_ST_SZ_DW(ptys_reg)]; 3861 bool ext = 0; 3862 u32 speeds_num; 3863 struct media media_entry = {}; 3864 3865 if (mlx5e_check_required_hca_cap(mdev)) { 3866 mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n"); 3867 return (NULL); 3868 } 3869 /* 3870 * Try to allocate the priv and make room for worst-case 3871 * number of channel structures: 3872 */ 3873 priv = malloc(sizeof(*priv) + 3874 (sizeof(priv->channel[0]) * mdev->priv.eq_table.num_comp_vectors), 3875 M_MLX5EN, M_WAITOK | M_ZERO); 3876 mlx5e_priv_mtx_init(priv); 3877 3878 ifp = priv->ifp = if_alloc(IFT_ETHER); 3879 if (ifp == NULL) { 3880 mlx5_core_err(mdev, "if_alloc() failed\n"); 3881 goto err_free_priv; 3882 } 3883 ifp->if_softc = priv; 3884 if_initname(ifp, "mce", device_get_unit(mdev->pdev->dev.bsddev)); 3885 ifp->if_mtu = ETHERMTU; 3886 ifp->if_init = mlx5e_open; 3887 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 3888 ifp->if_ioctl = mlx5e_ioctl; 3889 ifp->if_transmit = mlx5e_xmit; 3890 ifp->if_qflush = if_qflush; 3891#if (__FreeBSD_version >= 1100000) 3892 ifp->if_get_counter = mlx5e_get_counter; 3893#endif 3894 ifp->if_snd.ifq_maxlen = ifqmaxlen; 3895 /* 3896 * Set driver features 3897 */ 3898 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6; 3899 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 3900 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER; 3901 ifp->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU; 3902 ifp->if_capabilities |= IFCAP_LRO; 3903 ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO; 3904 ifp->if_capabilities |= IFCAP_HWSTATS; 3905 3906 /* set TSO limits so that we don't have to drop TX packets */ 3907 ifp->if_hw_tsomax = MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 3908 ifp->if_hw_tsomaxsegcount = MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */; 3909 ifp->if_hw_tsomaxsegsize = MLX5E_MAX_TX_MBUF_SIZE; 3910 3911 ifp->if_capenable = ifp->if_capabilities; 3912 ifp->if_hwassist = 0; 3913 if (ifp->if_capenable & IFCAP_TSO) 3914 ifp->if_hwassist |= CSUM_TSO; 3915 if (ifp->if_capenable & IFCAP_TXCSUM) 3916 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP); 3917 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) 3918 ifp->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 3919 3920 /* ifnet sysctl tree */ 3921 sysctl_ctx_init(&priv->sysctl_ctx); 3922 priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev), 3923 OID_AUTO, ifp->if_dname, CTLFLAG_RD, 0, "MLX5 ethernet - interface name"); 3924 if (priv->sysctl_ifnet == NULL) { 3925 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n"); 3926 goto err_free_sysctl; 3927 } 3928 snprintf(unit, sizeof(unit), "%d", ifp->if_dunit); 3929 priv->sysctl_ifnet = 
SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3930 OID_AUTO, unit, CTLFLAG_RD, 0, "MLX5 ethernet - interface unit"); 3931 if (priv->sysctl_ifnet == NULL) { 3932 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n"); 3933 goto err_free_sysctl; 3934 } 3935 3936 /* HW sysctl tree */ 3937 child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev)); 3938 priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child, 3939 OID_AUTO, "hw", CTLFLAG_RD, 0, "MLX5 ethernet dev hw"); 3940 if (priv->sysctl_hw == NULL) { 3941 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n"); 3942 goto err_free_sysctl; 3943 } 3944 3945 err = mlx5e_build_ifp_priv(mdev, priv, ncv); 3946 if (err) { 3947 mlx5_core_err(mdev, "mlx5e_build_ifp_priv() failed (%d)\n", err); 3948 goto err_free_sysctl; 3949 } 3950 3951 /* reuse mlx5core's watchdog workqueue */ 3952 priv->wq = mdev->priv.health.wq_watchdog; 3953 3954 err = mlx5_alloc_map_uar(mdev, &priv->cq_uar); 3955 if (err) { 3956 mlx5_en_err(ifp, "mlx5_alloc_map_uar failed, %d\n", err); 3957 goto err_free_wq; 3958 } 3959 err = mlx5_core_alloc_pd(mdev, &priv->pdn); 3960 if (err) { 3961 mlx5_en_err(ifp, "mlx5_core_alloc_pd failed, %d\n", err); 3962 goto err_unmap_free_uar; 3963 } 3964 err = mlx5_alloc_transport_domain(mdev, &priv->tdn); 3965 if (err) { 3966 mlx5_en_err(ifp, 3967 "mlx5_alloc_transport_domain failed, %d\n", err); 3968 goto err_dealloc_pd; 3969 } 3970 err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr); 3971 if (err) { 3972 mlx5_en_err(ifp, "mlx5e_create_mkey failed, %d\n", err); 3973 goto err_dealloc_transport_domain; 3974 } 3975 mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr); 3976 3977 /* check if we should generate a random MAC address */ 3978 if (MLX5_CAP_GEN(priv->mdev, vport_group_manager) == 0 && 3979 is_zero_ether_addr(dev_addr)) { 3980 random_ether_addr(dev_addr); 3981 mlx5_en_err(ifp, "Assigned random MAC address\n"); 3982 } 3983 3984 /* set default MTU */ 3985 mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu); 3986 3987 /* Set default media status */ 3988 priv->media_status_last = IFM_AVALID; 3989 priv->media_active_last = IFM_ETHER | IFM_AUTO | 3990 IFM_ETH_RXPAUSE | IFM_FDX; 3991 3992 /* setup default pauseframes configuration */ 3993 mlx5e_setup_pauseframes(priv); 3994 3995 /* Set up the supported media */ 3996 /* TODO: If we failed to query ptys, is it OK to proceed? */ 3997 err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); 3998 if (err == 0) { 3999 ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); 4000 eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, 4001 eth_proto_capability); 4002 if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) 4003 connector_type = MLX5_GET(ptys_reg, out, 4004 connector_type); 4005 } else { 4006 eth_proto_cap = 0; 4007 mlx5_en_err(ifp, "Query port media capability failed, %d\n", err); 4008 } 4009 4010 ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK, 4011 mlx5e_media_change, mlx5e_media_status); 4012 4013 speeds_num = ext ? MLX5E_EXT_LINK_SPEEDS_NUMBER : MLX5E_LINK_SPEEDS_NUMBER; 4014 for (i = 0; i != speeds_num; i++) { 4015 for (j = 0; j < MLX5E_LINK_MODES_NUMBER; ++j) { 4016 media_entry = ext ?
mlx5e_ext_mode_table[i][j] : 4017 mlx5e_mode_table[i][j]; 4018 if (media_entry.baudrate == 0) 4019 continue; 4020 if (MLX5E_PROT_MASK(i) & eth_proto_cap) { 4021 ifmedia_add(&priv->media, 4022 media_entry.subtype | 4023 IFM_ETHER, 0, NULL); 4024 ifmedia_add(&priv->media, 4025 media_entry.subtype | 4026 IFM_ETHER | IFM_FDX | 4027 IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL); 4028 } 4029 } 4030 } 4031 4032 ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL); 4033 ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX | 4034 IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL); 4035 4036 /* Set autoselect by default */ 4037 ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX | 4038 IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE); 4039 ether_ifattach(ifp, dev_addr); 4040 4041 /* Register for VLAN events */ 4042 priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, 4043 mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST); 4044 priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, 4045 mlx5e_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST); 4046 4047 /* Link is down by default */ 4048 if_link_state_change(ifp, LINK_STATE_DOWN); 4049 4050 mlx5e_enable_async_events(priv); 4051 4052 mlx5e_add_hw_stats(priv); 4053 4054 mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 4055 "vstats", mlx5e_vport_stats_desc, MLX5E_VPORT_STATS_NUM, 4056 priv->stats.vport.arg); 4057 4058 mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 4059 "pstats", mlx5e_pport_stats_desc, MLX5E_PPORT_STATS_NUM, 4060 priv->stats.pport.arg); 4061 4062 mlx5e_create_ethtool(priv); 4063 4064 mtx_lock(&priv->async_events_mtx); 4065 mlx5e_update_stats(priv); 4066 mtx_unlock(&priv->async_events_mtx); 4067 4068 return (priv); 4069 4070err_dealloc_transport_domain: 4071 mlx5_dealloc_transport_domain(mdev, priv->tdn); 4072 4073err_dealloc_pd: 4074 mlx5_core_dealloc_pd(mdev, priv->pdn); 4075 4076err_unmap_free_uar: 4077 mlx5_unmap_free_uar(mdev, &priv->cq_uar); 4078 4079err_free_wq: 4080 flush_workqueue(priv->wq); 4081 4082err_free_sysctl: 4083 sysctl_ctx_free(&priv->sysctl_ctx); 4084 if (priv->sysctl_debug) 4085 sysctl_ctx_free(&priv->stats.port_stats_debug.ctx); 4086 if_free(ifp); 4087 4088err_free_priv: 4089 mlx5e_priv_mtx_destroy(priv); 4090 free(priv, M_MLX5EN); 4091 return (NULL); 4092} 4093 4094static void 4095mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv) 4096{ 4097 struct mlx5e_priv *priv = vpriv; 4098 struct ifnet *ifp = priv->ifp; 4099 4100 /* don't allow more IOCTLs */ 4101 priv->gone = 1; 4102 4103 /* XXX wait a bit to allow IOCTL handlers to complete */ 4104 pause("W", hz); 4105 4106 /* stop watchdog timer */ 4107 callout_drain(&priv->watchdog); 4108 4109 if (priv->vlan_attach != NULL) 4110 EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach); 4111 if (priv->vlan_detach != NULL) 4112 EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach); 4113 4114 /* make sure device gets closed */ 4115 PRIV_LOCK(priv); 4116 mlx5e_close_locked(ifp); 4117 PRIV_UNLOCK(priv); 4118 4119 /* unregister device */ 4120 ifmedia_removeall(&priv->media); 4121 ether_ifdetach(ifp); 4122 if_free(ifp); 4123 4124 /* destroy all remaining sysctl nodes */ 4125 sysctl_ctx_free(&priv->stats.vport.ctx); 4126 sysctl_ctx_free(&priv->stats.pport.ctx); 4127 if (priv->sysctl_debug) 4128 sysctl_ctx_free(&priv->stats.port_stats_debug.ctx); 4129 sysctl_ctx_free(&priv->sysctl_ctx); 4130 4131 mlx5_core_destroy_mkey(priv->mdev, &priv->mr); 4132 mlx5_dealloc_transport_domain(priv->mdev, priv->tdn); 4133 
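	/* the PD and UAR are released last, after every object created on top of them has been destroyed */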
mlx5_core_dealloc_pd(priv->mdev, priv->pdn); 4134 mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); 4135 mlx5e_disable_async_events(priv); 4136 flush_workqueue(priv->wq); 4137 mlx5e_priv_mtx_destroy(priv); 4138 free(priv, M_MLX5EN); 4139} 4140 4141static void * 4142mlx5e_get_ifp(void *vpriv) 4143{ 4144 struct mlx5e_priv *priv = vpriv; 4145 4146 return (priv->ifp); 4147} 4148 4149static struct mlx5_interface mlx5e_interface = { 4150 .add = mlx5e_create_ifp, 4151 .remove = mlx5e_destroy_ifp, 4152 .event = mlx5e_async_event, 4153 .protocol = MLX5_INTERFACE_PROTOCOL_ETH, 4154 .get_dev = mlx5e_get_ifp, 4155}; 4156 4157void 4158mlx5e_init(void) 4159{ 4160 mlx5_register_interface(&mlx5e_interface); 4161} 4162 4163void 4164mlx5e_cleanup(void) 4165{ 4166 mlx5_unregister_interface(&mlx5e_interface); 4167} 4168 4169static void 4170mlx5e_show_version(void __unused *arg) 4171{ 4172 4173 printf("%s", mlx5e_version); 4174} 4175SYSINIT(mlx5e_show_version, SI_SUB_DRIVERS, SI_ORDER_ANY, mlx5e_show_version, NULL); 4176 4177module_init_order(mlx5e_init, SI_ORDER_THIRD); 4178module_exit_order(mlx5e_cleanup, SI_ORDER_THIRD); 4179 4180#if (__FreeBSD_version >= 1100000) 4181MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1); 4182#endif 4183MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1); 4184MODULE_VERSION(mlx5en, 1); 4185