mlx5_en_main.c revision 359859
/*-
 * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_en/mlx5_en_main.c 359859 2020-04-13 09:09:28Z hselasky $
 */

#include "en.h"

#include <sys/sockio.h>
#include <machine/atomic.h>

#ifndef ETH_DRIVER_VERSION
#define ETH_DRIVER_VERSION "3.5.2"
#endif
#define DRIVER_RELDATE "September 2019"

static const char mlx5e_version[] = "mlx5en: Mellanox Ethernet driver "
    ETH_DRIVER_VERSION " (" DRIVER_RELDATE ")\n";

static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs);

struct mlx5e_channel_param {
    struct mlx5e_rq_param rq;
    struct mlx5e_sq_param sq;
    struct mlx5e_cq_param rx_cq;
    struct mlx5e_cq_param tx_cq;
};

struct media {
    u32 subtype;
    u64 baudrate;
};

static const struct media mlx5e_mode_table[MLX5E_LINK_SPEEDS_NUMBER][MLX5E_LINK_MODES_NUMBER] = {

    [MLX5E_1000BASE_CX_SGMII][MLX5E_SGMII] = {
        .subtype = IFM_1000_CX_SGMII,
        .baudrate = IF_Mbps(1000ULL),
    },
    [MLX5E_1000BASE_KX][MLX5E_KX] = {
        .subtype = IFM_1000_KX,
        .baudrate = IF_Mbps(1000ULL),
    },
    [MLX5E_10GBASE_CX4][MLX5E_CX4] = {
        .subtype = IFM_10G_CX4,
        .baudrate = IF_Gbps(10ULL),
    },
    [MLX5E_10GBASE_KX4][MLX5E_KX4] = {
        .subtype = IFM_10G_KX4,
        .baudrate = IF_Gbps(10ULL),
    },
    [MLX5E_10GBASE_KR][MLX5E_KR] = {
        .subtype = IFM_10G_KR,
        .baudrate = IF_Gbps(10ULL),
    },
    [MLX5E_20GBASE_KR2][MLX5E_KR2] = {
        .subtype = IFM_20G_KR2,
        .baudrate = IF_Gbps(20ULL),
    },
    [MLX5E_40GBASE_CR4][MLX5E_CR4] = {
        .subtype = IFM_40G_CR4,
        .baudrate = IF_Gbps(40ULL),
    },
    [MLX5E_40GBASE_KR4][MLX5E_KR4] = {
        .subtype = IFM_40G_KR4,
        .baudrate = IF_Gbps(40ULL),
    },
    [MLX5E_56GBASE_R4][MLX5E_R] = {
        .subtype = IFM_56G_R4,
        .baudrate = IF_Gbps(56ULL),
    },
    [MLX5E_10GBASE_CR][MLX5E_CR1] = {
        .subtype = IFM_10G_CR1,
        .baudrate = IF_Gbps(10ULL),
    },
    [MLX5E_10GBASE_SR][MLX5E_SR] = {
        .subtype = IFM_10G_SR,
        .baudrate = IF_Gbps(10ULL),
    },
    [MLX5E_10GBASE_ER_LR][MLX5E_ER] = {
        .subtype = IFM_10G_ER,
        .baudrate = IF_Gbps(10ULL),
    },
    [MLX5E_10GBASE_ER_LR][MLX5E_LR] = {
        .subtype = IFM_10G_LR,
        .baudrate = IF_Gbps(10ULL),
    },
    [MLX5E_40GBASE_SR4][MLX5E_SR4] = {
        .subtype = IFM_40G_SR4,
        .baudrate = IF_Gbps(40ULL),
    },
    [MLX5E_40GBASE_LR4_ER4][MLX5E_LR4] = {
        .subtype = IFM_40G_LR4,
        .baudrate = IF_Gbps(40ULL),
    },
    [MLX5E_40GBASE_LR4_ER4][MLX5E_ER4] = {
        .subtype = IFM_40G_ER4,
        .baudrate = IF_Gbps(40ULL),
    },
    [MLX5E_100GBASE_CR4][MLX5E_CR4] = {
        .subtype = IFM_100G_CR4,
        .baudrate = IF_Gbps(100ULL),
    },
    [MLX5E_100GBASE_SR4][MLX5E_SR4] = {
        .subtype = IFM_100G_SR4,
        .baudrate = IF_Gbps(100ULL),
    },
    [MLX5E_100GBASE_KR4][MLX5E_KR4] = {
        .subtype = IFM_100G_KR4,
        .baudrate = IF_Gbps(100ULL),
    },
    [MLX5E_100GBASE_LR4][MLX5E_LR4] = {
        .subtype = IFM_100G_LR4,
        .baudrate = IF_Gbps(100ULL),
    },
    [MLX5E_100BASE_TX][MLX5E_TX] = {
        .subtype = IFM_100_TX,
        .baudrate = IF_Mbps(100ULL),
    },
    [MLX5E_1000BASE_T][MLX5E_T] = {
        .subtype = IFM_1000_T,
        .baudrate = IF_Mbps(1000ULL),
    },
    [MLX5E_10GBASE_T][MLX5E_T] = {
        .subtype = IFM_10G_T,
        .baudrate = IF_Gbps(10ULL),
    },
    [MLX5E_25GBASE_CR][MLX5E_CR] = {
        .subtype = IFM_25G_CR,
        .baudrate = IF_Gbps(25ULL),
    },
    [MLX5E_25GBASE_KR][MLX5E_KR] = {
        .subtype = IFM_25G_KR,
        .baudrate = IF_Gbps(25ULL),
    },
    [MLX5E_25GBASE_SR][MLX5E_SR] = {
        .subtype = IFM_25G_SR,
        .baudrate = IF_Gbps(25ULL),
    },
    [MLX5E_50GBASE_CR2][MLX5E_CR2] = {
        .subtype = IFM_50G_CR2,
        .baudrate = IF_Gbps(50ULL),
    },
    [MLX5E_50GBASE_KR2][MLX5E_KR2] = {
        .subtype = IFM_50G_KR2,
        .baudrate = IF_Gbps(50ULL),
    },
};

static const struct media mlx5e_ext_mode_table[MLX5E_EXT_LINK_SPEEDS_NUMBER][MLX5E_LINK_MODES_NUMBER] = {
    [MLX5E_SGMII_100M][MLX5E_SGMII] = {
        .subtype = IFM_100_SGMII,
        .baudrate = IF_Mbps(100),
    },
    [MLX5E_1000BASE_X_SGMII][MLX5E_KX] = {
        .subtype = IFM_1000_KX,
        .baudrate = IF_Mbps(1000),
    },
    [MLX5E_1000BASE_X_SGMII][MLX5E_CX_SGMII] = {
        .subtype = IFM_1000_CX_SGMII,
        .baudrate = IF_Mbps(1000),
    },
    [MLX5E_1000BASE_X_SGMII][MLX5E_CX] = {
        .subtype = IFM_1000_CX,
        .baudrate = IF_Mbps(1000),
    },
    [MLX5E_1000BASE_X_SGMII][MLX5E_LX] = {
        .subtype = IFM_1000_LX,
        .baudrate = IF_Mbps(1000),
    },
    [MLX5E_1000BASE_X_SGMII][MLX5E_SX] = {
        .subtype = IFM_1000_SX,
        .baudrate = IF_Mbps(1000),
    },
    [MLX5E_1000BASE_X_SGMII][MLX5E_T] = {
        .subtype = IFM_1000_T,
        .baudrate = IF_Mbps(1000),
    },
    [MLX5E_5GBASE_R][MLX5E_T] = {
        .subtype = IFM_5000_T,
        .baudrate = IF_Mbps(5000),
    },
    [MLX5E_5GBASE_R][MLX5E_KR] = {
        .subtype = IFM_5000_KR,
        .baudrate = IF_Mbps(5000),
    },
    [MLX5E_5GBASE_R][MLX5E_KR1] = {
        .subtype = IFM_5000_KR1,
        .baudrate = IF_Mbps(5000),
    },
    [MLX5E_5GBASE_R][MLX5E_KR_S] = {
        .subtype = IFM_5000_KR_S,
        .baudrate = IF_Mbps(5000),
    },
    [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_ER] = {
        .subtype = IFM_10G_ER,
        .baudrate = IF_Gbps(10ULL),
    },
    [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_KR] = {
        .subtype = IFM_10G_KR,
        .baudrate = IF_Gbps(10ULL),
    },
    [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_LR] = {
        .subtype = IFM_10G_LR,
        .baudrate = IF_Gbps(10ULL),
    },
    [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_SR] = {
        .subtype = IFM_10G_SR,
        .baudrate = IF_Gbps(10ULL),
    },
    [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_T] = {
        .subtype = IFM_10G_T,
        .baudrate = IF_Gbps(10ULL),
    },
    [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_AOC] = {
        .subtype = IFM_10G_AOC,
        .baudrate = IF_Gbps(10ULL),
    },
    [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_CR1] = {
        .subtype = IFM_10G_CR1,
        .baudrate = IF_Gbps(10ULL),
    },
    [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_CR4] = {
        .subtype = IFM_40G_CR4,
        .baudrate = IF_Gbps(40ULL),
    },
    [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_KR4] = {
        .subtype = IFM_40G_KR4,
        .baudrate = IF_Gbps(40ULL),
    },
    [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_LR4] = {
        .subtype = IFM_40G_LR4,
        .baudrate = IF_Gbps(40ULL),
    },
    [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_SR4] = {
        .subtype = IFM_40G_SR4,
        .baudrate = IF_Gbps(40ULL),
    },
    [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_ER4] = {
        .subtype = IFM_40G_ER4,
        .baudrate = IF_Gbps(40ULL),
    },

    [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR] = {
        .subtype = IFM_25G_CR,
        .baudrate = IF_Gbps(25ULL),
    },
    [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR] = {
        .subtype = IFM_25G_KR,
        .baudrate = IF_Gbps(25ULL),
    },
    [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_SR] = {
        .subtype = IFM_25G_SR,
        .baudrate = IF_Gbps(25ULL),
    },
    [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_ACC] = {
        .subtype = IFM_25G_ACC,
        .baudrate = IF_Gbps(25ULL),
    },
    [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_AOC] = {
        .subtype = IFM_25G_AOC,
        .baudrate = IF_Gbps(25ULL),
    },
    [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR1] = {
        .subtype = IFM_25G_CR1,
        .baudrate = IF_Gbps(25ULL),
    },
    [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR_S] = {
        .subtype = IFM_25G_CR_S,
        .baudrate = IF_Gbps(25ULL),
    },
    [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR1] = {
        .subtype = IFM_25G_KR1,
        .baudrate = IF_Gbps(25ULL),
    },
    [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR_S] = {
        .subtype = IFM_25G_KR_S,
        .baudrate = IF_Gbps(25ULL),
    },
    [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_LR] = {
        .subtype = IFM_25G_LR,
        .baudrate = IF_Gbps(25ULL),
    },
    [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_T] = {
        .subtype = IFM_25G_T,
        .baudrate = IF_Gbps(25ULL),
    },
    [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_CR2] = {
        .subtype = IFM_50G_CR2,
        .baudrate = IF_Gbps(50ULL),
    },
    [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_KR2] = {
        .subtype = IFM_50G_KR2,
        .baudrate = IF_Gbps(50ULL),
    },
    [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_SR2] = {
        .subtype = IFM_50G_SR2,
        .baudrate = IF_Gbps(50ULL),
    },
    [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_LR2] = {
        .subtype = IFM_50G_LR2,
        .baudrate = IF_Gbps(50ULL),
    },
    [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_LR] = {
        .subtype = IFM_50G_LR,
        .baudrate = IF_Gbps(50ULL),
    },
    [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_SR] = {
        .subtype = IFM_50G_SR,
        .baudrate = IF_Gbps(50ULL),
    },
    [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_CP] = {
        .subtype = IFM_50G_CP,
        .baudrate = IF_Gbps(50ULL),
    },
    [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_FR] = {
        .subtype = IFM_50G_FR,
        .baudrate = IF_Gbps(50ULL),
    },
    [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_KR_PAM4] = {
        .subtype = IFM_50G_KR_PAM4,
        .baudrate = IF_Gbps(50ULL),
    },
    [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_CR4] = {
        .subtype = IFM_100G_CR4,
        .baudrate = IF_Gbps(100ULL),
    },
    [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_KR4] = {
        .subtype = IFM_100G_KR4,
        .baudrate = IF_Gbps(100ULL),
    },
    [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_LR4] = {
        .subtype = IFM_100G_LR4,
        .baudrate = IF_Gbps(100ULL),
    },
    [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_SR4] = {
        .subtype = IFM_100G_SR4,
        .baudrate = IF_Gbps(100ULL),
    },
    [MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_SR2] = {
        .subtype = IFM_100G_SR2,
        .baudrate = IF_Gbps(100ULL),
    },
    [MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_CP2] = {
        .subtype = IFM_100G_CP2,
        .baudrate = IF_Gbps(100ULL),
    },
    [MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_KR2_PAM4] = {
        .subtype = IFM_100G_KR2_PAM4,
        .baudrate = IF_Gbps(100ULL),
    },
    [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_DR4] = {
        .subtype = IFM_200G_DR4,
        .baudrate = IF_Gbps(200ULL),
    },
    [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_LR4] = {
        .subtype = IFM_200G_LR4,
        .baudrate = IF_Gbps(200ULL),
    },
    [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_SR4] = {
        .subtype = IFM_200G_SR4,
        .baudrate = IF_Gbps(200ULL),
    },
    [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_FR4] = {
        .subtype = IFM_200G_FR4,
        .baudrate = IF_Gbps(200ULL),
    },
    [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_CR4_PAM4] = {
        .subtype = IFM_200G_CR4_PAM4,
        .baudrate = IF_Gbps(200ULL),
    },
    [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_KR4_PAM4] = {
        .subtype = IFM_200G_KR4_PAM4,
        .baudrate = IF_Gbps(200ULL),
    },
};

MALLOC_DEFINE(M_MLX5EN, "MLX5EN", "MLX5 Ethernet");
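
/*
 * Illustrative note, not upstream code: both tables above are indexed
 * as [PTYS speed bit][module/cable type]. PTYS reports the operational
 * speed as a single set bit, so ilog2() recovers the row index and
 * MLX5E_PROT_MASK() is its inverse. For example, with the legacy
 * (non-extended) table:
 *
 *    eth_proto_oper = MLX5E_PROT_MASK(MLX5E_25GBASE_CR);
 *    i = ilog2(eth_proto_oper);      // i == MLX5E_25GBASE_CR
 *    mlx5e_mode_table[i][MLX5E_CR];  // IFM_25G_CR at IF_Gbps(25ULL)
 */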
static void
mlx5e_update_carrier(struct mlx5e_priv *priv)
{
    struct mlx5_core_dev *mdev = priv->mdev;
    u32 out[MLX5_ST_SZ_DW(ptys_reg)];
    u32 eth_proto_oper;
    int error;
    u8 port_state;
    u8 is_er_type;
    u8 i, j;
    bool ext;
    struct media media_entry = {};

    port_state = mlx5_query_vport_state(mdev,
        MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

    if (port_state == VPORT_STATE_UP) {
        priv->media_status_last |= IFM_ACTIVE;
    } else {
        priv->media_status_last &= ~IFM_ACTIVE;
        priv->media_active_last = IFM_ETHER;
        if_link_state_change(priv->ifp, LINK_STATE_DOWN);
        return;
    }

    error = mlx5_query_port_ptys(mdev, out, sizeof(out),
        MLX5_PTYS_EN, 1);
    if (error) {
        priv->media_active_last = IFM_ETHER;
        priv->ifp->if_baudrate = 1;
        mlx5_en_err(priv->ifp, "query port ptys failed: 0x%x\n",
            error);
        return;
    }

    ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
    eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
        eth_proto_oper);

    i = ilog2(eth_proto_oper);

    for (j = 0; j != MLX5E_LINK_MODES_NUMBER; j++) {
        media_entry = ext ? mlx5e_ext_mode_table[i][j] :
            mlx5e_mode_table[i][j];
        if (media_entry.baudrate != 0)
            break;
    }

    if (media_entry.subtype == 0) {
        mlx5_en_err(priv->ifp,
            "Could not find operational media subtype\n");
        return;
    }

    switch (media_entry.subtype) {
    case IFM_10G_ER:
        error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type);
        if (error != 0) {
            mlx5_en_err(priv->ifp,
                "query port pddr failed: %d\n", error);
        }
        if (error != 0 || is_er_type == 0)
            media_entry.subtype = IFM_10G_LR;
        break;
    case IFM_40G_LR4:
        error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type);
        if (error != 0) {
            mlx5_en_err(priv->ifp,
                "query port pddr failed: %d\n", error);
        }
        if (error == 0 && is_er_type != 0)
            media_entry.subtype = IFM_40G_ER4;
        break;
    }
    priv->media_active_last = media_entry.subtype | IFM_ETHER | IFM_FDX;
    priv->ifp->if_baudrate = media_entry.baudrate;

    if_link_state_change(priv->ifp, LINK_STATE_UP);
}

static void
mlx5e_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
{
    struct mlx5e_priv *priv = dev->if_softc;

    ifmr->ifm_status = priv->media_status_last;
    ifmr->ifm_active = priv->media_active_last |
        (priv->params.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) |
        (priv->params.tx_pauseframe_control ? IFM_ETH_TXPAUSE : 0);
}
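
/*
 * Illustrative note, not upstream code: mlx5e_find_link_mode() below
 * is the reverse lookup of the tables above; it ORs together the
 * MLX5E_PROT_MASK() of every speed row containing the requested
 * ifmedia subtype, e.g.:
 *
 *    mlx5e_find_link_mode(IFM_25G_CR, false) ==
 *        MLX5E_PROT_MASK(MLX5E_25GBASE_CR)
 */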
static u32
mlx5e_find_link_mode(u32 subtype, bool ext)
{
    u32 i;
    u32 j;
    u32 link_mode = 0;
    u32 speeds_num = 0;
    struct media media_entry = {};

    switch (subtype) {
    case IFM_10G_LR:
        subtype = IFM_10G_ER;
        break;
    case IFM_40G_ER4:
        subtype = IFM_40G_LR4;
        break;
    }

    speeds_num = ext ? MLX5E_EXT_LINK_SPEEDS_NUMBER :
        MLX5E_LINK_SPEEDS_NUMBER;

    for (i = 0; i != speeds_num; i++) {
        for (j = 0; j < MLX5E_LINK_MODES_NUMBER; ++j) {
            media_entry = ext ? mlx5e_ext_mode_table[i][j] :
                mlx5e_mode_table[i][j];
            if (media_entry.baudrate == 0)
                continue;
            if (media_entry.subtype == subtype) {
                link_mode |= MLX5E_PROT_MASK(i);
            }
        }
    }

    return (link_mode);
}

static int
mlx5e_set_port_pause_and_pfc(struct mlx5e_priv *priv)
{
    return (mlx5_set_port_pause_and_pfc(priv->mdev, 1,
        priv->params.rx_pauseframe_control,
        priv->params.tx_pauseframe_control,
        priv->params.rx_priority_flow_control,
        priv->params.tx_priority_flow_control));
}

static int
mlx5e_set_port_pfc(struct mlx5e_priv *priv)
{
    int error;

    if (priv->gone != 0) {
        error = -ENXIO;
    } else if (priv->params.rx_pauseframe_control ||
        priv->params.tx_pauseframe_control) {
        mlx5_en_err(priv->ifp,
            "Global pauseframes must be disabled before enabling PFC.\n");
        error = -EINVAL;
    } else {
        error = mlx5e_set_port_pause_and_pfc(priv);
    }
    return (error);
}

static int
mlx5e_media_change(struct ifnet *dev)
{
    struct mlx5e_priv *priv = dev->if_softc;
    struct mlx5_core_dev *mdev = priv->mdev;
    u32 eth_proto_cap;
    u32 link_mode;
    u32 out[MLX5_ST_SZ_DW(ptys_reg)];
    int was_opened;
    int locked;
    int error;
    bool ext;

    locked = PRIV_LOCKED(priv);
    if (!locked)
        PRIV_LOCK(priv);

    if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) {
        error = EINVAL;
        goto done;
    }

    error = mlx5_query_port_ptys(mdev, out, sizeof(out),
        MLX5_PTYS_EN, 1);
    if (error != 0) {
        mlx5_en_err(dev, "Query port media capability failed\n");
        goto done;
    }

    ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
    link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media), ext);

    /* query supported capabilities */
    eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
        eth_proto_capability);

    /* check for autoselect */
    if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO) {
        link_mode = eth_proto_cap;
        if (link_mode == 0) {
            mlx5_en_err(dev, "Port media capability is zero\n");
            error = EINVAL;
            goto done;
        }
    } else {
        link_mode = link_mode & eth_proto_cap;
        if (link_mode == 0) {
            mlx5_en_err(dev, "Not supported link mode requested\n");
            error = EINVAL;
            goto done;
        }
    }
    if (priv->media.ifm_media & (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
        /* check if PFC is enabled */
        if (priv->params.rx_priority_flow_control ||
            priv->params.tx_priority_flow_control) {
            mlx5_en_err(dev, "PFC must be disabled before enabling global pauseframes.\n");
            error = EINVAL;
            goto done;
        }
    }
    /* update pauseframe control bits */
    priv->params.rx_pauseframe_control =
        (priv->media.ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0;
    priv->params.tx_pauseframe_control =
        (priv->media.ifm_media & IFM_ETH_TXPAUSE) ? 1 : 0;

    /* check if device is opened */
    was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);

    /* reconfigure the hardware */
    mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
    mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN, ext);
    error = -mlx5e_set_port_pause_and_pfc(priv);
    if (was_opened)
        mlx5_set_port_status(mdev, MLX5_PORT_UP);

done:
    if (!locked)
        PRIV_UNLOCK(priv);
    return (error);
}

static void
mlx5e_update_carrier_work(struct work_struct *work)
{
    struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
        update_carrier_work);

    PRIV_LOCK(priv);
    if (test_bit(MLX5E_STATE_OPENED, &priv->state))
        mlx5e_update_carrier(priv);
    PRIV_UNLOCK(priv);
}

#define MLX5E_PCIE_PERF_GET_64(a,b,c,d,e,f)    \
    s_debug->c = MLX5_GET64(mpcnt_reg, out, counter_set.f.c);

#define MLX5E_PCIE_PERF_GET_32(a,b,c,d,e,f)    \
    s_debug->c = MLX5_GET(mpcnt_reg, out, counter_set.f.c);

static void
mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
{
    struct mlx5_core_dev *mdev = priv->mdev;
    struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
    const unsigned sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
    void *out;
    void *in;
    int err;

    /* allocate firmware request structures */
    in = mlx5_vzalloc(sz);
    out = mlx5_vzalloc(sz);
    if (in == NULL || out == NULL)
        goto free_out;

    MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
    err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
    if (err != 0)
        goto free_out;

    MLX5E_PCIE_PERFORMANCE_COUNTERS_64(MLX5E_PCIE_PERF_GET_64)
    MLX5E_PCIE_PERFORMANCE_COUNTERS_32(MLX5E_PCIE_PERF_GET_32)

    MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP);
    err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
    if (err != 0)
        goto free_out;

    MLX5E_PCIE_TIMERS_AND_STATES_COUNTERS_32(MLX5E_PCIE_PERF_GET_32)

    MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_LANE_COUNTERS_GROUP);
    err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
    if (err != 0)
        goto free_out;

    MLX5E_PCIE_LANE_COUNTERS_32(MLX5E_PCIE_PERF_GET_32)

free_out:
    /* free firmware request structures */
    kvfree(in);
    kvfree(out);
}
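
/*
 * Illustrative note, not upstream code: the X-macro lists above expand
 * MLX5E_PCIE_PERF_GET_64()/_32() once per counter. Assuming a
 * hypothetical list entry naming a counter "rx_errors" inside a layout
 * "pcie_perf_cntrs_grp_data_layout", one expansion would read:
 *
 *    s_debug->rx_errors = MLX5_GET64(mpcnt_reg, out,
 *        counter_set.pcie_perf_cntrs_grp_data_layout.rx_errors);
 */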

/*
 * This function reads the physical port counters from the firmware
 * using a pre-defined layout defined by various MLX5E_PPORT_XXX()
 * macros. The output is converted from big-endian 64-bit values into
 * host endian ones and stored in the "priv->stats.pport" structure.
 */
static void
mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
    struct mlx5_core_dev *mdev = priv->mdev;
    struct mlx5e_pport_stats *s = &priv->stats.pport;
    struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
    u32 *in;
    u32 *out;
    const u64 *ptr;
    unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
    unsigned x;
    unsigned y;
    unsigned z;

    /* allocate firmware request structures */
    in = mlx5_vzalloc(sz);
    out = mlx5_vzalloc(sz);
    if (in == NULL || out == NULL)
        goto free_out;

    /*
     * Get pointer to the 64-bit counter set which is located at a
     * fixed offset in the output firmware request structure:
     */
    ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set);

    MLX5_SET(ppcnt_reg, in, local_port, 1);

    /* read IEEE802_3 counter group using predefined counter layout */
    MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
    mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
    for (x = 0, y = MLX5E_PPORT_PER_PRIO_STATS_NUM;
        x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++)
        s->arg[y] = be64toh(ptr[x]);

    /* read RFC2819 counter group using predefined counter layout */
    MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
    mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
    for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++)
        s->arg[y] = be64toh(ptr[x]);

    for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM +
        MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++)
        s_debug->arg[y] = be64toh(ptr[x]);

    /* read RFC2863 counter group using predefined counter layout */
    MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
    mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
    for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++)
        s_debug->arg[y] = be64toh(ptr[x]);

    /* read physical layer stats counter group using predefined counter layout */
    MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
    mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
    for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++)
        s_debug->arg[y] = be64toh(ptr[x]);

    /* read Extended Ethernet counter group using predefined counter layout */
    MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
    mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
    for (x = 0; x != MLX5E_PPORT_ETHERNET_EXTENDED_STATS_DEBUG_NUM; x++, y++)
        s_debug->arg[y] = be64toh(ptr[x]);

    /* read Extended Statistical Group */
    if (MLX5_CAP_GEN(mdev, pcam_reg) &&
        MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) &&
        MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters)) {
        /* read Extended Statistical counter group using predefined counter layout */
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        for (x = 0; x != MLX5E_PPORT_STATISTICAL_DEBUG_NUM; x++, y++)
            s_debug->arg[y] = be64toh(ptr[x]);
    }

    /* read PCIE counters */
    mlx5e_update_pcie_counters(priv);

    /* read per-priority counters */
    MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);

    /* iterate all the priorities */
    for (y = z = 0; z != MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO; z++) {
        MLX5_SET(ppcnt_reg, in, prio_tc, z);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        /* read per priority stats counter group using predefined counter layout */
        for (x = 0; x != (MLX5E_PPORT_PER_PRIO_STATS_NUM /
            MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO); x++, y++)
            s->arg[y] = be64toh(ptr[x]);
    }

free_out:
    /* free firmware request structures */
    kvfree(in);
    kvfree(out);
}

static void
mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
{
    u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
    u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};

    if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
        return;

    MLX5_SET(query_vnic_env_in, in, opcode,
        MLX5_CMD_OP_QUERY_VNIC_ENV);
    MLX5_SET(query_vnic_env_in, in, op_mod, 0);
    MLX5_SET(query_vnic_env_in, in, other_vport, 0);

    if (mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)) != 0)
        return;

    priv->stats.vport.rx_steer_missed_packets =
        MLX5_GET64(query_vnic_env_out, out,
        vport_env.nic_receive_steering_discard);
}

/*
 * This function is called regularly to collect all statistics
 * counters from the firmware. The values can be viewed through the
 * sysctl interface. Execution is serialized using the priv's global
 * configuration lock.
 */
static void
mlx5e_update_stats_locked(struct mlx5e_priv *priv)
{
    struct mlx5_core_dev *mdev = priv->mdev;
    struct mlx5e_vport_stats *s = &priv->stats.vport;
    struct mlx5e_sq_stats *sq_stats;
#if (__FreeBSD_version < 1100000)
    struct ifnet *ifp = priv->ifp;
#endif

    u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
    u32 *out;
    int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
    u64 tso_packets = 0;
    u64 tso_bytes = 0;
    u64 tx_queue_dropped = 0;
    u64 tx_defragged = 0;
    u64 tx_offload_none = 0;
    u64 lro_packets = 0;
    u64 lro_bytes = 0;
    u64 sw_lro_queued = 0;
    u64 sw_lro_flushed = 0;
    u64 rx_csum_none = 0;
    u64 rx_wqe_err = 0;
    u64 rx_packets = 0;
    u64 rx_bytes = 0;
    u32 rx_out_of_buffer = 0;
    int error;
    int i;
    int j;

    out = mlx5_vzalloc(outlen);
    if (out == NULL)
        goto free_out;

    /* Collect the SW counters first and then the HW counters, for consistency */
    for (i = 0; i < priv->params.num_channels; i++) {
        struct mlx5e_channel *pch = priv->channel + i;
        struct mlx5e_rq *rq = &pch->rq;
        struct mlx5e_rq_stats *rq_stats = &pch->rq.stats;

        /* collect stats from LRO */
        rq_stats->sw_lro_queued = rq->lro.lro_queued;
        rq_stats->sw_lro_flushed = rq->lro.lro_flushed;
        sw_lro_queued += rq_stats->sw_lro_queued;
        sw_lro_flushed += rq_stats->sw_lro_flushed;
        lro_packets += rq_stats->lro_packets;
        lro_bytes += rq_stats->lro_bytes;
        rx_csum_none += rq_stats->csum_none;
        rx_wqe_err += rq_stats->wqe_err;
        rx_packets += rq_stats->packets;
        rx_bytes += rq_stats->bytes;

        for (j = 0; j < priv->num_tc; j++) {
            sq_stats = &pch->sq[j].stats;

            tso_packets += sq_stats->tso_packets;
            tso_bytes += sq_stats->tso_bytes;
            tx_queue_dropped += sq_stats->dropped;
            tx_queue_dropped += sq_stats->enobuf;
            tx_defragged += sq_stats->defragged;
            tx_offload_none += sq_stats->csum_offload_none;
        }
    }

    /* update counters */
    s->tso_packets = tso_packets;
    s->tso_bytes = tso_bytes;
    s->tx_queue_dropped = tx_queue_dropped;
    s->tx_defragged = tx_defragged;
    s->lro_packets = lro_packets;
    s->lro_bytes = lro_bytes;
    s->sw_lro_queued = sw_lro_queued;
    s->sw_lro_flushed = sw_lro_flushed;
    s->rx_csum_none = rx_csum_none;
    s->rx_wqe_err = rx_wqe_err;
    s->rx_packets = rx_packets;
    s->rx_bytes = rx_bytes;

    mlx5e_grp_vnic_env_update_stats(priv);

    /* HW counters */
    memset(in, 0, sizeof(in));

    MLX5_SET(query_vport_counter_in, in, opcode,
        MLX5_CMD_OP_QUERY_VPORT_COUNTER);
    MLX5_SET(query_vport_counter_in, in, op_mod, 0);
    MLX5_SET(query_vport_counter_in, in, other_vport, 0);

    memset(out, 0, outlen);

    /* get number of out-of-buffer drops first */
    if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0 &&
        mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id,
        &rx_out_of_buffer) == 0) {
        s->rx_out_of_buffer = rx_out_of_buffer;
    }

    /* get port statistics */
    if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen) == 0) {
#define MLX5_GET_CTR(out, x) \
    MLX5_GET64(query_vport_counter_out, out, x)

        s->rx_error_packets =
            MLX5_GET_CTR(out, received_errors.packets);
        s->rx_error_bytes =
            MLX5_GET_CTR(out, received_errors.octets);
        s->tx_error_packets =
            MLX5_GET_CTR(out, transmit_errors.packets);
        s->tx_error_bytes =
            MLX5_GET_CTR(out, transmit_errors.octets);

        s->rx_unicast_packets =
            MLX5_GET_CTR(out, received_eth_unicast.packets);
        s->rx_unicast_bytes =
            MLX5_GET_CTR(out, received_eth_unicast.octets);
        s->tx_unicast_packets =
            MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
        s->tx_unicast_bytes =
            MLX5_GET_CTR(out, transmitted_eth_unicast.octets);

        s->rx_multicast_packets =
            MLX5_GET_CTR(out, received_eth_multicast.packets);
        s->rx_multicast_bytes =
            MLX5_GET_CTR(out, received_eth_multicast.octets);
        s->tx_multicast_packets =
            MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
        s->tx_multicast_bytes =
            MLX5_GET_CTR(out, transmitted_eth_multicast.octets);

        s->rx_broadcast_packets =
            MLX5_GET_CTR(out, received_eth_broadcast.packets);
        s->rx_broadcast_bytes =
            MLX5_GET_CTR(out, received_eth_broadcast.octets);
        s->tx_broadcast_packets =
            MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
        s->tx_broadcast_bytes =
            MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

        s->tx_packets = s->tx_unicast_packets +
            s->tx_multicast_packets + s->tx_broadcast_packets;
        s->tx_bytes = s->tx_unicast_bytes + s->tx_multicast_bytes +
            s->tx_broadcast_bytes;

        /* Update calculated offload counters */
        s->tx_csum_offload = s->tx_packets - tx_offload_none;
        s->rx_csum_good = s->rx_packets - s->rx_csum_none;
    }

    /* Get physical port counters */
    mlx5e_update_pport_counters(priv);

    s->tx_jumbo_packets =
        priv->stats.port_stats_debug.tx_stat_p1519to2047octets +
        priv->stats.port_stats_debug.tx_stat_p2048to4095octets +
        priv->stats.port_stats_debug.tx_stat_p4096to8191octets +
        priv->stats.port_stats_debug.tx_stat_p8192to10239octets;

#if (__FreeBSD_version < 1100000)
    /* no get_counters interface in fbsd 10 */
    ifp->if_ipackets = s->rx_packets;
    ifp->if_ierrors = priv->stats.pport.in_range_len_errors +
        priv->stats.pport.out_of_range_len +
        priv->stats.pport.too_long_errors +
        priv->stats.pport.check_seq_err +
        priv->stats.pport.alignment_err;
    ifp->if_iqdrops = s->rx_out_of_buffer;
    ifp->if_opackets = s->tx_packets;
    ifp->if_oerrors = priv->stats.port_stats_debug.out_discards;
    ifp->if_snd.ifq_drops = s->tx_queue_dropped;
    ifp->if_ibytes = s->rx_bytes;
    ifp->if_obytes = s->tx_bytes;
    ifp->if_collisions =
        priv->stats.pport.collisions;
#endif

free_out:
    kvfree(out);

    /* Update diagnostics, if any */
    if (priv->params_ethtool.diag_pci_enable ||
        priv->params_ethtool.diag_general_enable) {
        error = mlx5_core_get_diagnostics_full(mdev,
            priv->params_ethtool.diag_pci_enable ? &priv->params_pci : NULL,
            priv->params_ethtool.diag_general_enable ? &priv->params_general : NULL);
        if (error != 0)
            mlx5_en_err(priv->ifp,
                "Failed reading diagnostics: %d\n", error);
    }

    /* Update FEC, if any */
    error = mlx5e_fec_update(priv);
    if (error != 0 && error != EOPNOTSUPP) {
        mlx5_en_err(priv->ifp,
            "Updating FEC failed: %d\n", error);
    }
}

static void
mlx5e_update_stats_work(struct work_struct *work)
{
    struct mlx5e_priv *priv;

    priv = container_of(work, struct mlx5e_priv, update_stats_work);
    PRIV_LOCK(priv);
    if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0 &&
        !test_bit(MLX5_INTERFACE_STATE_TEARDOWN, &priv->mdev->intf_state))
        mlx5e_update_stats_locked(priv);
    PRIV_UNLOCK(priv);
}

static void
mlx5e_update_stats(void *arg)
{
    struct mlx5e_priv *priv = arg;

    queue_work(priv->wq, &priv->update_stats_work);

    callout_reset(&priv->watchdog, hz, &mlx5e_update_stats, priv);
}

static void
mlx5e_async_event_sub(struct mlx5e_priv *priv,
    enum mlx5_dev_event event)
{
    switch (event) {
    case MLX5_DEV_EVENT_PORT_UP:
    case MLX5_DEV_EVENT_PORT_DOWN:
        queue_work(priv->wq, &priv->update_carrier_work);
        break;

    default:
        break;
    }
}

static void
mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
    enum mlx5_dev_event event, unsigned long param)
{
    struct mlx5e_priv *priv = vpriv;

    mtx_lock(&priv->async_events_mtx);
    if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
        mlx5e_async_event_sub(priv, event);
    mtx_unlock(&priv->async_events_mtx);
}

static void
mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
    set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}

static void
mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
    mtx_lock(&priv->async_events_mtx);
    clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
    mtx_unlock(&priv->async_events_mtx);
}

static const char *mlx5e_rq_stats_desc[] = {
    MLX5E_RQ_STATS(MLX5E_STATS_DESC)
};
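
/*
 * Illustrative note, not upstream code: FreeBSD bus_dma and tcp_lro
 * calls return positive errno values, while this driver keeps
 * Linux-style negative errors internally; hence the negations such as
 * "err = -bus_dmamap_create(...)" below. The receive DMA tag created
 * in mlx5e_create_rq() is sized for a single receive WQE:
 *
 *    maxsize = nsegs * MLX5E_MAX_RX_BYTES, in at most nsegs segments
 */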
static int
mlx5e_create_rq(struct mlx5e_channel *c,
    struct mlx5e_rq_param *param,
    struct mlx5e_rq *rq)
{
    struct mlx5e_priv *priv = c->priv;
    struct mlx5_core_dev *mdev = priv->mdev;
    char buffer[16];
    void *rqc = param->rqc;
    void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
    int wq_sz;
    int err;
    int i;
    u32 nsegs, wqe_sz;

    err = mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
    if (err != 0)
        goto done;

    /* Create DMA descriptor TAG */
    if ((err = -bus_dma_tag_create(
        bus_get_dma_tag(mdev->pdev->dev.bsddev),
        1,                              /* any alignment */
        0,                              /* no boundary */
        BUS_SPACE_MAXADDR,              /* lowaddr */
        BUS_SPACE_MAXADDR,              /* highaddr */
        NULL, NULL,                     /* filter, filterarg */
        nsegs * MLX5E_MAX_RX_BYTES,     /* maxsize */
        nsegs,                          /* nsegments */
        nsegs * MLX5E_MAX_RX_BYTES,     /* maxsegsize */
        0,                              /* flags */
        NULL, NULL,                     /* lockfunc, lockfuncarg */
        &rq->dma_tag)))
        goto done;

    err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
        &rq->wq_ctrl);
    if (err)
        goto err_free_dma_tag;

    rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

    err = mlx5e_get_wqe_sz(priv, &rq->wqe_sz, &rq->nsegs);
    if (err != 0)
        goto err_rq_wq_destroy;

    wq_sz = mlx5_wq_ll_get_size(&rq->wq);

    err = -tcp_lro_init_args(&rq->lro, c->ifp, TCP_LRO_ENTRIES, wq_sz);
    if (err)
        goto err_rq_wq_destroy;

    rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
    for (i = 0; i != wq_sz; i++) {
        struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
        int j;

        err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map);
        if (err != 0) {
            while (i--)
                bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
            goto err_rq_mbuf_free;
        }

        /* set value for constant fields */
        for (j = 0; j < rq->nsegs; j++)
            wqe->data[j].lkey = cpu_to_be32(priv->mr.key);
    }

    INIT_WORK(&rq->dim.work, mlx5e_dim_work);
    if (priv->params.rx_cq_moderation_mode < 2) {
        rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
    } else {
        void *cqc = container_of(param,
            struct mlx5e_channel_param, rq)->rx_cq.cqc;

        switch (MLX5_GET(cqc, cqc, cq_period_mode)) {
        case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
            rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
            break;
        case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
            rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
            break;
        default:
            rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
            break;
        }
    }

    rq->ifp = c->ifp;
    rq->channel = c;
    rq->ix = c->ix;

    snprintf(buffer, sizeof(buffer), "rxstat%d", c->ix);
    mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
        buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM,
        rq->stats.arg);
    return (0);

err_rq_mbuf_free:
    free(rq->mbuf, M_MLX5EN);
    tcp_lro_free(&rq->lro);
err_rq_wq_destroy:
    mlx5_wq_destroy(&rq->wq_ctrl);
err_free_dma_tag:
    bus_dma_tag_destroy(rq->dma_tag);
done:
    return (err);
}

static void
mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
    int wq_sz;
    int i;

    /* destroy all sysctl nodes */
    sysctl_ctx_free(&rq->stats.ctx);

    /* free leftover LRO packets, if any */
    tcp_lro_free(&rq->lro);

    wq_sz = mlx5_wq_ll_get_size(&rq->wq);
    for (i = 0; i != wq_sz; i++) {
        if (rq->mbuf[i].mbuf != NULL) {
            bus_dmamap_unload(rq->dma_tag, rq->mbuf[i].dma_map);
            m_freem(rq->mbuf[i].mbuf);
        }
        bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
    }
    free(rq->mbuf, M_MLX5EN);
    mlx5_wq_destroy(&rq->wq_ctrl);
    bus_dma_tag_destroy(rq->dma_tag);
}

static int
mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
    struct mlx5e_channel *c = rq->channel;
    struct mlx5e_priv *priv = c->priv;
    struct mlx5_core_dev *mdev = priv->mdev;

    void *in;
    void *rqc;
    void *wq;
    int inlen;
    int err;

    inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
        sizeof(u64) * rq->wq_ctrl.buf.npages;
    in = mlx5_vzalloc(inlen);
    if (in == NULL)
        return (-ENOMEM);

    rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
    wq = MLX5_ADDR_OF(rqc, rqc, wq);

    memcpy(rqc, param->rqc, sizeof(param->rqc));

    MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
    MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
    MLX5_SET(rqc, rqc, flush_in_error_en, 1);
    if (priv->counter_set_id >= 0)
        MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id);
    MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
        PAGE_SHIFT);
    MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

    mlx5_fill_page_array(&rq->wq_ctrl.buf,
        (__be64 *) MLX5_ADDR_OF(wq, wq, pas));

    err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

    kvfree(in);

    return (err);
}

static int
mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
{
    struct mlx5e_channel *c = rq->channel;
    struct mlx5e_priv *priv = c->priv;
    struct mlx5_core_dev *mdev = priv->mdev;

    void *in;
    void *rqc;
    int inlen;
    int err;

    inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
    in = mlx5_vzalloc(inlen);
    if (in == NULL)
        return (-ENOMEM);

    rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

    MLX5_SET(modify_rq_in, in, rqn, rq->rqn);
    MLX5_SET(modify_rq_in, in, rq_state, curr_state);
    MLX5_SET(rqc, rqc, state, next_state);

    err = mlx5_core_modify_rq(mdev, in, inlen);

    kvfree(in);

    return (err);
}

static void
mlx5e_disable_rq(struct mlx5e_rq *rq)
{
    struct mlx5e_channel *c = rq->channel;
    struct mlx5e_priv *priv = c->priv;
    struct mlx5_core_dev *mdev = priv->mdev;

    mlx5_core_destroy_rq(mdev, rq->rqn);
}

static int
mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
    struct mlx5e_channel *c = rq->channel;
    struct mlx5e_priv *priv = c->priv;
    struct mlx5_wq_ll *wq = &rq->wq;
    int i;

    for (i = 0; i < 1000; i++) {
        if (wq->cur_sz >= priv->params.min_rx_wqes)
            return (0);

        msleep(4);
    }
    return (-ETIMEDOUT);
}

static int
mlx5e_open_rq(struct mlx5e_channel *c,
    struct mlx5e_rq_param *param,
    struct mlx5e_rq *rq)
{
    int err;

    err = mlx5e_create_rq(c, param, rq);
    if (err)
        return (err);

    err = mlx5e_enable_rq(rq, param);
    if (err)
        goto err_destroy_rq;

    err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
    if (err)
        goto err_disable_rq;

    c->rq.enabled = 1;

    return (0);

err_disable_rq:
    mlx5e_disable_rq(rq);
err_destroy_rq:
    mlx5e_destroy_rq(rq);

    return (err);
}

static void
mlx5e_close_rq(struct mlx5e_rq *rq)
{
    mtx_lock(&rq->mtx);
    rq->enabled = 0;
    callout_stop(&rq->watchdog);
    mtx_unlock(&rq->mtx);

    mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
}

static void
mlx5e_close_rq_wait(struct mlx5e_rq *rq)
{

    mlx5e_disable_rq(rq);
    mlx5e_close_cq(&rq->cq);
    cancel_work_sync(&rq->dim.work);
    mlx5e_destroy_rq(rq);
}

void
mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
    int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
    int x;

    for (x = 0; x != wq_sz; x++) {
        if (sq->mbuf[x].mbuf != NULL) {
            bus_dmamap_unload(sq->dma_tag, sq->mbuf[x].dma_map);
            m_freem(sq->mbuf[x].mbuf);
        }
        bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
    }
    free(sq->mbuf, M_MLX5EN);
}
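
/*
 * Illustrative note, not upstream code: hardware queues follow a small
 * state machine driven via mlx5e_modify_rq()/mlx5e_modify_sq(): they
 * are created in RST, moved to RDY for normal operation, and moved to
 * ERR on teardown so outstanding requests complete with an error
 * instead of hanging:
 *
 *    mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
 *    ...
 *    mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
 */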
int
mlx5e_alloc_sq_db(struct mlx5e_sq *sq)
{
    int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
    int err;
    int x;

    sq->mbuf = malloc(wq_sz * sizeof(sq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);

    /* Create DMA descriptor MAPs */
    for (x = 0; x != wq_sz; x++) {
        err = -bus_dmamap_create(sq->dma_tag, 0, &sq->mbuf[x].dma_map);
        if (err != 0) {
            while (x--)
                bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
            free(sq->mbuf, M_MLX5EN);
            return (err);
        }
    }
    return (0);
}

static const char *mlx5e_sq_stats_desc[] = {
    MLX5E_SQ_STATS(MLX5E_STATS_DESC)
};

void
mlx5e_update_sq_inline(struct mlx5e_sq *sq)
{
    sq->max_inline = sq->priv->params.tx_max_inline;
    sq->min_inline_mode = sq->priv->params.tx_min_inline_mode;

    /*
     * Check if trust state is DSCP, or if inline mode is NONE,
     * which indicates CX-5 or newer hardware.
     */
    if (sq->priv->params_ethtool.trust_state != MLX5_QPTS_TRUST_PCP ||
        sq->min_inline_mode == MLX5_INLINE_MODE_NONE) {
        if (MLX5_CAP_ETH(sq->priv->mdev, wqe_vlan_insert))
            sq->min_insert_caps = MLX5E_INSERT_VLAN | MLX5E_INSERT_NON_VLAN;
        else
            sq->min_insert_caps = MLX5E_INSERT_NON_VLAN;
    } else {
        sq->min_insert_caps = 0;
    }
}

static void
mlx5e_refresh_sq_inline_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
{
    int i;

    for (i = 0; i != priv->num_tc; i++) {
        mtx_lock(&c->sq[i].lock);
        mlx5e_update_sq_inline(&c->sq[i]);
        mtx_unlock(&c->sq[i].lock);
    }
}

void
mlx5e_refresh_sq_inline(struct mlx5e_priv *priv)
{
    int i;

    /* check if channels are closed */
    if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
        return;

    for (i = 0; i < priv->params.num_channels; i++)
        mlx5e_refresh_sq_inline_sub(priv, &priv->channel[i]);
}

static int
mlx5e_create_sq(struct mlx5e_channel *c,
    int tc,
    struct mlx5e_sq_param *param,
    struct mlx5e_sq *sq)
{
    struct mlx5e_priv *priv = c->priv;
    struct mlx5_core_dev *mdev = priv->mdev;
    char buffer[16];
    void *sqc = param->sqc;
    void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
    int err;

    /* Create DMA descriptor TAG */
    if ((err = -bus_dma_tag_create(
        bus_get_dma_tag(mdev->pdev->dev.bsddev),
        1,                              /* any alignment */
        0,                              /* no boundary */
        BUS_SPACE_MAXADDR,              /* lowaddr */
        BUS_SPACE_MAXADDR,              /* highaddr */
        NULL, NULL,                     /* filter, filterarg */
        MLX5E_MAX_TX_PAYLOAD_SIZE,      /* maxsize */
        MLX5E_MAX_TX_MBUF_FRAGS,        /* nsegments */
        MLX5E_MAX_TX_MBUF_SIZE,         /* maxsegsize */
        0,                              /* flags */
        NULL, NULL,                     /* lockfunc, lockfuncarg */
        &sq->dma_tag)))
        goto done;

    err = mlx5_alloc_map_uar(mdev, &sq->uar);
    if (err)
        goto err_free_dma_tag;

    err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
        &sq->wq_ctrl);
    if (err)
        goto err_unmap_free_uar;

    sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
    sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

    err = mlx5e_alloc_sq_db(sq);
    if (err)
        goto err_sq_wq_destroy;

    sq->mkey_be = cpu_to_be32(priv->mr.key);
    sq->ifp = priv->ifp;
    sq->priv = priv;
    sq->tc = tc;

    mlx5e_update_sq_inline(sq);

    snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc);
    mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
        buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM,
        sq->stats.arg);

    return (0);

err_sq_wq_destroy:
    mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
    mlx5_unmap_free_uar(mdev, &sq->uar);

err_free_dma_tag:
    bus_dma_tag_destroy(sq->dma_tag);
done:
    return (err);
}

static void
mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
    /* destroy all sysctl nodes */
    sysctl_ctx_free(&sq->stats.ctx);

    mlx5e_free_sq_db(sq);
    mlx5_wq_destroy(&sq->wq_ctrl);
    mlx5_unmap_free_uar(sq->priv->mdev, &sq->uar);
    bus_dma_tag_destroy(sq->dma_tag);
}

int
mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param,
    int tis_num)
{
    void *in;
    void *sqc;
    void *wq;
    int inlen;
    int err;

    inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
        sizeof(u64) * sq->wq_ctrl.buf.npages;
    in = mlx5_vzalloc(inlen);
    if (in == NULL)
        return (-ENOMEM);

    sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
    wq = MLX5_ADDR_OF(sqc, sqc, wq);

    memcpy(sqc, param->sqc, sizeof(param->sqc));

    MLX5_SET(sqc, sqc, tis_num_0, tis_num);
    MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
    MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
    MLX5_SET(sqc, sqc, tis_lst_sz, 1);
    MLX5_SET(sqc, sqc, flush_in_error_en, 1);

    MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
    MLX5_SET(wq, wq, uar_page, sq->uar.index);
    MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
        PAGE_SHIFT);
    MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

    mlx5_fill_page_array(&sq->wq_ctrl.buf,
        (__be64 *) MLX5_ADDR_OF(wq, wq, pas));

    err = mlx5_core_create_sq(sq->priv->mdev, in, inlen, &sq->sqn);

    kvfree(in);

    return (err);
}

int
mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
{
    void *in;
    void *sqc;
    int inlen;
    int err;

    inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
    in = mlx5_vzalloc(inlen);
    if (in == NULL)
        return (-ENOMEM);

    sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

    MLX5_SET(modify_sq_in, in, sqn, sq->sqn);
    MLX5_SET(modify_sq_in, in, sq_state, curr_state);
    MLX5_SET(sqc, sqc, state, next_state);

    err = mlx5_core_modify_sq(sq->priv->mdev, in, inlen);

    kvfree(in);

    return (err);
}

void
mlx5e_disable_sq(struct mlx5e_sq *sq)
{

    mlx5_core_destroy_sq(sq->priv->mdev, sq->sqn);
}

static int
mlx5e_open_sq(struct mlx5e_channel *c,
    int tc,
    struct mlx5e_sq_param *param,
    struct mlx5e_sq *sq)
{
    int err;

    sq->cev_factor = c->priv->params_ethtool.tx_completion_fact;

    /* ensure the TX completion event factor is not zero */
    if (sq->cev_factor == 0)
        sq->cev_factor = 1;

    err = mlx5e_create_sq(c, tc, param, sq);
    if (err)
        return (err);

    err = mlx5e_enable_sq(sq, param, c->priv->tisn[tc]);
    if (err)
        goto err_destroy_sq;

    err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
    if (err)
        goto err_disable_sq;

    WRITE_ONCE(sq->running, 1);

    return (0);

err_disable_sq:
    mlx5e_disable_sq(sq);
err_destroy_sq:
    mlx5e_destroy_sq(sq);

    return (err);
}
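
/*
 * Illustrative note, not upstream code: cev_factor is the TX
 * completion event moderation factor; roughly one completion event is
 * requested per cev_factor transmitted WQEs. The helper below pads the
 * current interval with NOP WQEs so the pending completion event is
 * eventually generated, e.g. with cev_factor = 16 and 5 WQEs sent
 * since the last event, up to 11 NOPs may be queued before the
 * doorbell is written.
 */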
static void
mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep)
{
    /* fill up remainder with NOPs */
    while (sq->cev_counter != 0) {
        while (!mlx5e_sq_has_room_for(sq, 1)) {
            if (can_sleep != 0) {
                mtx_unlock(&sq->lock);
                msleep(4);
                mtx_lock(&sq->lock);
            } else {
                goto done;
            }
        }
        /* send a single NOP */
        mlx5e_send_nop(sq, 1);
        atomic_thread_fence_rel();
    }
done:
    /* Check if we need to write the doorbell */
    if (likely(sq->doorbell.d64 != 0)) {
        mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
        sq->doorbell.d64 = 0;
    }
}

void
mlx5e_sq_cev_timeout(void *arg)
{
    struct mlx5e_sq *sq = arg;

    mtx_assert(&sq->lock, MA_OWNED);

    /* check next state */
    switch (sq->cev_next_state) {
    case MLX5E_CEV_STATE_SEND_NOPS:
        /* fill TX ring with NOPs, if any */
        mlx5e_sq_send_nops_locked(sq, 0);

        /* check if completed */
        if (sq->cev_counter == 0) {
            sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
            return;
        }
        break;
    default:
        /* send NOPs on next timeout */
        sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS;
        break;
    }

    /* restart timer */
    callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq);
}

void
mlx5e_drain_sq(struct mlx5e_sq *sq)
{
    int error;
    struct mlx5_core_dev *mdev = sq->priv->mdev;

    /*
     * Check if already stopped.
     *
     * NOTE: Serialization of this function is managed by the
     * caller ensuring the priv's state lock is locked or in case
     * of rate limit support, a single thread manages drain and
     * resume of SQs. The "running" variable can therefore safely
     * be read without any locks.
     */
    if (READ_ONCE(sq->running) == 0)
        return;

    /* don't put more packets into the SQ */
    WRITE_ONCE(sq->running, 0);

    /* serialize access to DMA rings */
    mtx_lock(&sq->lock);

    /* teardown event factor timer, if any */
    sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
    callout_stop(&sq->cev_callout);

    /* send dummy NOPs in order to flush the transmit ring */
    mlx5e_sq_send_nops_locked(sq, 1);
    mtx_unlock(&sq->lock);

    /* wait till SQ is empty or link is down */
    mtx_lock(&sq->lock);
    while (sq->cc != sq->pc &&
        (sq->priv->media_status_last & IFM_ACTIVE) != 0 &&
        mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
        mtx_unlock(&sq->lock);
        msleep(1);
        sq->cq.mcq.comp(&sq->cq.mcq);
        mtx_lock(&sq->lock);
    }
    mtx_unlock(&sq->lock);

    /* error out remaining requests */
    error = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
    if (error != 0) {
        mlx5_en_err(sq->ifp,
            "mlx5e_modify_sq() from RDY to ERR failed: %d\n", error);
    }

    /* wait till SQ is empty */
    mtx_lock(&sq->lock);
    while (sq->cc != sq->pc &&
        mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
        mtx_unlock(&sq->lock);
        msleep(1);
        sq->cq.mcq.comp(&sq->cq.mcq);
        mtx_lock(&sq->lock);
    }
    mtx_unlock(&sq->lock);
}

static void
mlx5e_close_sq_wait(struct mlx5e_sq *sq)
{

    mlx5e_drain_sq(sq);
    mlx5e_disable_sq(sq);
    mlx5e_destroy_sq(sq);
}
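
/*
 * Illustrative note, not upstream code: mlx5e_create_cq() below writes
 * 0xf1 into the op_own byte of every CQE. The upper nibble is the
 * "invalid" opcode and the lowest bit is the ownership bit, so all
 * entries start out hardware-owned and not yet valid, and the driver
 * will not process stale completions on a freshly created CQ.
 */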
static int
mlx5e_create_cq(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param,
    struct mlx5e_cq *cq,
    mlx5e_cq_comp_t *comp,
    int eq_ix)
{
    struct mlx5_core_dev *mdev = priv->mdev;
    struct mlx5_core_cq *mcq = &cq->mcq;
    int eqn_not_used;
    int irqn;
    int err;
    u32 i;

    param->wq.buf_numa_node = 0;
    param->wq.db_numa_node = 0;

    err = mlx5_vector2eqn(mdev, eq_ix, &eqn_not_used, &irqn);
    if (err)
        return (err);

    err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
        &cq->wq_ctrl);
    if (err)
        return (err);

    mcq->cqe_sz = 64;
    mcq->set_ci_db = cq->wq_ctrl.db.db;
    mcq->arm_db = cq->wq_ctrl.db.db + 1;
    *mcq->set_ci_db = 0;
    *mcq->arm_db = 0;
    mcq->vector = eq_ix;
    mcq->comp = comp;
    mcq->event = mlx5e_cq_error_event;
    mcq->irqn = irqn;
    mcq->uar = &priv->cq_uar;

    for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
        struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

        cqe->op_own = 0xf1;
    }

    cq->priv = priv;

    return (0);
}

static void
mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
    mlx5_wq_destroy(&cq->wq_ctrl);
}

static int
mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param, int eq_ix)
{
    struct mlx5_core_cq *mcq = &cq->mcq;
    void *in;
    void *cqc;
    int inlen;
    int irqn_not_used;
    int eqn;
    int err;

    inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
        sizeof(u64) * cq->wq_ctrl.buf.npages;
    in = mlx5_vzalloc(inlen);
    if (in == NULL)
        return (-ENOMEM);

    cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

    memcpy(cqc, param->cqc, sizeof(param->cqc));

    mlx5_fill_page_array(&cq->wq_ctrl.buf,
        (__be64 *) MLX5_ADDR_OF(create_cq_in, in, pas));

    mlx5_vector2eqn(cq->priv->mdev, eq_ix, &eqn, &irqn_not_used);

    MLX5_SET(cqc, cqc, c_eqn, eqn);
    MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
    MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
        PAGE_SHIFT);
    MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

    err = mlx5_core_create_cq(cq->priv->mdev, mcq, in, inlen);

    kvfree(in);

    if (err)
        return (err);

    mlx5e_cq_arm(cq, MLX5_GET_DOORBELL_LOCK(&cq->priv->doorbell_lock));

    return (0);
}

static void
mlx5e_disable_cq(struct mlx5e_cq *cq)
{

    mlx5_core_destroy_cq(cq->priv->mdev, &cq->mcq);
}

int
mlx5e_open_cq(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param,
    struct mlx5e_cq *cq,
    mlx5e_cq_comp_t *comp,
    int eq_ix)
{
    int err;

    err = mlx5e_create_cq(priv, param, cq, comp, eq_ix);
    if (err)
        return (err);

    err = mlx5e_enable_cq(cq, param, eq_ix);
    if (err)
        goto err_destroy_cq;

    return (0);

err_destroy_cq:
    mlx5e_destroy_cq(cq);

    return (err);
}

void
mlx5e_close_cq(struct mlx5e_cq *cq)
{
    mlx5e_disable_cq(cq);
    mlx5e_destroy_cq(cq);
}

static int
mlx5e_open_tx_cqs(struct mlx5e_channel *c,
    struct mlx5e_channel_param *cparam)
{
    int err;
    int tc;

    for (tc = 0; tc < c->priv->num_tc; tc++) {
        /* open completion queue */
        err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[tc].cq,
            &mlx5e_tx_cq_comp, c->ix);
        if (err)
            goto err_close_tx_cqs;
    }
    return (0);

err_close_tx_cqs:
    for (tc--; tc >= 0; tc--)
        mlx5e_close_cq(&c->sq[tc].cq);

    return (err);
}

static void
mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
    int tc;

    for (tc = 0; tc < c->priv->num_tc; tc++)
        mlx5e_close_cq(&c->sq[tc].cq);
}

static int
mlx5e_open_sqs(struct mlx5e_channel *c,
    struct mlx5e_channel_param *cparam)
{
    int err;
    int tc;

    for (tc = 0; tc < c->priv->num_tc; tc++) {
        err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
        if (err)
            goto err_close_sqs;
    }

    return (0);

err_close_sqs:
    for (tc--; tc >= 0; tc--)
        mlx5e_close_sq_wait(&c->sq[tc]);

    return (err);
}

static void
mlx5e_close_sqs_wait(struct mlx5e_channel *c)
{
    int tc;

    for (tc = 0; tc < c->priv->num_tc; tc++)
        mlx5e_close_sq_wait(&c->sq[tc]);
}

static void
mlx5e_chan_static_init(struct mlx5e_priv *priv, struct mlx5e_channel *c, int ix)
{
    int tc;

    /* setup priv and channel number */
    c->priv = priv;
    c->ix = ix;
    c->ifp = priv->ifp;

    mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF);

    callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0);

    for (tc = 0; tc != MLX5E_MAX_TX_NUM_TC; tc++) {
        struct mlx5e_sq *sq = c->sq + tc;

        mtx_init(&sq->lock, "mlx5tx",
            MTX_NETWORK_LOCK " TX", MTX_DEF);
        mtx_init(&sq->comp_lock, "mlx5comp",
            MTX_NETWORK_LOCK " TX", MTX_DEF);

        callout_init_mtx(&sq->cev_callout, &sq->lock, 0);
    }
}

static void
mlx5e_chan_static_destroy(struct mlx5e_channel *c)
{
    int tc;

    callout_drain(&c->rq.watchdog);

    mtx_destroy(&c->rq.mtx);

    for (tc = 0; tc != MLX5E_MAX_TX_NUM_TC; tc++) {
        callout_drain(&c->sq[tc].cev_callout);
        mtx_destroy(&c->sq[tc].lock);
        mtx_destroy(&c->sq[tc].comp_lock);
    }
}

static int
mlx5e_open_channel(struct mlx5e_priv *priv,
    struct mlx5e_channel_param *cparam,
    struct mlx5e_channel *c)
{
    int i, err;

    /* zero non-persistent data */
    MLX5E_ZERO(&c->rq, mlx5e_rq_zero_start);
    for (i = 0; i != priv->num_tc; i++)
        MLX5E_ZERO(&c->sq[i], mlx5e_sq_zero_start);

    /* open transmit completion queues */
    err = mlx5e_open_tx_cqs(c, cparam);
    if (err)
        goto err_free;

    /* open receive completion queue */
    err = mlx5e_open_cq(c->priv, &cparam->rx_cq, &c->rq.cq,
        &mlx5e_rx_cq_comp, c->ix);
    if (err)
        goto err_close_tx_cqs;

    err = mlx5e_open_sqs(c, cparam);
    if (err)
        goto err_close_rx_cq;

    err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
    if (err)
        goto err_close_sqs;

    /* poll receive queue initially */
    c->rq.cq.mcq.comp(&c->rq.cq.mcq);

    return (0);

err_close_sqs:
    mlx5e_close_sqs_wait(c);

err_close_rx_cq:
    mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
    mlx5e_close_tx_cqs(c);

err_free:
    return (err);
}

static void
mlx5e_close_channel(struct mlx5e_channel *c)
{
    mlx5e_close_rq(&c->rq);
}

static void
mlx5e_close_channel_wait(struct mlx5e_channel *c)
{
    mlx5e_close_rq_wait(&c->rq);
    mlx5e_close_sqs_wait(c);
    mlx5e_close_tx_cqs(c);
}
2127 */ 2128 for (n = howmany(r, MLX5E_MAX_RX_BYTES); !powerof2(n + 1); n++) 2129 ; 2130 2131 if (n > MLX5E_MAX_BUSDMA_RX_SEGS) 2132 return (-ENOMEM); 2133 2134 *wqe_sz = r; 2135 *nsegs = n; 2136 return (0); 2137} 2138 2139static void 2140mlx5e_build_rq_param(struct mlx5e_priv *priv, 2141 struct mlx5e_rq_param *param) 2142{ 2143 void *rqc = param->rqc; 2144 void *wq = MLX5_ADDR_OF(rqc, rqc, wq); 2145 u32 wqe_sz, nsegs; 2146 2147 mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs); 2148 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); 2149 MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); 2150 MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe) + 2151 nsegs * sizeof(struct mlx5_wqe_data_seg))); 2152 MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); 2153 MLX5_SET(wq, wq, pd, priv->pdn); 2154 2155 param->wq.buf_numa_node = 0; 2156 param->wq.db_numa_node = 0; 2157 param->wq.linear = 1; 2158} 2159 2160static void 2161mlx5e_build_sq_param(struct mlx5e_priv *priv, 2162 struct mlx5e_sq_param *param) 2163{ 2164 void *sqc = param->sqc; 2165 void *wq = MLX5_ADDR_OF(sqc, sqc, wq); 2166 2167 MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size); 2168 MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); 2169 MLX5_SET(wq, wq, pd, priv->pdn); 2170 2171 param->wq.buf_numa_node = 0; 2172 param->wq.db_numa_node = 0; 2173 param->wq.linear = 1; 2174} 2175 2176static void 2177mlx5e_build_common_cq_param(struct mlx5e_priv *priv, 2178 struct mlx5e_cq_param *param) 2179{ 2180 void *cqc = param->cqc; 2181 2182 MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index); 2183} 2184 2185static void 2186mlx5e_get_default_profile(struct mlx5e_priv *priv, int mode, struct net_dim_cq_moder *ptr) 2187{ 2188 2189 *ptr = net_dim_get_profile(mode, MLX5E_DIM_DEFAULT_PROFILE); 2190 2191 /* apply LRO restrictions */ 2192 if (priv->params.hw_lro_en && 2193 ptr->pkts > MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO) { 2194 ptr->pkts = MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO; 2195 } 2196} 2197 2198static void 2199mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, 2200 struct mlx5e_cq_param *param) 2201{ 2202 struct net_dim_cq_moder curr; 2203 void *cqc = param->cqc; 2204 2205 /* 2206 * We use MLX5_CQE_FORMAT_HASH because the RX hash mini CQE 2207 * format is more beneficial for the FreeBSD use case. 2208 * 2209 * Adding support for MLX5_CQE_FORMAT_CSUM will require changes 2210 * in mlx5e_decompress_cqe. 
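 *
 * Background, summarized from general mlx5 CQE compression
 * behaviour: the hardware replaces a run of CQEs by a "title" CQE
 * followed by an array of mini CQEs. Each mini CQE carries only the
 * byte count and the one field selected by mini_cqe_res_format
 * below; all other fields are replicated from the title CQE when
 * mlx5e_decompress_cqe() expands the block.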
2211 */ 2212 if (priv->params.cqe_zipping_en) { 2213 MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_HASH); 2214 MLX5_SET(cqc, cqc, cqe_compression_en, 1); 2215 } 2216 2217 MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size); 2218 2219 switch (priv->params.rx_cq_moderation_mode) { 2220 case 0: 2221 MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec); 2222 MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts); 2223 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2224 break; 2225 case 1: 2226 MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec); 2227 MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts); 2228 if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) 2229 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 2230 else 2231 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2232 break; 2233 case 2: 2234 mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE, &curr); 2235 MLX5_SET(cqc, cqc, cq_period, curr.usec); 2236 MLX5_SET(cqc, cqc, cq_max_count, curr.pkts); 2237 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2238 break; 2239 case 3: 2240 mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE, &curr); 2241 MLX5_SET(cqc, cqc, cq_period, curr.usec); 2242 MLX5_SET(cqc, cqc, cq_max_count, curr.pkts); 2243 if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) 2244 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 2245 else 2246 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2247 break; 2248 default: 2249 break; 2250 } 2251 2252 mlx5e_dim_build_cq_param(priv, param); 2253 2254 mlx5e_build_common_cq_param(priv, param); 2255} 2256 2257static void 2258mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, 2259 struct mlx5e_cq_param *param) 2260{ 2261 void *cqc = param->cqc; 2262 2263 MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size); 2264 MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec); 2265 MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts); 2266 2267 switch (priv->params.tx_cq_moderation_mode) { 2268 case 0: 2269 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2270 break; 2271 default: 2272 if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) 2273 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 2274 else 2275 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2276 break; 2277 } 2278 2279 mlx5e_build_common_cq_param(priv, param); 2280} 2281 2282static void 2283mlx5e_build_channel_param(struct mlx5e_priv *priv, 2284 struct mlx5e_channel_param *cparam) 2285{ 2286 memset(cparam, 0, sizeof(*cparam)); 2287 2288 mlx5e_build_rq_param(priv, &cparam->rq); 2289 mlx5e_build_sq_param(priv, &cparam->sq); 2290 mlx5e_build_rx_cq_param(priv, &cparam->rx_cq); 2291 mlx5e_build_tx_cq_param(priv, &cparam->tx_cq); 2292} 2293 2294static int 2295mlx5e_open_channels(struct mlx5e_priv *priv) 2296{ 2297 struct mlx5e_channel_param *cparam; 2298 int err; 2299 int i; 2300 int j; 2301 2302 cparam = malloc(sizeof(*cparam), M_MLX5EN, M_WAITOK); 2303 2304 mlx5e_build_channel_param(priv, cparam); 2305 for (i = 0; i < priv->params.num_channels; i++) { 2306 err = mlx5e_open_channel(priv, cparam, &priv->channel[i]); 2307 if (err) 2308 goto err_close_channels; 2309 } 2310 2311 for (j = 0; j < priv->params.num_channels; j++) { 2312 err = 
mlx5e_wait_for_min_rx_wqes(&priv->channel[j].rq); 2313 if (err) 2314 goto err_close_channels; 2315 } 2316 free(cparam, M_MLX5EN); 2317 return (0); 2318 2319err_close_channels: 2320 while (i--) { 2321 mlx5e_close_channel(&priv->channel[i]); 2322 mlx5e_close_channel_wait(&priv->channel[i]); 2323 } 2324 free(cparam, M_MLX5EN); 2325 return (err); 2326} 2327 2328static void 2329mlx5e_close_channels(struct mlx5e_priv *priv) 2330{ 2331 int i; 2332 2333 for (i = 0; i < priv->params.num_channels; i++) 2334 mlx5e_close_channel(&priv->channel[i]); 2335 for (i = 0; i < priv->params.num_channels; i++) 2336 mlx5e_close_channel_wait(&priv->channel[i]); 2337} 2338 2339static int 2340mlx5e_refresh_sq_params(struct mlx5e_priv *priv, struct mlx5e_sq *sq) 2341{ 2342 2343 if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) { 2344 uint8_t cq_mode; 2345 2346 switch (priv->params.tx_cq_moderation_mode) { 2347 case 0: 2348 case 2: 2349 cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; 2350 break; 2351 default: 2352 cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE; 2353 break; 2354 } 2355 2356 return (mlx5_core_modify_cq_moderation_mode(priv->mdev, &sq->cq.mcq, 2357 priv->params.tx_cq_moderation_usec, 2358 priv->params.tx_cq_moderation_pkts, 2359 cq_mode)); 2360 } 2361 2362 return (mlx5_core_modify_cq_moderation(priv->mdev, &sq->cq.mcq, 2363 priv->params.tx_cq_moderation_usec, 2364 priv->params.tx_cq_moderation_pkts)); 2365} 2366 2367static int 2368mlx5e_refresh_rq_params(struct mlx5e_priv *priv, struct mlx5e_rq *rq) 2369{ 2370 2371 if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) { 2372 uint8_t cq_mode; 2373 uint8_t dim_mode; 2374 int retval; 2375 2376 switch (priv->params.rx_cq_moderation_mode) { 2377 case 0: 2378 case 2: 2379 cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; 2380 dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; 2381 break; 2382 default: 2383 cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE; 2384 dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE; 2385 break; 2386 } 2387 2388 /* tear down dynamic interrupt moderation */ 2389 mtx_lock(&rq->mtx); 2390 rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED; 2391 mtx_unlock(&rq->mtx); 2392 2393 /* wait for dynamic interrupt moderation work task, if any */ 2394 cancel_work_sync(&rq->dim.work); 2395 2396 if (priv->params.rx_cq_moderation_mode >= 2) { 2397 struct net_dim_cq_moder curr; 2398 2399 mlx5e_get_default_profile(priv, dim_mode, &curr); 2400 2401 retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq, 2402 curr.usec, curr.pkts, cq_mode); 2403 2404 /* set dynamic interrupt moderation mode and zero defaults */ 2405 mtx_lock(&rq->mtx); 2406 rq->dim.mode = dim_mode; 2407 rq->dim.state = 0; 2408 rq->dim.profile_ix = MLX5E_DIM_DEFAULT_PROFILE; 2409 mtx_unlock(&rq->mtx); 2410 } else { 2411 retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq, 2412 priv->params.rx_cq_moderation_usec, 2413 priv->params.rx_cq_moderation_pkts, 2414 cq_mode); 2415 } 2416 return (retval); 2417 } 2418 2419 return (mlx5_core_modify_cq_moderation(priv->mdev, &rq->cq.mcq, 2420 priv->params.rx_cq_moderation_usec, 2421 priv->params.rx_cq_moderation_pkts)); 2422} 2423 2424static int 2425mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c) 2426{ 2427 int err; 2428 int i; 2429 2430 err = mlx5e_refresh_rq_params(priv, &c->rq); 2431 if (err) 2432 goto done; 2433 2434 for (i = 0; i != priv->num_tc; i++) { 2435 err = mlx5e_refresh_sq_params(priv, &c->sq[i]); 2436 if (err) 2437 goto done; 2438 } 2439done: 2440 return (err); 2441} 2442 2443int 
2444mlx5e_refresh_channel_params(struct mlx5e_priv *priv) 2445{ 2446 int i; 2447 2448 /* check if channels are closed */ 2449 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 2450 return (EINVAL); 2451 2452 for (i = 0; i < priv->params.num_channels; i++) { 2453 int err; 2454 2455 err = mlx5e_refresh_channel_params_sub(priv, &priv->channel[i]); 2456 if (err) 2457 return (err); 2458 } 2459 return (0); 2460} 2461 2462static int 2463mlx5e_open_tis(struct mlx5e_priv *priv, int tc) 2464{ 2465 struct mlx5_core_dev *mdev = priv->mdev; 2466 u32 in[MLX5_ST_SZ_DW(create_tis_in)]; 2467 void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); 2468 2469 memset(in, 0, sizeof(in)); 2470 2471 MLX5_SET(tisc, tisc, prio, tc); 2472 MLX5_SET(tisc, tisc, transport_domain, priv->tdn); 2473 2474 return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc])); 2475} 2476 2477static void 2478mlx5e_close_tis(struct mlx5e_priv *priv, int tc) 2479{ 2480 mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]); 2481} 2482 2483static int 2484mlx5e_open_tises(struct mlx5e_priv *priv) 2485{ 2486 int num_tc = priv->num_tc; 2487 int err; 2488 int tc; 2489 2490 for (tc = 0; tc < num_tc; tc++) { 2491 err = mlx5e_open_tis(priv, tc); 2492 if (err) 2493 goto err_close_tises; 2494 } 2495 2496 return (0); 2497 2498err_close_tises: 2499 for (tc--; tc >= 0; tc--) 2500 mlx5e_close_tis(priv, tc); 2501 2502 return (err); 2503} 2504 2505static void 2506mlx5e_close_tises(struct mlx5e_priv *priv) 2507{ 2508 int num_tc = priv->num_tc; 2509 int tc; 2510 2511 for (tc = 0; tc < num_tc; tc++) 2512 mlx5e_close_tis(priv, tc); 2513} 2514 2515static int 2516mlx5e_open_rqt(struct mlx5e_priv *priv) 2517{ 2518 struct mlx5_core_dev *mdev = priv->mdev; 2519 u32 *in; 2520 u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0}; 2521 void *rqtc; 2522 int inlen; 2523 int err; 2524 int sz; 2525 int i; 2526 2527 sz = 1 << priv->params.rx_hash_log_tbl_sz; 2528 2529 inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; 2530 in = mlx5_vzalloc(inlen); 2531 if (in == NULL) 2532 return (-ENOMEM); 2533 rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); 2534 2535 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); 2536 MLX5_SET(rqtc, rqtc, rqt_max_size, sz); 2537 2538 for (i = 0; i < sz; i++) { 2539 int ix = i; 2540#ifdef RSS 2541 ix = rss_get_indirection_to_bucket(ix); 2542#endif 2543 /* ensure we don't overflow */ 2544 ix %= priv->params.num_channels; 2545 2546 /* apply receive side scaling stride, if any */ 2547 ix -= ix % (int)priv->params.channels_rsss; 2548 2549 MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix].rq.rqn); 2550 } 2551 2552 MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT); 2553 2554 err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); 2555 if (!err) 2556 priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn); 2557 2558 kvfree(in); 2559 2560 return (err); 2561} 2562 2563static void 2564mlx5e_close_rqt(struct mlx5e_priv *priv) 2565{ 2566 u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0}; 2567 u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0}; 2568 2569 MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT); 2570 MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn); 2571 2572 mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)); 2573} 2574 2575static void 2576mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt) 2577{ 2578 void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); 2579 __be32 *hkey; 2580 2581 MLX5_SET(tirc, tirc, transport_domain, priv->tdn); 2582 2583#define ROUGH_MAX_L2_L3_HDR_SZ 256 2584 2585#define MLX5_HASH_IP 
(MLX5_HASH_FIELD_SEL_SRC_IP |\ 2586 MLX5_HASH_FIELD_SEL_DST_IP) 2587 2588#define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\ 2589 MLX5_HASH_FIELD_SEL_DST_IP |\ 2590 MLX5_HASH_FIELD_SEL_L4_SPORT |\ 2591 MLX5_HASH_FIELD_SEL_L4_DPORT) 2592 2593#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\ 2594 MLX5_HASH_FIELD_SEL_DST_IP |\ 2595 MLX5_HASH_FIELD_SEL_IPSEC_SPI) 2596 2597 if (priv->params.hw_lro_en) { 2598 MLX5_SET(tirc, tirc, lro_enable_mask, 2599 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | 2600 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); 2601 MLX5_SET(tirc, tirc, lro_max_msg_sz, 2602 (priv->params.lro_wqe_sz - 2603 ROUGH_MAX_L2_L3_HDR_SZ) >> 8); 2604 /* TODO: add the option to choose timer value dynamically */ 2605 MLX5_SET(tirc, tirc, lro_timeout_period_usecs, 2606 MLX5_CAP_ETH(priv->mdev, 2607 lro_timer_supported_periods[2])); 2608 } 2609 2610 /* setup parameters for hashing TIR type, if any */ 2611 switch (tt) { 2612 case MLX5E_TT_ANY: 2613 MLX5_SET(tirc, tirc, disp_type, 2614 MLX5_TIRC_DISP_TYPE_DIRECT); 2615 MLX5_SET(tirc, tirc, inline_rqn, 2616 priv->channel[0].rq.rqn); 2617 break; 2618 default: 2619 MLX5_SET(tirc, tirc, disp_type, 2620 MLX5_TIRC_DISP_TYPE_INDIRECT); 2621 MLX5_SET(tirc, tirc, indirect_table, 2622 priv->rqtn); 2623 MLX5_SET(tirc, tirc, rx_hash_fn, 2624 MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ); 2625 hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key); 2626#ifdef RSS 2627 /* 2628 * The FreeBSD RSS implementation does not currently 2629 * support symmetric Toeplitz hashes: 2630 */ 2631 MLX5_SET(tirc, tirc, rx_hash_symmetric, 0); 2632 rss_getkey((uint8_t *)hkey); 2633#else 2634 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); 2635 hkey[0] = cpu_to_be32(0xD181C62C); 2636 hkey[1] = cpu_to_be32(0xF7F4DB5B); 2637 hkey[2] = cpu_to_be32(0x1983A2FC); 2638 hkey[3] = cpu_to_be32(0x943E1ADB); 2639 hkey[4] = cpu_to_be32(0xD9389E6B); 2640 hkey[5] = cpu_to_be32(0xD1039C2C); 2641 hkey[6] = cpu_to_be32(0xA74499AD); 2642 hkey[7] = cpu_to_be32(0x593D56D9); 2643 hkey[8] = cpu_to_be32(0xF3253C06); 2644 hkey[9] = cpu_to_be32(0x2ADC1FFC); 2645#endif 2646 break; 2647 } 2648 2649 switch (tt) { 2650 case MLX5E_TT_IPV4_TCP: 2651 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2652 MLX5_L3_PROT_TYPE_IPV4); 2653 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 2654 MLX5_L4_PROT_TYPE_TCP); 2655#ifdef RSS 2656 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) { 2657 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2658 MLX5_HASH_IP); 2659 } else 2660#endif 2661 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2662 MLX5_HASH_ALL); 2663 break; 2664 2665 case MLX5E_TT_IPV6_TCP: 2666 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2667 MLX5_L3_PROT_TYPE_IPV6); 2668 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 2669 MLX5_L4_PROT_TYPE_TCP); 2670#ifdef RSS 2671 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) { 2672 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2673 MLX5_HASH_IP); 2674 } else 2675#endif 2676 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2677 MLX5_HASH_ALL); 2678 break; 2679 2680 case MLX5E_TT_IPV4_UDP: 2681 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2682 MLX5_L3_PROT_TYPE_IPV4); 2683 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 2684 MLX5_L4_PROT_TYPE_UDP); 2685#ifdef RSS 2686 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) { 2687 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2688 MLX5_HASH_IP); 2689 } else 2690#endif 2691 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2692 MLX5_HASH_ALL); 2693 break; 
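	/*
	 * Note on the cases above and below: the TCP and UDP traffic
	 * types hash on the full 4-tuple (MLX5_HASH_ALL) unless the
	 * kernel RSS configuration excludes that hash type, the IPsec
	 * AH/ESP types extend the IP addresses with the SPI
	 * (MLX5_HASH_IP_IPSEC_SPI), and the plain IPv4/IPv6 types fall
	 * back to hashing the addresses only (MLX5_HASH_IP).
	 */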
2694 2695 case MLX5E_TT_IPV6_UDP: 2696 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2697 MLX5_L3_PROT_TYPE_IPV6); 2698 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 2699 MLX5_L4_PROT_TYPE_UDP); 2700#ifdef RSS 2701 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) { 2702 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2703 MLX5_HASH_IP); 2704 } else 2705#endif 2706 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2707 MLX5_HASH_ALL); 2708 break; 2709 2710 case MLX5E_TT_IPV4_IPSEC_AH: 2711 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2712 MLX5_L3_PROT_TYPE_IPV4); 2713 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2714 MLX5_HASH_IP_IPSEC_SPI); 2715 break; 2716 2717 case MLX5E_TT_IPV6_IPSEC_AH: 2718 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2719 MLX5_L3_PROT_TYPE_IPV6); 2720 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2721 MLX5_HASH_IP_IPSEC_SPI); 2722 break; 2723 2724 case MLX5E_TT_IPV4_IPSEC_ESP: 2725 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2726 MLX5_L3_PROT_TYPE_IPV4); 2727 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2728 MLX5_HASH_IP_IPSEC_SPI); 2729 break; 2730 2731 case MLX5E_TT_IPV6_IPSEC_ESP: 2732 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2733 MLX5_L3_PROT_TYPE_IPV6); 2734 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2735 MLX5_HASH_IP_IPSEC_SPI); 2736 break; 2737 2738 case MLX5E_TT_IPV4: 2739 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2740 MLX5_L3_PROT_TYPE_IPV4); 2741 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2742 MLX5_HASH_IP); 2743 break; 2744 2745 case MLX5E_TT_IPV6: 2746 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2747 MLX5_L3_PROT_TYPE_IPV6); 2748 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2749 MLX5_HASH_IP); 2750 break; 2751 2752 default: 2753 break; 2754 } 2755} 2756 2757static int 2758mlx5e_open_tir(struct mlx5e_priv *priv, int tt) 2759{ 2760 struct mlx5_core_dev *mdev = priv->mdev; 2761 u32 *in; 2762 void *tirc; 2763 int inlen; 2764 int err; 2765 2766 inlen = MLX5_ST_SZ_BYTES(create_tir_in); 2767 in = mlx5_vzalloc(inlen); 2768 if (in == NULL) 2769 return (-ENOMEM); 2770 tirc = MLX5_ADDR_OF(create_tir_in, in, tir_context); 2771 2772 mlx5e_build_tir_ctx(priv, tirc, tt); 2773 2774 err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]); 2775 2776 kvfree(in); 2777 2778 return (err); 2779} 2780 2781static void 2782mlx5e_close_tir(struct mlx5e_priv *priv, int tt) 2783{ 2784 mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]); 2785} 2786 2787static int 2788mlx5e_open_tirs(struct mlx5e_priv *priv) 2789{ 2790 int err; 2791 int i; 2792 2793 for (i = 0; i < MLX5E_NUM_TT; i++) { 2794 err = mlx5e_open_tir(priv, i); 2795 if (err) 2796 goto err_close_tirs; 2797 } 2798 2799 return (0); 2800 2801err_close_tirs: 2802 for (i--; i >= 0; i--) 2803 mlx5e_close_tir(priv, i); 2804 2805 return (err); 2806} 2807 2808static void 2809mlx5e_close_tirs(struct mlx5e_priv *priv) 2810{ 2811 int i; 2812 2813 for (i = 0; i < MLX5E_NUM_TT; i++) 2814 mlx5e_close_tir(priv, i); 2815} 2816 2817/* 2818 * SW MTU does not include headers, 2819 * HW MTU includes all headers and checksums. 
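 *
 * For example, assuming MLX5E_SW2HW_MTU() adds the Ethernet header,
 * one VLAN tag and the FCS (14 + 4 + 4 bytes), a SW MTU of 1500
 * corresponds to a HW MTU of 1522.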
2820 */ 2821static int 2822mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu) 2823{ 2824 struct mlx5e_priv *priv = ifp->if_softc; 2825 struct mlx5_core_dev *mdev = priv->mdev; 2826 int hw_mtu; 2827 int err; 2828 2829 hw_mtu = MLX5E_SW2HW_MTU(sw_mtu); 2830 2831 err = mlx5_set_port_mtu(mdev, hw_mtu); 2832 if (err) { 2833 mlx5_en_err(ifp, "mlx5_set_port_mtu failed setting %d, err=%d\n", 2834 sw_mtu, err); 2835 return (err); 2836 } 2837 2838 /* Update vport context MTU */ 2839 err = mlx5_set_vport_mtu(mdev, hw_mtu); 2840 if (err) { 2841 mlx5_en_err(ifp, 2842 "Failed updating vport context with MTU size, err=%d\n", 2843 err); 2844 } 2845 2846 ifp->if_mtu = sw_mtu; 2847 2848 err = mlx5_query_vport_mtu(mdev, &hw_mtu); 2849 if (err || !hw_mtu) { 2850 /* fallback to port oper mtu */ 2851 err = mlx5_query_port_oper_mtu(mdev, &hw_mtu); 2852 } 2853 if (err) { 2854 mlx5_en_err(ifp, 2855 "Query port MTU, after setting new MTU value, failed\n"); 2856 return (err); 2857 } else if (MLX5E_HW2SW_MTU(hw_mtu) < sw_mtu) { 2858 err = -E2BIG; 2859 mlx5_en_err(ifp, 2860 "Port MTU %d is smaller than ifp mtu %d\n", 2861 hw_mtu, sw_mtu); 2862 } else if (MLX5E_HW2SW_MTU(hw_mtu) > sw_mtu) { 2863 err = -EINVAL; 2864 mlx5_en_err(ifp, 2865 "Port MTU %d is bigger than ifp mtu %d\n", 2866 hw_mtu, sw_mtu); 2867 } 2868 priv->params_ethtool.hw_mtu = hw_mtu; 2869 2870 return (err); 2871} 2872 2873int 2874mlx5e_open_locked(struct ifnet *ifp) 2875{ 2876 struct mlx5e_priv *priv = ifp->if_softc; 2877 int err; 2878 u16 set_id; 2879 2880 /* check if already opened */ 2881 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0) 2882 return (0); 2883 2884#ifdef RSS 2885 if (rss_getnumbuckets() > priv->params.num_channels) { 2886 mlx5_en_info(ifp, 2887 "NOTE: There are more RSS buckets(%u) than channels(%u) available\n", 2888 rss_getnumbuckets(), priv->params.num_channels); 2889 } 2890#endif 2891 err = mlx5e_open_tises(priv); 2892 if (err) { 2893 mlx5_en_err(ifp, "mlx5e_open_tises failed, %d\n", err); 2894 return (err); 2895 } 2896 err = mlx5_vport_alloc_q_counter(priv->mdev, 2897 MLX5_INTERFACE_PROTOCOL_ETH, &set_id); 2898 if (err) { 2899 mlx5_en_err(priv->ifp, 2900 "mlx5_vport_alloc_q_counter failed: %d\n", err); 2901 goto err_close_tises; 2902 } 2903 /* store counter set ID */ 2904 priv->counter_set_id = set_id; 2905 2906 err = mlx5e_open_channels(priv); 2907 if (err) { 2908 mlx5_en_err(ifp, 2909 "mlx5e_open_channels failed, %d\n", err); 2910 goto err_dalloc_q_counter; 2911 } 2912 err = mlx5e_open_rqt(priv); 2913 if (err) { 2914 mlx5_en_err(ifp, "mlx5e_open_rqt failed, %d\n", err); 2915 goto err_close_channels; 2916 } 2917 err = mlx5e_open_tirs(priv); 2918 if (err) { 2919 mlx5_en_err(ifp, "mlx5e_open_tirs failed, %d\n", err); 2920 goto err_close_rqls; 2921 } 2922 err = mlx5e_open_flow_table(priv); 2923 if (err) { 2924 mlx5_en_err(ifp, 2925 "mlx5e_open_flow_table failed, %d\n", err); 2926 goto err_close_tirs; 2927 } 2928 err = mlx5e_add_all_vlan_rules(priv); 2929 if (err) { 2930 mlx5_en_err(ifp, 2931 "mlx5e_add_all_vlan_rules failed, %d\n", err); 2932 goto err_close_flow_table; 2933 } 2934 set_bit(MLX5E_STATE_OPENED, &priv->state); 2935 2936 mlx5e_update_carrier(priv); 2937 mlx5e_set_rx_mode_core(priv); 2938 2939 return (0); 2940 2941err_close_flow_table: 2942 mlx5e_close_flow_table(priv); 2943 2944err_close_tirs: 2945 mlx5e_close_tirs(priv); 2946 2947err_close_rqls: 2948 mlx5e_close_rqt(priv); 2949 2950err_close_channels: 2951 mlx5e_close_channels(priv); 2952 2953err_dalloc_q_counter: 2954 mlx5_vport_dealloc_q_counter(priv->mdev, 2955 
MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id); 2956 2957err_close_tises: 2958 mlx5e_close_tises(priv); 2959 2960 return (err); 2961} 2962 2963static void 2964mlx5e_open(void *arg) 2965{ 2966 struct mlx5e_priv *priv = arg; 2967 2968 PRIV_LOCK(priv); 2969 if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP)) 2970 mlx5_en_err(priv->ifp, 2971 "Setting port status to up failed\n"); 2972 2973 mlx5e_open_locked(priv->ifp); 2974 priv->ifp->if_drv_flags |= IFF_DRV_RUNNING; 2975 PRIV_UNLOCK(priv); 2976} 2977 2978int 2979mlx5e_close_locked(struct ifnet *ifp) 2980{ 2981 struct mlx5e_priv *priv = ifp->if_softc; 2982 2983 /* check if already closed */ 2984 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 2985 return (0); 2986 2987 clear_bit(MLX5E_STATE_OPENED, &priv->state); 2988 2989 mlx5e_set_rx_mode_core(priv); 2990 mlx5e_del_all_vlan_rules(priv); 2991 if_link_state_change(priv->ifp, LINK_STATE_DOWN); 2992 mlx5e_close_flow_table(priv); 2993 mlx5e_close_tirs(priv); 2994 mlx5e_close_rqt(priv); 2995 mlx5e_close_channels(priv); 2996 mlx5_vport_dealloc_q_counter(priv->mdev, 2997 MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id); 2998 mlx5e_close_tises(priv); 2999 3000 return (0); 3001} 3002 3003#if (__FreeBSD_version >= 1100000) 3004static uint64_t 3005mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt) 3006{ 3007 struct mlx5e_priv *priv = ifp->if_softc; 3008 u64 retval; 3009 3010 /* PRIV_LOCK(priv); XXX not allowed */ 3011 switch (cnt) { 3012 case IFCOUNTER_IPACKETS: 3013 retval = priv->stats.vport.rx_packets; 3014 break; 3015 case IFCOUNTER_IERRORS: 3016 retval = priv->stats.pport.in_range_len_errors + 3017 priv->stats.pport.out_of_range_len + 3018 priv->stats.pport.too_long_errors + 3019 priv->stats.pport.check_seq_err + 3020 priv->stats.pport.alignment_err; 3021 break; 3022 case IFCOUNTER_IQDROPS: 3023 retval = priv->stats.vport.rx_out_of_buffer; 3024 break; 3025 case IFCOUNTER_OPACKETS: 3026 retval = priv->stats.vport.tx_packets; 3027 break; 3028 case IFCOUNTER_OERRORS: 3029 retval = priv->stats.port_stats_debug.out_discards; 3030 break; 3031 case IFCOUNTER_IBYTES: 3032 retval = priv->stats.vport.rx_bytes; 3033 break; 3034 case IFCOUNTER_OBYTES: 3035 retval = priv->stats.vport.tx_bytes; 3036 break; 3037 case IFCOUNTER_IMCASTS: 3038 retval = priv->stats.vport.rx_multicast_packets; 3039 break; 3040 case IFCOUNTER_OMCASTS: 3041 retval = priv->stats.vport.tx_multicast_packets; 3042 break; 3043 case IFCOUNTER_OQDROPS: 3044 retval = priv->stats.vport.tx_queue_dropped; 3045 break; 3046 case IFCOUNTER_COLLISIONS: 3047 retval = priv->stats.pport.collisions; 3048 break; 3049 default: 3050 retval = if_get_counter_default(ifp, cnt); 3051 break; 3052 } 3053 /* PRIV_UNLOCK(priv); XXX not allowed */ 3054 return (retval); 3055} 3056#endif 3057 3058static void 3059mlx5e_set_rx_mode(struct ifnet *ifp) 3060{ 3061 struct mlx5e_priv *priv = ifp->if_softc; 3062 3063 queue_work(priv->wq, &priv->set_rx_mode_work); 3064} 3065 3066static int 3067mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 3068{ 3069 struct mlx5e_priv *priv; 3070 struct ifreq *ifr; 3071 struct ifi2creq i2c; 3072 int error = 0; 3073 int mask = 0; 3074 int size_read = 0; 3075 int module_status; 3076 int module_num; 3077 int max_mtu; 3078 uint8_t read_addr; 3079 3080 priv = ifp->if_softc; 3081 3082 /* check if detaching */ 3083 if (priv == NULL || priv->gone != 0) 3084 return (ENXIO); 3085 3086 switch (command) { 3087 case SIOCSIFMTU: 3088 ifr = (struct ifreq *)data; 3089 3090 PRIV_LOCK(priv); 3091 mlx5_query_port_max_mtu(priv->mdev, 
&max_mtu); 3092 3093 if (ifr->ifr_mtu >= MLX5E_MTU_MIN && 3094 ifr->ifr_mtu <= MIN(MLX5E_MTU_MAX, max_mtu)) { 3095 int was_opened; 3096 3097 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); 3098 if (was_opened) 3099 mlx5e_close_locked(ifp); 3100 3101 /* set new MTU */ 3102 mlx5e_set_dev_port_mtu(ifp, ifr->ifr_mtu); 3103 3104 if (was_opened) 3105 mlx5e_open_locked(ifp); 3106 } else { 3107 error = EINVAL; 3108 mlx5_en_err(ifp, 3109 "Invalid MTU value. Min val: %d, Max val: %d\n", 3110 MLX5E_MTU_MIN, MIN(MLX5E_MTU_MAX, max_mtu)); 3111 } 3112 PRIV_UNLOCK(priv); 3113 break; 3114 case SIOCSIFFLAGS: 3115 if ((ifp->if_flags & IFF_UP) && 3116 (ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3117 mlx5e_set_rx_mode(ifp); 3118 break; 3119 } 3120 PRIV_LOCK(priv); 3121 if (ifp->if_flags & IFF_UP) { 3122 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 3123 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 3124 mlx5e_open_locked(ifp); 3125 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3126 mlx5_set_port_status(priv->mdev, MLX5_PORT_UP); 3127 } 3128 } else { 3129 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 3130 mlx5_set_port_status(priv->mdev, 3131 MLX5_PORT_DOWN); 3132 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0) 3133 mlx5e_close_locked(ifp); 3134 mlx5e_update_carrier(priv); 3135 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3136 } 3137 } 3138 PRIV_UNLOCK(priv); 3139 break; 3140 case SIOCADDMULTI: 3141 case SIOCDELMULTI: 3142 mlx5e_set_rx_mode(ifp); 3143 break; 3144 case SIOCSIFMEDIA: 3145 case SIOCGIFMEDIA: 3146 case SIOCGIFXMEDIA: 3147 ifr = (struct ifreq *)data; 3148 error = ifmedia_ioctl(ifp, ifr, &priv->media, command); 3149 break; 3150 case SIOCSIFCAP: 3151 ifr = (struct ifreq *)data; 3152 PRIV_LOCK(priv); 3153 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3154 3155 if (mask & IFCAP_TXCSUM) { 3156 ifp->if_capenable ^= IFCAP_TXCSUM; 3157 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); 3158 3159 if (IFCAP_TSO4 & ifp->if_capenable && 3160 !(IFCAP_TXCSUM & ifp->if_capenable)) { 3161 ifp->if_capenable &= ~IFCAP_TSO4; 3162 ifp->if_hwassist &= ~CSUM_IP_TSO; 3163 mlx5_en_err(ifp, 3164 "tso4 disabled due to -txcsum.\n"); 3165 } 3166 } 3167 if (mask & IFCAP_TXCSUM_IPV6) { 3168 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; 3169 ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 3170 3171 if (IFCAP_TSO6 & ifp->if_capenable && 3172 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { 3173 ifp->if_capenable &= ~IFCAP_TSO6; 3174 ifp->if_hwassist &= ~CSUM_IP6_TSO; 3175 mlx5_en_err(ifp, 3176 "tso6 disabled due to -txcsum6.\n"); 3177 } 3178 } 3179 if (mask & IFCAP_RXCSUM) 3180 ifp->if_capenable ^= IFCAP_RXCSUM; 3181 if (mask & IFCAP_RXCSUM_IPV6) 3182 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; 3183 if (mask & IFCAP_TSO4) { 3184 if (!(IFCAP_TSO4 & ifp->if_capenable) && 3185 !(IFCAP_TXCSUM & ifp->if_capenable)) { 3186 mlx5_en_err(ifp, "enable txcsum first.\n"); 3187 error = EAGAIN; 3188 goto out; 3189 } 3190 ifp->if_capenable ^= IFCAP_TSO4; 3191 ifp->if_hwassist ^= CSUM_IP_TSO; 3192 } 3193 if (mask & IFCAP_TSO6) { 3194 if (!(IFCAP_TSO6 & ifp->if_capenable) && 3195 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { 3196 mlx5_en_err(ifp, "enable txcsum6 first.\n"); 3197 error = EAGAIN; 3198 goto out; 3199 } 3200 ifp->if_capenable ^= IFCAP_TSO6; 3201 ifp->if_hwassist ^= CSUM_IP6_TSO; 3202 } 3203 if (mask & IFCAP_VLAN_HWFILTER) { 3204 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) 3205 mlx5e_disable_vlan_filter(priv); 3206 else 3207 mlx5e_enable_vlan_filter(priv); 3208 3209 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; 3210 } 3211 if (mask & 
IFCAP_VLAN_HWTAGGING) 3212 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 3213 if (mask & IFCAP_WOL_MAGIC) 3214 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 3215 3216 VLAN_CAPABILITIES(ifp); 3217 /* turning off LRO also turns off HW LRO, if enabled */ 3218 if (mask & IFCAP_LRO) { 3219 int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); 3220 bool need_restart = false; 3221 3222 ifp->if_capenable ^= IFCAP_LRO; 3223 3224 /* figure out if updating HW LRO is needed */ 3225 if (!(ifp->if_capenable & IFCAP_LRO)) { 3226 if (priv->params.hw_lro_en) { 3227 priv->params.hw_lro_en = false; 3228 need_restart = true; 3229 } 3230 } else { 3231 if (priv->params.hw_lro_en == false && 3232 priv->params_ethtool.hw_lro != 0) { 3233 priv->params.hw_lro_en = true; 3234 need_restart = true; 3235 } 3236 } 3237 if (was_opened && need_restart) { 3238 mlx5e_close_locked(ifp); 3239 mlx5e_open_locked(ifp); 3240 } 3241 } 3242out: 3243 PRIV_UNLOCK(priv); 3244 break; 3245 3246 case SIOCGI2C: 3247 ifr = (struct ifreq *)data; 3248 3249 /* 3250 * Copy from the user-space address ifr_data to the 3251 * kernel-space address i2c 3252 */ 3253 error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 3254 if (error) 3255 break; 3256 3257 if (i2c.len > sizeof(i2c.data)) { 3258 error = EINVAL; 3259 break; 3260 } 3261 3262 PRIV_LOCK(priv); 3263 /* Get module_num which is required for the query_eeprom */ 3264 error = mlx5_query_module_num(priv->mdev, &module_num); 3265 if (error) { 3266 mlx5_en_err(ifp, 3267 "Query module num failed, eeprom reading is not supported\n"); 3268 error = EINVAL; 3269 goto err_i2c; 3270 } 3271 /* Check if module is present before doing an access */ 3272 module_status = mlx5_query_module_status(priv->mdev, module_num); 3273 if (module_status != MLX5_MODULE_STATUS_PLUGGED_ENABLED) { 3274 error = EINVAL; 3275 goto err_i2c; 3276 } 3277 /* 3278 * Currently 0xA0 and 0xA2 are the only addresses permitted. 
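 * These are the standard SFP/QSFP two-wire addresses: 0xA0 selects
 * the identification EEPROM page and 0xA2 the diagnostic monitoring
 * page defined by SFF-8472.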
3279 * The internal conversion is as follows: 3280 */ 3281 if (i2c.dev_addr == 0xA0) 3282 read_addr = MLX5_I2C_ADDR_LOW; 3283 else if (i2c.dev_addr == 0xA2) 3284 read_addr = MLX5_I2C_ADDR_HIGH; 3285 else { 3286 mlx5_en_err(ifp, 3287 "Query eeprom failed, Invalid Address: %X\n", 3288 i2c.dev_addr); 3289 error = EINVAL; 3290 goto err_i2c; 3291 } 3292 error = mlx5_query_eeprom(priv->mdev, 3293 read_addr, MLX5_EEPROM_LOW_PAGE, 3294 (uint32_t)i2c.offset, (uint32_t)i2c.len, module_num, 3295 (uint32_t *)i2c.data, &size_read); 3296 if (error) { 3297 mlx5_en_err(ifp, 3298 "Query eeprom failed, eeprom reading is not supported\n"); 3299 error = EINVAL; 3300 goto err_i2c; 3301 } 3302 3303 if (i2c.len > MLX5_EEPROM_MAX_BYTES) { 3304 error = mlx5_query_eeprom(priv->mdev, 3305 read_addr, MLX5_EEPROM_LOW_PAGE, 3306 (uint32_t)(i2c.offset + size_read), 3307 (uint32_t)(i2c.len - size_read), module_num, 3308 (uint32_t *)(i2c.data + size_read), &size_read); 3309 } 3310 if (error) { 3311 mlx5_en_err(ifp, 3312 "Query eeprom failed, eeprom reading is not supported\n"); 3313 error = EINVAL; 3314 goto err_i2c; 3315 } 3316 3317 error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); 3318err_i2c: 3319 PRIV_UNLOCK(priv); 3320 break; 3321 3322 default: 3323 error = ether_ioctl(ifp, command, data); 3324 break; 3325 } 3326 return (error); 3327} 3328 3329static int 3330mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) 3331{ 3332 /* 3333 * TODO: uncomment once FW really sets all these bits if 3334 * (!mdev->caps.eth.rss_ind_tbl_cap || !mdev->caps.eth.csum_cap || 3335 * !mdev->caps.eth.max_lso_cap || !mdev->caps.eth.vlan_cap || 3336 * !(mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD)) return 3337 * -ENOTSUPP; 3338 */ 3339 3340 /* TODO: add more must-have features */ 3341 3342 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 3343 return (-ENODEV); 3344 3345 return (0); 3346} 3347 3348static u16 3349mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev) 3350{ 3351 const int min_size = ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN; 3352 const int max_size = MLX5E_MAX_TX_INLINE; 3353 const int bf_buf_size = 3354 ((1U << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2U) - 3355 (sizeof(struct mlx5e_tx_wqe) - 2); 3356 3357 /* verify against driver limits */ 3358 if (bf_buf_size > max_size) 3359 return (max_size); 3360 else if (bf_buf_size < min_size) 3361 return (min_size); 3362 else 3363 return (bf_buf_size); 3364} 3365 3366static int 3367mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev, 3368 struct mlx5e_priv *priv, 3369 int num_comp_vectors) 3370{ 3371 int err; 3372 3373 /* 3374 * TODO: Consider link speed for setting "log_sq_size", 3375 * "log_rq_size" and "cq_moderation_xxx": 3376 */ 3377 priv->params.log_sq_size = 3378 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; 3379 priv->params.log_rq_size = 3380 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; 3381 priv->params.rx_cq_moderation_usec = 3382 MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 3383 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE : 3384 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; 3385 priv->params.rx_cq_moderation_mode = 3386 MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 
1 : 0; 3387 priv->params.rx_cq_moderation_pkts = 3388 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; 3389 priv->params.tx_cq_moderation_usec = 3390 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; 3391 priv->params.tx_cq_moderation_pkts = 3392 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; 3393 priv->params.min_rx_wqes = 3394 MLX5E_PARAMS_DEFAULT_MIN_RX_WQES; 3395 priv->params.rx_hash_log_tbl_sz = 3396 (order_base_2(num_comp_vectors) > 3397 MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ? 3398 order_base_2(num_comp_vectors) : 3399 MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ; 3400 priv->params.num_tc = 1; 3401 priv->params.default_vlan_prio = 0; 3402 priv->counter_set_id = -1; 3403 priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); 3404 3405 err = mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode); 3406 if (err) 3407 return (err); 3408 3409 /* 3410 * HW LRO is currently defaulted to off. When that changes, the HW 3411 * capability "!!MLX5_CAP_ETH(mdev, lro_cap)" should be considered. 3412 */ 3413 priv->params.hw_lro_en = false; 3414 priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; 3415 3416 /* 3417 * CQE zipping is currently defaulted to off. When that changes, 3418 * the HW capability "!!MLX5_CAP_GEN(mdev, cqe_compression)" 3419 * should be considered. 3420 */ 3421 priv->params.cqe_zipping_en = false; 3422 3423 priv->mdev = mdev; 3424 priv->params.num_channels = num_comp_vectors; 3425 priv->params.channels_rsss = 1; 3426 priv->order_base_2_num_channels = order_base_2(num_comp_vectors); 3427 priv->queue_mapping_channel_mask = 3428 roundup_pow_of_two(num_comp_vectors) - 1; 3429 priv->num_tc = priv->params.num_tc; 3430 priv->default_vlan_prio = priv->params.default_vlan_prio; 3431 3432 INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work); 3433 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); 3434 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); 3435 3436 return (0); 3437} 3438 3439static int 3440mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn, 3441 struct mlx5_core_mr *mkey) 3442{ 3443 struct ifnet *ifp = priv->ifp; 3444 struct mlx5_core_dev *mdev = priv->mdev; 3445 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 3446 void *mkc; 3447 u32 *in; 3448 int err; 3449 3450 in = mlx5_vzalloc(inlen); 3451 if (in == NULL) { 3452 mlx5_en_err(ifp, "failed to allocate inbox\n"); 3453 return (-ENOMEM); 3454 } 3455 3456 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 3457 MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA); 3458 MLX5_SET(mkc, mkc, lw, 1); 3459 MLX5_SET(mkc, mkc, lr, 1); 3460 3461 MLX5_SET(mkc, mkc, pd, pdn); 3462 MLX5_SET(mkc, mkc, length64, 1); 3463 MLX5_SET(mkc, mkc, qpn, 0xffffff); 3464 3465 err = mlx5_core_create_mkey(mdev, mkey, in, inlen); 3466 if (err) 3467 mlx5_en_err(ifp, "mlx5_core_create_mkey failed, %d\n", 3468 err); 3469 3470 kvfree(in); 3471 return (err); 3472} 3473 3474static const char *mlx5e_vport_stats_desc[] = { 3475 MLX5E_VPORT_STATS(MLX5E_STATS_DESC) 3476}; 3477 3478static const char *mlx5e_pport_stats_desc[] = { 3479 MLX5E_PPORT_STATS(MLX5E_STATS_DESC) 3480}; 3481 3482static void 3483mlx5e_priv_static_init(struct mlx5e_priv *priv, const uint32_t channels) 3484{ 3485 uint32_t x; 3486 3487 mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF); 3488 sx_init(&priv->state_lock, "mlx5state"); 3489 callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0); 3490 MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock); 3491 for (x = 0; x != channels; x++) 3492 mlx5e_chan_static_init(priv, 
&priv->channel[x], x); 3493} 3494 3495static void 3496mlx5e_priv_static_destroy(struct mlx5e_priv *priv, const uint32_t channels) 3497{ 3498 uint32_t x; 3499 3500 for (x = 0; x != channels; x++) 3501 mlx5e_chan_static_destroy(&priv->channel[x]); 3502 callout_drain(&priv->watchdog); 3503 mtx_destroy(&priv->async_events_mtx); 3504 sx_destroy(&priv->state_lock); 3505} 3506 3507static int 3508sysctl_firmware(SYSCTL_HANDLER_ARGS) 3509{ 3510 /* 3511 * %d.%d.%d is the string format. 3512 * fw_rev_{maj,min,sub} return u16, 2^16 = 65536. 3513 * We need at most 5 chars to store that. 3514 * It also has two "." and a NUL at the end, which means we need 18 3515 * (5*3 + 3) chars at most. 3516 */ 3517 char fw[18]; 3518 struct mlx5e_priv *priv = arg1; 3519 int error; 3520 3521 snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev), 3522 fw_rev_sub(priv->mdev)); 3523 error = sysctl_handle_string(oidp, fw, sizeof(fw), req); 3524 return (error); 3525} 3526 3527static void 3528mlx5e_disable_tx_dma(struct mlx5e_channel *ch) 3529{ 3530 int i; 3531 3532 for (i = 0; i < ch->priv->num_tc; i++) 3533 mlx5e_drain_sq(&ch->sq[i]); 3534} 3535 3536static void 3537mlx5e_reset_sq_doorbell_record(struct mlx5e_sq *sq) 3538{ 3539 3540 sq->doorbell.d32[0] = cpu_to_be32(MLX5_OPCODE_NOP); 3541 sq->doorbell.d32[1] = cpu_to_be32(sq->sqn << 8); 3542 mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0); 3543 sq->doorbell.d64 = 0; 3544} 3545 3546void 3547mlx5e_resume_sq(struct mlx5e_sq *sq) 3548{ 3549 int err; 3550 3551 /* check if already enabled */ 3552 if (READ_ONCE(sq->running) != 0) 3553 return; 3554 3555 err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_ERR, 3556 MLX5_SQC_STATE_RST); 3557 if (err != 0) { 3558 mlx5_en_err(sq->ifp, 3559 "mlx5e_modify_sq() from ERR to RST failed: %d\n", err); 3560 } 3561 3562 sq->cc = 0; 3563 sq->pc = 0; 3564 3565 /* reset doorbell prior to moving from RST to RDY */ 3566 mlx5e_reset_sq_doorbell_record(sq); 3567 3568 err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, 3569 MLX5_SQC_STATE_RDY); 3570 if (err != 0) { 3571 mlx5_en_err(sq->ifp, 3572 "mlx5e_modify_sq() from RST to RDY failed: %d\n", err); 3573 } 3574 3575 sq->cev_next_state = MLX5E_CEV_STATE_INITIAL; 3576 WRITE_ONCE(sq->running, 1); 3577} 3578 3579static void 3580mlx5e_enable_tx_dma(struct mlx5e_channel *ch) 3581{ 3582 int i; 3583 3584 for (i = 0; i < ch->priv->num_tc; i++) 3585 mlx5e_resume_sq(&ch->sq[i]); 3586} 3587 3588static void 3589mlx5e_disable_rx_dma(struct mlx5e_channel *ch) 3590{ 3591 struct mlx5e_rq *rq = &ch->rq; 3592 int err; 3593 3594 mtx_lock(&rq->mtx); 3595 rq->enabled = 0; 3596 callout_stop(&rq->watchdog); 3597 mtx_unlock(&rq->mtx); 3598 3599 err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); 3600 if (err != 0) { 3601 mlx5_en_err(rq->ifp, 3602 "mlx5e_modify_rq() from RDY to ERR failed: %d\n", err); 3603 } 3604 3605 while (!mlx5_wq_ll_is_empty(&rq->wq)) { 3606 msleep(1); 3607 rq->cq.mcq.comp(&rq->cq.mcq); 3608 } 3609 3610 /* 3611 * Transitioning into RST state will allow the FW to track fewer ERR state queues, 3612 * thus reducing the recv queue flushing time 3613 */ 3614 err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_ERR, MLX5_RQC_STATE_RST); 3615 if (err != 0) { 3616 mlx5_en_err(rq->ifp, 3617 "mlx5e_modify_rq() from ERR to RST failed: %d\n", err); 3618 } 3619} 3620 3621static void 3622mlx5e_enable_rx_dma(struct mlx5e_channel *ch) 3623{ 3624 struct mlx5e_rq *rq = &ch->rq; 3625 int err; 3626 3627 rq->wq.wqe_ctr = 0; 3628 mlx5_wq_ll_update_db_record(&rq->wq); 3629 err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, 
MLX5_RQC_STATE_RDY); 3630 if (err != 0) { 3631 mlx5_en_err(rq->ifp, 3632 "mlx5e_modify_rq() from RST to RDY failed: %d\n", err); 3633 } 3634 3635 rq->enabled = 1; 3636 3637 rq->cq.mcq.comp(&rq->cq.mcq); 3638} 3639 3640void 3641mlx5e_modify_tx_dma(struct mlx5e_priv *priv, uint8_t value) 3642{ 3643 int i; 3644 3645 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 3646 return; 3647 3648 for (i = 0; i < priv->params.num_channels; i++) { 3649 if (value) 3650 mlx5e_disable_tx_dma(&priv->channel[i]); 3651 else 3652 mlx5e_enable_tx_dma(&priv->channel[i]); 3653 } 3654} 3655 3656void 3657mlx5e_modify_rx_dma(struct mlx5e_priv *priv, uint8_t value) 3658{ 3659 int i; 3660 3661 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 3662 return; 3663 3664 for (i = 0; i < priv->params.num_channels; i++) { 3665 if (value) 3666 mlx5e_disable_rx_dma(&priv->channel[i]); 3667 else 3668 mlx5e_enable_rx_dma(&priv->channel[i]); 3669 } 3670} 3671 3672static void 3673mlx5e_add_hw_stats(struct mlx5e_priv *priv) 3674{ 3675 SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw), 3676 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, priv, 0, 3677 sysctl_firmware, "A", "HCA firmware version"); 3678 3679 SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw), 3680 OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0, 3681 "Board ID"); 3682} 3683 3684static int 3685mlx5e_sysctl_tx_priority_flow_control(SYSCTL_HANDLER_ARGS) 3686{ 3687 struct mlx5e_priv *priv = arg1; 3688 uint8_t temp[MLX5E_MAX_PRIORITY]; 3689 uint32_t tx_pfc; 3690 int err; 3691 int i; 3692 3693 PRIV_LOCK(priv); 3694 3695 tx_pfc = priv->params.tx_priority_flow_control; 3696 3697 for (i = 0; i != MLX5E_MAX_PRIORITY; i++) 3698 temp[i] = (tx_pfc >> i) & 1; 3699 3700 err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY); 3701 if (err || !req->newptr) 3702 goto done; 3703 err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY); 3704 if (err) 3705 goto done; 3706 3707 priv->params.tx_priority_flow_control = 0; 3708 3709 /* range check input value */ 3710 for (i = 0; i != MLX5E_MAX_PRIORITY; i++) { 3711 if (temp[i] > 1) { 3712 err = ERANGE; 3713 goto done; 3714 } 3715 priv->params.tx_priority_flow_control |= (temp[i] << i); 3716 } 3717 3718 /* check if update is required */ 3719 if (tx_pfc != priv->params.tx_priority_flow_control) 3720 err = -mlx5e_set_port_pfc(priv); 3721done: 3722 if (err != 0) 3723 priv->params.tx_priority_flow_control= tx_pfc; 3724 PRIV_UNLOCK(priv); 3725 3726 return (err); 3727} 3728 3729static int 3730mlx5e_sysctl_rx_priority_flow_control(SYSCTL_HANDLER_ARGS) 3731{ 3732 struct mlx5e_priv *priv = arg1; 3733 uint8_t temp[MLX5E_MAX_PRIORITY]; 3734 uint32_t rx_pfc; 3735 int err; 3736 int i; 3737 3738 PRIV_LOCK(priv); 3739 3740 rx_pfc = priv->params.rx_priority_flow_control; 3741 3742 for (i = 0; i != MLX5E_MAX_PRIORITY; i++) 3743 temp[i] = (rx_pfc >> i) & 1; 3744 3745 err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY); 3746 if (err || !req->newptr) 3747 goto done; 3748 err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY); 3749 if (err) 3750 goto done; 3751 3752 priv->params.rx_priority_flow_control = 0; 3753 3754 /* range check input value */ 3755 for (i = 0; i != MLX5E_MAX_PRIORITY; i++) { 3756 if (temp[i] > 1) { 3757 err = ERANGE; 3758 goto done; 3759 } 3760 priv->params.rx_priority_flow_control |= (temp[i] << i); 3761 } 3762 3763 /* check if update is required */ 3764 if (rx_pfc != priv->params.rx_priority_flow_control) { 3765 err = -mlx5e_set_port_pfc(priv); 3766 if (err == 0 && priv->sw_is_port_buf_owner) 3767 err = 
mlx5e_update_buf_lossy(priv); 3768 } 3769done: 3770 if (err != 0) 3771 priv->params.rx_priority_flow_control= rx_pfc; 3772 PRIV_UNLOCK(priv); 3773 3774 return (err); 3775} 3776 3777static void 3778mlx5e_setup_pauseframes(struct mlx5e_priv *priv) 3779{ 3780#if (__FreeBSD_version < 1100000) 3781 char path[96]; 3782#endif 3783 int error; 3784 3785 /* enable pauseframes by default */ 3786 priv->params.tx_pauseframe_control = 1; 3787 priv->params.rx_pauseframe_control = 1; 3788 3789 /* disable ports flow control, PFC, by default */ 3790 priv->params.tx_priority_flow_control = 0; 3791 priv->params.rx_priority_flow_control = 0; 3792 3793#if (__FreeBSD_version < 1100000) 3794 /* compute path for sysctl */ 3795 snprintf(path, sizeof(path), "dev.mce.%d.tx_pauseframe_control", 3796 device_get_unit(priv->mdev->pdev->dev.bsddev)); 3797 3798 /* try to fetch tunable, if any */ 3799 TUNABLE_INT_FETCH(path, &priv->params.tx_pauseframe_control); 3800 3801 /* compute path for sysctl */ 3802 snprintf(path, sizeof(path), "dev.mce.%d.rx_pauseframe_control", 3803 device_get_unit(priv->mdev->pdev->dev.bsddev)); 3804 3805 /* try to fetch tunable, if any */ 3806 TUNABLE_INT_FETCH(path, &priv->params.rx_pauseframe_control); 3807#endif 3808 3809 /* register pauseframe SYSCTLs */ 3810 SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3811 OID_AUTO, "tx_pauseframe_control", CTLFLAG_RDTUN, 3812 &priv->params.tx_pauseframe_control, 0, 3813 "Set to enable TX pause frames. Clear to disable."); 3814 3815 SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3816 OID_AUTO, "rx_pauseframe_control", CTLFLAG_RDTUN, 3817 &priv->params.rx_pauseframe_control, 0, 3818 "Set to enable RX pause frames. Clear to disable."); 3819 3820 /* register priority flow control, PFC, SYSCTLs */ 3821 SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3822 OID_AUTO, "tx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN | 3823 CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_tx_priority_flow_control, "CU", 3824 "Set to enable TX ports flow control frames for priorities 0..7. Clear to disable."); 3825 3826 SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3827 OID_AUTO, "rx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN | 3828 CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_rx_priority_flow_control, "CU", 3829 "Set to enable RX ports flow control frames for priorities 0..7. Clear to disable."); 3830 3831 PRIV_LOCK(priv); 3832 3833 /* range check */ 3834 priv->params.tx_pauseframe_control = 3835 priv->params.tx_pauseframe_control ? 1 : 0; 3836 priv->params.rx_pauseframe_control = 3837 priv->params.rx_pauseframe_control ? 
1 : 0; 3838 3839 /* update firmware */ 3840 error = mlx5e_set_port_pause_and_pfc(priv); 3841 if (error == -EINVAL) { 3842 mlx5_en_err(priv->ifp, 3843 "Global pauseframes must be disabled before enabling PFC.\n"); 3844 priv->params.rx_priority_flow_control = 0; 3845 priv->params.tx_priority_flow_control = 0; 3846 3847 /* update firmware */ 3848 (void) mlx5e_set_port_pause_and_pfc(priv); 3849 } 3850 PRIV_UNLOCK(priv); 3851} 3852 3853static void * 3854mlx5e_create_ifp(struct mlx5_core_dev *mdev) 3855{ 3856 struct ifnet *ifp; 3857 struct mlx5e_priv *priv; 3858 u8 dev_addr[ETHER_ADDR_LEN] __aligned(4); 3859 u8 connector_type; 3860 struct sysctl_oid_list *child; 3861 int ncv = mdev->priv.eq_table.num_comp_vectors; 3862 char unit[16]; 3863 int err; 3864 int i, j; 3865 u32 eth_proto_cap; 3866 u32 out[MLX5_ST_SZ_DW(ptys_reg)]; 3867 bool ext = false; 3868 u32 speeds_num; 3869 struct media media_entry = {}; 3870 3871 if (mlx5e_check_required_hca_cap(mdev)) { 3872 mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n"); 3873 return (NULL); 3874 } 3875 /* 3876 * Try to allocate the priv and make room for worst-case 3877 * number of channel structures: 3878 */ 3879 priv = malloc(sizeof(*priv) + 3880 (sizeof(priv->channel[0]) * mdev->priv.eq_table.num_comp_vectors), 3881 M_MLX5EN, M_WAITOK | M_ZERO); 3882 3883 ifp = priv->ifp = if_alloc(IFT_ETHER); 3884 if (ifp == NULL) { 3885 mlx5_core_err(mdev, "if_alloc() failed\n"); 3886 goto err_free_priv; 3887 } 3888 /* setup all static fields */ 3889 mlx5e_priv_static_init(priv, mdev->priv.eq_table.num_comp_vectors); 3890 3891 ifp->if_softc = priv; 3892 if_initname(ifp, "mce", device_get_unit(mdev->pdev->dev.bsddev)); 3893 ifp->if_mtu = ETHERMTU; 3894 ifp->if_init = mlx5e_open; 3895 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 3896 ifp->if_ioctl = mlx5e_ioctl; 3897 ifp->if_transmit = mlx5e_xmit; 3898 ifp->if_qflush = if_qflush; 3899#if (__FreeBSD_version >= 1100000) 3900 ifp->if_get_counter = mlx5e_get_counter; 3901#endif 3902 ifp->if_snd.ifq_maxlen = ifqmaxlen; 3903 /* 3904 * Set driver features 3905 */ 3906 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6; 3907 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 3908 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER; 3909 ifp->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU; 3910 ifp->if_capabilities |= IFCAP_LRO; 3911 ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO; 3912 ifp->if_capabilities |= IFCAP_HWSTATS; 3913 3914 /* set TSO limits so that we don't have to drop TX packets */ 3915 ifp->if_hw_tsomax = MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 3916 ifp->if_hw_tsomaxsegcount = MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */; 3917 ifp->if_hw_tsomaxsegsize = MLX5E_MAX_TX_MBUF_SIZE; 3918 3919 ifp->if_capenable = ifp->if_capabilities; 3920 ifp->if_hwassist = 0; 3921 if (ifp->if_capenable & IFCAP_TSO) 3922 ifp->if_hwassist |= CSUM_TSO; 3923 if (ifp->if_capenable & IFCAP_TXCSUM) 3924 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP); 3925 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) 3926 ifp->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 3927 3928 /* ifnet sysctl tree */ 3929 sysctl_ctx_init(&priv->sysctl_ctx); 3930 priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev), 3931 OID_AUTO, ifp->if_dname, CTLFLAG_RD, 0, "MLX5 ethernet - interface name"); 3932 if (priv->sysctl_ifnet == NULL) { 3933 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n"); 3934 goto err_free_sysctl; 3935 } 3936 snprintf(unit, sizeof(unit), 
"%d", ifp->if_dunit); 3937 priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3938 OID_AUTO, unit, CTLFLAG_RD, 0, "MLX5 ethernet - interface unit"); 3939 if (priv->sysctl_ifnet == NULL) { 3940 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n"); 3941 goto err_free_sysctl; 3942 } 3943 3944 /* HW sysctl tree */ 3945 child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev)); 3946 priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child, 3947 OID_AUTO, "hw", CTLFLAG_RD, 0, "MLX5 ethernet dev hw"); 3948 if (priv->sysctl_hw == NULL) { 3949 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n"); 3950 goto err_free_sysctl; 3951 } 3952 3953 err = mlx5e_build_ifp_priv(mdev, priv, ncv); 3954 if (err) { 3955 mlx5_core_err(mdev, "mlx5e_build_ifp_priv() failed (%d)\n", err); 3956 goto err_free_sysctl; 3957 } 3958 3959 /* reuse mlx5core's watchdog workqueue */ 3960 priv->wq = mdev->priv.health.wq_watchdog; 3961 3962 err = mlx5_alloc_map_uar(mdev, &priv->cq_uar); 3963 if (err) { 3964 mlx5_en_err(ifp, "mlx5_alloc_map_uar failed, %d\n", err); 3965 goto err_free_wq; 3966 } 3967 err = mlx5_core_alloc_pd(mdev, &priv->pdn); 3968 if (err) { 3969 mlx5_en_err(ifp, "mlx5_core_alloc_pd failed, %d\n", err); 3970 goto err_unmap_free_uar; 3971 } 3972 err = mlx5_alloc_transport_domain(mdev, &priv->tdn); 3973 if (err) { 3974 mlx5_en_err(ifp, 3975 "mlx5_alloc_transport_domain failed, %d\n", err); 3976 goto err_dealloc_pd; 3977 } 3978 err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr); 3979 if (err) { 3980 mlx5_en_err(ifp, "mlx5e_create_mkey failed, %d\n", err); 3981 goto err_dealloc_transport_domain; 3982 } 3983 mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr); 3984 3985 /* check if we should generate a random MAC address */ 3986 if (MLX5_CAP_GEN(priv->mdev, vport_group_manager) == 0 && 3987 is_zero_ether_addr(dev_addr)) { 3988 random_ether_addr(dev_addr); 3989 mlx5_en_err(ifp, "Assigned random MAC address\n"); 3990 } 3991 3992 /* set default MTU */ 3993 mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu); 3994 3995 /* Set default media status */ 3996 priv->media_status_last = IFM_AVALID; 3997 priv->media_active_last = IFM_ETHER | IFM_AUTO | 3998 IFM_ETH_RXPAUSE | IFM_FDX; 3999 4000 /* setup default pauseframes configuration */ 4001 mlx5e_setup_pauseframes(priv); 4002 4003 /* Setup supported medias */ 4004 //TODO: If we failed to query ptys is it ok to proceed?? 4005 if (!mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1)) { 4006 ext = MLX5_CAP_PCAM_FEATURE(mdev, 4007 ptys_extended_ethernet); 4008 eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, 4009 eth_proto_capability); 4010 if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) 4011 connector_type = MLX5_GET(ptys_reg, out, 4012 connector_type); 4013 } else { 4014 eth_proto_cap = 0; 4015 mlx5_en_err(ifp, "Query port media capability failed, %d\n", err); 4016 } 4017 4018 ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK, 4019 mlx5e_media_change, mlx5e_media_status); 4020 4021 speeds_num = ext ? MLX5E_EXT_LINK_SPEEDS_NUMBER : MLX5E_LINK_SPEEDS_NUMBER; 4022 for (i = 0; i != speeds_num; i++) { 4023 for (j = 0; j < MLX5E_LINK_MODES_NUMBER ; ++j) { 4024 media_entry = ext ? 
mlx5e_ext_mode_table[i][j] : 4025 mlx5e_mode_table[i][j]; 4026 if (media_entry.baudrate == 0) 4027 continue; 4028 if (MLX5E_PROT_MASK(i) & eth_proto_cap) { 4029 ifmedia_add(&priv->media, 4030 media_entry.subtype | 4031 IFM_ETHER, 0, NULL); 4032 ifmedia_add(&priv->media, 4033 media_entry.subtype | 4034 IFM_ETHER | IFM_FDX | 4035 IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL); 4036 } 4037 } 4038 } 4039 4040 ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL); 4041 ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX | 4042 IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL); 4043 4044 /* Set autoselect by default */ 4045 ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX | 4046 IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE); 4047 ether_ifattach(ifp, dev_addr); 4048 4049 /* Register for VLAN events */ 4050 priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, 4051 mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST); 4052 priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, 4053 mlx5e_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST); 4054 4055 /* Link is down by default */ 4056 if_link_state_change(ifp, LINK_STATE_DOWN); 4057 4058 mlx5e_enable_async_events(priv); 4059 4060 mlx5e_add_hw_stats(priv); 4061 4062 mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 4063 "vstats", mlx5e_vport_stats_desc, MLX5E_VPORT_STATS_NUM, 4064 priv->stats.vport.arg); 4065 4066 mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 4067 "pstats", mlx5e_pport_stats_desc, MLX5E_PPORT_STATS_NUM, 4068 priv->stats.pport.arg); 4069 4070 mlx5e_create_ethtool(priv); 4071 4072 mtx_lock(&priv->async_events_mtx); 4073 mlx5e_update_stats(priv); 4074 mtx_unlock(&priv->async_events_mtx); 4075 4076 return (priv); 4077 4078err_dealloc_transport_domain: 4079 mlx5_dealloc_transport_domain(mdev, priv->tdn); 4080 4081err_dealloc_pd: 4082 mlx5_core_dealloc_pd(mdev, priv->pdn); 4083 4084err_unmap_free_uar: 4085 mlx5_unmap_free_uar(mdev, &priv->cq_uar); 4086 4087err_free_wq: 4088 flush_workqueue(priv->wq); 4089 4090err_free_sysctl: 4091 sysctl_ctx_free(&priv->sysctl_ctx); 4092 if (priv->sysctl_debug) 4093 sysctl_ctx_free(&priv->stats.port_stats_debug.ctx); 4094 mlx5e_priv_static_destroy(priv, mdev->priv.eq_table.num_comp_vectors); 4095 if_free(ifp); 4096 4097err_free_priv: 4098 free(priv, M_MLX5EN); 4099 return (NULL); 4100} 4101 4102static void 4103mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv) 4104{ 4105 struct mlx5e_priv *priv = vpriv; 4106 struct ifnet *ifp = priv->ifp; 4107 4108 /* don't allow more IOCTLs */ 4109 priv->gone = 1; 4110 4111 /* XXX wait a bit to allow IOCTL handlers to complete */ 4112 pause("W", hz); 4113 4114 /* stop watchdog timer */ 4115 callout_drain(&priv->watchdog); 4116 4117 if (priv->vlan_attach != NULL) 4118 EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach); 4119 if (priv->vlan_detach != NULL) 4120 EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach); 4121 4122 /* make sure device gets closed */ 4123 PRIV_LOCK(priv); 4124 mlx5e_close_locked(ifp); 4125 PRIV_UNLOCK(priv); 4126 4127 /* unregister device */ 4128 ifmedia_removeall(&priv->media); 4129 ether_ifdetach(ifp); 4130 4131 /* destroy all remaining sysctl nodes */ 4132 sysctl_ctx_free(&priv->stats.vport.ctx); 4133 sysctl_ctx_free(&priv->stats.pport.ctx); 4134 if (priv->sysctl_debug) 4135 sysctl_ctx_free(&priv->stats.port_stats_debug.ctx); 4136 sysctl_ctx_free(&priv->sysctl_ctx); 4137 4138 mlx5_core_destroy_mkey(priv->mdev, &priv->mr); 4139 mlx5_dealloc_transport_domain(priv->mdev, 
priv->tdn); 4140 mlx5_core_dealloc_pd(priv->mdev, priv->pdn); 4141 mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); 4142 mlx5e_disable_async_events(priv); 4143 flush_workqueue(priv->wq); 4144 mlx5e_priv_static_destroy(priv, mdev->priv.eq_table.num_comp_vectors); 4145 if_free(ifp); 4146 free(priv, M_MLX5EN); 4147} 4148 4149static void * 4150mlx5e_get_ifp(void *vpriv) 4151{ 4152 struct mlx5e_priv *priv = vpriv; 4153 4154 return (priv->ifp); 4155} 4156 4157static struct mlx5_interface mlx5e_interface = { 4158 .add = mlx5e_create_ifp, 4159 .remove = mlx5e_destroy_ifp, 4160 .event = mlx5e_async_event, 4161 .protocol = MLX5_INTERFACE_PROTOCOL_ETH, 4162 .get_dev = mlx5e_get_ifp, 4163}; 4164 4165void 4166mlx5e_init(void) 4167{ 4168 mlx5_register_interface(&mlx5e_interface); 4169} 4170 4171void 4172mlx5e_cleanup(void) 4173{ 4174 mlx5_unregister_interface(&mlx5e_interface); 4175} 4176 4177static void 4178mlx5e_show_version(void __unused *arg) 4179{ 4180 4181 printf("%s", mlx5e_version); 4182} 4183SYSINIT(mlx5e_show_version, SI_SUB_DRIVERS, SI_ORDER_ANY, mlx5e_show_version, NULL); 4184 4185module_init_order(mlx5e_init, SI_ORDER_THIRD); 4186module_exit_order(mlx5e_cleanup, SI_ORDER_THIRD); 4187 4188#if (__FreeBSD_version >= 1100000) 4189MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1); 4190#endif 4191MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1); 4192MODULE_VERSION(mlx5en, 1); 4193
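/*
 * Usage sketch (illustrative, not part of the driver): the pause
 * frame controls registered by mlx5e_setup_pauseframes() above are
 * boot-time tunables (CTLFLAG_RDTUN) and may be preset from
 * loader.conf(5), for example:
 *
 *	dev.mce.0.tx_pauseframe_control="1"
 *	dev.mce.0.rx_pauseframe_control="1"
 *
 * The per-priority flow control knobs are CTLFLAG_RWTUN and can also
 * be inspected at runtime, e.g.
 * "sysctl dev.mce.0.rx_priority_flow_control".
 */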