mlx5_en_main.c revision 361171
1/*- 2 * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_en/mlx5_en_main.c 361171 2020-05-18 09:04:24Z hselasky $ 26 */ 27 28#include "en.h" 29 30#include <sys/sockio.h> 31#include <machine/atomic.h> 32 33#ifndef ETH_DRIVER_VERSION 34#define ETH_DRIVER_VERSION "3.5.2" 35#endif 36#define DRIVER_RELDATE "September 2019" 37 38static const char mlx5e_version[] = "mlx5en: Mellanox Ethernet driver " 39 ETH_DRIVER_VERSION " (" DRIVER_RELDATE ")\n"; 40 41static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs); 42 43struct mlx5e_channel_param { 44 struct mlx5e_rq_param rq; 45 struct mlx5e_sq_param sq; 46 struct mlx5e_cq_param rx_cq; 47 struct mlx5e_cq_param tx_cq; 48}; 49 50struct media { 51 u32 subtype; 52 u64 baudrate; 53}; 54 55static const struct media mlx5e_mode_table[MLX5E_LINK_SPEEDS_NUMBER][MLX5E_LINK_MODES_NUMBER] = { 56 57 [MLX5E_1000BASE_CX_SGMII][MLX5E_SGMII] = { 58 .subtype = IFM_1000_CX_SGMII, 59 .baudrate = IF_Mbps(1000ULL), 60 }, 61 [MLX5E_1000BASE_KX][MLX5E_KX] = { 62 .subtype = IFM_1000_KX, 63 .baudrate = IF_Mbps(1000ULL), 64 }, 65 [MLX5E_10GBASE_CX4][MLX5E_CX4] = { 66 .subtype = IFM_10G_CX4, 67 .baudrate = IF_Gbps(10ULL), 68 }, 69 [MLX5E_10GBASE_KX4][MLX5E_KX4] = { 70 .subtype = IFM_10G_KX4, 71 .baudrate = IF_Gbps(10ULL), 72 }, 73 [MLX5E_10GBASE_KR][MLX5E_KR] = { 74 .subtype = IFM_10G_KR, 75 .baudrate = IF_Gbps(10ULL), 76 }, 77 [MLX5E_20GBASE_KR2][MLX5E_KR2] = { 78 .subtype = IFM_20G_KR2, 79 .baudrate = IF_Gbps(20ULL), 80 }, 81 [MLX5E_40GBASE_CR4][MLX5E_CR4] = { 82 .subtype = IFM_40G_CR4, 83 .baudrate = IF_Gbps(40ULL), 84 }, 85 [MLX5E_40GBASE_KR4][MLX5E_KR4] = { 86 .subtype = IFM_40G_KR4, 87 .baudrate = IF_Gbps(40ULL), 88 }, 89 [MLX5E_56GBASE_R4][MLX5E_R] = { 90 .subtype = IFM_56G_R4, 91 .baudrate = IF_Gbps(56ULL), 92 }, 93 [MLX5E_10GBASE_CR][MLX5E_CR1] = { 94 .subtype = IFM_10G_CR1, 95 .baudrate = IF_Gbps(10ULL), 96 }, 97 [MLX5E_10GBASE_SR][MLX5E_SR] = { 98 .subtype = IFM_10G_SR, 99 .baudrate = IF_Gbps(10ULL), 100 }, 101 [MLX5E_10GBASE_ER_LR][MLX5E_ER] = { 102 .subtype = IFM_10G_ER, 103 .baudrate = IF_Gbps(10ULL), 104 }, 105 [MLX5E_10GBASE_ER_LR][MLX5E_LR] = { 106 .subtype = IFM_10G_LR, 107 .baudrate = 
IF_Gbps(10ULL), 108 }, 109 [MLX5E_40GBASE_SR4][MLX5E_SR4] = { 110 .subtype = IFM_40G_SR4, 111 .baudrate = IF_Gbps(40ULL), 112 }, 113 [MLX5E_40GBASE_LR4_ER4][MLX5E_LR4] = { 114 .subtype = IFM_40G_LR4, 115 .baudrate = IF_Gbps(40ULL), 116 }, 117 [MLX5E_40GBASE_LR4_ER4][MLX5E_ER4] = { 118 .subtype = IFM_40G_ER4, 119 .baudrate = IF_Gbps(40ULL), 120 }, 121 [MLX5E_100GBASE_CR4][MLX5E_CR4] = { 122 .subtype = IFM_100G_CR4, 123 .baudrate = IF_Gbps(100ULL), 124 }, 125 [MLX5E_100GBASE_SR4][MLX5E_SR4] = { 126 .subtype = IFM_100G_SR4, 127 .baudrate = IF_Gbps(100ULL), 128 }, 129 [MLX5E_100GBASE_KR4][MLX5E_KR4] = { 130 .subtype = IFM_100G_KR4, 131 .baudrate = IF_Gbps(100ULL), 132 }, 133 [MLX5E_100GBASE_LR4][MLX5E_LR4] = { 134 .subtype = IFM_100G_LR4, 135 .baudrate = IF_Gbps(100ULL), 136 }, 137 [MLX5E_100BASE_TX][MLX5E_TX] = { 138 .subtype = IFM_100_TX, 139 .baudrate = IF_Mbps(100ULL), 140 }, 141 [MLX5E_1000BASE_T][MLX5E_T] = { 142 .subtype = IFM_1000_T, 143 .baudrate = IF_Mbps(1000ULL), 144 }, 145 [MLX5E_10GBASE_T][MLX5E_T] = { 146 .subtype = IFM_10G_T, 147 .baudrate = IF_Gbps(10ULL), 148 }, 149 [MLX5E_25GBASE_CR][MLX5E_CR] = { 150 .subtype = IFM_25G_CR, 151 .baudrate = IF_Gbps(25ULL), 152 }, 153 [MLX5E_25GBASE_KR][MLX5E_KR] = { 154 .subtype = IFM_25G_KR, 155 .baudrate = IF_Gbps(25ULL), 156 }, 157 [MLX5E_25GBASE_SR][MLX5E_SR] = { 158 .subtype = IFM_25G_SR, 159 .baudrate = IF_Gbps(25ULL), 160 }, 161 [MLX5E_50GBASE_CR2][MLX5E_CR2] = { 162 .subtype = IFM_50G_CR2, 163 .baudrate = IF_Gbps(50ULL), 164 }, 165 [MLX5E_50GBASE_KR2][MLX5E_KR2] = { 166 .subtype = IFM_50G_KR2, 167 .baudrate = IF_Gbps(50ULL), 168 }, 169}; 170 171static const struct media mlx5e_ext_mode_table[MLX5E_EXT_LINK_SPEEDS_NUMBER][MLX5E_LINK_MODES_NUMBER] = { 172 [MLX5E_SGMII_100M][MLX5E_SGMII] = { 173 .subtype = IFM_100_SGMII, 174 .baudrate = IF_Mbps(100), 175 }, 176 [MLX5E_1000BASE_X_SGMII][MLX5E_KX] = { 177 .subtype = IFM_1000_KX, 178 .baudrate = IF_Mbps(1000), 179 }, 180 [MLX5E_1000BASE_X_SGMII][MLX5E_CX_SGMII] = { 181 .subtype = IFM_1000_CX_SGMII, 182 .baudrate = IF_Mbps(1000), 183 }, 184 [MLX5E_1000BASE_X_SGMII][MLX5E_CX] = { 185 .subtype = IFM_1000_CX, 186 .baudrate = IF_Mbps(1000), 187 }, 188 [MLX5E_1000BASE_X_SGMII][MLX5E_LX] = { 189 .subtype = IFM_1000_LX, 190 .baudrate = IF_Mbps(1000), 191 }, 192 [MLX5E_1000BASE_X_SGMII][MLX5E_SX] = { 193 .subtype = IFM_1000_SX, 194 .baudrate = IF_Mbps(1000), 195 }, 196 [MLX5E_1000BASE_X_SGMII][MLX5E_T] = { 197 .subtype = IFM_1000_T, 198 .baudrate = IF_Mbps(1000), 199 }, 200 [MLX5E_5GBASE_R][MLX5E_T] = { 201 .subtype = IFM_5000_T, 202 .baudrate = IF_Mbps(5000), 203 }, 204 [MLX5E_5GBASE_R][MLX5E_KR] = { 205 .subtype = IFM_5000_KR, 206 .baudrate = IF_Mbps(5000), 207 }, 208 [MLX5E_5GBASE_R][MLX5E_KR1] = { 209 .subtype = IFM_5000_KR1, 210 .baudrate = IF_Mbps(5000), 211 }, 212 [MLX5E_5GBASE_R][MLX5E_KR_S] = { 213 .subtype = IFM_5000_KR_S, 214 .baudrate = IF_Mbps(5000), 215 }, 216 [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_ER] = { 217 .subtype = IFM_10G_ER, 218 .baudrate = IF_Gbps(10ULL), 219 }, 220 [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_KR] = { 221 .subtype = IFM_10G_KR, 222 .baudrate = IF_Gbps(10ULL), 223 }, 224 [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_LR] = { 225 .subtype = IFM_10G_LR, 226 .baudrate = IF_Gbps(10ULL), 227 }, 228 [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_SR] = { 229 .subtype = IFM_10G_SR, 230 .baudrate = IF_Gbps(10ULL), 231 }, 232 [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_T] = { 233 .subtype = IFM_10G_T, 234 .baudrate = IF_Gbps(10ULL), 235 }, 236 [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_AOC] = { 237 .subtype = IFM_10G_AOC, 238 .baudrate = 
IF_Gbps(10ULL), 239 }, 240 [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_CR1] = { 241 .subtype = IFM_10G_CR1, 242 .baudrate = IF_Gbps(10ULL), 243 }, 244 [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_CR4] = { 245 .subtype = IFM_40G_CR4, 246 .baudrate = IF_Gbps(40ULL), 247 }, 248 [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_KR4] = { 249 .subtype = IFM_40G_KR4, 250 .baudrate = IF_Gbps(40ULL), 251 }, 252 [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_LR4] = { 253 .subtype = IFM_40G_LR4, 254 .baudrate = IF_Gbps(40ULL), 255 }, 256 [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_SR4] = { 257 .subtype = IFM_40G_SR4, 258 .baudrate = IF_Gbps(40ULL), 259 }, 260 [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_ER4] = { 261 .subtype = IFM_40G_ER4, 262 .baudrate = IF_Gbps(40ULL), 263 }, 264 265 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR] = { 266 .subtype = IFM_25G_CR, 267 .baudrate = IF_Gbps(25ULL), 268 }, 269 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR] = { 270 .subtype = IFM_25G_KR, 271 .baudrate = IF_Gbps(25ULL), 272 }, 273 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_SR] = { 274 .subtype = IFM_25G_SR, 275 .baudrate = IF_Gbps(25ULL), 276 }, 277 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_ACC] = { 278 .subtype = IFM_25G_ACC, 279 .baudrate = IF_Gbps(25ULL), 280 }, 281 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_AOC] = { 282 .subtype = IFM_25G_AOC, 283 .baudrate = IF_Gbps(25ULL), 284 }, 285 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR1] = { 286 .subtype = IFM_25G_CR1, 287 .baudrate = IF_Gbps(25ULL), 288 }, 289 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR_S] = { 290 .subtype = IFM_25G_CR_S, 291 .baudrate = IF_Gbps(25ULL), 292 }, 293 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR1] = { 294 .subtype = IFM_5000_KR1, 295 .baudrate = IF_Gbps(25ULL), 296 }, 297 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR_S] = { 298 .subtype = IFM_25G_KR_S, 299 .baudrate = IF_Gbps(25ULL), 300 }, 301 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_LR] = { 302 .subtype = IFM_25G_LR, 303 .baudrate = IF_Gbps(25ULL), 304 }, 305 [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_T] = { 306 .subtype = IFM_25G_T, 307 .baudrate = IF_Gbps(25ULL), 308 }, 309 [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_CR2] = { 310 .subtype = IFM_50G_CR2, 311 .baudrate = IF_Gbps(50ULL), 312 }, 313 [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_KR2] = { 314 .subtype = IFM_50G_KR2, 315 .baudrate = IF_Gbps(50ULL), 316 }, 317 [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_SR2] = { 318 .subtype = IFM_50G_SR2, 319 .baudrate = IF_Gbps(50ULL), 320 }, 321 [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_LR2] = { 322 .subtype = IFM_50G_LR2, 323 .baudrate = IF_Gbps(50ULL), 324 }, 325 [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_LR] = { 326 .subtype = IFM_50G_LR, 327 .baudrate = IF_Gbps(50ULL), 328 }, 329 [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_SR] = { 330 .subtype = IFM_50G_SR, 331 .baudrate = IF_Gbps(50ULL), 332 }, 333 [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_CP] = { 334 .subtype = IFM_50G_CP, 335 .baudrate = IF_Gbps(50ULL), 336 }, 337 [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_FR] = { 338 .subtype = IFM_50G_FR, 339 .baudrate = IF_Gbps(50ULL), 340 }, 341 [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_KR_PAM4] = { 342 .subtype = IFM_50G_KR_PAM4, 343 .baudrate = IF_Gbps(50ULL), 344 }, 345 [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_CR4] = { 346 .subtype = IFM_100G_CR4, 347 .baudrate = IF_Gbps(100ULL), 348 }, 349 [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_KR4] = { 350 .subtype = IFM_100G_KR4, 351 .baudrate = IF_Gbps(100ULL), 352 }, 353 [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_LR4] = { 354 .subtype = IFM_100G_LR4, 355 .baudrate = IF_Gbps(100ULL), 356 }, 357 [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_SR4] = { 358 
.subtype = IFM_100G_SR4, 359 .baudrate = IF_Gbps(100ULL), 360 }, 361 [MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_SR2] = { 362 .subtype = IFM_100G_SR2, 363 .baudrate = IF_Gbps(100ULL), 364 }, 365 [MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_CP2] = { 366 .subtype = IFM_100G_CP2, 367 .baudrate = IF_Gbps(100ULL), 368 }, 369 [MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_KR2_PAM4] = { 370 .subtype = IFM_100G_KR2_PAM4, 371 .baudrate = IF_Gbps(100ULL), 372 }, 373 [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_DR4] = { 374 .subtype = IFM_200G_DR4, 375 .baudrate = IF_Gbps(200ULL), 376 }, 377 [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_LR4] = { 378 .subtype = IFM_200G_LR4, 379 .baudrate = IF_Gbps(200ULL), 380 }, 381 [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_SR4] = { 382 .subtype = IFM_200G_SR4, 383 .baudrate = IF_Gbps(200ULL), 384 }, 385 [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_FR4] = { 386 .subtype = IFM_200G_FR4, 387 .baudrate = IF_Gbps(200ULL), 388 }, 389 [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_CR4_PAM4] = { 390 .subtype = IFM_200G_CR4_PAM4, 391 .baudrate = IF_Gbps(200ULL), 392 }, 393 [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_KR4_PAM4] = { 394 .subtype = IFM_200G_KR4_PAM4, 395 .baudrate = IF_Gbps(200ULL), 396 }, 397}; 398 399MALLOC_DEFINE(M_MLX5EN, "MLX5EN", "MLX5 Ethernet"); 400 401static void 402mlx5e_update_carrier(struct mlx5e_priv *priv) 403{ 404 struct mlx5_core_dev *mdev = priv->mdev; 405 u32 out[MLX5_ST_SZ_DW(ptys_reg)]; 406 u32 eth_proto_oper; 407 int error; 408 u8 port_state; 409 u8 is_er_type; 410 u8 i, j; 411 bool ext; 412 struct media media_entry = {}; 413 414 port_state = mlx5_query_vport_state(mdev, 415 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0); 416 417 if (port_state == VPORT_STATE_UP) { 418 priv->media_status_last |= IFM_ACTIVE; 419 } else { 420 priv->media_status_last &= ~IFM_ACTIVE; 421 priv->media_active_last = IFM_ETHER; 422 if_link_state_change(priv->ifp, LINK_STATE_DOWN); 423 return; 424 } 425 426 error = mlx5_query_port_ptys(mdev, out, sizeof(out), 427 MLX5_PTYS_EN, 1); 428 if (error) { 429 priv->media_active_last = IFM_ETHER; 430 priv->ifp->if_baudrate = 1; 431 mlx5_en_err(priv->ifp, "query port ptys failed: 0x%x\n", 432 error); 433 return; 434 } 435 436 ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); 437 eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, 438 eth_proto_oper); 439 440 i = ilog2(eth_proto_oper); 441 442 for (j = 0; j != MLX5E_LINK_MODES_NUMBER; j++) { 443 media_entry = ext ? 
mlx5e_ext_mode_table[i][j] : 444 mlx5e_mode_table[i][j]; 445 if (media_entry.baudrate != 0) 446 break; 447 } 448 449 if (media_entry.subtype == 0) { 450 mlx5_en_err(priv->ifp, 451 "Could not find operational media subtype\n"); 452 return; 453 } 454 455 switch (media_entry.subtype) { 456 case IFM_10G_ER: 457 error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type); 458 if (error != 0) { 459 mlx5_en_err(priv->ifp, 460 "query port pddr failed: %d\n", error); 461 } 462 if (error != 0 || is_er_type == 0) 463 media_entry.subtype = IFM_10G_LR; 464 break; 465 case IFM_40G_LR4: 466 error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type); 467 if (error != 0) { 468 mlx5_en_err(priv->ifp, 469 "query port pddr failed: %d\n", error); 470 } 471 if (error == 0 && is_er_type != 0) 472 media_entry.subtype = IFM_40G_ER4; 473 break; 474 } 475 priv->media_active_last = media_entry.subtype | IFM_ETHER | IFM_FDX; 476 priv->ifp->if_baudrate = media_entry.baudrate; 477 478 if_link_state_change(priv->ifp, LINK_STATE_UP); 479} 480 481static void 482mlx5e_media_status(struct ifnet *dev, struct ifmediareq *ifmr) 483{ 484 struct mlx5e_priv *priv = dev->if_softc; 485 486 ifmr->ifm_status = priv->media_status_last; 487 ifmr->ifm_active = priv->media_active_last | 488 (priv->params.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) | 489 (priv->params.tx_pauseframe_control ? IFM_ETH_TXPAUSE : 0); 490 491} 492 493static u32 494mlx5e_find_link_mode(u32 subtype, bool ext) 495{ 496 u32 i; 497 u32 j; 498 u32 link_mode = 0; 499 u32 speeds_num = 0; 500 struct media media_entry = {}; 501 502 switch (subtype) { 503 case IFM_10G_LR: 504 subtype = IFM_10G_ER; 505 break; 506 case IFM_40G_ER4: 507 subtype = IFM_40G_LR4; 508 break; 509 } 510 511 speeds_num = ext ? MLX5E_EXT_LINK_SPEEDS_NUMBER : 512 MLX5E_LINK_SPEEDS_NUMBER; 513 514 for (i = 0; i != speeds_num; i++) { 515 for (j = 0; j < MLX5E_LINK_MODES_NUMBER ; ++j) { 516 media_entry = ext ? 
mlx5e_ext_mode_table[i][j] : 517 mlx5e_mode_table[i][j]; 518 if (media_entry.baudrate == 0) 519 continue; 520 if (media_entry.subtype == subtype) { 521 link_mode |= MLX5E_PROT_MASK(i); 522 } 523 } 524 } 525 526 return (link_mode); 527} 528 529static int 530mlx5e_set_port_pause_and_pfc(struct mlx5e_priv *priv) 531{ 532 return (mlx5_set_port_pause_and_pfc(priv->mdev, 1, 533 priv->params.rx_pauseframe_control, 534 priv->params.tx_pauseframe_control, 535 priv->params.rx_priority_flow_control, 536 priv->params.tx_priority_flow_control)); 537} 538 539static int 540mlx5e_set_port_pfc(struct mlx5e_priv *priv) 541{ 542 int error; 543 544 if (priv->gone != 0) { 545 error = -ENXIO; 546 } else if (priv->params.rx_pauseframe_control || 547 priv->params.tx_pauseframe_control) { 548 mlx5_en_err(priv->ifp, 549 "Global pauseframes must be disabled before enabling PFC.\n"); 550 error = -EINVAL; 551 } else { 552 error = mlx5e_set_port_pause_and_pfc(priv); 553 } 554 return (error); 555} 556 557static int 558mlx5e_media_change(struct ifnet *dev) 559{ 560 struct mlx5e_priv *priv = dev->if_softc; 561 struct mlx5_core_dev *mdev = priv->mdev; 562 u32 eth_proto_cap; 563 u32 link_mode; 564 u32 out[MLX5_ST_SZ_DW(ptys_reg)]; 565 int was_opened; 566 int locked; 567 int error; 568 bool ext; 569 570 locked = PRIV_LOCKED(priv); 571 if (!locked) 572 PRIV_LOCK(priv); 573 574 if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) { 575 error = EINVAL; 576 goto done; 577 } 578 579 error = mlx5_query_port_ptys(mdev, out, sizeof(out), 580 MLX5_PTYS_EN, 1); 581 if (error != 0) { 582 mlx5_en_err(dev, "Query port media capability failed\n"); 583 goto done; 584 } 585 586 ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); 587 link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media), ext); 588 589 /* query supported capabilities */ 590 eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, 591 eth_proto_capability); 592 593 /* check for autoselect */ 594 if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO) { 595 link_mode = eth_proto_cap; 596 if (link_mode == 0) { 597 mlx5_en_err(dev, "Port media capability is zero\n"); 598 error = EINVAL; 599 goto done; 600 } 601 } else { 602 link_mode = link_mode & eth_proto_cap; 603 if (link_mode == 0) { 604 mlx5_en_err(dev, "Not supported link mode requested\n"); 605 error = EINVAL; 606 goto done; 607 } 608 } 609 if (priv->media.ifm_media & (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) { 610 /* check if PFC is enabled */ 611 if (priv->params.rx_priority_flow_control || 612 priv->params.tx_priority_flow_control) { 613 mlx5_en_err(dev, "PFC must be disabled before enabling global pauseframes.\n"); 614 error = EINVAL; 615 goto done; 616 } 617 } 618 /* update pauseframe control bits */ 619 priv->params.rx_pauseframe_control = 620 (priv->media.ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0; 621 priv->params.tx_pauseframe_control = 622 (priv->media.ifm_media & IFM_ETH_TXPAUSE) ? 
1 : 0; 623 624 /* check if device is opened */ 625 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); 626 627 /* reconfigure the hardware */ 628 mlx5_set_port_status(mdev, MLX5_PORT_DOWN); 629 mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN, ext); 630 error = -mlx5e_set_port_pause_and_pfc(priv); 631 if (was_opened) 632 mlx5_set_port_status(mdev, MLX5_PORT_UP); 633 634done: 635 if (!locked) 636 PRIV_UNLOCK(priv); 637 return (error); 638} 639 640static void 641mlx5e_update_carrier_work(struct work_struct *work) 642{ 643 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, 644 update_carrier_work); 645 646 PRIV_LOCK(priv); 647 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 648 mlx5e_update_carrier(priv); 649 PRIV_UNLOCK(priv); 650} 651 652#define MLX5E_PCIE_PERF_GET_64(a,b,c,d,e,f) \ 653 s_debug->c = MLX5_GET64(mpcnt_reg, out, counter_set.f.c); 654 655#define MLX5E_PCIE_PERF_GET_32(a,b,c,d,e,f) \ 656 s_debug->c = MLX5_GET(mpcnt_reg, out, counter_set.f.c); 657 658static void 659mlx5e_update_pcie_counters(struct mlx5e_priv *priv) 660{ 661 struct mlx5_core_dev *mdev = priv->mdev; 662 struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug; 663 const unsigned sz = MLX5_ST_SZ_BYTES(mpcnt_reg); 664 void *out; 665 void *in; 666 int err; 667 668 /* allocate firmware request structures */ 669 in = mlx5_vzalloc(sz); 670 out = mlx5_vzalloc(sz); 671 if (in == NULL || out == NULL) 672 goto free_out; 673 674 MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP); 675 err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); 676 if (err != 0) 677 goto free_out; 678 679 MLX5E_PCIE_PERFORMANCE_COUNTERS_64(MLX5E_PCIE_PERF_GET_64) 680 MLX5E_PCIE_PERFORMANCE_COUNTERS_32(MLX5E_PCIE_PERF_GET_32) 681 682 MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP); 683 err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); 684 if (err != 0) 685 goto free_out; 686 687 MLX5E_PCIE_TIMERS_AND_STATES_COUNTERS_32(MLX5E_PCIE_PERF_GET_32) 688 689 MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_LANE_COUNTERS_GROUP); 690 err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); 691 if (err != 0) 692 goto free_out; 693 694 MLX5E_PCIE_LANE_COUNTERS_32(MLX5E_PCIE_PERF_GET_32) 695 696free_out: 697 /* free firmware request structures */ 698 kvfree(in); 699 kvfree(out); 700} 701 702/* 703 * This function reads the physical port counters from the firmware 704 * using a pre-defined layout defined by various MLX5E_PPORT_XXX() 705 * macros. The output is converted from big-endian 64-bit values into 706 * host endian ones and stored in the "priv->stats.pport" structure. 
707 */ 708static void 709mlx5e_update_pport_counters(struct mlx5e_priv *priv) 710{ 711 struct mlx5_core_dev *mdev = priv->mdev; 712 struct mlx5e_pport_stats *s = &priv->stats.pport; 713 struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug; 714 u32 *in; 715 u32 *out; 716 const u64 *ptr; 717 unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg); 718 unsigned x; 719 unsigned y; 720 unsigned z; 721 722 /* allocate firmware request structures */ 723 in = mlx5_vzalloc(sz); 724 out = mlx5_vzalloc(sz); 725 if (in == NULL || out == NULL) 726 goto free_out; 727 728 /* 729 * Get pointer to the 64-bit counter set which is located at a 730 * fixed offset in the output firmware request structure: 731 */ 732 ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set); 733 734 MLX5_SET(ppcnt_reg, in, local_port, 1); 735 736 /* read IEEE802_3 counter group using predefined counter layout */ 737 MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP); 738 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); 739 for (x = 0, y = MLX5E_PPORT_PER_PRIO_STATS_NUM; 740 x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++) 741 s->arg[y] = be64toh(ptr[x]); 742 743 /* read RFC2819 counter group using predefined counter layout */ 744 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP); 745 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); 746 for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++) 747 s->arg[y] = be64toh(ptr[x]); 748 749 for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM + 750 MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++) 751 s_debug->arg[y] = be64toh(ptr[x]); 752 753 /* read RFC2863 counter group using predefined counter layout */ 754 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP); 755 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); 756 for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++) 757 s_debug->arg[y] = be64toh(ptr[x]); 758 759 /* read physical layer stats counter group using predefined counter layout */ 760 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP); 761 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); 762 for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++) 763 s_debug->arg[y] = be64toh(ptr[x]); 764 765 /* read Extended Ethernet counter group using predefined counter layout */ 766 MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP); 767 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); 768 for (x = 0; x != MLX5E_PPORT_ETHERNET_EXTENDED_STATS_DEBUG_NUM; x++, y++) 769 s_debug->arg[y] = be64toh(ptr[x]); 770 771 /* read Extended Statistical Group */ 772 if (MLX5_CAP_GEN(mdev, pcam_reg) && 773 MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) && 774 MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters)) { 775 /* read Extended Statistical counter group using predefined counter layout */ 776 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP); 777 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); 778 779 for (x = 0; x != MLX5E_PPORT_STATISTICAL_DEBUG_NUM; x++, y++) 780 s_debug->arg[y] = be64toh(ptr[x]); 781 } 782 783 /* read PCIE counters */ 784 mlx5e_update_pcie_counters(priv); 785 786 /* read per-priority counters */ 787 MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP); 788 789 /* iterate all the priorities */ 790 for (y = z = 0; z != MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO; z++) { 791 MLX5_SET(ppcnt_reg, in, prio_tc, z); 792 mlx5_core_access_reg(mdev, in, sz, out, sz, 
MLX5_REG_PPCNT, 0, 0); 793 794 /* read per priority stats counter group using predefined counter layout */ 795 for (x = 0; x != (MLX5E_PPORT_PER_PRIO_STATS_NUM / 796 MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO); x++, y++) 797 s->arg[y] = be64toh(ptr[x]); 798 } 799 800free_out: 801 /* free firmware request structures */ 802 kvfree(in); 803 kvfree(out); 804} 805 806static void 807mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv) 808{ 809 u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {}; 810 u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {}; 811 812 if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) 813 return; 814 815 MLX5_SET(query_vnic_env_in, in, opcode, 816 MLX5_CMD_OP_QUERY_VNIC_ENV); 817 MLX5_SET(query_vnic_env_in, in, op_mod, 0); 818 MLX5_SET(query_vnic_env_in, in, other_vport, 0); 819 820 if (mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)) != 0) 821 return; 822 823 priv->stats.vport.rx_steer_missed_packets = 824 MLX5_GET64(query_vnic_env_out, out, 825 vport_env.nic_receive_steering_discard); 826} 827 828/* 829 * This function is called regularly to collect all statistics 830 * counters from the firmware. The values can be viewed through the 831 * sysctl interface. Execution is serialized using the priv's global 832 * configuration lock. 833 */ 834static void 835mlx5e_update_stats_locked(struct mlx5e_priv *priv) 836{ 837 struct mlx5_core_dev *mdev = priv->mdev; 838 struct mlx5e_vport_stats *s = &priv->stats.vport; 839 struct mlx5e_sq_stats *sq_stats; 840#if (__FreeBSD_version < 1100000) 841 struct ifnet *ifp = priv->ifp; 842#endif 843 844 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)]; 845 u32 *out; 846 int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); 847 u64 tso_packets = 0; 848 u64 tso_bytes = 0; 849 u64 tx_queue_dropped = 0; 850 u64 tx_defragged = 0; 851 u64 tx_offload_none = 0; 852 u64 lro_packets = 0; 853 u64 lro_bytes = 0; 854 u64 sw_lro_queued = 0; 855 u64 sw_lro_flushed = 0; 856 u64 rx_csum_none = 0; 857 u64 rx_wqe_err = 0; 858 u64 rx_packets = 0; 859 u64 rx_bytes = 0; 860 u32 rx_out_of_buffer = 0; 861 int error; 862 int i; 863 int j; 864 865 out = mlx5_vzalloc(outlen); 866 if (out == NULL) 867 goto free_out; 868 869 /* Collect firts the SW counters and then HW for consistency */ 870 for (i = 0; i < priv->params.num_channels; i++) { 871 struct mlx5e_channel *pch = priv->channel + i; 872 struct mlx5e_rq *rq = &pch->rq; 873 struct mlx5e_rq_stats *rq_stats = &pch->rq.stats; 874 875 /* collect stats from LRO */ 876 rq_stats->sw_lro_queued = rq->lro.lro_queued; 877 rq_stats->sw_lro_flushed = rq->lro.lro_flushed; 878 sw_lro_queued += rq_stats->sw_lro_queued; 879 sw_lro_flushed += rq_stats->sw_lro_flushed; 880 lro_packets += rq_stats->lro_packets; 881 lro_bytes += rq_stats->lro_bytes; 882 rx_csum_none += rq_stats->csum_none; 883 rx_wqe_err += rq_stats->wqe_err; 884 rx_packets += rq_stats->packets; 885 rx_bytes += rq_stats->bytes; 886 887 for (j = 0; j < priv->num_tc; j++) { 888 sq_stats = &pch->sq[j].stats; 889 890 tso_packets += sq_stats->tso_packets; 891 tso_bytes += sq_stats->tso_bytes; 892 tx_queue_dropped += sq_stats->dropped; 893 tx_queue_dropped += sq_stats->enobuf; 894 tx_defragged += sq_stats->defragged; 895 tx_offload_none += sq_stats->csum_offload_none; 896 } 897 } 898 899 /* update counters */ 900 s->tso_packets = tso_packets; 901 s->tso_bytes = tso_bytes; 902 s->tx_queue_dropped = tx_queue_dropped; 903 s->tx_defragged = tx_defragged; 904 s->lro_packets = lro_packets; 905 s->lro_bytes = lro_bytes; 906 s->sw_lro_queued = sw_lro_queued; 907 
s->sw_lro_flushed = sw_lro_flushed; 908 s->rx_csum_none = rx_csum_none; 909 s->rx_wqe_err = rx_wqe_err; 910 s->rx_packets = rx_packets; 911 s->rx_bytes = rx_bytes; 912 913 mlx5e_grp_vnic_env_update_stats(priv); 914 915 /* HW counters */ 916 memset(in, 0, sizeof(in)); 917 918 MLX5_SET(query_vport_counter_in, in, opcode, 919 MLX5_CMD_OP_QUERY_VPORT_COUNTER); 920 MLX5_SET(query_vport_counter_in, in, op_mod, 0); 921 MLX5_SET(query_vport_counter_in, in, other_vport, 0); 922 923 memset(out, 0, outlen); 924 925 /* get number of out-of-buffer drops first */ 926 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0 && 927 mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id, 928 &rx_out_of_buffer) == 0) { 929 s->rx_out_of_buffer = rx_out_of_buffer; 930 } 931 932 /* get port statistics */ 933 if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen) == 0) { 934#define MLX5_GET_CTR(out, x) \ 935 MLX5_GET64(query_vport_counter_out, out, x) 936 937 s->rx_error_packets = 938 MLX5_GET_CTR(out, received_errors.packets); 939 s->rx_error_bytes = 940 MLX5_GET_CTR(out, received_errors.octets); 941 s->tx_error_packets = 942 MLX5_GET_CTR(out, transmit_errors.packets); 943 s->tx_error_bytes = 944 MLX5_GET_CTR(out, transmit_errors.octets); 945 946 s->rx_unicast_packets = 947 MLX5_GET_CTR(out, received_eth_unicast.packets); 948 s->rx_unicast_bytes = 949 MLX5_GET_CTR(out, received_eth_unicast.octets); 950 s->tx_unicast_packets = 951 MLX5_GET_CTR(out, transmitted_eth_unicast.packets); 952 s->tx_unicast_bytes = 953 MLX5_GET_CTR(out, transmitted_eth_unicast.octets); 954 955 s->rx_multicast_packets = 956 MLX5_GET_CTR(out, received_eth_multicast.packets); 957 s->rx_multicast_bytes = 958 MLX5_GET_CTR(out, received_eth_multicast.octets); 959 s->tx_multicast_packets = 960 MLX5_GET_CTR(out, transmitted_eth_multicast.packets); 961 s->tx_multicast_bytes = 962 MLX5_GET_CTR(out, transmitted_eth_multicast.octets); 963 964 s->rx_broadcast_packets = 965 MLX5_GET_CTR(out, received_eth_broadcast.packets); 966 s->rx_broadcast_bytes = 967 MLX5_GET_CTR(out, received_eth_broadcast.octets); 968 s->tx_broadcast_packets = 969 MLX5_GET_CTR(out, transmitted_eth_broadcast.packets); 970 s->tx_broadcast_bytes = 971 MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); 972 973 s->tx_packets = s->tx_unicast_packets + 974 s->tx_multicast_packets + s->tx_broadcast_packets; 975 s->tx_bytes = s->tx_unicast_bytes + s->tx_multicast_bytes + 976 s->tx_broadcast_bytes; 977 978 /* Update calculated offload counters */ 979 s->tx_csum_offload = s->tx_packets - tx_offload_none; 980 s->rx_csum_good = s->rx_packets - s->rx_csum_none; 981 } 982 983 /* Get physical port counters */ 984 mlx5e_update_pport_counters(priv); 985 986 s->tx_jumbo_packets = 987 priv->stats.port_stats_debug.tx_stat_p1519to2047octets + 988 priv->stats.port_stats_debug.tx_stat_p2048to4095octets + 989 priv->stats.port_stats_debug.tx_stat_p4096to8191octets + 990 priv->stats.port_stats_debug.tx_stat_p8192to10239octets; 991 992#if (__FreeBSD_version < 1100000) 993 /* no get_counters interface in fbsd 10 */ 994 ifp->if_ipackets = s->rx_packets; 995 ifp->if_ierrors = priv->stats.pport.in_range_len_errors + 996 priv->stats.pport.out_of_range_len + 997 priv->stats.pport.too_long_errors + 998 priv->stats.pport.check_seq_err + 999 priv->stats.pport.alignment_err; 1000 ifp->if_iqdrops = s->rx_out_of_buffer; 1001 ifp->if_opackets = s->tx_packets; 1002 ifp->if_oerrors = priv->stats.port_stats_debug.out_discards; 1003 ifp->if_snd.ifq_drops = s->tx_queue_dropped; 1004 ifp->if_ibytes = s->rx_bytes; 1005 
ifp->if_obytes = s->tx_bytes; 1006 ifp->if_collisions = 1007 priv->stats.pport.collisions; 1008#endif 1009 1010free_out: 1011 kvfree(out); 1012 1013 /* Update diagnostics, if any */ 1014 if (priv->params_ethtool.diag_pci_enable || 1015 priv->params_ethtool.diag_general_enable) { 1016 error = mlx5_core_get_diagnostics_full(mdev, 1017 priv->params_ethtool.diag_pci_enable ? &priv->params_pci : NULL, 1018 priv->params_ethtool.diag_general_enable ? &priv->params_general : NULL); 1019 if (error != 0) 1020 mlx5_en_err(priv->ifp, 1021 "Failed reading diagnostics: %d\n", error); 1022 } 1023 1024 /* Update FEC, if any */ 1025 error = mlx5e_fec_update(priv); 1026 if (error != 0 && error != EOPNOTSUPP) { 1027 mlx5_en_err(priv->ifp, 1028 "Updating FEC failed: %d\n", error); 1029 } 1030 1031 /* Update temperature, if any */ 1032 if (priv->params_ethtool.hw_num_temp != 0) { 1033 error = mlx5e_hw_temperature_update(priv); 1034 if (error != 0 && error != EOPNOTSUPP) { 1035 mlx5_en_err(priv->ifp, 1036 "Updating temperature failed: %d\n", error); 1037 } 1038 } 1039} 1040 1041static void 1042mlx5e_update_stats_work(struct work_struct *work) 1043{ 1044 struct mlx5e_priv *priv; 1045 1046 priv = container_of(work, struct mlx5e_priv, update_stats_work); 1047 PRIV_LOCK(priv); 1048 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0 && 1049 !test_bit(MLX5_INTERFACE_STATE_TEARDOWN, &priv->mdev->intf_state)) 1050 mlx5e_update_stats_locked(priv); 1051 PRIV_UNLOCK(priv); 1052} 1053 1054static void 1055mlx5e_update_stats(void *arg) 1056{ 1057 struct mlx5e_priv *priv = arg; 1058 1059 queue_work(priv->wq, &priv->update_stats_work); 1060 1061 callout_reset(&priv->watchdog, hz, &mlx5e_update_stats, priv); 1062} 1063 1064static void 1065mlx5e_async_event_sub(struct mlx5e_priv *priv, 1066 enum mlx5_dev_event event) 1067{ 1068 switch (event) { 1069 case MLX5_DEV_EVENT_PORT_UP: 1070 case MLX5_DEV_EVENT_PORT_DOWN: 1071 queue_work(priv->wq, &priv->update_carrier_work); 1072 break; 1073 1074 default: 1075 break; 1076 } 1077} 1078 1079static void 1080mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, 1081 enum mlx5_dev_event event, unsigned long param) 1082{ 1083 struct mlx5e_priv *priv = vpriv; 1084 1085 mtx_lock(&priv->async_events_mtx); 1086 if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state)) 1087 mlx5e_async_event_sub(priv, event); 1088 mtx_unlock(&priv->async_events_mtx); 1089} 1090 1091static void 1092mlx5e_enable_async_events(struct mlx5e_priv *priv) 1093{ 1094 set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); 1095} 1096 1097static void 1098mlx5e_disable_async_events(struct mlx5e_priv *priv) 1099{ 1100 mtx_lock(&priv->async_events_mtx); 1101 clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); 1102 mtx_unlock(&priv->async_events_mtx); 1103} 1104 1105static const char *mlx5e_rq_stats_desc[] = { 1106 MLX5E_RQ_STATS(MLX5E_STATS_DESC) 1107}; 1108 1109static int 1110mlx5e_create_rq(struct mlx5e_channel *c, 1111 struct mlx5e_rq_param *param, 1112 struct mlx5e_rq *rq) 1113{ 1114 struct mlx5e_priv *priv = c->priv; 1115 struct mlx5_core_dev *mdev = priv->mdev; 1116 char buffer[16]; 1117 void *rqc = param->rqc; 1118 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); 1119 int wq_sz; 1120 int err; 1121 int i; 1122 u32 nsegs, wqe_sz; 1123 1124 err = mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs); 1125 if (err != 0) 1126 goto done; 1127 1128 /* Create DMA descriptor TAG */ 1129 if ((err = -bus_dma_tag_create( 1130 bus_get_dma_tag(mdev->pdev->dev.bsddev), 1131 1, /* any alignment */ 1132 0, /* no boundary */ 1133 
BUS_SPACE_MAXADDR, /* lowaddr */ 1134 BUS_SPACE_MAXADDR, /* highaddr */ 1135 NULL, NULL, /* filter, filterarg */ 1136 nsegs * MLX5E_MAX_RX_BYTES, /* maxsize */ 1137 nsegs, /* nsegments */ 1138 nsegs * MLX5E_MAX_RX_BYTES, /* maxsegsize */ 1139 0, /* flags */ 1140 NULL, NULL, /* lockfunc, lockfuncarg */ 1141 &rq->dma_tag))) 1142 goto done; 1143 1144 err = mlx5_wq_ll_create(mdev, ¶m->wq, rqc_wq, &rq->wq, 1145 &rq->wq_ctrl); 1146 if (err) 1147 goto err_free_dma_tag; 1148 1149 rq->wq.db = &rq->wq.db[MLX5_RCV_DBR]; 1150 1151 err = mlx5e_get_wqe_sz(priv, &rq->wqe_sz, &rq->nsegs); 1152 if (err != 0) 1153 goto err_rq_wq_destroy; 1154 1155 wq_sz = mlx5_wq_ll_get_size(&rq->wq); 1156 1157 err = -tcp_lro_init_args(&rq->lro, c->ifp, TCP_LRO_ENTRIES, wq_sz); 1158 if (err) 1159 goto err_rq_wq_destroy; 1160 1161 rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO); 1162 for (i = 0; i != wq_sz; i++) { 1163 struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i); 1164 int j; 1165 1166 err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map); 1167 if (err != 0) { 1168 while (i--) 1169 bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map); 1170 goto err_rq_mbuf_free; 1171 } 1172 1173 /* set value for constant fields */ 1174 for (j = 0; j < rq->nsegs; j++) 1175 wqe->data[j].lkey = cpu_to_be32(priv->mr.key); 1176 } 1177 1178 INIT_WORK(&rq->dim.work, mlx5e_dim_work); 1179 if (priv->params.rx_cq_moderation_mode < 2) { 1180 rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED; 1181 } else { 1182 void *cqc = container_of(param, 1183 struct mlx5e_channel_param, rq)->rx_cq.cqc; 1184 1185 switch (MLX5_GET(cqc, cqc, cq_period_mode)) { 1186 case MLX5_CQ_PERIOD_MODE_START_FROM_EQE: 1187 rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; 1188 break; 1189 case MLX5_CQ_PERIOD_MODE_START_FROM_CQE: 1190 rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE; 1191 break; 1192 default: 1193 rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED; 1194 break; 1195 } 1196 } 1197 1198 rq->ifp = c->ifp; 1199 rq->channel = c; 1200 rq->ix = c->ix; 1201 1202 snprintf(buffer, sizeof(buffer), "rxstat%d", c->ix); 1203 mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 1204 buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM, 1205 rq->stats.arg); 1206 return (0); 1207 1208err_rq_mbuf_free: 1209 free(rq->mbuf, M_MLX5EN); 1210 tcp_lro_free(&rq->lro); 1211err_rq_wq_destroy: 1212 mlx5_wq_destroy(&rq->wq_ctrl); 1213err_free_dma_tag: 1214 bus_dma_tag_destroy(rq->dma_tag); 1215done: 1216 return (err); 1217} 1218 1219static void 1220mlx5e_destroy_rq(struct mlx5e_rq *rq) 1221{ 1222 int wq_sz; 1223 int i; 1224 1225 /* destroy all sysctl nodes */ 1226 sysctl_ctx_free(&rq->stats.ctx); 1227 1228 /* free leftover LRO packets, if any */ 1229 tcp_lro_free(&rq->lro); 1230 1231 wq_sz = mlx5_wq_ll_get_size(&rq->wq); 1232 for (i = 0; i != wq_sz; i++) { 1233 if (rq->mbuf[i].mbuf != NULL) { 1234 bus_dmamap_unload(rq->dma_tag, rq->mbuf[i].dma_map); 1235 m_freem(rq->mbuf[i].mbuf); 1236 } 1237 bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map); 1238 } 1239 free(rq->mbuf, M_MLX5EN); 1240 mlx5_wq_destroy(&rq->wq_ctrl); 1241 bus_dma_tag_destroy(rq->dma_tag); 1242} 1243 1244static int 1245mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) 1246{ 1247 struct mlx5e_channel *c = rq->channel; 1248 struct mlx5e_priv *priv = c->priv; 1249 struct mlx5_core_dev *mdev = priv->mdev; 1250 1251 void *in; 1252 void *rqc; 1253 void *wq; 1254 int inlen; 1255 int err; 1256 1257 inlen = MLX5_ST_SZ_BYTES(create_rq_in) + 1258 sizeof(u64) 
* rq->wq_ctrl.buf.npages; 1259 in = mlx5_vzalloc(inlen); 1260 if (in == NULL) 1261 return (-ENOMEM); 1262 1263 rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); 1264 wq = MLX5_ADDR_OF(rqc, rqc, wq); 1265 1266 memcpy(rqc, param->rqc, sizeof(param->rqc)); 1267 1268 MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn); 1269 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); 1270 MLX5_SET(rqc, rqc, flush_in_error_en, 1); 1271 if (priv->counter_set_id >= 0) 1272 MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id); 1273 MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - 1274 PAGE_SHIFT); 1275 MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma); 1276 1277 mlx5_fill_page_array(&rq->wq_ctrl.buf, 1278 (__be64 *) MLX5_ADDR_OF(wq, wq, pas)); 1279 1280 err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn); 1281 1282 kvfree(in); 1283 1284 return (err); 1285} 1286 1287static int 1288mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state) 1289{ 1290 struct mlx5e_channel *c = rq->channel; 1291 struct mlx5e_priv *priv = c->priv; 1292 struct mlx5_core_dev *mdev = priv->mdev; 1293 1294 void *in; 1295 void *rqc; 1296 int inlen; 1297 int err; 1298 1299 inlen = MLX5_ST_SZ_BYTES(modify_rq_in); 1300 in = mlx5_vzalloc(inlen); 1301 if (in == NULL) 1302 return (-ENOMEM); 1303 1304 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); 1305 1306 MLX5_SET(modify_rq_in, in, rqn, rq->rqn); 1307 MLX5_SET(modify_rq_in, in, rq_state, curr_state); 1308 MLX5_SET(rqc, rqc, state, next_state); 1309 1310 err = mlx5_core_modify_rq(mdev, in, inlen); 1311 1312 kvfree(in); 1313 1314 return (err); 1315} 1316 1317static void 1318mlx5e_disable_rq(struct mlx5e_rq *rq) 1319{ 1320 struct mlx5e_channel *c = rq->channel; 1321 struct mlx5e_priv *priv = c->priv; 1322 struct mlx5_core_dev *mdev = priv->mdev; 1323 1324 mlx5_core_destroy_rq(mdev, rq->rqn); 1325} 1326 1327static int 1328mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq) 1329{ 1330 struct mlx5e_channel *c = rq->channel; 1331 struct mlx5e_priv *priv = c->priv; 1332 struct mlx5_wq_ll *wq = &rq->wq; 1333 int i; 1334 1335 for (i = 0; i < 1000; i++) { 1336 if (wq->cur_sz >= priv->params.min_rx_wqes) 1337 return (0); 1338 1339 msleep(4); 1340 } 1341 return (-ETIMEDOUT); 1342} 1343 1344static int 1345mlx5e_open_rq(struct mlx5e_channel *c, 1346 struct mlx5e_rq_param *param, 1347 struct mlx5e_rq *rq) 1348{ 1349 int err; 1350 1351 err = mlx5e_create_rq(c, param, rq); 1352 if (err) 1353 return (err); 1354 1355 err = mlx5e_enable_rq(rq, param); 1356 if (err) 1357 goto err_destroy_rq; 1358 1359 err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); 1360 if (err) 1361 goto err_disable_rq; 1362 1363 c->rq.enabled = 1; 1364 1365 return (0); 1366 1367err_disable_rq: 1368 mlx5e_disable_rq(rq); 1369err_destroy_rq: 1370 mlx5e_destroy_rq(rq); 1371 1372 return (err); 1373} 1374 1375static void 1376mlx5e_close_rq(struct mlx5e_rq *rq) 1377{ 1378 mtx_lock(&rq->mtx); 1379 rq->enabled = 0; 1380 callout_stop(&rq->watchdog); 1381 mtx_unlock(&rq->mtx); 1382 1383 mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); 1384} 1385 1386static void 1387mlx5e_close_rq_wait(struct mlx5e_rq *rq) 1388{ 1389 1390 mlx5e_disable_rq(rq); 1391 mlx5e_close_cq(&rq->cq); 1392 cancel_work_sync(&rq->dim.work); 1393 mlx5e_destroy_rq(rq); 1394} 1395 1396void 1397mlx5e_free_sq_db(struct mlx5e_sq *sq) 1398{ 1399 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); 1400 int x; 1401 1402 for (x = 0; x != wq_sz; x++) { 1403 if (sq->mbuf[x].mbuf != NULL) { 1404 bus_dmamap_unload(sq->dma_tag, sq->mbuf[x].dma_map); 1405 
m_freem(sq->mbuf[x].mbuf); 1406 } 1407 bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map); 1408 } 1409 free(sq->mbuf, M_MLX5EN); 1410} 1411 1412int 1413mlx5e_alloc_sq_db(struct mlx5e_sq *sq) 1414{ 1415 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); 1416 int err; 1417 int x; 1418 1419 sq->mbuf = malloc(wq_sz * sizeof(sq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO); 1420 1421 /* Create DMA descriptor MAPs */ 1422 for (x = 0; x != wq_sz; x++) { 1423 err = -bus_dmamap_create(sq->dma_tag, 0, &sq->mbuf[x].dma_map); 1424 if (err != 0) { 1425 while (x--) 1426 bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map); 1427 free(sq->mbuf, M_MLX5EN); 1428 return (err); 1429 } 1430 } 1431 return (0); 1432} 1433 1434static const char *mlx5e_sq_stats_desc[] = { 1435 MLX5E_SQ_STATS(MLX5E_STATS_DESC) 1436}; 1437 1438void 1439mlx5e_update_sq_inline(struct mlx5e_sq *sq) 1440{ 1441 sq->max_inline = sq->priv->params.tx_max_inline; 1442 sq->min_inline_mode = sq->priv->params.tx_min_inline_mode; 1443 1444 /* 1445 * Check if trust state is DSCP or if inline mode is NONE which 1446 * indicates CX-5 or newer hardware. 1447 */ 1448 if (sq->priv->params_ethtool.trust_state != MLX5_QPTS_TRUST_PCP || 1449 sq->min_inline_mode == MLX5_INLINE_MODE_NONE) { 1450 if (MLX5_CAP_ETH(sq->priv->mdev, wqe_vlan_insert)) 1451 sq->min_insert_caps = MLX5E_INSERT_VLAN | MLX5E_INSERT_NON_VLAN; 1452 else 1453 sq->min_insert_caps = MLX5E_INSERT_NON_VLAN; 1454 } else { 1455 sq->min_insert_caps = 0; 1456 } 1457} 1458 1459static void 1460mlx5e_refresh_sq_inline_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c) 1461{ 1462 int i; 1463 1464 for (i = 0; i != priv->num_tc; i++) { 1465 mtx_lock(&c->sq[i].lock); 1466 mlx5e_update_sq_inline(&c->sq[i]); 1467 mtx_unlock(&c->sq[i].lock); 1468 } 1469} 1470 1471void 1472mlx5e_refresh_sq_inline(struct mlx5e_priv *priv) 1473{ 1474 int i; 1475 1476 /* check if channels are closed */ 1477 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 1478 return; 1479 1480 for (i = 0; i < priv->params.num_channels; i++) 1481 mlx5e_refresh_sq_inline_sub(priv, &priv->channel[i]); 1482} 1483 1484static int 1485mlx5e_create_sq(struct mlx5e_channel *c, 1486 int tc, 1487 struct mlx5e_sq_param *param, 1488 struct mlx5e_sq *sq) 1489{ 1490 struct mlx5e_priv *priv = c->priv; 1491 struct mlx5_core_dev *mdev = priv->mdev; 1492 char buffer[16]; 1493 void *sqc = param->sqc; 1494 void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq); 1495 int err; 1496 1497 /* Create DMA descriptor TAG */ 1498 if ((err = -bus_dma_tag_create( 1499 bus_get_dma_tag(mdev->pdev->dev.bsddev), 1500 1, /* any alignment */ 1501 0, /* no boundary */ 1502 BUS_SPACE_MAXADDR, /* lowaddr */ 1503 BUS_SPACE_MAXADDR, /* highaddr */ 1504 NULL, NULL, /* filter, filterarg */ 1505 MLX5E_MAX_TX_PAYLOAD_SIZE, /* maxsize */ 1506 MLX5E_MAX_TX_MBUF_FRAGS, /* nsegments */ 1507 MLX5E_MAX_TX_MBUF_SIZE, /* maxsegsize */ 1508 0, /* flags */ 1509 NULL, NULL, /* lockfunc, lockfuncarg */ 1510 &sq->dma_tag))) 1511 goto done; 1512 1513 err = mlx5_alloc_map_uar(mdev, &sq->uar); 1514 if (err) 1515 goto err_free_dma_tag; 1516 1517 err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, 1518 &sq->wq_ctrl); 1519 if (err) 1520 goto err_unmap_free_uar; 1521 1522 sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; 1523 sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; 1524 1525 err = mlx5e_alloc_sq_db(sq); 1526 if (err) 1527 goto err_sq_wq_destroy; 1528 1529 sq->mkey_be = cpu_to_be32(priv->mr.key); 1530 sq->ifp = priv->ifp; 1531 sq->priv = priv; 1532 sq->tc = tc; 1533 1534 mlx5e_update_sq_inline(sq); 1535 
1536 snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc); 1537 mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 1538 buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM, 1539 sq->stats.arg); 1540 1541 return (0); 1542 1543err_sq_wq_destroy: 1544 mlx5_wq_destroy(&sq->wq_ctrl); 1545 1546err_unmap_free_uar: 1547 mlx5_unmap_free_uar(mdev, &sq->uar); 1548 1549err_free_dma_tag: 1550 bus_dma_tag_destroy(sq->dma_tag); 1551done: 1552 return (err); 1553} 1554 1555static void 1556mlx5e_destroy_sq(struct mlx5e_sq *sq) 1557{ 1558 /* destroy all sysctl nodes */ 1559 sysctl_ctx_free(&sq->stats.ctx); 1560 1561 mlx5e_free_sq_db(sq); 1562 mlx5_wq_destroy(&sq->wq_ctrl); 1563 mlx5_unmap_free_uar(sq->priv->mdev, &sq->uar); 1564 bus_dma_tag_destroy(sq->dma_tag); 1565} 1566 1567int 1568mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param, 1569 int tis_num) 1570{ 1571 void *in; 1572 void *sqc; 1573 void *wq; 1574 int inlen; 1575 int err; 1576 1577 inlen = MLX5_ST_SZ_BYTES(create_sq_in) + 1578 sizeof(u64) * sq->wq_ctrl.buf.npages; 1579 in = mlx5_vzalloc(inlen); 1580 if (in == NULL) 1581 return (-ENOMEM); 1582 1583 sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); 1584 wq = MLX5_ADDR_OF(sqc, sqc, wq); 1585 1586 memcpy(sqc, param->sqc, sizeof(param->sqc)); 1587 1588 MLX5_SET(sqc, sqc, tis_num_0, tis_num); 1589 MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn); 1590 MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); 1591 MLX5_SET(sqc, sqc, tis_lst_sz, 1); 1592 MLX5_SET(sqc, sqc, flush_in_error_en, 1); 1593 1594 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); 1595 MLX5_SET(wq, wq, uar_page, sq->uar.index); 1596 MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - 1597 PAGE_SHIFT); 1598 MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma); 1599 1600 mlx5_fill_page_array(&sq->wq_ctrl.buf, 1601 (__be64 *) MLX5_ADDR_OF(wq, wq, pas)); 1602 1603 err = mlx5_core_create_sq(sq->priv->mdev, in, inlen, &sq->sqn); 1604 1605 kvfree(in); 1606 1607 return (err); 1608} 1609 1610int 1611mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state) 1612{ 1613 void *in; 1614 void *sqc; 1615 int inlen; 1616 int err; 1617 1618 inlen = MLX5_ST_SZ_BYTES(modify_sq_in); 1619 in = mlx5_vzalloc(inlen); 1620 if (in == NULL) 1621 return (-ENOMEM); 1622 1623 sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); 1624 1625 MLX5_SET(modify_sq_in, in, sqn, sq->sqn); 1626 MLX5_SET(modify_sq_in, in, sq_state, curr_state); 1627 MLX5_SET(sqc, sqc, state, next_state); 1628 1629 err = mlx5_core_modify_sq(sq->priv->mdev, in, inlen); 1630 1631 kvfree(in); 1632 1633 return (err); 1634} 1635 1636void 1637mlx5e_disable_sq(struct mlx5e_sq *sq) 1638{ 1639 1640 mlx5_core_destroy_sq(sq->priv->mdev, sq->sqn); 1641} 1642 1643static int 1644mlx5e_open_sq(struct mlx5e_channel *c, 1645 int tc, 1646 struct mlx5e_sq_param *param, 1647 struct mlx5e_sq *sq) 1648{ 1649 int err; 1650 1651 sq->cev_factor = c->priv->params_ethtool.tx_completion_fact; 1652 1653 /* ensure the TX completion event factor is not zero */ 1654 if (sq->cev_factor == 0) 1655 sq->cev_factor = 1; 1656 1657 err = mlx5e_create_sq(c, tc, param, sq); 1658 if (err) 1659 return (err); 1660 1661 err = mlx5e_enable_sq(sq, param, c->priv->tisn[tc]); 1662 if (err) 1663 goto err_destroy_sq; 1664 1665 err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY); 1666 if (err) 1667 goto err_disable_sq; 1668 1669 WRITE_ONCE(sq->running, 1); 1670 1671 return (0); 1672 1673err_disable_sq: 1674 mlx5e_disable_sq(sq); 1675err_destroy_sq: 1676 mlx5e_destroy_sq(sq); 1677 1678 return (err); 1679} 1680 
1681static void 1682mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep) 1683{ 1684 /* fill up remainder with NOPs */ 1685 while (sq->cev_counter != 0) { 1686 while (!mlx5e_sq_has_room_for(sq, 1)) { 1687 if (can_sleep != 0) { 1688 mtx_unlock(&sq->lock); 1689 msleep(4); 1690 mtx_lock(&sq->lock); 1691 } else { 1692 goto done; 1693 } 1694 } 1695 /* send a single NOP */ 1696 mlx5e_send_nop(sq, 1); 1697 atomic_thread_fence_rel(); 1698 } 1699done: 1700 /* Check if we need to write the doorbell */ 1701 if (likely(sq->doorbell.d64 != 0)) { 1702 mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0); 1703 sq->doorbell.d64 = 0; 1704 } 1705} 1706 1707void 1708mlx5e_sq_cev_timeout(void *arg) 1709{ 1710 struct mlx5e_sq *sq = arg; 1711 1712 mtx_assert(&sq->lock, MA_OWNED); 1713 1714 /* check next state */ 1715 switch (sq->cev_next_state) { 1716 case MLX5E_CEV_STATE_SEND_NOPS: 1717 /* fill TX ring with NOPs, if any */ 1718 mlx5e_sq_send_nops_locked(sq, 0); 1719 1720 /* check if completed */ 1721 if (sq->cev_counter == 0) { 1722 sq->cev_next_state = MLX5E_CEV_STATE_INITIAL; 1723 return; 1724 } 1725 break; 1726 default: 1727 /* send NOPs on next timeout */ 1728 sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS; 1729 break; 1730 } 1731 1732 /* restart timer */ 1733 callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq); 1734} 1735 1736void 1737mlx5e_drain_sq(struct mlx5e_sq *sq) 1738{ 1739 int error; 1740 struct mlx5_core_dev *mdev= sq->priv->mdev; 1741 1742 /* 1743 * Check if already stopped. 1744 * 1745 * NOTE: Serialization of this function is managed by the 1746 * caller ensuring the priv's state lock is locked or in case 1747 * of rate limit support, a single thread manages drain and 1748 * resume of SQs. The "running" variable can therefore safely 1749 * be read without any locks. 
1750 */ 1751 if (READ_ONCE(sq->running) == 0) 1752 return; 1753 1754 /* don't put more packets into the SQ */ 1755 WRITE_ONCE(sq->running, 0); 1756 1757 /* serialize access to DMA rings */ 1758 mtx_lock(&sq->lock); 1759 1760 /* teardown event factor timer, if any */ 1761 sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS; 1762 callout_stop(&sq->cev_callout); 1763 1764 /* send dummy NOPs in order to flush the transmit ring */ 1765 mlx5e_sq_send_nops_locked(sq, 1); 1766 mtx_unlock(&sq->lock); 1767 1768 /* wait till SQ is empty or link is down */ 1769 mtx_lock(&sq->lock); 1770 while (sq->cc != sq->pc && 1771 (sq->priv->media_status_last & IFM_ACTIVE) != 0 && 1772 mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) { 1773 mtx_unlock(&sq->lock); 1774 msleep(1); 1775 sq->cq.mcq.comp(&sq->cq.mcq); 1776 mtx_lock(&sq->lock); 1777 } 1778 mtx_unlock(&sq->lock); 1779 1780 /* error out remaining requests */ 1781 error = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR); 1782 if (error != 0) { 1783 mlx5_en_err(sq->ifp, 1784 "mlx5e_modify_sq() from RDY to ERR failed: %d\n", error); 1785 } 1786 1787 /* wait till SQ is empty */ 1788 mtx_lock(&sq->lock); 1789 while (sq->cc != sq->pc && 1790 mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) { 1791 mtx_unlock(&sq->lock); 1792 msleep(1); 1793 sq->cq.mcq.comp(&sq->cq.mcq); 1794 mtx_lock(&sq->lock); 1795 } 1796 mtx_unlock(&sq->lock); 1797} 1798 1799static void 1800mlx5e_close_sq_wait(struct mlx5e_sq *sq) 1801{ 1802 1803 mlx5e_drain_sq(sq); 1804 mlx5e_disable_sq(sq); 1805 mlx5e_destroy_sq(sq); 1806} 1807 1808static int 1809mlx5e_create_cq(struct mlx5e_priv *priv, 1810 struct mlx5e_cq_param *param, 1811 struct mlx5e_cq *cq, 1812 mlx5e_cq_comp_t *comp, 1813 int eq_ix) 1814{ 1815 struct mlx5_core_dev *mdev = priv->mdev; 1816 struct mlx5_core_cq *mcq = &cq->mcq; 1817 int eqn_not_used; 1818 int irqn; 1819 int err; 1820 u32 i; 1821 1822 param->wq.buf_numa_node = 0; 1823 param->wq.db_numa_node = 0; 1824 1825 err = mlx5_vector2eqn(mdev, eq_ix, &eqn_not_used, &irqn); 1826 if (err) 1827 return (err); 1828 1829 err = mlx5_cqwq_create(mdev, ¶m->wq, param->cqc, &cq->wq, 1830 &cq->wq_ctrl); 1831 if (err) 1832 return (err); 1833 1834 mcq->cqe_sz = 64; 1835 mcq->set_ci_db = cq->wq_ctrl.db.db; 1836 mcq->arm_db = cq->wq_ctrl.db.db + 1; 1837 *mcq->set_ci_db = 0; 1838 *mcq->arm_db = 0; 1839 mcq->vector = eq_ix; 1840 mcq->comp = comp; 1841 mcq->event = mlx5e_cq_error_event; 1842 mcq->irqn = irqn; 1843 mcq->uar = &priv->cq_uar; 1844 1845 for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { 1846 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); 1847 1848 cqe->op_own = 0xf1; 1849 } 1850 1851 cq->priv = priv; 1852 1853 return (0); 1854} 1855 1856static void 1857mlx5e_destroy_cq(struct mlx5e_cq *cq) 1858{ 1859 mlx5_wq_destroy(&cq->wq_ctrl); 1860} 1861 1862static int 1863mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param, int eq_ix) 1864{ 1865 struct mlx5_core_cq *mcq = &cq->mcq; 1866 void *in; 1867 void *cqc; 1868 int inlen; 1869 int irqn_not_used; 1870 int eqn; 1871 int err; 1872 1873 inlen = MLX5_ST_SZ_BYTES(create_cq_in) + 1874 sizeof(u64) * cq->wq_ctrl.buf.npages; 1875 in = mlx5_vzalloc(inlen); 1876 if (in == NULL) 1877 return (-ENOMEM); 1878 1879 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); 1880 1881 memcpy(cqc, param->cqc, sizeof(param->cqc)); 1882 1883 mlx5_fill_page_array(&cq->wq_ctrl.buf, 1884 (__be64 *) MLX5_ADDR_OF(create_cq_in, in, pas)); 1885 1886 mlx5_vector2eqn(cq->priv->mdev, eq_ix, &eqn, &irqn_not_used); 1887 1888 MLX5_SET(cqc, cqc, c_eqn, eqn); 
1889 MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); 1890 MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - 1891 PAGE_SHIFT); 1892 MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); 1893 1894 err = mlx5_core_create_cq(cq->priv->mdev, mcq, in, inlen); 1895 1896 kvfree(in); 1897 1898 if (err) 1899 return (err); 1900 1901 mlx5e_cq_arm(cq, MLX5_GET_DOORBELL_LOCK(&cq->priv->doorbell_lock)); 1902 1903 return (0); 1904} 1905 1906static void 1907mlx5e_disable_cq(struct mlx5e_cq *cq) 1908{ 1909 1910 mlx5_core_destroy_cq(cq->priv->mdev, &cq->mcq); 1911} 1912 1913int 1914mlx5e_open_cq(struct mlx5e_priv *priv, 1915 struct mlx5e_cq_param *param, 1916 struct mlx5e_cq *cq, 1917 mlx5e_cq_comp_t *comp, 1918 int eq_ix) 1919{ 1920 int err; 1921 1922 err = mlx5e_create_cq(priv, param, cq, comp, eq_ix); 1923 if (err) 1924 return (err); 1925 1926 err = mlx5e_enable_cq(cq, param, eq_ix); 1927 if (err) 1928 goto err_destroy_cq; 1929 1930 return (0); 1931 1932err_destroy_cq: 1933 mlx5e_destroy_cq(cq); 1934 1935 return (err); 1936} 1937 1938void 1939mlx5e_close_cq(struct mlx5e_cq *cq) 1940{ 1941 mlx5e_disable_cq(cq); 1942 mlx5e_destroy_cq(cq); 1943} 1944 1945static int 1946mlx5e_open_tx_cqs(struct mlx5e_channel *c, 1947 struct mlx5e_channel_param *cparam) 1948{ 1949 int err; 1950 int tc; 1951 1952 for (tc = 0; tc < c->priv->num_tc; tc++) { 1953 /* open completion queue */ 1954 err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[tc].cq, 1955 &mlx5e_tx_cq_comp, c->ix); 1956 if (err) 1957 goto err_close_tx_cqs; 1958 } 1959 return (0); 1960 1961err_close_tx_cqs: 1962 for (tc--; tc >= 0; tc--) 1963 mlx5e_close_cq(&c->sq[tc].cq); 1964 1965 return (err); 1966} 1967 1968static void 1969mlx5e_close_tx_cqs(struct mlx5e_channel *c) 1970{ 1971 int tc; 1972 1973 for (tc = 0; tc < c->priv->num_tc; tc++) 1974 mlx5e_close_cq(&c->sq[tc].cq); 1975} 1976 1977static int 1978mlx5e_open_sqs(struct mlx5e_channel *c, 1979 struct mlx5e_channel_param *cparam) 1980{ 1981 int err; 1982 int tc; 1983 1984 for (tc = 0; tc < c->priv->num_tc; tc++) { 1985 err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]); 1986 if (err) 1987 goto err_close_sqs; 1988 } 1989 1990 return (0); 1991 1992err_close_sqs: 1993 for (tc--; tc >= 0; tc--) 1994 mlx5e_close_sq_wait(&c->sq[tc]); 1995 1996 return (err); 1997} 1998 1999static void 2000mlx5e_close_sqs_wait(struct mlx5e_channel *c) 2001{ 2002 int tc; 2003 2004 for (tc = 0; tc < c->priv->num_tc; tc++) 2005 mlx5e_close_sq_wait(&c->sq[tc]); 2006} 2007 2008static void 2009mlx5e_chan_static_init(struct mlx5e_priv *priv, struct mlx5e_channel *c, int ix) 2010{ 2011 int tc; 2012 2013 /* setup priv and channel number */ 2014 c->priv = priv; 2015 c->ix = ix; 2016 c->ifp = priv->ifp; 2017 2018 mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF); 2019 2020 callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0); 2021 2022 for (tc = 0; tc != MLX5E_MAX_TX_NUM_TC; tc++) { 2023 struct mlx5e_sq *sq = c->sq + tc; 2024 2025 mtx_init(&sq->lock, "mlx5tx", 2026 MTX_NETWORK_LOCK " TX", MTX_DEF); 2027 mtx_init(&sq->comp_lock, "mlx5comp", 2028 MTX_NETWORK_LOCK " TX", MTX_DEF); 2029 2030 callout_init_mtx(&sq->cev_callout, &sq->lock, 0); 2031 } 2032} 2033 2034static void 2035mlx5e_chan_static_destroy(struct mlx5e_channel *c) 2036{ 2037 int tc; 2038 2039 callout_drain(&c->rq.watchdog); 2040 2041 mtx_destroy(&c->rq.mtx); 2042 2043 for (tc = 0; tc != MLX5E_MAX_TX_NUM_TC; tc++) { 2044 callout_drain(&c->sq[tc].cev_callout); 2045 mtx_destroy(&c->sq[tc].lock); 2046 mtx_destroy(&c->sq[tc].comp_lock); 2047 } 2048} 2049 2050static int 
2051mlx5e_open_channel(struct mlx5e_priv *priv, 2052 struct mlx5e_channel_param *cparam, 2053 struct mlx5e_channel *c) 2054{ 2055 int i, err; 2056 2057 /* zero non-persistant data */ 2058 MLX5E_ZERO(&c->rq, mlx5e_rq_zero_start); 2059 for (i = 0; i != priv->num_tc; i++) 2060 MLX5E_ZERO(&c->sq[i], mlx5e_sq_zero_start); 2061 2062 /* open transmit completion queue */ 2063 err = mlx5e_open_tx_cqs(c, cparam); 2064 if (err) 2065 goto err_free; 2066 2067 /* open receive completion queue */ 2068 err = mlx5e_open_cq(c->priv, &cparam->rx_cq, &c->rq.cq, 2069 &mlx5e_rx_cq_comp, c->ix); 2070 if (err) 2071 goto err_close_tx_cqs; 2072 2073 err = mlx5e_open_sqs(c, cparam); 2074 if (err) 2075 goto err_close_rx_cq; 2076 2077 err = mlx5e_open_rq(c, &cparam->rq, &c->rq); 2078 if (err) 2079 goto err_close_sqs; 2080 2081 /* poll receive queue initially */ 2082 c->rq.cq.mcq.comp(&c->rq.cq.mcq); 2083 2084 return (0); 2085 2086err_close_sqs: 2087 mlx5e_close_sqs_wait(c); 2088 2089err_close_rx_cq: 2090 mlx5e_close_cq(&c->rq.cq); 2091 2092err_close_tx_cqs: 2093 mlx5e_close_tx_cqs(c); 2094 2095err_free: 2096 return (err); 2097} 2098 2099static void 2100mlx5e_close_channel(struct mlx5e_channel *c) 2101{ 2102 mlx5e_close_rq(&c->rq); 2103} 2104 2105static void 2106mlx5e_close_channel_wait(struct mlx5e_channel *c) 2107{ 2108 mlx5e_close_rq_wait(&c->rq); 2109 mlx5e_close_sqs_wait(c); 2110 mlx5e_close_tx_cqs(c); 2111} 2112 2113static int 2114mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs) 2115{ 2116 u32 r, n; 2117 2118 r = priv->params.hw_lro_en ? priv->params.lro_wqe_sz : 2119 MLX5E_SW2MB_MTU(priv->ifp->if_mtu); 2120 if (r > MJUM16BYTES) 2121 return (-ENOMEM); 2122 2123 if (r > MJUM9BYTES) 2124 r = MJUM16BYTES; 2125 else if (r > MJUMPAGESIZE) 2126 r = MJUM9BYTES; 2127 else if (r > MCLBYTES) 2128 r = MJUMPAGESIZE; 2129 else 2130 r = MCLBYTES; 2131 2132 /* 2133 * n + 1 must be a power of two, because stride size must be. 2134 * Stride size is 16 * (n + 1), as the first segment is 2135 * control. 
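 *
 * As a worked example of the loop below, assuming MLX5E_MAX_RX_BYTES
 * is a 2048-byte cluster (an assumption for illustration, not stated
 * in this file): a 9 KB buffer gives howmany(9216, 2048) = 5, and n
 * is then advanced to 7 so that n + 1 = 8 is a power of two.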
2136 */ 2137 for (n = howmany(r, MLX5E_MAX_RX_BYTES); !powerof2(n + 1); n++) 2138 ; 2139 2140 if (n > MLX5E_MAX_BUSDMA_RX_SEGS) 2141 return (-ENOMEM); 2142 2143 *wqe_sz = r; 2144 *nsegs = n; 2145 return (0); 2146} 2147 2148static void 2149mlx5e_build_rq_param(struct mlx5e_priv *priv, 2150 struct mlx5e_rq_param *param) 2151{ 2152 void *rqc = param->rqc; 2153 void *wq = MLX5_ADDR_OF(rqc, rqc, wq); 2154 u32 wqe_sz, nsegs; 2155 2156 mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs); 2157 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); 2158 MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); 2159 MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe) + 2160 nsegs * sizeof(struct mlx5_wqe_data_seg))); 2161 MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); 2162 MLX5_SET(wq, wq, pd, priv->pdn); 2163 2164 param->wq.buf_numa_node = 0; 2165 param->wq.db_numa_node = 0; 2166 param->wq.linear = 1; 2167} 2168 2169static void 2170mlx5e_build_sq_param(struct mlx5e_priv *priv, 2171 struct mlx5e_sq_param *param) 2172{ 2173 void *sqc = param->sqc; 2174 void *wq = MLX5_ADDR_OF(sqc, sqc, wq); 2175 2176 MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size); 2177 MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); 2178 MLX5_SET(wq, wq, pd, priv->pdn); 2179 2180 param->wq.buf_numa_node = 0; 2181 param->wq.db_numa_node = 0; 2182 param->wq.linear = 1; 2183} 2184 2185static void 2186mlx5e_build_common_cq_param(struct mlx5e_priv *priv, 2187 struct mlx5e_cq_param *param) 2188{ 2189 void *cqc = param->cqc; 2190 2191 MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index); 2192} 2193 2194static void 2195mlx5e_get_default_profile(struct mlx5e_priv *priv, int mode, struct net_dim_cq_moder *ptr) 2196{ 2197 2198 *ptr = net_dim_get_profile(mode, MLX5E_DIM_DEFAULT_PROFILE); 2199 2200 /* apply LRO restrictions */ 2201 if (priv->params.hw_lro_en && 2202 ptr->pkts > MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO) { 2203 ptr->pkts = MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO; 2204 } 2205} 2206 2207static void 2208mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, 2209 struct mlx5e_cq_param *param) 2210{ 2211 struct net_dim_cq_moder curr; 2212 void *cqc = param->cqc; 2213 2214 /* 2215 * We use MLX5_CQE_FORMAT_HASH because the RX hash mini CQE 2216 * format is more beneficial for FreeBSD use case. 2217 * 2218 * Adding support for MLX5_CQE_FORMAT_CSUM will require changes 2219 * in mlx5e_decompress_cqe. 
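 *
 * When cqe_compression_en is set below, the hardware may coalesce
 * several CQEs into a compressed block; the mini-CQE format selected
 * here determines which per-packet fields, in this case the RX hash
 * result, are preserved in the compressed entries.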
2220 */ 2221 if (priv->params.cqe_zipping_en) { 2222 MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_HASH); 2223 MLX5_SET(cqc, cqc, cqe_compression_en, 1); 2224 } 2225 2226 MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size); 2227 2228 switch (priv->params.rx_cq_moderation_mode) { 2229 case 0: 2230 MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec); 2231 MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts); 2232 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2233 break; 2234 case 1: 2235 MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec); 2236 MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts); 2237 if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) 2238 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 2239 else 2240 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2241 break; 2242 case 2: 2243 mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE, &curr); 2244 MLX5_SET(cqc, cqc, cq_period, curr.usec); 2245 MLX5_SET(cqc, cqc, cq_max_count, curr.pkts); 2246 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2247 break; 2248 case 3: 2249 mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE, &curr); 2250 MLX5_SET(cqc, cqc, cq_period, curr.usec); 2251 MLX5_SET(cqc, cqc, cq_max_count, curr.pkts); 2252 if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) 2253 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 2254 else 2255 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2256 break; 2257 default: 2258 break; 2259 } 2260 2261 mlx5e_dim_build_cq_param(priv, param); 2262 2263 mlx5e_build_common_cq_param(priv, param); 2264} 2265 2266static void 2267mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, 2268 struct mlx5e_cq_param *param) 2269{ 2270 void *cqc = param->cqc; 2271 2272 MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size); 2273 MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec); 2274 MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts); 2275 2276 switch (priv->params.tx_cq_moderation_mode) { 2277 case 0: 2278 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2279 break; 2280 default: 2281 if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) 2282 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 2283 else 2284 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 2285 break; 2286 } 2287 2288 mlx5e_build_common_cq_param(priv, param); 2289} 2290 2291static void 2292mlx5e_build_channel_param(struct mlx5e_priv *priv, 2293 struct mlx5e_channel_param *cparam) 2294{ 2295 memset(cparam, 0, sizeof(*cparam)); 2296 2297 mlx5e_build_rq_param(priv, &cparam->rq); 2298 mlx5e_build_sq_param(priv, &cparam->sq); 2299 mlx5e_build_rx_cq_param(priv, &cparam->rx_cq); 2300 mlx5e_build_tx_cq_param(priv, &cparam->tx_cq); 2301} 2302 2303static int 2304mlx5e_open_channels(struct mlx5e_priv *priv) 2305{ 2306 struct mlx5e_channel_param *cparam; 2307 int err; 2308 int i; 2309 int j; 2310 2311 cparam = malloc(sizeof(*cparam), M_MLX5EN, M_WAITOK); 2312 2313 mlx5e_build_channel_param(priv, cparam); 2314 for (i = 0; i < priv->params.num_channels; i++) { 2315 err = mlx5e_open_channel(priv, cparam, &priv->channel[i]); 2316 if (err) 2317 goto err_close_channels; 2318 } 2319 2320 for (j = 0; j < priv->params.num_channels; j++) { 2321 err = 
mlx5e_wait_for_min_rx_wqes(&priv->channel[j].rq); 2322 if (err) 2323 goto err_close_channels; 2324 } 2325 free(cparam, M_MLX5EN); 2326 return (0); 2327 2328err_close_channels: 2329 while (i--) { 2330 mlx5e_close_channel(&priv->channel[i]); 2331 mlx5e_close_channel_wait(&priv->channel[i]); 2332 } 2333 free(cparam, M_MLX5EN); 2334 return (err); 2335} 2336 2337static void 2338mlx5e_close_channels(struct mlx5e_priv *priv) 2339{ 2340 int i; 2341 2342 for (i = 0; i < priv->params.num_channels; i++) 2343 mlx5e_close_channel(&priv->channel[i]); 2344 for (i = 0; i < priv->params.num_channels; i++) 2345 mlx5e_close_channel_wait(&priv->channel[i]); 2346} 2347 2348static int 2349mlx5e_refresh_sq_params(struct mlx5e_priv *priv, struct mlx5e_sq *sq) 2350{ 2351 2352 if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) { 2353 uint8_t cq_mode; 2354 2355 switch (priv->params.tx_cq_moderation_mode) { 2356 case 0: 2357 case 2: 2358 cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; 2359 break; 2360 default: 2361 cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE; 2362 break; 2363 } 2364 2365 return (mlx5_core_modify_cq_moderation_mode(priv->mdev, &sq->cq.mcq, 2366 priv->params.tx_cq_moderation_usec, 2367 priv->params.tx_cq_moderation_pkts, 2368 cq_mode)); 2369 } 2370 2371 return (mlx5_core_modify_cq_moderation(priv->mdev, &sq->cq.mcq, 2372 priv->params.tx_cq_moderation_usec, 2373 priv->params.tx_cq_moderation_pkts)); 2374} 2375 2376static int 2377mlx5e_refresh_rq_params(struct mlx5e_priv *priv, struct mlx5e_rq *rq) 2378{ 2379 2380 if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) { 2381 uint8_t cq_mode; 2382 uint8_t dim_mode; 2383 int retval; 2384 2385 switch (priv->params.rx_cq_moderation_mode) { 2386 case 0: 2387 case 2: 2388 cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; 2389 dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; 2390 break; 2391 default: 2392 cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE; 2393 dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE; 2394 break; 2395 } 2396 2397 /* tear down dynamic interrupt moderation */ 2398 mtx_lock(&rq->mtx); 2399 rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED; 2400 mtx_unlock(&rq->mtx); 2401 2402 /* wait for dynamic interrupt moderation work task, if any */ 2403 cancel_work_sync(&rq->dim.work); 2404 2405 if (priv->params.rx_cq_moderation_mode >= 2) { 2406 struct net_dim_cq_moder curr; 2407 2408 mlx5e_get_default_profile(priv, dim_mode, &curr); 2409 2410 retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq, 2411 curr.usec, curr.pkts, cq_mode); 2412 2413 /* set dynamic interrupt moderation mode and zero defaults */ 2414 mtx_lock(&rq->mtx); 2415 rq->dim.mode = dim_mode; 2416 rq->dim.state = 0; 2417 rq->dim.profile_ix = MLX5E_DIM_DEFAULT_PROFILE; 2418 mtx_unlock(&rq->mtx); 2419 } else { 2420 retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq, 2421 priv->params.rx_cq_moderation_usec, 2422 priv->params.rx_cq_moderation_pkts, 2423 cq_mode); 2424 } 2425 return (retval); 2426 } 2427 2428 return (mlx5_core_modify_cq_moderation(priv->mdev, &rq->cq.mcq, 2429 priv->params.rx_cq_moderation_usec, 2430 priv->params.rx_cq_moderation_pkts)); 2431} 2432 2433static int 2434mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c) 2435{ 2436 int err; 2437 int i; 2438 2439 err = mlx5e_refresh_rq_params(priv, &c->rq); 2440 if (err) 2441 goto done; 2442 2443 for (i = 0; i != priv->num_tc; i++) { 2444 err = mlx5e_refresh_sq_params(priv, &c->sq[i]); 2445 if (err) 2446 goto done; 2447 } 2448done: 2449 return (err); 2450} 2451 2452int 
2453mlx5e_refresh_channel_params(struct mlx5e_priv *priv) 2454{ 2455 int i; 2456 2457 /* check if channels are closed */ 2458 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 2459 return (EINVAL); 2460 2461 for (i = 0; i < priv->params.num_channels; i++) { 2462 int err; 2463 2464 err = mlx5e_refresh_channel_params_sub(priv, &priv->channel[i]); 2465 if (err) 2466 return (err); 2467 } 2468 return (0); 2469} 2470 2471static int 2472mlx5e_open_tis(struct mlx5e_priv *priv, int tc) 2473{ 2474 struct mlx5_core_dev *mdev = priv->mdev; 2475 u32 in[MLX5_ST_SZ_DW(create_tis_in)]; 2476 void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); 2477 2478 memset(in, 0, sizeof(in)); 2479 2480 MLX5_SET(tisc, tisc, prio, tc); 2481 MLX5_SET(tisc, tisc, transport_domain, priv->tdn); 2482 2483 return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc])); 2484} 2485 2486static void 2487mlx5e_close_tis(struct mlx5e_priv *priv, int tc) 2488{ 2489 mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]); 2490} 2491 2492static int 2493mlx5e_open_tises(struct mlx5e_priv *priv) 2494{ 2495 int num_tc = priv->num_tc; 2496 int err; 2497 int tc; 2498 2499 for (tc = 0; tc < num_tc; tc++) { 2500 err = mlx5e_open_tis(priv, tc); 2501 if (err) 2502 goto err_close_tises; 2503 } 2504 2505 return (0); 2506 2507err_close_tises: 2508 for (tc--; tc >= 0; tc--) 2509 mlx5e_close_tis(priv, tc); 2510 2511 return (err); 2512} 2513 2514static void 2515mlx5e_close_tises(struct mlx5e_priv *priv) 2516{ 2517 int num_tc = priv->num_tc; 2518 int tc; 2519 2520 for (tc = 0; tc < num_tc; tc++) 2521 mlx5e_close_tis(priv, tc); 2522} 2523 2524static int 2525mlx5e_open_rqt(struct mlx5e_priv *priv) 2526{ 2527 struct mlx5_core_dev *mdev = priv->mdev; 2528 u32 *in; 2529 u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0}; 2530 void *rqtc; 2531 int inlen; 2532 int err; 2533 int sz; 2534 int i; 2535 2536 sz = 1 << priv->params.rx_hash_log_tbl_sz; 2537 2538 inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; 2539 in = mlx5_vzalloc(inlen); 2540 if (in == NULL) 2541 return (-ENOMEM); 2542 rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); 2543 2544 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); 2545 MLX5_SET(rqtc, rqtc, rqt_max_size, sz); 2546 2547 for (i = 0; i < sz; i++) { 2548 int ix = i; 2549#ifdef RSS 2550 ix = rss_get_indirection_to_bucket(ix); 2551#endif 2552 /* ensure we don't overflow */ 2553 ix %= priv->params.num_channels; 2554 2555 /* apply receive side scaling stride, if any */ 2556 ix -= ix % (int)priv->params.channels_rsss; 2557 2558 MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix].rq.rqn); 2559 } 2560 2561 MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT); 2562 2563 err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); 2564 if (!err) 2565 priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn); 2566 2567 kvfree(in); 2568 2569 return (err); 2570} 2571 2572static void 2573mlx5e_close_rqt(struct mlx5e_priv *priv) 2574{ 2575 u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0}; 2576 u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0}; 2577 2578 MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT); 2579 MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn); 2580 2581 mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)); 2582} 2583 2584static void 2585mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt) 2586{ 2587 void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); 2588 __be32 *hkey; 2589 2590 MLX5_SET(tirc, tirc, transport_domain, priv->tdn); 2591 2592#define ROUGH_MAX_L2_L3_HDR_SZ 256 2593 2594#define MLX5_HASH_IP 
(MLX5_HASH_FIELD_SEL_SRC_IP |\ 2595 MLX5_HASH_FIELD_SEL_DST_IP) 2596 2597#define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\ 2598 MLX5_HASH_FIELD_SEL_DST_IP |\ 2599 MLX5_HASH_FIELD_SEL_L4_SPORT |\ 2600 MLX5_HASH_FIELD_SEL_L4_DPORT) 2601 2602#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\ 2603 MLX5_HASH_FIELD_SEL_DST_IP |\ 2604 MLX5_HASH_FIELD_SEL_IPSEC_SPI) 2605 2606 if (priv->params.hw_lro_en) { 2607 MLX5_SET(tirc, tirc, lro_enable_mask, 2608 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | 2609 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); 2610 MLX5_SET(tirc, tirc, lro_max_msg_sz, 2611 (priv->params.lro_wqe_sz - 2612 ROUGH_MAX_L2_L3_HDR_SZ) >> 8); 2613 /* TODO: add the option to choose timer value dynamically */ 2614 MLX5_SET(tirc, tirc, lro_timeout_period_usecs, 2615 MLX5_CAP_ETH(priv->mdev, 2616 lro_timer_supported_periods[2])); 2617 } 2618 2619 /* setup parameters for hashing TIR type, if any */ 2620 switch (tt) { 2621 case MLX5E_TT_ANY: 2622 MLX5_SET(tirc, tirc, disp_type, 2623 MLX5_TIRC_DISP_TYPE_DIRECT); 2624 MLX5_SET(tirc, tirc, inline_rqn, 2625 priv->channel[0].rq.rqn); 2626 break; 2627 default: 2628 MLX5_SET(tirc, tirc, disp_type, 2629 MLX5_TIRC_DISP_TYPE_INDIRECT); 2630 MLX5_SET(tirc, tirc, indirect_table, 2631 priv->rqtn); 2632 MLX5_SET(tirc, tirc, rx_hash_fn, 2633 MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ); 2634 hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key); 2635#ifdef RSS 2636 /* 2637 * The FreeBSD RSS implementation does currently not 2638 * support symmetric Toeplitz hashes: 2639 */ 2640 MLX5_SET(tirc, tirc, rx_hash_symmetric, 0); 2641 rss_getkey((uint8_t *)hkey); 2642#else 2643 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); 2644 hkey[0] = cpu_to_be32(0xD181C62C); 2645 hkey[1] = cpu_to_be32(0xF7F4DB5B); 2646 hkey[2] = cpu_to_be32(0x1983A2FC); 2647 hkey[3] = cpu_to_be32(0x943E1ADB); 2648 hkey[4] = cpu_to_be32(0xD9389E6B); 2649 hkey[5] = cpu_to_be32(0xD1039C2C); 2650 hkey[6] = cpu_to_be32(0xA74499AD); 2651 hkey[7] = cpu_to_be32(0x593D56D9); 2652 hkey[8] = cpu_to_be32(0xF3253C06); 2653 hkey[9] = cpu_to_be32(0x2ADC1FFC); 2654#endif 2655 break; 2656 } 2657 2658 switch (tt) { 2659 case MLX5E_TT_IPV4_TCP: 2660 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2661 MLX5_L3_PROT_TYPE_IPV4); 2662 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 2663 MLX5_L4_PROT_TYPE_TCP); 2664#ifdef RSS 2665 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) { 2666 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2667 MLX5_HASH_IP); 2668 } else 2669#endif 2670 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2671 MLX5_HASH_ALL); 2672 break; 2673 2674 case MLX5E_TT_IPV6_TCP: 2675 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2676 MLX5_L3_PROT_TYPE_IPV6); 2677 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 2678 MLX5_L4_PROT_TYPE_TCP); 2679#ifdef RSS 2680 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) { 2681 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2682 MLX5_HASH_IP); 2683 } else 2684#endif 2685 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2686 MLX5_HASH_ALL); 2687 break; 2688 2689 case MLX5E_TT_IPV4_UDP: 2690 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2691 MLX5_L3_PROT_TYPE_IPV4); 2692 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 2693 MLX5_L4_PROT_TYPE_UDP); 2694#ifdef RSS 2695 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) { 2696 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2697 MLX5_HASH_IP); 2698 } else 2699#endif 2700 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2701 MLX5_HASH_ALL); 2702 break; 
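 /*
  * The cases below mirror the ones above: the IPv6 variants differ
  * only in the L3 protocol type, the IPsec AH/ESP traffic types hash
  * on the IP addresses plus the SPI, and the plain IPv4/IPv6 types
  * hash on the IP addresses only.
  */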
2703 2704 case MLX5E_TT_IPV6_UDP: 2705 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2706 MLX5_L3_PROT_TYPE_IPV6); 2707 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 2708 MLX5_L4_PROT_TYPE_UDP); 2709#ifdef RSS 2710 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) { 2711 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2712 MLX5_HASH_IP); 2713 } else 2714#endif 2715 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2716 MLX5_HASH_ALL); 2717 break; 2718 2719 case MLX5E_TT_IPV4_IPSEC_AH: 2720 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2721 MLX5_L3_PROT_TYPE_IPV4); 2722 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2723 MLX5_HASH_IP_IPSEC_SPI); 2724 break; 2725 2726 case MLX5E_TT_IPV6_IPSEC_AH: 2727 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2728 MLX5_L3_PROT_TYPE_IPV6); 2729 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2730 MLX5_HASH_IP_IPSEC_SPI); 2731 break; 2732 2733 case MLX5E_TT_IPV4_IPSEC_ESP: 2734 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2735 MLX5_L3_PROT_TYPE_IPV4); 2736 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2737 MLX5_HASH_IP_IPSEC_SPI); 2738 break; 2739 2740 case MLX5E_TT_IPV6_IPSEC_ESP: 2741 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2742 MLX5_L3_PROT_TYPE_IPV6); 2743 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2744 MLX5_HASH_IP_IPSEC_SPI); 2745 break; 2746 2747 case MLX5E_TT_IPV4: 2748 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2749 MLX5_L3_PROT_TYPE_IPV4); 2750 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2751 MLX5_HASH_IP); 2752 break; 2753 2754 case MLX5E_TT_IPV6: 2755 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2756 MLX5_L3_PROT_TYPE_IPV6); 2757 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2758 MLX5_HASH_IP); 2759 break; 2760 2761 default: 2762 break; 2763 } 2764} 2765 2766static int 2767mlx5e_open_tir(struct mlx5e_priv *priv, int tt) 2768{ 2769 struct mlx5_core_dev *mdev = priv->mdev; 2770 u32 *in; 2771 void *tirc; 2772 int inlen; 2773 int err; 2774 2775 inlen = MLX5_ST_SZ_BYTES(create_tir_in); 2776 in = mlx5_vzalloc(inlen); 2777 if (in == NULL) 2778 return (-ENOMEM); 2779 tirc = MLX5_ADDR_OF(create_tir_in, in, tir_context); 2780 2781 mlx5e_build_tir_ctx(priv, tirc, tt); 2782 2783 err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]); 2784 2785 kvfree(in); 2786 2787 return (err); 2788} 2789 2790static void 2791mlx5e_close_tir(struct mlx5e_priv *priv, int tt) 2792{ 2793 mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]); 2794} 2795 2796static int 2797mlx5e_open_tirs(struct mlx5e_priv *priv) 2798{ 2799 int err; 2800 int i; 2801 2802 for (i = 0; i < MLX5E_NUM_TT; i++) { 2803 err = mlx5e_open_tir(priv, i); 2804 if (err) 2805 goto err_close_tirs; 2806 } 2807 2808 return (0); 2809 2810err_close_tirs: 2811 for (i--; i >= 0; i--) 2812 mlx5e_close_tir(priv, i); 2813 2814 return (err); 2815} 2816 2817static void 2818mlx5e_close_tirs(struct mlx5e_priv *priv) 2819{ 2820 int i; 2821 2822 for (i = 0; i < MLX5E_NUM_TT; i++) 2823 mlx5e_close_tir(priv, i); 2824} 2825 2826/* 2827 * SW MTU does not include headers, 2828 * HW MTU includes all headers and checksums. 
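 * For example, a software MTU of 1500 would typically correspond to
 * a hardware MTU of 1522, assuming MLX5E_SW2HW_MTU adds the Ethernet
 * header, an optional VLAN tag and the FCS; MLX5E_HW2SW_MTU is the
 * inverse conversion.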
2829 */ 2830static int 2831mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu) 2832{ 2833 struct mlx5e_priv *priv = ifp->if_softc; 2834 struct mlx5_core_dev *mdev = priv->mdev; 2835 int hw_mtu; 2836 int err; 2837 2838 hw_mtu = MLX5E_SW2HW_MTU(sw_mtu); 2839 2840 err = mlx5_set_port_mtu(mdev, hw_mtu); 2841 if (err) { 2842 mlx5_en_err(ifp, "mlx5_set_port_mtu failed setting %d, err=%d\n", 2843 sw_mtu, err); 2844 return (err); 2845 } 2846 2847 /* Update vport context MTU */ 2848 err = mlx5_set_vport_mtu(mdev, hw_mtu); 2849 if (err) { 2850 mlx5_en_err(ifp, 2851 "Failed updating vport context with MTU size, err=%d\n", 2852 err); 2853 } 2854 2855 ifp->if_mtu = sw_mtu; 2856 2857 err = mlx5_query_vport_mtu(mdev, &hw_mtu); 2858 if (err || !hw_mtu) { 2859 /* fallback to port oper mtu */ 2860 err = mlx5_query_port_oper_mtu(mdev, &hw_mtu); 2861 } 2862 if (err) { 2863 mlx5_en_err(ifp, 2864 "Query port MTU, after setting new MTU value, failed\n"); 2865 return (err); 2866 } else if (MLX5E_HW2SW_MTU(hw_mtu) < sw_mtu) { 2867 err = -E2BIG, 2868 mlx5_en_err(ifp, 2869 "Port MTU %d is smaller than ifp mtu %d\n", 2870 hw_mtu, sw_mtu); 2871 } else if (MLX5E_HW2SW_MTU(hw_mtu) > sw_mtu) { 2872 err = -EINVAL; 2873 mlx5_en_err(ifp, 2874 "Port MTU %d is bigger than ifp mtu %d\n", 2875 hw_mtu, sw_mtu); 2876 } 2877 priv->params_ethtool.hw_mtu = hw_mtu; 2878 2879 return (err); 2880} 2881 2882int 2883mlx5e_open_locked(struct ifnet *ifp) 2884{ 2885 struct mlx5e_priv *priv = ifp->if_softc; 2886 int err; 2887 u16 set_id; 2888 2889 /* check if already opened */ 2890 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0) 2891 return (0); 2892 2893#ifdef RSS 2894 if (rss_getnumbuckets() > priv->params.num_channels) { 2895 mlx5_en_info(ifp, 2896 "NOTE: There are more RSS buckets(%u) than channels(%u) available\n", 2897 rss_getnumbuckets(), priv->params.num_channels); 2898 } 2899#endif 2900 err = mlx5e_open_tises(priv); 2901 if (err) { 2902 mlx5_en_err(ifp, "mlx5e_open_tises failed, %d\n", err); 2903 return (err); 2904 } 2905 err = mlx5_vport_alloc_q_counter(priv->mdev, 2906 MLX5_INTERFACE_PROTOCOL_ETH, &set_id); 2907 if (err) { 2908 mlx5_en_err(priv->ifp, 2909 "mlx5_vport_alloc_q_counter failed: %d\n", err); 2910 goto err_close_tises; 2911 } 2912 /* store counter set ID */ 2913 priv->counter_set_id = set_id; 2914 2915 err = mlx5e_open_channels(priv); 2916 if (err) { 2917 mlx5_en_err(ifp, 2918 "mlx5e_open_channels failed, %d\n", err); 2919 goto err_dalloc_q_counter; 2920 } 2921 err = mlx5e_open_rqt(priv); 2922 if (err) { 2923 mlx5_en_err(ifp, "mlx5e_open_rqt failed, %d\n", err); 2924 goto err_close_channels; 2925 } 2926 err = mlx5e_open_tirs(priv); 2927 if (err) { 2928 mlx5_en_err(ifp, "mlx5e_open_tir failed, %d\n", err); 2929 goto err_close_rqls; 2930 } 2931 err = mlx5e_open_flow_table(priv); 2932 if (err) { 2933 mlx5_en_err(ifp, 2934 "mlx5e_open_flow_table failed, %d\n", err); 2935 goto err_close_tirs; 2936 } 2937 err = mlx5e_add_all_vlan_rules(priv); 2938 if (err) { 2939 mlx5_en_err(ifp, 2940 "mlx5e_add_all_vlan_rules failed, %d\n", err); 2941 goto err_close_flow_table; 2942 } 2943 set_bit(MLX5E_STATE_OPENED, &priv->state); 2944 2945 mlx5e_update_carrier(priv); 2946 mlx5e_set_rx_mode_core(priv); 2947 2948 return (0); 2949 2950err_close_flow_table: 2951 mlx5e_close_flow_table(priv); 2952 2953err_close_tirs: 2954 mlx5e_close_tirs(priv); 2955 2956err_close_rqls: 2957 mlx5e_close_rqt(priv); 2958 2959err_close_channels: 2960 mlx5e_close_channels(priv); 2961 2962err_dalloc_q_counter: 2963 mlx5_vport_dealloc_q_counter(priv->mdev, 2964 
MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id); 2965 2966err_close_tises: 2967 mlx5e_close_tises(priv); 2968 2969 return (err); 2970} 2971 2972static void 2973mlx5e_open(void *arg) 2974{ 2975 struct mlx5e_priv *priv = arg; 2976 2977 PRIV_LOCK(priv); 2978 if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP)) 2979 mlx5_en_err(priv->ifp, 2980 "Setting port status to up failed\n"); 2981 2982 mlx5e_open_locked(priv->ifp); 2983 priv->ifp->if_drv_flags |= IFF_DRV_RUNNING; 2984 PRIV_UNLOCK(priv); 2985} 2986 2987int 2988mlx5e_close_locked(struct ifnet *ifp) 2989{ 2990 struct mlx5e_priv *priv = ifp->if_softc; 2991 2992 /* check if already closed */ 2993 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 2994 return (0); 2995 2996 clear_bit(MLX5E_STATE_OPENED, &priv->state); 2997 2998 mlx5e_set_rx_mode_core(priv); 2999 mlx5e_del_all_vlan_rules(priv); 3000 if_link_state_change(priv->ifp, LINK_STATE_DOWN); 3001 mlx5e_close_flow_table(priv); 3002 mlx5e_close_tirs(priv); 3003 mlx5e_close_rqt(priv); 3004 mlx5e_close_channels(priv); 3005 mlx5_vport_dealloc_q_counter(priv->mdev, 3006 MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id); 3007 mlx5e_close_tises(priv); 3008 3009 return (0); 3010} 3011 3012#if (__FreeBSD_version >= 1100000) 3013static uint64_t 3014mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt) 3015{ 3016 struct mlx5e_priv *priv = ifp->if_softc; 3017 u64 retval; 3018 3019 /* PRIV_LOCK(priv); XXX not allowed */ 3020 switch (cnt) { 3021 case IFCOUNTER_IPACKETS: 3022 retval = priv->stats.vport.rx_packets; 3023 break; 3024 case IFCOUNTER_IERRORS: 3025 retval = priv->stats.pport.in_range_len_errors + 3026 priv->stats.pport.out_of_range_len + 3027 priv->stats.pport.too_long_errors + 3028 priv->stats.pport.check_seq_err + 3029 priv->stats.pport.alignment_err; 3030 break; 3031 case IFCOUNTER_IQDROPS: 3032 retval = priv->stats.vport.rx_out_of_buffer; 3033 break; 3034 case IFCOUNTER_OPACKETS: 3035 retval = priv->stats.vport.tx_packets; 3036 break; 3037 case IFCOUNTER_OERRORS: 3038 retval = priv->stats.port_stats_debug.out_discards; 3039 break; 3040 case IFCOUNTER_IBYTES: 3041 retval = priv->stats.vport.rx_bytes; 3042 break; 3043 case IFCOUNTER_OBYTES: 3044 retval = priv->stats.vport.tx_bytes; 3045 break; 3046 case IFCOUNTER_IMCASTS: 3047 retval = priv->stats.vport.rx_multicast_packets; 3048 break; 3049 case IFCOUNTER_OMCASTS: 3050 retval = priv->stats.vport.tx_multicast_packets; 3051 break; 3052 case IFCOUNTER_OQDROPS: 3053 retval = priv->stats.vport.tx_queue_dropped; 3054 break; 3055 case IFCOUNTER_COLLISIONS: 3056 retval = priv->stats.pport.collisions; 3057 break; 3058 default: 3059 retval = if_get_counter_default(ifp, cnt); 3060 break; 3061 } 3062 /* PRIV_UNLOCK(priv); XXX not allowed */ 3063 return (retval); 3064} 3065#endif 3066 3067static void 3068mlx5e_set_rx_mode(struct ifnet *ifp) 3069{ 3070 struct mlx5e_priv *priv = ifp->if_softc; 3071 3072 queue_work(priv->wq, &priv->set_rx_mode_work); 3073} 3074 3075static int 3076mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 3077{ 3078 struct mlx5e_priv *priv; 3079 struct ifreq *ifr; 3080 struct ifi2creq i2c; 3081 int error = 0; 3082 int mask = 0; 3083 int size_read = 0; 3084 int module_status; 3085 int module_num; 3086 int max_mtu; 3087 uint8_t read_addr; 3088 3089 priv = ifp->if_softc; 3090 3091 /* check if detaching */ 3092 if (priv == NULL || priv->gone != 0) 3093 return (ENXIO); 3094 3095 switch (command) { 3096 case SIOCSIFMTU: 3097 ifr = (struct ifreq *)data; 3098 3099 PRIV_LOCK(priv); 3100 mlx5_query_port_max_mtu(priv->mdev, 
&max_mtu); 3101 3102 if (ifr->ifr_mtu >= MLX5E_MTU_MIN && 3103 ifr->ifr_mtu <= MIN(MLX5E_MTU_MAX, max_mtu)) { 3104 int was_opened; 3105 3106 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); 3107 if (was_opened) 3108 mlx5e_close_locked(ifp); 3109 3110 /* set new MTU */ 3111 mlx5e_set_dev_port_mtu(ifp, ifr->ifr_mtu); 3112 3113 if (was_opened) 3114 mlx5e_open_locked(ifp); 3115 } else { 3116 error = EINVAL; 3117 mlx5_en_err(ifp, 3118 "Invalid MTU value. Min val: %d, Max val: %d\n", 3119 MLX5E_MTU_MIN, MIN(MLX5E_MTU_MAX, max_mtu)); 3120 } 3121 PRIV_UNLOCK(priv); 3122 break; 3123 case SIOCSIFFLAGS: 3124 if ((ifp->if_flags & IFF_UP) && 3125 (ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3126 mlx5e_set_rx_mode(ifp); 3127 break; 3128 } 3129 PRIV_LOCK(priv); 3130 if (ifp->if_flags & IFF_UP) { 3131 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 3132 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 3133 mlx5e_open_locked(ifp); 3134 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3135 mlx5_set_port_status(priv->mdev, MLX5_PORT_UP); 3136 } 3137 } else { 3138 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 3139 mlx5_set_port_status(priv->mdev, 3140 MLX5_PORT_DOWN); 3141 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0) 3142 mlx5e_close_locked(ifp); 3143 mlx5e_update_carrier(priv); 3144 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3145 } 3146 } 3147 PRIV_UNLOCK(priv); 3148 break; 3149 case SIOCADDMULTI: 3150 case SIOCDELMULTI: 3151 mlx5e_set_rx_mode(ifp); 3152 break; 3153 case SIOCSIFMEDIA: 3154 case SIOCGIFMEDIA: 3155 case SIOCGIFXMEDIA: 3156 ifr = (struct ifreq *)data; 3157 error = ifmedia_ioctl(ifp, ifr, &priv->media, command); 3158 break; 3159 case SIOCSIFCAP: 3160 ifr = (struct ifreq *)data; 3161 PRIV_LOCK(priv); 3162 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3163 3164 if (mask & IFCAP_TXCSUM) { 3165 ifp->if_capenable ^= IFCAP_TXCSUM; 3166 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); 3167 3168 if (IFCAP_TSO4 & ifp->if_capenable && 3169 !(IFCAP_TXCSUM & ifp->if_capenable)) { 3170 ifp->if_capenable &= ~IFCAP_TSO4; 3171 ifp->if_hwassist &= ~CSUM_IP_TSO; 3172 mlx5_en_err(ifp, 3173 "tso4 disabled due to -txcsum.\n"); 3174 } 3175 } 3176 if (mask & IFCAP_TXCSUM_IPV6) { 3177 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; 3178 ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 3179 3180 if (IFCAP_TSO6 & ifp->if_capenable && 3181 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { 3182 ifp->if_capenable &= ~IFCAP_TSO6; 3183 ifp->if_hwassist &= ~CSUM_IP6_TSO; 3184 mlx5_en_err(ifp, 3185 "tso6 disabled due to -txcsum6.\n"); 3186 } 3187 } 3188 if (mask & IFCAP_RXCSUM) 3189 ifp->if_capenable ^= IFCAP_RXCSUM; 3190 if (mask & IFCAP_RXCSUM_IPV6) 3191 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; 3192 if (mask & IFCAP_TSO4) { 3193 if (!(IFCAP_TSO4 & ifp->if_capenable) && 3194 !(IFCAP_TXCSUM & ifp->if_capenable)) { 3195 mlx5_en_err(ifp, "enable txcsum first.\n"); 3196 error = EAGAIN; 3197 goto out; 3198 } 3199 ifp->if_capenable ^= IFCAP_TSO4; 3200 ifp->if_hwassist ^= CSUM_IP_TSO; 3201 } 3202 if (mask & IFCAP_TSO6) { 3203 if (!(IFCAP_TSO6 & ifp->if_capenable) && 3204 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { 3205 mlx5_en_err(ifp, "enable txcsum6 first.\n"); 3206 error = EAGAIN; 3207 goto out; 3208 } 3209 ifp->if_capenable ^= IFCAP_TSO6; 3210 ifp->if_hwassist ^= CSUM_IP6_TSO; 3211 } 3212 if (mask & IFCAP_VLAN_HWFILTER) { 3213 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) 3214 mlx5e_disable_vlan_filter(priv); 3215 else 3216 mlx5e_enable_vlan_filter(priv); 3217 3218 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; 3219 } 3220 if (mask & 
IFCAP_VLAN_HWTAGGING)
3221 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
3222 if (mask & IFCAP_WOL_MAGIC)
3223 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
3224
3225 VLAN_CAPABILITIES(ifp);
3226 /* turning off LRO also turns off HW LRO, if it is enabled */
3227 if (mask & IFCAP_LRO) {
3228 int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
3229 bool need_restart = false;
3230
3231 ifp->if_capenable ^= IFCAP_LRO;
3232
3233 /* figure out if updating HW LRO is needed */
3234 if (!(ifp->if_capenable & IFCAP_LRO)) {
3235 if (priv->params.hw_lro_en) {
3236 priv->params.hw_lro_en = false;
3237 need_restart = true;
3238 }
3239 } else {
3240 if (priv->params.hw_lro_en == false &&
3241 priv->params_ethtool.hw_lro != 0) {
3242 priv->params.hw_lro_en = true;
3243 need_restart = true;
3244 }
3245 }
3246 if (was_opened && need_restart) {
3247 mlx5e_close_locked(ifp);
3248 mlx5e_open_locked(ifp);
3249 }
3250 }
3251out:
3252 PRIV_UNLOCK(priv);
3253 break;
3254
3255 case SIOCGI2C:
3256 ifr = (struct ifreq *)data;
3257
3258 /*
3259 * Copy from the user-space address ifr_data to the
3260 * kernel-space address i2c
3261 */
3262 error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
3263 if (error)
3264 break;
3265
3266 if (i2c.len > sizeof(i2c.data)) {
3267 error = EINVAL;
3268 break;
3269 }
3270
3271 PRIV_LOCK(priv);
3272 /* Get module_num which is required for the query_eeprom */
3273 error = mlx5_query_module_num(priv->mdev, &module_num);
3274 if (error) {
3275 mlx5_en_err(ifp,
3276 "Query module num failed, eeprom reading is not supported\n");
3277 error = EINVAL;
3278 goto err_i2c;
3279 }
3280 /* Check if module is present before doing an access */
3281 module_status = mlx5_query_module_status(priv->mdev, module_num);
3282 if (module_status != MLX5_MODULE_STATUS_PLUGGED_ENABLED) {
3283 error = EINVAL;
3284 goto err_i2c;
3285 }
3286 /*
3287 * Currently 0xA0 and 0xA2 are the only addresses permitted.
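 * These correspond to the standard SFF-8472 module memory addresses,
 * where 0xA0 selects the identification/serial ID page and 0xA2 the
 * diagnostics page.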
3288 * The internal conversion is as follows: 3289 */ 3290 if (i2c.dev_addr == 0xA0) 3291 read_addr = MLX5_I2C_ADDR_LOW; 3292 else if (i2c.dev_addr == 0xA2) 3293 read_addr = MLX5_I2C_ADDR_HIGH; 3294 else { 3295 mlx5_en_err(ifp, 3296 "Query eeprom failed, Invalid Address: %X\n", 3297 i2c.dev_addr); 3298 error = EINVAL; 3299 goto err_i2c; 3300 } 3301 error = mlx5_query_eeprom(priv->mdev, 3302 read_addr, MLX5_EEPROM_LOW_PAGE, 3303 (uint32_t)i2c.offset, (uint32_t)i2c.len, module_num, 3304 (uint32_t *)i2c.data, &size_read); 3305 if (error) { 3306 mlx5_en_err(ifp, 3307 "Query eeprom failed, eeprom reading is not supported\n"); 3308 error = EINVAL; 3309 goto err_i2c; 3310 } 3311 3312 if (i2c.len > MLX5_EEPROM_MAX_BYTES) { 3313 error = mlx5_query_eeprom(priv->mdev, 3314 read_addr, MLX5_EEPROM_LOW_PAGE, 3315 (uint32_t)(i2c.offset + size_read), 3316 (uint32_t)(i2c.len - size_read), module_num, 3317 (uint32_t *)(i2c.data + size_read), &size_read); 3318 } 3319 if (error) { 3320 mlx5_en_err(ifp, 3321 "Query eeprom failed, eeprom reading is not supported\n"); 3322 error = EINVAL; 3323 goto err_i2c; 3324 } 3325 3326 error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); 3327err_i2c: 3328 PRIV_UNLOCK(priv); 3329 break; 3330 3331 default: 3332 error = ether_ioctl(ifp, command, data); 3333 break; 3334 } 3335 return (error); 3336} 3337 3338static int 3339mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) 3340{ 3341 /* 3342 * TODO: uncoment once FW really sets all these bits if 3343 * (!mdev->caps.eth.rss_ind_tbl_cap || !mdev->caps.eth.csum_cap || 3344 * !mdev->caps.eth.max_lso_cap || !mdev->caps.eth.vlan_cap || 3345 * !(mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD)) return 3346 * -ENOTSUPP; 3347 */ 3348 3349 /* TODO: add more must-to-have features */ 3350 3351 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 3352 return (-ENODEV); 3353 3354 return (0); 3355} 3356 3357static u16 3358mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev) 3359{ 3360 const int min_size = ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN; 3361 const int max_size = MLX5E_MAX_TX_INLINE; 3362 const int bf_buf_size = 3363 ((1U << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2U) - 3364 (sizeof(struct mlx5e_tx_wqe) - 2); 3365 3366 /* verify against driver limits */ 3367 if (bf_buf_size > max_size) 3368 return (max_size); 3369 else if (bf_buf_size < min_size) 3370 return (min_size); 3371 else 3372 return (bf_buf_size); 3373} 3374 3375static int 3376mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev, 3377 struct mlx5e_priv *priv, 3378 int num_comp_vectors) 3379{ 3380 int err; 3381 3382 /* 3383 * TODO: Consider link speed for setting "log_sq_size", 3384 * "log_rq_size" and "cq_moderation_xxx": 3385 */ 3386 priv->params.log_sq_size = 3387 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; 3388 priv->params.log_rq_size = 3389 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; 3390 priv->params.rx_cq_moderation_usec = 3391 MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 3392 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE : 3393 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; 3394 priv->params.rx_cq_moderation_mode = 3395 MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 
1 : 0; 3396 priv->params.rx_cq_moderation_pkts = 3397 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; 3398 priv->params.tx_cq_moderation_usec = 3399 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; 3400 priv->params.tx_cq_moderation_pkts = 3401 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; 3402 priv->params.min_rx_wqes = 3403 MLX5E_PARAMS_DEFAULT_MIN_RX_WQES; 3404 priv->params.rx_hash_log_tbl_sz = 3405 (order_base_2(num_comp_vectors) > 3406 MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ? 3407 order_base_2(num_comp_vectors) : 3408 MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ; 3409 priv->params.num_tc = 1; 3410 priv->params.default_vlan_prio = 0; 3411 priv->counter_set_id = -1; 3412 priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); 3413 3414 err = mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode); 3415 if (err) 3416 return (err); 3417 3418 /* 3419 * hw lro is currently defaulted to off. when it won't anymore we 3420 * will consider the HW capability: "!!MLX5_CAP_ETH(mdev, lro_cap)" 3421 */ 3422 priv->params.hw_lro_en = false; 3423 priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; 3424 3425 /* 3426 * CQE zipping is currently defaulted to off. when it won't 3427 * anymore we will consider the HW capability: 3428 * "!!MLX5_CAP_GEN(mdev, cqe_compression)" 3429 */ 3430 priv->params.cqe_zipping_en = false; 3431 3432 priv->mdev = mdev; 3433 priv->params.num_channels = num_comp_vectors; 3434 priv->params.channels_rsss = 1; 3435 priv->order_base_2_num_channels = order_base_2(num_comp_vectors); 3436 priv->queue_mapping_channel_mask = 3437 roundup_pow_of_two(num_comp_vectors) - 1; 3438 priv->num_tc = priv->params.num_tc; 3439 priv->default_vlan_prio = priv->params.default_vlan_prio; 3440 3441 INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work); 3442 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); 3443 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); 3444 3445 return (0); 3446} 3447 3448static int 3449mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn, 3450 struct mlx5_core_mr *mkey) 3451{ 3452 struct ifnet *ifp = priv->ifp; 3453 struct mlx5_core_dev *mdev = priv->mdev; 3454 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 3455 void *mkc; 3456 u32 *in; 3457 int err; 3458 3459 in = mlx5_vzalloc(inlen); 3460 if (in == NULL) { 3461 mlx5_en_err(ifp, "failed to allocate inbox\n"); 3462 return (-ENOMEM); 3463 } 3464 3465 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 3466 MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA); 3467 MLX5_SET(mkc, mkc, lw, 1); 3468 MLX5_SET(mkc, mkc, lr, 1); 3469 3470 MLX5_SET(mkc, mkc, pd, pdn); 3471 MLX5_SET(mkc, mkc, length64, 1); 3472 MLX5_SET(mkc, mkc, qpn, 0xffffff); 3473 3474 err = mlx5_core_create_mkey(mdev, mkey, in, inlen); 3475 if (err) 3476 mlx5_en_err(ifp, "mlx5_core_create_mkey failed, %d\n", 3477 err); 3478 3479 kvfree(in); 3480 return (err); 3481} 3482 3483static const char *mlx5e_vport_stats_desc[] = { 3484 MLX5E_VPORT_STATS(MLX5E_STATS_DESC) 3485}; 3486 3487static const char *mlx5e_pport_stats_desc[] = { 3488 MLX5E_PPORT_STATS(MLX5E_STATS_DESC) 3489}; 3490 3491static void 3492mlx5e_priv_static_init(struct mlx5e_priv *priv, const uint32_t channels) 3493{ 3494 uint32_t x; 3495 3496 mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF); 3497 sx_init(&priv->state_lock, "mlx5state"); 3498 callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0); 3499 MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock); 3500 for (x = 0; x != channels; x++) 3501 mlx5e_chan_static_init(priv, 
&priv->channel[x], x); 3502} 3503 3504static void 3505mlx5e_priv_static_destroy(struct mlx5e_priv *priv, const uint32_t channels) 3506{ 3507 uint32_t x; 3508 3509 for (x = 0; x != channels; x++) 3510 mlx5e_chan_static_destroy(&priv->channel[x]); 3511 callout_drain(&priv->watchdog); 3512 mtx_destroy(&priv->async_events_mtx); 3513 sx_destroy(&priv->state_lock); 3514} 3515 3516static int 3517sysctl_firmware(SYSCTL_HANDLER_ARGS) 3518{ 3519 /* 3520 * %d.%d%.d the string format. 3521 * fw_rev_{maj,min,sub} return u16, 2^16 = 65536. 3522 * We need at most 5 chars to store that. 3523 * It also has: two "." and NULL at the end, which means we need 18 3524 * (5*3 + 3) chars at most. 3525 */ 3526 char fw[18]; 3527 struct mlx5e_priv *priv = arg1; 3528 int error; 3529 3530 snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev), 3531 fw_rev_sub(priv->mdev)); 3532 error = sysctl_handle_string(oidp, fw, sizeof(fw), req); 3533 return (error); 3534} 3535 3536static void 3537mlx5e_disable_tx_dma(struct mlx5e_channel *ch) 3538{ 3539 int i; 3540 3541 for (i = 0; i < ch->priv->num_tc; i++) 3542 mlx5e_drain_sq(&ch->sq[i]); 3543} 3544 3545static void 3546mlx5e_reset_sq_doorbell_record(struct mlx5e_sq *sq) 3547{ 3548 3549 sq->doorbell.d32[0] = cpu_to_be32(MLX5_OPCODE_NOP); 3550 sq->doorbell.d32[1] = cpu_to_be32(sq->sqn << 8); 3551 mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0); 3552 sq->doorbell.d64 = 0; 3553} 3554 3555void 3556mlx5e_resume_sq(struct mlx5e_sq *sq) 3557{ 3558 int err; 3559 3560 /* check if already enabled */ 3561 if (READ_ONCE(sq->running) != 0) 3562 return; 3563 3564 err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_ERR, 3565 MLX5_SQC_STATE_RST); 3566 if (err != 0) { 3567 mlx5_en_err(sq->ifp, 3568 "mlx5e_modify_sq() from ERR to RST failed: %d\n", err); 3569 } 3570 3571 sq->cc = 0; 3572 sq->pc = 0; 3573 3574 /* reset doorbell prior to moving from RST to RDY */ 3575 mlx5e_reset_sq_doorbell_record(sq); 3576 3577 err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, 3578 MLX5_SQC_STATE_RDY); 3579 if (err != 0) { 3580 mlx5_en_err(sq->ifp, 3581 "mlx5e_modify_sq() from RST to RDY failed: %d\n", err); 3582 } 3583 3584 sq->cev_next_state = MLX5E_CEV_STATE_INITIAL; 3585 WRITE_ONCE(sq->running, 1); 3586} 3587 3588static void 3589mlx5e_enable_tx_dma(struct mlx5e_channel *ch) 3590{ 3591 int i; 3592 3593 for (i = 0; i < ch->priv->num_tc; i++) 3594 mlx5e_resume_sq(&ch->sq[i]); 3595} 3596 3597static void 3598mlx5e_disable_rx_dma(struct mlx5e_channel *ch) 3599{ 3600 struct mlx5e_rq *rq = &ch->rq; 3601 int err; 3602 3603 mtx_lock(&rq->mtx); 3604 rq->enabled = 0; 3605 callout_stop(&rq->watchdog); 3606 mtx_unlock(&rq->mtx); 3607 3608 err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); 3609 if (err != 0) { 3610 mlx5_en_err(rq->ifp, 3611 "mlx5e_modify_rq() from RDY to RST failed: %d\n", err); 3612 } 3613 3614 while (!mlx5_wq_ll_is_empty(&rq->wq)) { 3615 msleep(1); 3616 rq->cq.mcq.comp(&rq->cq.mcq); 3617 } 3618 3619 /* 3620 * Transitioning into RST state will allow the FW to track less ERR state queues, 3621 * thus reducing the recv queue flushing time 3622 */ 3623 err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_ERR, MLX5_RQC_STATE_RST); 3624 if (err != 0) { 3625 mlx5_en_err(rq->ifp, 3626 "mlx5e_modify_rq() from ERR to RST failed: %d\n", err); 3627 } 3628} 3629 3630static void 3631mlx5e_enable_rx_dma(struct mlx5e_channel *ch) 3632{ 3633 struct mlx5e_rq *rq = &ch->rq; 3634 int err; 3635 3636 rq->wq.wqe_ctr = 0; 3637 mlx5_wq_ll_update_db_record(&rq->wq); 3638 err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, 
MLX5_RQC_STATE_RDY); 3639 if (err != 0) { 3640 mlx5_en_err(rq->ifp, 3641 "mlx5e_modify_rq() from RST to RDY failed: %d\n", err); 3642 } 3643 3644 rq->enabled = 1; 3645 3646 rq->cq.mcq.comp(&rq->cq.mcq); 3647} 3648 3649void 3650mlx5e_modify_tx_dma(struct mlx5e_priv *priv, uint8_t value) 3651{ 3652 int i; 3653 3654 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 3655 return; 3656 3657 for (i = 0; i < priv->params.num_channels; i++) { 3658 if (value) 3659 mlx5e_disable_tx_dma(&priv->channel[i]); 3660 else 3661 mlx5e_enable_tx_dma(&priv->channel[i]); 3662 } 3663} 3664 3665void 3666mlx5e_modify_rx_dma(struct mlx5e_priv *priv, uint8_t value) 3667{ 3668 int i; 3669 3670 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 3671 return; 3672 3673 for (i = 0; i < priv->params.num_channels; i++) { 3674 if (value) 3675 mlx5e_disable_rx_dma(&priv->channel[i]); 3676 else 3677 mlx5e_enable_rx_dma(&priv->channel[i]); 3678 } 3679} 3680 3681static void 3682mlx5e_add_hw_stats(struct mlx5e_priv *priv) 3683{ 3684 SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw), 3685 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, priv, 0, 3686 sysctl_firmware, "A", "HCA firmware version"); 3687 3688 SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw), 3689 OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0, 3690 "Board ID"); 3691} 3692 3693static int 3694mlx5e_sysctl_tx_priority_flow_control(SYSCTL_HANDLER_ARGS) 3695{ 3696 struct mlx5e_priv *priv = arg1; 3697 uint8_t temp[MLX5E_MAX_PRIORITY]; 3698 uint32_t tx_pfc; 3699 int err; 3700 int i; 3701 3702 PRIV_LOCK(priv); 3703 3704 tx_pfc = priv->params.tx_priority_flow_control; 3705 3706 for (i = 0; i != MLX5E_MAX_PRIORITY; i++) 3707 temp[i] = (tx_pfc >> i) & 1; 3708 3709 err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY); 3710 if (err || !req->newptr) 3711 goto done; 3712 err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY); 3713 if (err) 3714 goto done; 3715 3716 priv->params.tx_priority_flow_control = 0; 3717 3718 /* range check input value */ 3719 for (i = 0; i != MLX5E_MAX_PRIORITY; i++) { 3720 if (temp[i] > 1) { 3721 err = ERANGE; 3722 goto done; 3723 } 3724 priv->params.tx_priority_flow_control |= (temp[i] << i); 3725 } 3726 3727 /* check if update is required */ 3728 if (tx_pfc != priv->params.tx_priority_flow_control) 3729 err = -mlx5e_set_port_pfc(priv); 3730done: 3731 if (err != 0) 3732 priv->params.tx_priority_flow_control= tx_pfc; 3733 PRIV_UNLOCK(priv); 3734 3735 return (err); 3736} 3737 3738static int 3739mlx5e_sysctl_rx_priority_flow_control(SYSCTL_HANDLER_ARGS) 3740{ 3741 struct mlx5e_priv *priv = arg1; 3742 uint8_t temp[MLX5E_MAX_PRIORITY]; 3743 uint32_t rx_pfc; 3744 int err; 3745 int i; 3746 3747 PRIV_LOCK(priv); 3748 3749 rx_pfc = priv->params.rx_priority_flow_control; 3750 3751 for (i = 0; i != MLX5E_MAX_PRIORITY; i++) 3752 temp[i] = (rx_pfc >> i) & 1; 3753 3754 err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY); 3755 if (err || !req->newptr) 3756 goto done; 3757 err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY); 3758 if (err) 3759 goto done; 3760 3761 priv->params.rx_priority_flow_control = 0; 3762 3763 /* range check input value */ 3764 for (i = 0; i != MLX5E_MAX_PRIORITY; i++) { 3765 if (temp[i] > 1) { 3766 err = ERANGE; 3767 goto done; 3768 } 3769 priv->params.rx_priority_flow_control |= (temp[i] << i); 3770 } 3771 3772 /* check if update is required */ 3773 if (rx_pfc != priv->params.rx_priority_flow_control) { 3774 err = -mlx5e_set_port_pfc(priv); 3775 if (err == 0 && priv->sw_is_port_buf_owner) 3776 err = 
mlx5e_update_buf_lossy(priv); 3777 } 3778done: 3779 if (err != 0) 3780 priv->params.rx_priority_flow_control= rx_pfc; 3781 PRIV_UNLOCK(priv); 3782 3783 return (err); 3784} 3785 3786static void 3787mlx5e_setup_pauseframes(struct mlx5e_priv *priv) 3788{ 3789#if (__FreeBSD_version < 1100000) 3790 char path[96]; 3791#endif 3792 int error; 3793 3794 /* enable pauseframes by default */ 3795 priv->params.tx_pauseframe_control = 1; 3796 priv->params.rx_pauseframe_control = 1; 3797 3798 /* disable ports flow control, PFC, by default */ 3799 priv->params.tx_priority_flow_control = 0; 3800 priv->params.rx_priority_flow_control = 0; 3801 3802#if (__FreeBSD_version < 1100000) 3803 /* compute path for sysctl */ 3804 snprintf(path, sizeof(path), "dev.mce.%d.tx_pauseframe_control", 3805 device_get_unit(priv->mdev->pdev->dev.bsddev)); 3806 3807 /* try to fetch tunable, if any */ 3808 TUNABLE_INT_FETCH(path, &priv->params.tx_pauseframe_control); 3809 3810 /* compute path for sysctl */ 3811 snprintf(path, sizeof(path), "dev.mce.%d.rx_pauseframe_control", 3812 device_get_unit(priv->mdev->pdev->dev.bsddev)); 3813 3814 /* try to fetch tunable, if any */ 3815 TUNABLE_INT_FETCH(path, &priv->params.rx_pauseframe_control); 3816#endif 3817 3818 /* register pauseframe SYSCTLs */ 3819 SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3820 OID_AUTO, "tx_pauseframe_control", CTLFLAG_RDTUN, 3821 &priv->params.tx_pauseframe_control, 0, 3822 "Set to enable TX pause frames. Clear to disable."); 3823 3824 SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3825 OID_AUTO, "rx_pauseframe_control", CTLFLAG_RDTUN, 3826 &priv->params.rx_pauseframe_control, 0, 3827 "Set to enable RX pause frames. Clear to disable."); 3828 3829 /* register priority flow control, PFC, SYSCTLs */ 3830 SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3831 OID_AUTO, "tx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN | 3832 CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_tx_priority_flow_control, "CU", 3833 "Set to enable TX ports flow control frames for priorities 0..7. Clear to disable."); 3834 3835 SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3836 OID_AUTO, "rx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN | 3837 CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_rx_priority_flow_control, "CU", 3838 "Set to enable RX ports flow control frames for priorities 0..7. Clear to disable."); 3839 3840 PRIV_LOCK(priv); 3841 3842 /* range check */ 3843 priv->params.tx_pauseframe_control = 3844 priv->params.tx_pauseframe_control ? 1 : 0; 3845 priv->params.rx_pauseframe_control = 3846 priv->params.rx_pauseframe_control ? 
1 : 0; 3847 3848 /* update firmware */ 3849 error = mlx5e_set_port_pause_and_pfc(priv); 3850 if (error == -EINVAL) { 3851 mlx5_en_err(priv->ifp, 3852 "Global pauseframes must be disabled before enabling PFC.\n"); 3853 priv->params.rx_priority_flow_control = 0; 3854 priv->params.tx_priority_flow_control = 0; 3855 3856 /* update firmware */ 3857 (void) mlx5e_set_port_pause_and_pfc(priv); 3858 } 3859 PRIV_UNLOCK(priv); 3860} 3861 3862static void * 3863mlx5e_create_ifp(struct mlx5_core_dev *mdev) 3864{ 3865 struct ifnet *ifp; 3866 struct mlx5e_priv *priv; 3867 u8 dev_addr[ETHER_ADDR_LEN] __aligned(4); 3868 u8 connector_type; 3869 struct sysctl_oid_list *child; 3870 int ncv = mdev->priv.eq_table.num_comp_vectors; 3871 char unit[16]; 3872 int err; 3873 int i,j; 3874 u32 eth_proto_cap; 3875 u32 out[MLX5_ST_SZ_DW(ptys_reg)]; 3876 bool ext = 0; 3877 u32 speeds_num; 3878 struct media media_entry = {}; 3879 3880 if (mlx5e_check_required_hca_cap(mdev)) { 3881 mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n"); 3882 return (NULL); 3883 } 3884 /* 3885 * Try to allocate the priv and make room for worst-case 3886 * number of channel structures: 3887 */ 3888 priv = malloc(sizeof(*priv) + 3889 (sizeof(priv->channel[0]) * mdev->priv.eq_table.num_comp_vectors), 3890 M_MLX5EN, M_WAITOK | M_ZERO); 3891 3892 ifp = priv->ifp = if_alloc(IFT_ETHER); 3893 if (ifp == NULL) { 3894 mlx5_core_err(mdev, "if_alloc() failed\n"); 3895 goto err_free_priv; 3896 } 3897 /* setup all static fields */ 3898 mlx5e_priv_static_init(priv, mdev->priv.eq_table.num_comp_vectors); 3899 3900 ifp->if_softc = priv; 3901 if_initname(ifp, "mce", device_get_unit(mdev->pdev->dev.bsddev)); 3902 ifp->if_mtu = ETHERMTU; 3903 ifp->if_init = mlx5e_open; 3904 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 3905 ifp->if_ioctl = mlx5e_ioctl; 3906 ifp->if_transmit = mlx5e_xmit; 3907 ifp->if_qflush = if_qflush; 3908#if (__FreeBSD_version >= 1100000) 3909 ifp->if_get_counter = mlx5e_get_counter; 3910#endif 3911 ifp->if_snd.ifq_maxlen = ifqmaxlen; 3912 /* 3913 * Set driver features 3914 */ 3915 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6; 3916 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 3917 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER; 3918 ifp->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU; 3919 ifp->if_capabilities |= IFCAP_LRO; 3920 ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO; 3921 ifp->if_capabilities |= IFCAP_HWSTATS; 3922 3923 /* set TSO limits so that we don't have to drop TX packets */ 3924 ifp->if_hw_tsomax = MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 3925 ifp->if_hw_tsomaxsegcount = MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */; 3926 ifp->if_hw_tsomaxsegsize = MLX5E_MAX_TX_MBUF_SIZE; 3927 3928 ifp->if_capenable = ifp->if_capabilities; 3929 ifp->if_hwassist = 0; 3930 if (ifp->if_capenable & IFCAP_TSO) 3931 ifp->if_hwassist |= CSUM_TSO; 3932 if (ifp->if_capenable & IFCAP_TXCSUM) 3933 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP); 3934 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) 3935 ifp->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 3936 3937 /* ifnet sysctl tree */ 3938 sysctl_ctx_init(&priv->sysctl_ctx); 3939 priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev), 3940 OID_AUTO, ifp->if_dname, CTLFLAG_RD, 0, "MLX5 ethernet - interface name"); 3941 if (priv->sysctl_ifnet == NULL) { 3942 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n"); 3943 goto err_free_sysctl; 3944 } 3945 snprintf(unit, sizeof(unit), 
"%d", ifp->if_dunit); 3946 priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3947 OID_AUTO, unit, CTLFLAG_RD, 0, "MLX5 ethernet - interface unit"); 3948 if (priv->sysctl_ifnet == NULL) { 3949 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n"); 3950 goto err_free_sysctl; 3951 } 3952 3953 /* HW sysctl tree */ 3954 child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev)); 3955 priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child, 3956 OID_AUTO, "hw", CTLFLAG_RD, 0, "MLX5 ethernet dev hw"); 3957 if (priv->sysctl_hw == NULL) { 3958 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n"); 3959 goto err_free_sysctl; 3960 } 3961 3962 err = mlx5e_build_ifp_priv(mdev, priv, ncv); 3963 if (err) { 3964 mlx5_core_err(mdev, "mlx5e_build_ifp_priv() failed (%d)\n", err); 3965 goto err_free_sysctl; 3966 } 3967 3968 /* reuse mlx5core's watchdog workqueue */ 3969 priv->wq = mdev->priv.health.wq_watchdog; 3970 3971 err = mlx5_alloc_map_uar(mdev, &priv->cq_uar); 3972 if (err) { 3973 mlx5_en_err(ifp, "mlx5_alloc_map_uar failed, %d\n", err); 3974 goto err_free_wq; 3975 } 3976 err = mlx5_core_alloc_pd(mdev, &priv->pdn); 3977 if (err) { 3978 mlx5_en_err(ifp, "mlx5_core_alloc_pd failed, %d\n", err); 3979 goto err_unmap_free_uar; 3980 } 3981 err = mlx5_alloc_transport_domain(mdev, &priv->tdn); 3982 if (err) { 3983 mlx5_en_err(ifp, 3984 "mlx5_alloc_transport_domain failed, %d\n", err); 3985 goto err_dealloc_pd; 3986 } 3987 err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr); 3988 if (err) { 3989 mlx5_en_err(ifp, "mlx5e_create_mkey failed, %d\n", err); 3990 goto err_dealloc_transport_domain; 3991 } 3992 mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr); 3993 3994 /* check if we should generate a random MAC address */ 3995 if (MLX5_CAP_GEN(priv->mdev, vport_group_manager) == 0 && 3996 is_zero_ether_addr(dev_addr)) { 3997 random_ether_addr(dev_addr); 3998 mlx5_en_err(ifp, "Assigned random MAC address\n"); 3999 } 4000 4001 /* set default MTU */ 4002 mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu); 4003 4004 /* Set default media status */ 4005 priv->media_status_last = IFM_AVALID; 4006 priv->media_active_last = IFM_ETHER | IFM_AUTO | 4007 IFM_ETH_RXPAUSE | IFM_FDX; 4008 4009 /* setup default pauseframes configuration */ 4010 mlx5e_setup_pauseframes(priv); 4011 4012 /* Setup supported medias */ 4013 //TODO: If we failed to query ptys is it ok to proceed?? 4014 if (!mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1)) { 4015 ext = MLX5_CAP_PCAM_FEATURE(mdev, 4016 ptys_extended_ethernet); 4017 eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, 4018 eth_proto_capability); 4019 if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) 4020 connector_type = MLX5_GET(ptys_reg, out, 4021 connector_type); 4022 } else { 4023 eth_proto_cap = 0; 4024 mlx5_en_err(ifp, "Query port media capability failed, %d\n", err); 4025 } 4026 4027 ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK, 4028 mlx5e_media_change, mlx5e_media_status); 4029 4030 speeds_num = ext ? MLX5E_EXT_LINK_SPEEDS_NUMBER : MLX5E_LINK_SPEEDS_NUMBER; 4031 for (i = 0; i != speeds_num; i++) { 4032 for (j = 0; j < MLX5E_LINK_MODES_NUMBER ; ++j) { 4033 media_entry = ext ? 

	/*
	 * Walk the PTYS mode table (the extended table when "ext" is set).
	 * Each row of the table is a link speed and each column a
	 * module/lane type; slots with a zero baudrate are unused.  Only
	 * speeds whose bit is set in eth_proto_cap are advertised to
	 * ifmedia, once as plain media and once with full-duplex and
	 * pause flags.
	 */
	speeds_num = ext ? MLX5E_EXT_LINK_SPEEDS_NUMBER : MLX5E_LINK_SPEEDS_NUMBER;
	for (i = 0; i != speeds_num; i++) {
		for (j = 0; j < MLX5E_LINK_MODES_NUMBER; ++j) {
			media_entry = ext ?
			    mlx5e_ext_mode_table[i][j] :
			    mlx5e_mode_table[i][j];
			if (media_entry.baudrate == 0)
				continue;
			if (MLX5E_PROT_MASK(i) & eth_proto_cap) {
				ifmedia_add(&priv->media,
				    media_entry.subtype |
				    IFM_ETHER, 0, NULL);
				ifmedia_add(&priv->media,
				    media_entry.subtype |
				    IFM_ETHER | IFM_FDX |
				    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
			}
		}
	}

	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);

	/* Set autoselect by default */
	ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
	ether_ifattach(ifp, dev_addr);

	/* Register for VLAN events */
	priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
	priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    mlx5e_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);

	/* Link is down by default */
	if_link_state_change(ifp, LINK_STATE_DOWN);

	mlx5e_enable_async_events(priv);

	mlx5e_add_hw_stats(priv);

	mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    "vstats", mlx5e_vport_stats_desc, MLX5E_VPORT_STATS_NUM,
	    priv->stats.vport.arg);

	mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    "pstats", mlx5e_pport_stats_desc, MLX5E_PPORT_STATS_NUM,
	    priv->stats.pport.arg);

	mlx5e_create_ethtool(priv);

	mtx_lock(&priv->async_events_mtx);
	mlx5e_update_stats(priv);
	mtx_unlock(&priv->async_events_mtx);

	return (priv);

err_dealloc_transport_domain:
	mlx5_dealloc_transport_domain(mdev, priv->tdn);

err_dealloc_pd:
	mlx5_core_dealloc_pd(mdev, priv->pdn);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &priv->cq_uar);

err_free_wq:
	flush_workqueue(priv->wq);

err_free_sysctl:
	sysctl_ctx_free(&priv->sysctl_ctx);
	if (priv->sysctl_debug)
		sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
	mlx5e_priv_static_destroy(priv, mdev->priv.eq_table.num_comp_vectors);
	if_free(ifp);

err_free_priv:
	free(priv, M_MLX5EN);
	return (NULL);
}
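
/*
 * Teardown mirrors mlx5e_create_ifp() in reverse: the instance is first
 * marked gone so new ioctls bail out, the watchdog and VLAN event
 * handlers are stopped, the ifnet is closed and detached, the sysctl
 * contexts are freed, and only then are the mkey, transport domain, PD
 * and UAR released before the private structure itself is freed.
 */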
static void
mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct ifnet *ifp = priv->ifp;

	/* don't allow more IOCTLs */
	priv->gone = 1;

	/* XXX wait a bit to allow IOCTL handlers to complete */
	pause("W", hz);

	/* stop watchdog timer */
	callout_drain(&priv->watchdog);

	if (priv->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
	if (priv->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);

	/* make sure device gets closed */
	PRIV_LOCK(priv);
	mlx5e_close_locked(ifp);
	PRIV_UNLOCK(priv);

	/* unregister device */
	ifmedia_removeall(&priv->media);
	ether_ifdetach(ifp);

	/* destroy all remaining sysctl nodes */
	sysctl_ctx_free(&priv->stats.vport.ctx);
	sysctl_ctx_free(&priv->stats.pport.ctx);
	if (priv->sysctl_debug)
		sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
	sysctl_ctx_free(&priv->sysctl_ctx);

	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
	mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
	mlx5e_disable_async_events(priv);
	flush_workqueue(priv->wq);
	mlx5e_priv_static_destroy(priv, mdev->priv.eq_table.num_comp_vectors);
	if_free(ifp);
	free(priv, M_MLX5EN);
}

static void *
mlx5e_get_ifp(void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	return (priv->ifp);
}

static struct mlx5_interface mlx5e_interface = {
	.add = mlx5e_create_ifp,
	.remove = mlx5e_destroy_ifp,
	.event = mlx5e_async_event,
	.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev = mlx5e_get_ifp,
};

void
mlx5e_init(void)
{
	mlx5_register_interface(&mlx5e_interface);
}

void
mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}

static void
mlx5e_show_version(void __unused *arg)
{

	printf("%s", mlx5e_version);
}
SYSINIT(mlx5e_show_version, SI_SUB_DRIVERS, SI_ORDER_ANY, mlx5e_show_version, NULL);

module_init_order(mlx5e_init, SI_ORDER_THIRD);
module_exit_order(mlx5e_cleanup, SI_ORDER_THIRD);

#if (__FreeBSD_version >= 1100000)
MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1);
#endif
MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1);
MODULE_VERSION(mlx5en, 1);
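
/*
 * Module wiring: mlx5e_init() and mlx5e_cleanup() run via
 * module_init_order() and module_exit_order(), which come from the
 * linuxkpi compatibility layer this module depends on, and simply
 * register and unregister the mlx5e_interface above with mlx5_core.
 * Once registered, the core calls mlx5e_create_ifp() for each
 * Ethernet-capable port it brings up, mlx5e_destroy_ifp() when such a
 * port goes away, and mlx5e_async_event() to deliver firmware events.
 * The SYSINIT only prints the version banner during driver-subsystem
 * initialization.
 */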