mlx5_en_main.c revision 331818
1/*- 2 * Copyright (c) 2015 Mellanox Technologies. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_en/mlx5_en_main.c 331818 2018-03-30 19:26:45Z hselasky $ 26 */ 27 28#include "en.h" 29 30#include <sys/sockio.h> 31#include <machine/atomic.h> 32 33#define ETH_DRIVER_VERSION "3.1.0-dev" 34char mlx5e_version[] = "Mellanox Ethernet driver" 35 " (" ETH_DRIVER_VERSION ")"; 36 37struct mlx5e_channel_param { 38 struct mlx5e_rq_param rq; 39 struct mlx5e_sq_param sq; 40 struct mlx5e_cq_param rx_cq; 41 struct mlx5e_cq_param tx_cq; 42}; 43 44static const struct { 45 u32 subtype; 46 u64 baudrate; 47} mlx5e_mode_table[MLX5E_LINK_MODES_NUMBER] = { 48 49 [MLX5E_1000BASE_CX_SGMII] = { 50 .subtype = IFM_1000_CX_SGMII, 51 .baudrate = IF_Mbps(1000ULL), 52 }, 53 [MLX5E_1000BASE_KX] = { 54 .subtype = IFM_1000_KX, 55 .baudrate = IF_Mbps(1000ULL), 56 }, 57 [MLX5E_10GBASE_CX4] = { 58 .subtype = IFM_10G_CX4, 59 .baudrate = IF_Gbps(10ULL), 60 }, 61 [MLX5E_10GBASE_KX4] = { 62 .subtype = IFM_10G_KX4, 63 .baudrate = IF_Gbps(10ULL), 64 }, 65 [MLX5E_10GBASE_KR] = { 66 .subtype = IFM_10G_KR, 67 .baudrate = IF_Gbps(10ULL), 68 }, 69 [MLX5E_20GBASE_KR2] = { 70 .subtype = IFM_20G_KR2, 71 .baudrate = IF_Gbps(20ULL), 72 }, 73 [MLX5E_40GBASE_CR4] = { 74 .subtype = IFM_40G_CR4, 75 .baudrate = IF_Gbps(40ULL), 76 }, 77 [MLX5E_40GBASE_KR4] = { 78 .subtype = IFM_40G_KR4, 79 .baudrate = IF_Gbps(40ULL), 80 }, 81 [MLX5E_56GBASE_R4] = { 82 .subtype = IFM_56G_R4, 83 .baudrate = IF_Gbps(56ULL), 84 }, 85 [MLX5E_10GBASE_CR] = { 86 .subtype = IFM_10G_CR1, 87 .baudrate = IF_Gbps(10ULL), 88 }, 89 [MLX5E_10GBASE_SR] = { 90 .subtype = IFM_10G_SR, 91 .baudrate = IF_Gbps(10ULL), 92 }, 93 [MLX5E_10GBASE_ER] = { 94 .subtype = IFM_10G_ER, 95 .baudrate = IF_Gbps(10ULL), 96 }, 97 [MLX5E_40GBASE_SR4] = { 98 .subtype = IFM_40G_SR4, 99 .baudrate = IF_Gbps(40ULL), 100 }, 101 [MLX5E_40GBASE_LR4] = { 102 .subtype = IFM_40G_LR4, 103 .baudrate = IF_Gbps(40ULL), 104 }, 105 [MLX5E_100GBASE_CR4] = { 106 .subtype = IFM_100G_CR4, 107 .baudrate = IF_Gbps(100ULL), 108 }, 109 [MLX5E_100GBASE_SR4] = { 110 .subtype = IFM_100G_SR4, 111 .baudrate = IF_Gbps(100ULL), 112 }, 113 [MLX5E_100GBASE_KR4] = { 114 .subtype = IFM_100G_KR4, 115 .baudrate = IF_Gbps(100ULL), 116 }, 117 
[MLX5E_100GBASE_LR4] = { 118 .subtype = IFM_100G_LR4, 119 .baudrate = IF_Gbps(100ULL), 120 }, 121 [MLX5E_100BASE_TX] = { 122 .subtype = IFM_100_TX, 123 .baudrate = IF_Mbps(100ULL), 124 }, 125 [MLX5E_1000BASE_T] = { 126 .subtype = IFM_1000_T, 127 .baudrate = IF_Mbps(1000ULL), 128 }, 129 [MLX5E_10GBASE_T] = { 130 .subtype = IFM_10G_T, 131 .baudrate = IF_Gbps(10ULL), 132 }, 133 [MLX5E_25GBASE_CR] = { 134 .subtype = IFM_25G_CR, 135 .baudrate = IF_Gbps(25ULL), 136 }, 137 [MLX5E_25GBASE_KR] = { 138 .subtype = IFM_25G_KR, 139 .baudrate = IF_Gbps(25ULL), 140 }, 141 [MLX5E_25GBASE_SR] = { 142 .subtype = IFM_25G_SR, 143 .baudrate = IF_Gbps(25ULL), 144 }, 145 [MLX5E_50GBASE_CR2] = { 146 .subtype = IFM_50G_CR2, 147 .baudrate = IF_Gbps(50ULL), 148 }, 149 [MLX5E_50GBASE_KR2] = { 150 .subtype = IFM_50G_KR2, 151 .baudrate = IF_Gbps(50ULL), 152 }, 153}; 154 155MALLOC_DEFINE(M_MLX5EN, "MLX5EN", "MLX5 Ethernet"); 156 157static void 158mlx5e_update_carrier(struct mlx5e_priv *priv) 159{ 160 struct mlx5_core_dev *mdev = priv->mdev; 161 u32 out[MLX5_ST_SZ_DW(ptys_reg)]; 162 u32 eth_proto_oper; 163 int error; 164 u8 port_state; 165 u8 i; 166 167 port_state = mlx5_query_vport_state(mdev, 168 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0); 169 170 if (port_state == VPORT_STATE_UP) { 171 priv->media_status_last |= IFM_ACTIVE; 172 } else { 173 priv->media_status_last &= ~IFM_ACTIVE; 174 priv->media_active_last = IFM_ETHER; 175 if_link_state_change(priv->ifp, LINK_STATE_DOWN); 176 return; 177 } 178 179 error = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); 180 if (error) { 181 priv->media_active_last = IFM_ETHER; 182 priv->ifp->if_baudrate = 1; 183 if_printf(priv->ifp, "%s: query port ptys failed: 0x%x\n", 184 __func__, error); 185 return; 186 } 187 eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); 188 189 for (i = 0; i != MLX5E_LINK_MODES_NUMBER; i++) { 190 if (mlx5e_mode_table[i].baudrate == 0) 191 continue; 192 if (MLX5E_PROT_MASK(i) & eth_proto_oper) { 193 priv->ifp->if_baudrate = 194 mlx5e_mode_table[i].baudrate; 195 priv->media_active_last = 196 mlx5e_mode_table[i].subtype | IFM_ETHER | IFM_FDX; 197 } 198 } 199 if_link_state_change(priv->ifp, LINK_STATE_UP); 200} 201 202static void 203mlx5e_media_status(struct ifnet *dev, struct ifmediareq *ifmr) 204{ 205 struct mlx5e_priv *priv = dev->if_softc; 206 207 ifmr->ifm_status = priv->media_status_last; 208 ifmr->ifm_active = priv->media_active_last | 209 (priv->params.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) | 210 (priv->params.tx_pauseframe_control ? 
	    IFM_ETH_TXPAUSE : 0);

}

static u32
mlx5e_find_link_mode(u32 subtype)
{
	u32 i;
	u32 link_mode = 0;

	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
		if (mlx5e_mode_table[i].baudrate == 0)
			continue;
		if (mlx5e_mode_table[i].subtype == subtype)
			link_mode |= MLX5E_PROT_MASK(i);
	}

	return (link_mode);
}

static int
mlx5e_set_port_pause_and_pfc(struct mlx5e_priv *priv)
{
	return (mlx5_set_port_pause_and_pfc(priv->mdev, 1,
	    priv->params.rx_pauseframe_control,
	    priv->params.tx_pauseframe_control,
	    priv->params.rx_priority_flow_control,
	    priv->params.tx_priority_flow_control));
}

static int
mlx5e_set_port_pfc(struct mlx5e_priv *priv)
{
	int error;

	if (priv->params.rx_pauseframe_control ||
	    priv->params.tx_pauseframe_control) {
		if_printf(priv->ifp,
		    "Global pauseframes must be disabled before enabling PFC.\n");
		error = -EINVAL;
	} else {
		error = mlx5e_set_port_pause_and_pfc(priv);
	}
	return (error);
}

static int
mlx5e_media_change(struct ifnet *dev)
{
	struct mlx5e_priv *priv = dev->if_softc;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 eth_proto_cap;
	u32 link_mode;
	int was_opened;
	int locked;
	int error;

	locked = PRIV_LOCKED(priv);
	if (!locked)
		PRIV_LOCK(priv);

	if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) {
		error = EINVAL;
		goto done;
	}
	link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media));

	/* query supported capabilities */
	error = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
	if (error != 0) {
		if_printf(dev, "Query port media capability failed\n");
		goto done;
	}
	/* check for autoselect */
	if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO) {
		link_mode = eth_proto_cap;
		if (link_mode == 0) {
			if_printf(dev, "Port media capability is zero\n");
			error = EINVAL;
			goto done;
		}
	} else {
		link_mode = link_mode & eth_proto_cap;
		if (link_mode == 0) {
			if_printf(dev, "Not supported link mode requested\n");
			error = EINVAL;
			goto done;
		}
	}
	if (priv->media.ifm_media & (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
		/* check if PFC is enabled */
		if (priv->params.rx_priority_flow_control ||
		    priv->params.tx_priority_flow_control) {
			if_printf(dev, "PFC must be disabled before enabling global pauseframes.\n");
			error = EINVAL;
			goto done;
		}
	}
	/* update pauseframe control bits */
	priv->params.rx_pauseframe_control =
	    (priv->media.ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0;
	priv->params.tx_pauseframe_control =
	    (priv->media.ifm_media & IFM_ETH_TXPAUSE) ?
1 : 0; 313 314 /* check if device is opened */ 315 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); 316 317 /* reconfigure the hardware */ 318 mlx5_set_port_status(mdev, MLX5_PORT_DOWN); 319 mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN); 320 error = -mlx5e_set_port_pause_and_pfc(priv); 321 if (was_opened) 322 mlx5_set_port_status(mdev, MLX5_PORT_UP); 323 324done: 325 if (!locked) 326 PRIV_UNLOCK(priv); 327 return (error); 328} 329 330static void 331mlx5e_update_carrier_work(struct work_struct *work) 332{ 333 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, 334 update_carrier_work); 335 336 PRIV_LOCK(priv); 337 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 338 mlx5e_update_carrier(priv); 339 PRIV_UNLOCK(priv); 340} 341 342/* 343 * This function reads the physical port counters from the firmware 344 * using a pre-defined layout defined by various MLX5E_PPORT_XXX() 345 * macros. The output is converted from big-endian 64-bit values into 346 * host endian ones and stored in the "priv->stats.pport" structure. 347 */ 348static void 349mlx5e_update_pport_counters(struct mlx5e_priv *priv) 350{ 351 struct mlx5_core_dev *mdev = priv->mdev; 352 struct mlx5e_pport_stats *s = &priv->stats.pport; 353 struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug; 354 u32 *in; 355 u32 *out; 356 const u64 *ptr; 357 unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg); 358 unsigned x; 359 unsigned y; 360 unsigned z; 361 362 /* allocate firmware request structures */ 363 in = mlx5_vzalloc(sz); 364 out = mlx5_vzalloc(sz); 365 if (in == NULL || out == NULL) 366 goto free_out; 367 368 /* 369 * Get pointer to the 64-bit counter set which is located at a 370 * fixed offset in the output firmware request structure: 371 */ 372 ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set); 373 374 MLX5_SET(ppcnt_reg, in, local_port, 1); 375 376 /* read IEEE802_3 counter group using predefined counter layout */ 377 MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP); 378 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); 379 for (x = 0, y = MLX5E_PPORT_PER_PRIO_STATS_NUM; 380 x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++) 381 s->arg[y] = be64toh(ptr[x]); 382 383 /* read RFC2819 counter group using predefined counter layout */ 384 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP); 385 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); 386 for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++) 387 s->arg[y] = be64toh(ptr[x]); 388 for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM + 389 MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++) 390 s_debug->arg[y] = be64toh(ptr[x]); 391 392 /* read RFC2863 counter group using predefined counter layout */ 393 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP); 394 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); 395 for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++) 396 s_debug->arg[y] = be64toh(ptr[x]); 397 398 /* read physical layer stats counter group using predefined counter layout */ 399 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP); 400 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); 401 for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++) 402 s_debug->arg[y] = be64toh(ptr[x]); 403 404 /* read per-priority counters */ 405 MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP); 406 407 /* iterate all the priorities */ 408 for (y = z = 0; z != MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO; z++) { 409 
		MLX5_SET(ppcnt_reg, in, prio_tc, z);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

		/* read per priority stats counter group using predefined counter layout */
		for (x = 0; x != (MLX5E_PPORT_PER_PRIO_STATS_NUM /
		    MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO); x++, y++)
			s->arg[y] = be64toh(ptr[x]);
	}
free_out:
	/* free firmware request structures */
	kvfree(in);
	kvfree(out);
}

/*
 * This function is called regularly to collect all statistics
 * counters from the firmware. The values can be viewed through the
 * sysctl interface. Execution is serialized using the priv's global
 * configuration lock.
 */
static void
mlx5e_update_stats_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
	    update_stats_work);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_vport_stats *s = &priv->stats.vport;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	struct buf_ring *sq_br;
#if (__FreeBSD_version < 1100000)
	struct ifnet *ifp = priv->ifp;
#endif

	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u64 tso_packets = 0;
	u64 tso_bytes = 0;
	u64 tx_queue_dropped = 0;
	u64 tx_defragged = 0;
	u64 tx_offload_none = 0;
	u64 lro_packets = 0;
	u64 lro_bytes = 0;
	u64 sw_lro_queued = 0;
	u64 sw_lro_flushed = 0;
	u64 rx_csum_none = 0;
	u64 rx_wqe_err = 0;
	u32 rx_out_of_buffer = 0;
	int i;
	int j;

	PRIV_LOCK(priv);
	out = mlx5_vzalloc(outlen);
	if (out == NULL)
		goto free_out;
	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
		goto free_out;

	/* Collect first the SW counters and then HW for consistency */
	for (i = 0; i < priv->params.num_channels; i++) {
		struct mlx5e_rq *rq = &priv->channel[i]->rq;

		rq_stats = &priv->channel[i]->rq.stats;

		/* collect stats from LRO */
		rq_stats->sw_lro_queued = rq->lro.lro_queued;
		rq_stats->sw_lro_flushed = rq->lro.lro_flushed;
		sw_lro_queued += rq_stats->sw_lro_queued;
		sw_lro_flushed += rq_stats->sw_lro_flushed;
		lro_packets += rq_stats->lro_packets;
		lro_bytes += rq_stats->lro_bytes;
		rx_csum_none += rq_stats->csum_none;
		rx_wqe_err += rq_stats->wqe_err;

		for (j = 0; j < priv->num_tc; j++) {
			sq_stats = &priv->channel[i]->sq[j].stats;
			sq_br = priv->channel[i]->sq[j].br;

			tso_packets += sq_stats->tso_packets;
			tso_bytes += sq_stats->tso_bytes;
			tx_queue_dropped += sq_stats->dropped;
			if (sq_br != NULL)
				tx_queue_dropped += sq_br->br_drops;
			tx_defragged += sq_stats->defragged;
			tx_offload_none += sq_stats->csum_offload_none;
		}
	}

	/* update counters */
	s->tso_packets = tso_packets;
	s->tso_bytes = tso_bytes;
	s->tx_queue_dropped = tx_queue_dropped;
	s->tx_defragged = tx_defragged;
	s->lro_packets = lro_packets;
	s->lro_bytes = lro_bytes;
	s->sw_lro_queued = sw_lro_queued;
	s->sw_lro_flushed = sw_lro_flushed;
	s->rx_csum_none = rx_csum_none;
	s->rx_wqe_err = rx_wqe_err;

	/* HW counters */
	memset(in, 0, sizeof(in));

	MLX5_SET(query_vport_counter_in, in, opcode,
	    MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	memset(out, 0, outlen);

	/* get number of out-of-buffer drops first */
	if
(mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id, 522 &rx_out_of_buffer)) 523 goto free_out; 524 525 /* accumulate difference into a 64-bit counter */ 526 s->rx_out_of_buffer += (u64)(u32)(rx_out_of_buffer - s->rx_out_of_buffer_prev); 527 s->rx_out_of_buffer_prev = rx_out_of_buffer; 528 529 /* get port statistics */ 530 if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen)) 531 goto free_out; 532 533#define MLX5_GET_CTR(out, x) \ 534 MLX5_GET64(query_vport_counter_out, out, x) 535 536 s->rx_error_packets = 537 MLX5_GET_CTR(out, received_errors.packets); 538 s->rx_error_bytes = 539 MLX5_GET_CTR(out, received_errors.octets); 540 s->tx_error_packets = 541 MLX5_GET_CTR(out, transmit_errors.packets); 542 s->tx_error_bytes = 543 MLX5_GET_CTR(out, transmit_errors.octets); 544 545 s->rx_unicast_packets = 546 MLX5_GET_CTR(out, received_eth_unicast.packets); 547 s->rx_unicast_bytes = 548 MLX5_GET_CTR(out, received_eth_unicast.octets); 549 s->tx_unicast_packets = 550 MLX5_GET_CTR(out, transmitted_eth_unicast.packets); 551 s->tx_unicast_bytes = 552 MLX5_GET_CTR(out, transmitted_eth_unicast.octets); 553 554 s->rx_multicast_packets = 555 MLX5_GET_CTR(out, received_eth_multicast.packets); 556 s->rx_multicast_bytes = 557 MLX5_GET_CTR(out, received_eth_multicast.octets); 558 s->tx_multicast_packets = 559 MLX5_GET_CTR(out, transmitted_eth_multicast.packets); 560 s->tx_multicast_bytes = 561 MLX5_GET_CTR(out, transmitted_eth_multicast.octets); 562 563 s->rx_broadcast_packets = 564 MLX5_GET_CTR(out, received_eth_broadcast.packets); 565 s->rx_broadcast_bytes = 566 MLX5_GET_CTR(out, received_eth_broadcast.octets); 567 s->tx_broadcast_packets = 568 MLX5_GET_CTR(out, transmitted_eth_broadcast.packets); 569 s->tx_broadcast_bytes = 570 MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); 571 572 s->rx_packets = 573 s->rx_unicast_packets + 574 s->rx_multicast_packets + 575 s->rx_broadcast_packets - 576 s->rx_out_of_buffer; 577 s->rx_bytes = 578 s->rx_unicast_bytes + 579 s->rx_multicast_bytes + 580 s->rx_broadcast_bytes; 581 s->tx_packets = 582 s->tx_unicast_packets + 583 s->tx_multicast_packets + 584 s->tx_broadcast_packets; 585 s->tx_bytes = 586 s->tx_unicast_bytes + 587 s->tx_multicast_bytes + 588 s->tx_broadcast_bytes; 589 590 /* Update calculated offload counters */ 591 s->tx_csum_offload = s->tx_packets - tx_offload_none; 592 s->rx_csum_good = s->rx_packets - s->rx_csum_none; 593 594 /* Get physical port counters */ 595 mlx5e_update_pport_counters(priv); 596 597#if (__FreeBSD_version < 1100000) 598 /* no get_counters interface in fbsd 10 */ 599 ifp->if_ipackets = s->rx_packets; 600 ifp->if_ierrors = s->rx_error_packets + 601 priv->stats.pport.alignment_err + 602 priv->stats.pport.check_seq_err + 603 priv->stats.pport.crc_align_errors + 604 priv->stats.pport.in_range_len_errors + 605 priv->stats.pport.jabbers + 606 priv->stats.pport.out_of_range_len + 607 priv->stats.pport.oversize_pkts + 608 priv->stats.pport.symbol_err + 609 priv->stats.pport.too_long_errors + 610 priv->stats.pport.undersize_pkts + 611 priv->stats.pport.unsupported_op_rx; 612 ifp->if_iqdrops = s->rx_out_of_buffer + 613 priv->stats.pport.drop_events; 614 ifp->if_opackets = s->tx_packets; 615 ifp->if_oerrors = s->tx_error_packets; 616 ifp->if_snd.ifq_drops = s->tx_queue_dropped; 617 ifp->if_ibytes = s->rx_bytes; 618 ifp->if_obytes = s->tx_bytes; 619 ifp->if_collisions = 620 priv->stats.pport.collisions; 621#endif 622 623free_out: 624 kvfree(out); 625 626 /* Update diagnostics, if any */ 627 if (priv->params_ethtool.diag_pci_enable || 
	    priv->params_ethtool.diag_general_enable) {
		int error = mlx5_core_get_diagnostics_full(mdev,
		    priv->params_ethtool.diag_pci_enable ? &priv->params_pci : NULL,
		    priv->params_ethtool.diag_general_enable ? &priv->params_general : NULL);
		if (error != 0)
			if_printf(priv->ifp, "Failed reading diagnostics: %d\n", error);
	}
	PRIV_UNLOCK(priv);
}

static void
mlx5e_update_stats(void *arg)
{
	struct mlx5e_priv *priv = arg;

	queue_work(priv->wq, &priv->update_stats_work);

	callout_reset(&priv->watchdog, hz, &mlx5e_update_stats, priv);
}

static void
mlx5e_async_event_sub(struct mlx5e_priv *priv,
    enum mlx5_dev_event event)
{
	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;

	default:
		break;
	}
}

static void
mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
    enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	mtx_lock(&priv->async_events_mtx);
	if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
		mlx5e_async_event_sub(priv, event);
	mtx_unlock(&priv->async_events_mtx);
}

static void
mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}

static void
mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	mtx_lock(&priv->async_events_mtx);
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
	mtx_unlock(&priv->async_events_mtx);
}

static const char *mlx5e_rq_stats_desc[] = {
	MLX5E_RQ_STATS(MLX5E_STATS_DESC)
};

static int
mlx5e_create_rq(struct mlx5e_channel *c,
    struct mlx5e_rq_param *param,
    struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	char buffer[16];
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int wq_sz;
	int err;
	int i;

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MJUM16BYTES,		/* maxsize */
	    1,				/* nsegments */
	    MJUM16BYTES,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &rq->dma_tag)))
		goto done;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
	    &rq->wq_ctrl);
	if (err)
		goto err_free_dma_tag;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	if (priv->params.hw_lro_en) {
		rq->wqe_sz = priv->params.lro_wqe_sz;
	} else {
		rq->wqe_sz = MLX5E_SW2MB_MTU(priv->ifp->if_mtu);
	}
	if (rq->wqe_sz > MJUM16BYTES) {
		err = -ENOMEM;
		goto err_rq_wq_destroy;
	} else if (rq->wqe_sz > MJUM9BYTES) {
		rq->wqe_sz = MJUM16BYTES;
	} else if (rq->wqe_sz > MJUMPAGESIZE) {
		rq->wqe_sz = MJUM9BYTES;
	} else if (rq->wqe_sz > MCLBYTES) {
		rq->wqe_sz = MJUMPAGESIZE;
	} else {
		rq->wqe_sz = MCLBYTES;
	}

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);

	err = -tcp_lro_init_args(&rq->lro, c->ifp, TCP_LRO_ENTRIES, wq_sz);
	if (err)
		goto err_rq_wq_destroy;

	rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
	for (i = 0; i != wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq,
i); 757 uint32_t byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN; 758 759 err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map); 760 if (err != 0) { 761 while (i--) 762 bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map); 763 goto err_rq_mbuf_free; 764 } 765 wqe->data.lkey = c->mkey_be; 766 wqe->data.byte_count = cpu_to_be32(byte_count | MLX5_HW_START_PADDING); 767 } 768 769 rq->ifp = c->ifp; 770 rq->channel = c; 771 rq->ix = c->ix; 772 773 snprintf(buffer, sizeof(buffer), "rxstat%d", c->ix); 774 mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 775 buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM, 776 rq->stats.arg); 777 return (0); 778 779err_rq_mbuf_free: 780 free(rq->mbuf, M_MLX5EN); 781 tcp_lro_free(&rq->lro); 782err_rq_wq_destroy: 783 mlx5_wq_destroy(&rq->wq_ctrl); 784err_free_dma_tag: 785 bus_dma_tag_destroy(rq->dma_tag); 786done: 787 return (err); 788} 789 790static void 791mlx5e_destroy_rq(struct mlx5e_rq *rq) 792{ 793 int wq_sz; 794 int i; 795 796 /* destroy all sysctl nodes */ 797 sysctl_ctx_free(&rq->stats.ctx); 798 799 /* free leftover LRO packets, if any */ 800 tcp_lro_free(&rq->lro); 801 802 wq_sz = mlx5_wq_ll_get_size(&rq->wq); 803 for (i = 0; i != wq_sz; i++) { 804 if (rq->mbuf[i].mbuf != NULL) { 805 bus_dmamap_unload(rq->dma_tag, rq->mbuf[i].dma_map); 806 m_freem(rq->mbuf[i].mbuf); 807 } 808 bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map); 809 } 810 free(rq->mbuf, M_MLX5EN); 811 mlx5_wq_destroy(&rq->wq_ctrl); 812} 813 814static int 815mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) 816{ 817 struct mlx5e_channel *c = rq->channel; 818 struct mlx5e_priv *priv = c->priv; 819 struct mlx5_core_dev *mdev = priv->mdev; 820 821 void *in; 822 void *rqc; 823 void *wq; 824 int inlen; 825 int err; 826 827 inlen = MLX5_ST_SZ_BYTES(create_rq_in) + 828 sizeof(u64) * rq->wq_ctrl.buf.npages; 829 in = mlx5_vzalloc(inlen); 830 if (in == NULL) 831 return (-ENOMEM); 832 833 rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); 834 wq = MLX5_ADDR_OF(rqc, rqc, wq); 835 836 memcpy(rqc, param->rqc, sizeof(param->rqc)); 837 838 MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn); 839 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); 840 MLX5_SET(rqc, rqc, flush_in_error_en, 1); 841 if (priv->counter_set_id >= 0) 842 MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id); 843 MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - 844 PAGE_SHIFT); 845 MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma); 846 847 mlx5_fill_page_array(&rq->wq_ctrl.buf, 848 (__be64 *) MLX5_ADDR_OF(wq, wq, pas)); 849 850 err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn); 851 852 kvfree(in); 853 854 return (err); 855} 856 857static int 858mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state) 859{ 860 struct mlx5e_channel *c = rq->channel; 861 struct mlx5e_priv *priv = c->priv; 862 struct mlx5_core_dev *mdev = priv->mdev; 863 864 void *in; 865 void *rqc; 866 int inlen; 867 int err; 868 869 inlen = MLX5_ST_SZ_BYTES(modify_rq_in); 870 in = mlx5_vzalloc(inlen); 871 if (in == NULL) 872 return (-ENOMEM); 873 874 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); 875 876 MLX5_SET(modify_rq_in, in, rqn, rq->rqn); 877 MLX5_SET(modify_rq_in, in, rq_state, curr_state); 878 MLX5_SET(rqc, rqc, state, next_state); 879 880 err = mlx5_core_modify_rq(mdev, in, inlen); 881 882 kvfree(in); 883 884 return (err); 885} 886 887static void 888mlx5e_disable_rq(struct mlx5e_rq *rq) 889{ 890 struct mlx5e_channel *c = rq->channel; 891 struct mlx5e_priv *priv = c->priv; 892 struct mlx5_core_dev *mdev = 
priv->mdev; 893 894 mlx5_core_destroy_rq(mdev, rq->rqn); 895} 896 897static int 898mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq) 899{ 900 struct mlx5e_channel *c = rq->channel; 901 struct mlx5e_priv *priv = c->priv; 902 struct mlx5_wq_ll *wq = &rq->wq; 903 int i; 904 905 for (i = 0; i < 1000; i++) { 906 if (wq->cur_sz >= priv->params.min_rx_wqes) 907 return (0); 908 909 msleep(4); 910 } 911 return (-ETIMEDOUT); 912} 913 914static int 915mlx5e_open_rq(struct mlx5e_channel *c, 916 struct mlx5e_rq_param *param, 917 struct mlx5e_rq *rq) 918{ 919 int err; 920 921 err = mlx5e_create_rq(c, param, rq); 922 if (err) 923 return (err); 924 925 err = mlx5e_enable_rq(rq, param); 926 if (err) 927 goto err_destroy_rq; 928 929 err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); 930 if (err) 931 goto err_disable_rq; 932 933 c->rq.enabled = 1; 934 935 return (0); 936 937err_disable_rq: 938 mlx5e_disable_rq(rq); 939err_destroy_rq: 940 mlx5e_destroy_rq(rq); 941 942 return (err); 943} 944 945static void 946mlx5e_close_rq(struct mlx5e_rq *rq) 947{ 948 mtx_lock(&rq->mtx); 949 rq->enabled = 0; 950 callout_stop(&rq->watchdog); 951 mtx_unlock(&rq->mtx); 952 953 callout_drain(&rq->watchdog); 954 955 mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); 956} 957 958static void 959mlx5e_close_rq_wait(struct mlx5e_rq *rq) 960{ 961 struct mlx5_core_dev *mdev = rq->channel->priv->mdev; 962 963 /* wait till RQ is empty */ 964 while (!mlx5_wq_ll_is_empty(&rq->wq) && 965 (mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)) { 966 msleep(4); 967 rq->cq.mcq.comp(&rq->cq.mcq); 968 } 969 970 mlx5e_disable_rq(rq); 971 mlx5e_destroy_rq(rq); 972} 973 974void 975mlx5e_free_sq_db(struct mlx5e_sq *sq) 976{ 977 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); 978 int x; 979 980 for (x = 0; x != wq_sz; x++) 981 bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map); 982 free(sq->mbuf, M_MLX5EN); 983} 984 985int 986mlx5e_alloc_sq_db(struct mlx5e_sq *sq) 987{ 988 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); 989 int err; 990 int x; 991 992 sq->mbuf = malloc(wq_sz * sizeof(sq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO); 993 994 /* Create DMA descriptor MAPs */ 995 for (x = 0; x != wq_sz; x++) { 996 err = -bus_dmamap_create(sq->dma_tag, 0, &sq->mbuf[x].dma_map); 997 if (err != 0) { 998 while (x--) 999 bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map); 1000 free(sq->mbuf, M_MLX5EN); 1001 return (err); 1002 } 1003 } 1004 return (0); 1005} 1006 1007static const char *mlx5e_sq_stats_desc[] = { 1008 MLX5E_SQ_STATS(MLX5E_STATS_DESC) 1009}; 1010 1011static int 1012mlx5e_create_sq(struct mlx5e_channel *c, 1013 int tc, 1014 struct mlx5e_sq_param *param, 1015 struct mlx5e_sq *sq) 1016{ 1017 struct mlx5e_priv *priv = c->priv; 1018 struct mlx5_core_dev *mdev = priv->mdev; 1019 char buffer[16]; 1020 1021 void *sqc = param->sqc; 1022 void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq); 1023#ifdef RSS 1024 cpuset_t cpu_mask; 1025 int cpu_id; 1026#endif 1027 int err; 1028 1029 /* Create DMA descriptor TAG */ 1030 if ((err = -bus_dma_tag_create( 1031 bus_get_dma_tag(mdev->pdev->dev.bsddev), 1032 1, /* any alignment */ 1033 0, /* no boundary */ 1034 BUS_SPACE_MAXADDR, /* lowaddr */ 1035 BUS_SPACE_MAXADDR, /* highaddr */ 1036 NULL, NULL, /* filter, filterarg */ 1037 MLX5E_MAX_TX_PAYLOAD_SIZE, /* maxsize */ 1038 MLX5E_MAX_TX_MBUF_FRAGS, /* nsegments */ 1039 MLX5E_MAX_TX_MBUF_SIZE, /* maxsegsize */ 1040 0, /* flags */ 1041 NULL, NULL, /* lockfunc, lockfuncarg */ 1042 &sq->dma_tag))) 1043 goto done; 1044 1045 err = mlx5_alloc_map_uar(mdev, &sq->uar); 1046 if 
(err)
		goto err_free_dma_tag;

	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
	    &sq->wq_ctrl);
	if (err)
		goto err_unmap_free_uar;

	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	err = mlx5e_alloc_sq_db(sq);
	if (err)
		goto err_sq_wq_destroy;

	sq->mkey_be = c->mkey_be;
	sq->ifp = priv->ifp;
	sq->priv = priv;
	sq->tc = tc;

	/* check if we should allocate a second packet buffer */
	if (priv->params_ethtool.tx_bufring_disable == 0) {
		sq->br = buf_ring_alloc(MLX5E_SQ_TX_QUEUE_SIZE, M_MLX5EN,
		    M_WAITOK, &sq->lock);
		if (sq->br == NULL) {
			if_printf(c->ifp, "%s: Failed allocating sq drbr buffer\n",
			    __func__);
			err = -ENOMEM;
			goto err_free_sq_db;
		}

		sq->sq_tq = taskqueue_create_fast("mlx5e_que", M_WAITOK,
		    taskqueue_thread_enqueue, &sq->sq_tq);
		if (sq->sq_tq == NULL) {
			if_printf(c->ifp, "%s: Failed allocating taskqueue\n",
			    __func__);
			err = -ENOMEM;
			goto err_free_drbr;
		}

		TASK_INIT(&sq->sq_task, 0, mlx5e_tx_que, sq);
#ifdef RSS
		cpu_id = rss_getcpu(c->ix % rss_getnumbuckets());
		CPU_SETOF(cpu_id, &cpu_mask);
		taskqueue_start_threads_cpuset(&sq->sq_tq, 1, PI_NET, &cpu_mask,
		    "%s TX SQ%d.%d CPU%d", c->ifp->if_xname, c->ix, tc, cpu_id);
#else
		taskqueue_start_threads(&sq->sq_tq, 1, PI_NET,
		    "%s TX SQ%d.%d", c->ifp->if_xname, c->ix, tc);
#endif
	}
	snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc);
	mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM,
	    sq->stats.arg);

	return (0);

err_free_drbr:
	buf_ring_free(sq->br, M_MLX5EN);
err_free_sq_db:
	mlx5e_free_sq_db(sq);
err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &sq->uar);

err_free_dma_tag:
	bus_dma_tag_destroy(sq->dma_tag);
done:
	return (err);
}

static void
mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
	/* destroy all sysctl nodes */
	sysctl_ctx_free(&sq->stats.ctx);

	mlx5e_free_sq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
	mlx5_unmap_free_uar(sq->priv->mdev, &sq->uar);
	if (sq->sq_tq != NULL) {
		taskqueue_drain(sq->sq_tq, &sq->sq_task);
		taskqueue_free(sq->sq_tq);
	}
	if (sq->br != NULL)
		buf_ring_free(sq->br, M_MLX5EN);
}

int
mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param,
    int tis_num)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
	    sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));

	MLX5_SET(sqc, sqc, tis_num_0, tis_num);
	MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, sq->uar.index);
	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
	    PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
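	/* Hand the SQ buffer's physical page addresses to the firmware command below. */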
1170 mlx5_fill_page_array(&sq->wq_ctrl.buf, 1171 (__be64 *) MLX5_ADDR_OF(wq, wq, pas)); 1172 1173 err = mlx5_core_create_sq(sq->priv->mdev, in, inlen, &sq->sqn); 1174 1175 kvfree(in); 1176 1177 return (err); 1178} 1179 1180int 1181mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state) 1182{ 1183 void *in; 1184 void *sqc; 1185 int inlen; 1186 int err; 1187 1188 inlen = MLX5_ST_SZ_BYTES(modify_sq_in); 1189 in = mlx5_vzalloc(inlen); 1190 if (in == NULL) 1191 return (-ENOMEM); 1192 1193 sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); 1194 1195 MLX5_SET(modify_sq_in, in, sqn, sq->sqn); 1196 MLX5_SET(modify_sq_in, in, sq_state, curr_state); 1197 MLX5_SET(sqc, sqc, state, next_state); 1198 1199 err = mlx5_core_modify_sq(sq->priv->mdev, in, inlen); 1200 1201 kvfree(in); 1202 1203 return (err); 1204} 1205 1206void 1207mlx5e_disable_sq(struct mlx5e_sq *sq) 1208{ 1209 1210 mlx5_core_destroy_sq(sq->priv->mdev, sq->sqn); 1211} 1212 1213static int 1214mlx5e_open_sq(struct mlx5e_channel *c, 1215 int tc, 1216 struct mlx5e_sq_param *param, 1217 struct mlx5e_sq *sq) 1218{ 1219 int err; 1220 1221 err = mlx5e_create_sq(c, tc, param, sq); 1222 if (err) 1223 return (err); 1224 1225 err = mlx5e_enable_sq(sq, param, c->priv->tisn[tc]); 1226 if (err) 1227 goto err_destroy_sq; 1228 1229 err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY); 1230 if (err) 1231 goto err_disable_sq; 1232 1233 atomic_store_rel_int(&sq->queue_state, MLX5E_SQ_READY); 1234 1235 return (0); 1236 1237err_disable_sq: 1238 mlx5e_disable_sq(sq); 1239err_destroy_sq: 1240 mlx5e_destroy_sq(sq); 1241 1242 return (err); 1243} 1244 1245static void 1246mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep) 1247{ 1248 /* fill up remainder with NOPs */ 1249 while (sq->cev_counter != 0) { 1250 while (!mlx5e_sq_has_room_for(sq, 1)) { 1251 if (can_sleep != 0) { 1252 mtx_unlock(&sq->lock); 1253 msleep(4); 1254 mtx_lock(&sq->lock); 1255 } else { 1256 goto done; 1257 } 1258 } 1259 /* send a single NOP */ 1260 mlx5e_send_nop(sq, 1); 1261 atomic_thread_fence_rel(); 1262 } 1263done: 1264 /* Check if we need to write the doorbell */ 1265 if (likely(sq->doorbell.d64 != 0)) { 1266 mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0); 1267 sq->doorbell.d64 = 0; 1268 } 1269} 1270 1271void 1272mlx5e_sq_cev_timeout(void *arg) 1273{ 1274 struct mlx5e_sq *sq = arg; 1275 1276 mtx_assert(&sq->lock, MA_OWNED); 1277 1278 /* check next state */ 1279 switch (sq->cev_next_state) { 1280 case MLX5E_CEV_STATE_SEND_NOPS: 1281 /* fill TX ring with NOPs, if any */ 1282 mlx5e_sq_send_nops_locked(sq, 0); 1283 1284 /* check if completed */ 1285 if (sq->cev_counter == 0) { 1286 sq->cev_next_state = MLX5E_CEV_STATE_INITIAL; 1287 return; 1288 } 1289 break; 1290 default: 1291 /* send NOPs on next timeout */ 1292 sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS; 1293 break; 1294 } 1295 1296 /* restart timer */ 1297 callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq); 1298} 1299 1300void 1301mlx5e_drain_sq(struct mlx5e_sq *sq) 1302{ 1303 int error; 1304 struct mlx5_core_dev *mdev= sq->priv->mdev; 1305 1306 /* 1307 * Check if already stopped. 1308 * 1309 * NOTE: The "stopped" variable is only written when both the 1310 * priv's configuration lock and the SQ's lock is locked. It 1311 * can therefore safely be read when only one of the two locks 1312 * is locked. This function is always called when the priv's 1313 * configuration lock is locked. 
	 */
	if (sq->stopped != 0)
		return;

	mtx_lock(&sq->lock);

	/* don't put more packets into the SQ */
	sq->stopped = 1;

	/* teardown event factor timer, if any */
	sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	callout_stop(&sq->cev_callout);

	/* send dummy NOPs in order to flush the transmit ring */
	mlx5e_sq_send_nops_locked(sq, 1);
	mtx_unlock(&sq->lock);

	/* make sure it is safe to free the callout */
	callout_drain(&sq->cev_callout);

	/* wait till SQ is empty or link is down */
	mtx_lock(&sq->lock);
	while (sq->cc != sq->pc &&
	    (sq->priv->media_status_last & IFM_ACTIVE) != 0 &&
	    mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mtx_unlock(&sq->lock);
		msleep(1);
		sq->cq.mcq.comp(&sq->cq.mcq);
		mtx_lock(&sq->lock);
	}
	mtx_unlock(&sq->lock);

	/* error out remaining requests */
	error = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
	if (error != 0) {
		if_printf(sq->ifp,
		    "mlx5e_modify_sq() from RDY to ERR failed: %d\n", error);
	}

	/* wait till SQ is empty */
	mtx_lock(&sq->lock);
	while (sq->cc != sq->pc &&
	    mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mtx_unlock(&sq->lock);
		msleep(1);
		sq->cq.mcq.comp(&sq->cq.mcq);
		mtx_lock(&sq->lock);
	}
	mtx_unlock(&sq->lock);
}

static void
mlx5e_close_sq_wait(struct mlx5e_sq *sq)
{

	mlx5e_drain_sq(sq);
	mlx5e_disable_sq(sq);
	mlx5e_destroy_sq(sq);
}

static int
mlx5e_create_cq(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param,
    struct mlx5e_cq *cq,
    mlx5e_cq_comp_t *comp,
    int eq_ix)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	int irqn;
	int err;
	u32 i;

	param->wq.buf_numa_node = 0;
	param->wq.db_numa_node = 0;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
	    &cq->wq_ctrl);
	if (err)
		return (err);

	mlx5_vector2eqn(mdev, eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz = 64;
	mcq->set_ci_db = cq->wq_ctrl.db.db;
	mcq->arm_db = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db = 0;
	mcq->vector = eq_ix;
	mcq->comp = comp;
	mcq->event = mlx5e_cq_error_event;
	mcq->irqn = irqn;
	mcq->uar = &priv->cq_uar;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->priv = priv;

	return (0);
}

static void
mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static int
mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param, int eq_ix)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	void *in;
	void *cqc;
	int inlen;
	int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
	    sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_array(&cq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(cq->priv->mdev, eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); 1454 MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - 1455 PAGE_SHIFT); 1456 MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); 1457 1458 err = mlx5_core_create_cq(cq->priv->mdev, mcq, in, inlen); 1459 1460 kvfree(in); 1461 1462 if (err) 1463 return (err); 1464 1465 mlx5e_cq_arm(cq, MLX5_GET_DOORBELL_LOCK(&cq->priv->doorbell_lock)); 1466 1467 return (0); 1468} 1469 1470static void 1471mlx5e_disable_cq(struct mlx5e_cq *cq) 1472{ 1473 1474 mlx5_core_destroy_cq(cq->priv->mdev, &cq->mcq); 1475} 1476 1477int 1478mlx5e_open_cq(struct mlx5e_priv *priv, 1479 struct mlx5e_cq_param *param, 1480 struct mlx5e_cq *cq, 1481 mlx5e_cq_comp_t *comp, 1482 int eq_ix) 1483{ 1484 int err; 1485 1486 err = mlx5e_create_cq(priv, param, cq, comp, eq_ix); 1487 if (err) 1488 return (err); 1489 1490 err = mlx5e_enable_cq(cq, param, eq_ix); 1491 if (err) 1492 goto err_destroy_cq; 1493 1494 return (0); 1495 1496err_destroy_cq: 1497 mlx5e_destroy_cq(cq); 1498 1499 return (err); 1500} 1501 1502void 1503mlx5e_close_cq(struct mlx5e_cq *cq) 1504{ 1505 mlx5e_disable_cq(cq); 1506 mlx5e_destroy_cq(cq); 1507} 1508 1509static int 1510mlx5e_open_tx_cqs(struct mlx5e_channel *c, 1511 struct mlx5e_channel_param *cparam) 1512{ 1513 int err; 1514 int tc; 1515 1516 for (tc = 0; tc < c->num_tc; tc++) { 1517 /* open completion queue */ 1518 err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[tc].cq, 1519 &mlx5e_tx_cq_comp, c->ix); 1520 if (err) 1521 goto err_close_tx_cqs; 1522 } 1523 return (0); 1524 1525err_close_tx_cqs: 1526 for (tc--; tc >= 0; tc--) 1527 mlx5e_close_cq(&c->sq[tc].cq); 1528 1529 return (err); 1530} 1531 1532static void 1533mlx5e_close_tx_cqs(struct mlx5e_channel *c) 1534{ 1535 int tc; 1536 1537 for (tc = 0; tc < c->num_tc; tc++) 1538 mlx5e_close_cq(&c->sq[tc].cq); 1539} 1540 1541static int 1542mlx5e_open_sqs(struct mlx5e_channel *c, 1543 struct mlx5e_channel_param *cparam) 1544{ 1545 int err; 1546 int tc; 1547 1548 for (tc = 0; tc < c->num_tc; tc++) { 1549 err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]); 1550 if (err) 1551 goto err_close_sqs; 1552 } 1553 1554 return (0); 1555 1556err_close_sqs: 1557 for (tc--; tc >= 0; tc--) 1558 mlx5e_close_sq_wait(&c->sq[tc]); 1559 1560 return (err); 1561} 1562 1563static void 1564mlx5e_close_sqs_wait(struct mlx5e_channel *c) 1565{ 1566 int tc; 1567 1568 for (tc = 0; tc < c->num_tc; tc++) 1569 mlx5e_close_sq_wait(&c->sq[tc]); 1570} 1571 1572static void 1573mlx5e_chan_mtx_init(struct mlx5e_channel *c) 1574{ 1575 int tc; 1576 1577 mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF); 1578 1579 callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0); 1580 1581 for (tc = 0; tc < c->num_tc; tc++) { 1582 struct mlx5e_sq *sq = c->sq + tc; 1583 1584 mtx_init(&sq->lock, "mlx5tx", 1585 MTX_NETWORK_LOCK " TX", MTX_DEF); 1586 mtx_init(&sq->comp_lock, "mlx5comp", 1587 MTX_NETWORK_LOCK " TX", MTX_DEF); 1588 1589 callout_init_mtx(&sq->cev_callout, &sq->lock, 0); 1590 1591 sq->cev_factor = c->priv->params_ethtool.tx_completion_fact; 1592 1593 /* ensure the TX completion event factor is not zero */ 1594 if (sq->cev_factor == 0) 1595 sq->cev_factor = 1; 1596 } 1597} 1598 1599static void 1600mlx5e_chan_mtx_destroy(struct mlx5e_channel *c) 1601{ 1602 int tc; 1603 1604 mtx_destroy(&c->rq.mtx); 1605 1606 for (tc = 0; tc < c->num_tc; tc++) { 1607 mtx_destroy(&c->sq[tc].lock); 1608 mtx_destroy(&c->sq[tc].comp_lock); 1609 } 1610} 1611 1612static int 1613mlx5e_open_channel(struct mlx5e_priv *priv, int ix, 1614 struct mlx5e_channel_param *cparam, 
1615 struct mlx5e_channel *volatile *cp) 1616{ 1617 struct mlx5e_channel *c; 1618 int err; 1619 1620 c = malloc(sizeof(*c), M_MLX5EN, M_WAITOK | M_ZERO); 1621 c->priv = priv; 1622 c->ix = ix; 1623 c->cpu = 0; 1624 c->ifp = priv->ifp; 1625 c->mkey_be = cpu_to_be32(priv->mr.key); 1626 c->num_tc = priv->num_tc; 1627 1628 /* init mutexes */ 1629 mlx5e_chan_mtx_init(c); 1630 1631 /* open transmit completion queue */ 1632 err = mlx5e_open_tx_cqs(c, cparam); 1633 if (err) 1634 goto err_free; 1635 1636 /* open receive completion queue */ 1637 err = mlx5e_open_cq(c->priv, &cparam->rx_cq, &c->rq.cq, 1638 &mlx5e_rx_cq_comp, c->ix); 1639 if (err) 1640 goto err_close_tx_cqs; 1641 1642 err = mlx5e_open_sqs(c, cparam); 1643 if (err) 1644 goto err_close_rx_cq; 1645 1646 err = mlx5e_open_rq(c, &cparam->rq, &c->rq); 1647 if (err) 1648 goto err_close_sqs; 1649 1650 /* store channel pointer */ 1651 *cp = c; 1652 1653 /* poll receive queue initially */ 1654 c->rq.cq.mcq.comp(&c->rq.cq.mcq); 1655 1656 return (0); 1657 1658err_close_sqs: 1659 mlx5e_close_sqs_wait(c); 1660 1661err_close_rx_cq: 1662 mlx5e_close_cq(&c->rq.cq); 1663 1664err_close_tx_cqs: 1665 mlx5e_close_tx_cqs(c); 1666 1667err_free: 1668 /* destroy mutexes */ 1669 mlx5e_chan_mtx_destroy(c); 1670 free(c, M_MLX5EN); 1671 return (err); 1672} 1673 1674static void 1675mlx5e_close_channel(struct mlx5e_channel *volatile *pp) 1676{ 1677 struct mlx5e_channel *c = *pp; 1678 1679 /* check if channel is already closed */ 1680 if (c == NULL) 1681 return; 1682 mlx5e_close_rq(&c->rq); 1683} 1684 1685static void 1686mlx5e_close_channel_wait(struct mlx5e_channel *volatile *pp) 1687{ 1688 struct mlx5e_channel *c = *pp; 1689 1690 /* check if channel is already closed */ 1691 if (c == NULL) 1692 return; 1693 /* ensure channel pointer is no longer used */ 1694 *pp = NULL; 1695 1696 mlx5e_close_rq_wait(&c->rq); 1697 mlx5e_close_sqs_wait(c); 1698 mlx5e_close_cq(&c->rq.cq); 1699 mlx5e_close_tx_cqs(c); 1700 /* destroy mutexes */ 1701 mlx5e_chan_mtx_destroy(c); 1702 free(c, M_MLX5EN); 1703} 1704 1705static void 1706mlx5e_build_rq_param(struct mlx5e_priv *priv, 1707 struct mlx5e_rq_param *param) 1708{ 1709 void *rqc = param->rqc; 1710 void *wq = MLX5_ADDR_OF(rqc, rqc, wq); 1711 1712 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); 1713 MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); 1714 MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); 1715 MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); 1716 MLX5_SET(wq, wq, pd, priv->pdn); 1717 1718 param->wq.buf_numa_node = 0; 1719 param->wq.db_numa_node = 0; 1720 param->wq.linear = 1; 1721} 1722 1723static void 1724mlx5e_build_sq_param(struct mlx5e_priv *priv, 1725 struct mlx5e_sq_param *param) 1726{ 1727 void *sqc = param->sqc; 1728 void *wq = MLX5_ADDR_OF(sqc, sqc, wq); 1729 1730 MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size); 1731 MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); 1732 MLX5_SET(wq, wq, pd, priv->pdn); 1733 1734 param->wq.buf_numa_node = 0; 1735 param->wq.db_numa_node = 0; 1736 param->wq.linear = 1; 1737} 1738 1739static void 1740mlx5e_build_common_cq_param(struct mlx5e_priv *priv, 1741 struct mlx5e_cq_param *param) 1742{ 1743 void *cqc = param->cqc; 1744 1745 MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index); 1746} 1747 1748static void 1749mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, 1750 struct mlx5e_cq_param *param) 1751{ 1752 void *cqc = param->cqc; 1753 1754 1755 /* 1756 * TODO The sysctl to control on/off is a bool value for now, which means 1757 * 
we only support CSUM, once HASH is implemented we'll need to address that.
	 */
	if (priv->params.cqe_zipping_en) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_compression_en, 1);
	}

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
	MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
	MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);

	switch (priv->params.rx_cq_moderation_mode) {
	case 0:
		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
		break;
	default:
		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
		else
			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
		break;
	}

	mlx5e_build_common_cq_param(priv, param);
}

static void
mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
	MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec);
	MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts);

	switch (priv->params.tx_cq_moderation_mode) {
	case 0:
		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
		break;
	default:
		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
		else
			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
		break;
	}

	mlx5e_build_common_cq_param(priv, param);
}

static void
mlx5e_build_channel_param(struct mlx5e_priv *priv,
    struct mlx5e_channel_param *cparam)
{
	memset(cparam, 0, sizeof(*cparam));

	mlx5e_build_rq_param(priv, &cparam->rq);
	mlx5e_build_sq_param(priv, &cparam->sq);
	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
}

static int
mlx5e_open_channels(struct mlx5e_priv *priv)
{
	struct mlx5e_channel_param cparam;
	void *ptr;
	int err;
	int i;
	int j;

	priv->channel = malloc(priv->params.num_channels *
	    sizeof(struct mlx5e_channel *), M_MLX5EN, M_WAITOK | M_ZERO);

	mlx5e_build_channel_param(priv, &cparam);
	for (i = 0; i < priv->params.num_channels; i++) {
		err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
		if (err)
			goto err_close_channels;
	}

	for (j = 0; j < priv->params.num_channels; j++) {
		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
		if (err)
			goto err_close_channels;
	}

	return (0);

err_close_channels:
	for (i--; i >= 0; i--) {
		mlx5e_close_channel(&priv->channel[i]);
		mlx5e_close_channel_wait(&priv->channel[i]);
	}

	/* remove "volatile" attribute from "channel" pointer */
	ptr = __DECONST(void *, priv->channel);
	priv->channel = NULL;

	free(ptr, M_MLX5EN);

	return (err);
}

static void
mlx5e_close_channels(struct mlx5e_priv *priv)
{
	void *ptr;
	int i;

	if (priv->channel == NULL)
		return;

	for (i = 0; i < priv->params.num_channels; i++)
		mlx5e_close_channel(&priv->channel[i]);
	for (i = 0; i < priv->params.num_channels; i++)
mlx5e_close_channel_wait(&priv->channel[i]); 1875 1876 /* remove "volatile" attribute from "channel" pointer */ 1877 ptr = __DECONST(void *, priv->channel); 1878 priv->channel = NULL; 1879 1880 free(ptr, M_MLX5EN); 1881} 1882 1883static int 1884mlx5e_refresh_sq_params(struct mlx5e_priv *priv, struct mlx5e_sq *sq) 1885{ 1886 1887 if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) { 1888 uint8_t cq_mode; 1889 1890 switch (priv->params.tx_cq_moderation_mode) { 1891 case 0: 1892 cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; 1893 break; 1894 default: 1895 cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE; 1896 break; 1897 } 1898 1899 return (mlx5_core_modify_cq_moderation_mode(priv->mdev, &sq->cq.mcq, 1900 priv->params.tx_cq_moderation_usec, 1901 priv->params.tx_cq_moderation_pkts, 1902 cq_mode)); 1903 } 1904 1905 return (mlx5_core_modify_cq_moderation(priv->mdev, &sq->cq.mcq, 1906 priv->params.tx_cq_moderation_usec, 1907 priv->params.tx_cq_moderation_pkts)); 1908} 1909 1910static int 1911mlx5e_refresh_rq_params(struct mlx5e_priv *priv, struct mlx5e_rq *rq) 1912{ 1913 1914 if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) { 1915 uint8_t cq_mode; 1916 int retval; 1917 1918 switch (priv->params.rx_cq_moderation_mode) { 1919 case 0: 1920 cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; 1921 break; 1922 default: 1923 cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE; 1924 break; 1925 } 1926 1927 retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq, 1928 priv->params.rx_cq_moderation_usec, 1929 priv->params.rx_cq_moderation_pkts, 1930 cq_mode); 1931 1932 return (retval); 1933 } 1934 1935 return (mlx5_core_modify_cq_moderation(priv->mdev, &rq->cq.mcq, 1936 priv->params.rx_cq_moderation_usec, 1937 priv->params.rx_cq_moderation_pkts)); 1938} 1939 1940static int 1941mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c) 1942{ 1943 int err; 1944 int i; 1945 1946 if (c == NULL) 1947 return (EINVAL); 1948 1949 err = mlx5e_refresh_rq_params(priv, &c->rq); 1950 if (err) 1951 goto done; 1952 1953 for (i = 0; i != c->num_tc; i++) { 1954 err = mlx5e_refresh_sq_params(priv, &c->sq[i]); 1955 if (err) 1956 goto done; 1957 } 1958done: 1959 return (err); 1960} 1961 1962int 1963mlx5e_refresh_channel_params(struct mlx5e_priv *priv) 1964{ 1965 int i; 1966 1967 if (priv->channel == NULL) 1968 return (EINVAL); 1969 1970 for (i = 0; i < priv->params.num_channels; i++) { 1971 int err; 1972 1973 err = mlx5e_refresh_channel_params_sub(priv, priv->channel[i]); 1974 if (err) 1975 return (err); 1976 } 1977 return (0); 1978} 1979 1980static int 1981mlx5e_open_tis(struct mlx5e_priv *priv, int tc) 1982{ 1983 struct mlx5_core_dev *mdev = priv->mdev; 1984 u32 in[MLX5_ST_SZ_DW(create_tis_in)]; 1985 void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); 1986 1987 memset(in, 0, sizeof(in)); 1988 1989 MLX5_SET(tisc, tisc, prio, tc); 1990 MLX5_SET(tisc, tisc, transport_domain, priv->tdn); 1991 1992 return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc])); 1993} 1994 1995static void 1996mlx5e_close_tis(struct mlx5e_priv *priv, int tc) 1997{ 1998 mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]); 1999} 2000 2001static int 2002mlx5e_open_tises(struct mlx5e_priv *priv) 2003{ 2004 int num_tc = priv->num_tc; 2005 int err; 2006 int tc; 2007 2008 for (tc = 0; tc < num_tc; tc++) { 2009 err = mlx5e_open_tis(priv, tc); 2010 if (err) 2011 goto err_close_tises; 2012 } 2013 2014 return (0); 2015 2016err_close_tises: 2017 for (tc--; tc >= 0; tc--) 2018 mlx5e_close_tis(priv, tc); 2019 2020 return (err); 2021} 
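/* Tear down one TIS per traffic class, undoing mlx5e_open_tises(). */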
2022 2023static void 2024mlx5e_close_tises(struct mlx5e_priv *priv) 2025{ 2026 int num_tc = priv->num_tc; 2027 int tc; 2028 2029 for (tc = 0; tc < num_tc; tc++) 2030 mlx5e_close_tis(priv, tc); 2031} 2032 2033static int 2034mlx5e_open_rqt(struct mlx5e_priv *priv) 2035{ 2036 struct mlx5_core_dev *mdev = priv->mdev; 2037 u32 *in; 2038 u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0}; 2039 void *rqtc; 2040 int inlen; 2041 int err; 2042 int sz; 2043 int i; 2044 2045 sz = 1 << priv->params.rx_hash_log_tbl_sz; 2046 2047 inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; 2048 in = mlx5_vzalloc(inlen); 2049 if (in == NULL) 2050 return (-ENOMEM); 2051 rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); 2052 2053 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); 2054 MLX5_SET(rqtc, rqtc, rqt_max_size, sz); 2055 2056 for (i = 0; i < sz; i++) { 2057 int ix; 2058#ifdef RSS 2059 ix = rss_get_indirection_to_bucket(i); 2060#else 2061 ix = i; 2062#endif 2063 /* ensure we don't overflow */ 2064 ix %= priv->params.num_channels; 2065 MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn); 2066 } 2067 2068 MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT); 2069 2070 err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); 2071 if (!err) 2072 priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn); 2073 2074 kvfree(in); 2075 2076 return (err); 2077} 2078 2079static void 2080mlx5e_close_rqt(struct mlx5e_priv *priv) 2081{ 2082 u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0}; 2083 u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0}; 2084 2085 MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT); 2086 MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn); 2087 2088 mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)); 2089} 2090 2091static void 2092mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt) 2093{ 2094 void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); 2095 __be32 *hkey; 2096 2097 MLX5_SET(tirc, tirc, transport_domain, priv->tdn); 2098 2099#define ROUGH_MAX_L2_L3_HDR_SZ 256 2100 2101#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ 2102 MLX5_HASH_FIELD_SEL_DST_IP) 2103 2104#define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\ 2105 MLX5_HASH_FIELD_SEL_DST_IP |\ 2106 MLX5_HASH_FIELD_SEL_L4_SPORT |\ 2107 MLX5_HASH_FIELD_SEL_L4_DPORT) 2108 2109#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\ 2110 MLX5_HASH_FIELD_SEL_DST_IP |\ 2111 MLX5_HASH_FIELD_SEL_IPSEC_SPI) 2112 2113 if (priv->params.hw_lro_en) { 2114 MLX5_SET(tirc, tirc, lro_enable_mask, 2115 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | 2116 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); 2117 MLX5_SET(tirc, tirc, lro_max_msg_sz, 2118 (priv->params.lro_wqe_sz - 2119 ROUGH_MAX_L2_L3_HDR_SZ) >> 8); 2120 /* TODO: add the option to choose timer value dynamically */ 2121 MLX5_SET(tirc, tirc, lro_timeout_period_usecs, 2122 MLX5_CAP_ETH(priv->mdev, 2123 lro_timer_supported_periods[2])); 2124 } 2125 2126 /* setup parameters for hashing TIR type, if any */ 2127 switch (tt) { 2128 case MLX5E_TT_ANY: 2129 MLX5_SET(tirc, tirc, disp_type, 2130 MLX5_TIRC_DISP_TYPE_DIRECT); 2131 MLX5_SET(tirc, tirc, inline_rqn, 2132 priv->channel[0]->rq.rqn); 2133 break; 2134 default: 2135 MLX5_SET(tirc, tirc, disp_type, 2136 MLX5_TIRC_DISP_TYPE_INDIRECT); 2137 MLX5_SET(tirc, tirc, indirect_table, 2138 priv->rqtn); 2139 MLX5_SET(tirc, tirc, rx_hash_fn, 2140 MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ); 2141 hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key); 2142#ifdef RSS 2143 /* 2144 * The FreeBSD RSS implementation does currently not 2145 * support 
symmetric Toeplitz hashes: 2146 */ 2147 MLX5_SET(tirc, tirc, rx_hash_symmetric, 0); 2148 rss_getkey((uint8_t *)hkey); 2149#else 2150 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); 2151 hkey[0] = cpu_to_be32(0xD181C62C); 2152 hkey[1] = cpu_to_be32(0xF7F4DB5B); 2153 hkey[2] = cpu_to_be32(0x1983A2FC); 2154 hkey[3] = cpu_to_be32(0x943E1ADB); 2155 hkey[4] = cpu_to_be32(0xD9389E6B); 2156 hkey[5] = cpu_to_be32(0xD1039C2C); 2157 hkey[6] = cpu_to_be32(0xA74499AD); 2158 hkey[7] = cpu_to_be32(0x593D56D9); 2159 hkey[8] = cpu_to_be32(0xF3253C06); 2160 hkey[9] = cpu_to_be32(0x2ADC1FFC); 2161#endif 2162 break; 2163 } 2164 2165 switch (tt) { 2166 case MLX5E_TT_IPV4_TCP: 2167 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2168 MLX5_L3_PROT_TYPE_IPV4); 2169 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 2170 MLX5_L4_PROT_TYPE_TCP); 2171#ifdef RSS 2172 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) { 2173 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2174 MLX5_HASH_IP); 2175 } else 2176#endif 2177 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2178 MLX5_HASH_ALL); 2179 break; 2180 2181 case MLX5E_TT_IPV6_TCP: 2182 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2183 MLX5_L3_PROT_TYPE_IPV6); 2184 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 2185 MLX5_L4_PROT_TYPE_TCP); 2186#ifdef RSS 2187 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) { 2188 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2189 MLX5_HASH_IP); 2190 } else 2191#endif 2192 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2193 MLX5_HASH_ALL); 2194 break; 2195 2196 case MLX5E_TT_IPV4_UDP: 2197 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2198 MLX5_L3_PROT_TYPE_IPV4); 2199 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 2200 MLX5_L4_PROT_TYPE_UDP); 2201#ifdef RSS 2202 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) { 2203 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2204 MLX5_HASH_IP); 2205 } else 2206#endif 2207 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2208 MLX5_HASH_ALL); 2209 break; 2210 2211 case MLX5E_TT_IPV6_UDP: 2212 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2213 MLX5_L3_PROT_TYPE_IPV6); 2214 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 2215 MLX5_L4_PROT_TYPE_UDP); 2216#ifdef RSS 2217 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) { 2218 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2219 MLX5_HASH_IP); 2220 } else 2221#endif 2222 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2223 MLX5_HASH_ALL); 2224 break; 2225 2226 case MLX5E_TT_IPV4_IPSEC_AH: 2227 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2228 MLX5_L3_PROT_TYPE_IPV4); 2229 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2230 MLX5_HASH_IP_IPSEC_SPI); 2231 break; 2232 2233 case MLX5E_TT_IPV6_IPSEC_AH: 2234 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2235 MLX5_L3_PROT_TYPE_IPV6); 2236 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2237 MLX5_HASH_IP_IPSEC_SPI); 2238 break; 2239 2240 case MLX5E_TT_IPV4_IPSEC_ESP: 2241 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2242 MLX5_L3_PROT_TYPE_IPV4); 2243 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2244 MLX5_HASH_IP_IPSEC_SPI); 2245 break; 2246 2247 case MLX5E_TT_IPV6_IPSEC_ESP: 2248 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2249 MLX5_L3_PROT_TYPE_IPV6); 2250 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2251 MLX5_HASH_IP_IPSEC_SPI); 2252 break; 2253 2254 case MLX5E_TT_IPV4: 2255 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2256 
MLX5_L3_PROT_TYPE_IPV4); 2257 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2258 MLX5_HASH_IP); 2259 break; 2260 2261 case MLX5E_TT_IPV6: 2262 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2263 MLX5_L3_PROT_TYPE_IPV6); 2264 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2265 MLX5_HASH_IP); 2266 break; 2267 2268 default: 2269 break; 2270 } 2271} 2272 2273static int 2274mlx5e_open_tir(struct mlx5e_priv *priv, int tt) 2275{ 2276 struct mlx5_core_dev *mdev = priv->mdev; 2277 u32 *in; 2278 void *tirc; 2279 int inlen; 2280 int err; 2281 2282 inlen = MLX5_ST_SZ_BYTES(create_tir_in); 2283 in = mlx5_vzalloc(inlen); 2284 if (in == NULL) 2285 return (-ENOMEM); 2286 tirc = MLX5_ADDR_OF(create_tir_in, in, tir_context); 2287 2288 mlx5e_build_tir_ctx(priv, tirc, tt); 2289 2290 err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]); 2291 2292 kvfree(in); 2293 2294 return (err); 2295} 2296 2297static void 2298mlx5e_close_tir(struct mlx5e_priv *priv, int tt) 2299{ 2300 mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]); 2301} 2302 2303static int 2304mlx5e_open_tirs(struct mlx5e_priv *priv) 2305{ 2306 int err; 2307 int i; 2308 2309 for (i = 0; i < MLX5E_NUM_TT; i++) { 2310 err = mlx5e_open_tir(priv, i); 2311 if (err) 2312 goto err_close_tirs; 2313 } 2314 2315 return (0); 2316 2317err_close_tirs: 2318 for (i--; i >= 0; i--) 2319 mlx5e_close_tir(priv, i); 2320 2321 return (err); 2322} 2323 2324static void 2325mlx5e_close_tirs(struct mlx5e_priv *priv) 2326{ 2327 int i; 2328 2329 for (i = 0; i < MLX5E_NUM_TT; i++) 2330 mlx5e_close_tir(priv, i); 2331} 2332 2333/* 2334 * SW MTU does not include headers, 2335 * HW MTU includes all headers and checksums. 2336 */ 2337static int 2338mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu) 2339{ 2340 struct mlx5e_priv *priv = ifp->if_softc; 2341 struct mlx5_core_dev *mdev = priv->mdev; 2342 int hw_mtu; 2343 int err; 2344 2345 hw_mtu = MLX5E_SW2HW_MTU(sw_mtu); 2346 2347 err = mlx5_set_port_mtu(mdev, hw_mtu); 2348 if (err) { 2349 if_printf(ifp, "%s: mlx5_set_port_mtu failed setting %d, err=%d\n", 2350 __func__, sw_mtu, err); 2351 return (err); 2352 } 2353 2354 /* Update vport context MTU */ 2355 err = mlx5_set_vport_mtu(mdev, hw_mtu); 2356 if (err) { 2357 if_printf(ifp, "%s: Failed updating vport context with MTU size, err=%d\n", 2358 __func__, err); 2359 } 2360 2361 ifp->if_mtu = sw_mtu; 2362 2363 err = mlx5_query_vport_mtu(mdev, &hw_mtu); 2364 if (err || !hw_mtu) { 2365 /* fallback to port oper mtu */ 2366 err = mlx5_query_port_oper_mtu(mdev, &hw_mtu); 2367 } 2368 if (err) { 2369 if_printf(ifp, "Query port MTU, after setting new " 2370 "MTU value, failed\n"); 2371 return (err); 2372 } else if (MLX5E_HW2SW_MTU(hw_mtu) < sw_mtu) { 2373 err = -E2BIG, 2374 if_printf(ifp, "Port MTU %d is smaller than " 2375 "ifp mtu %d\n", hw_mtu, sw_mtu); 2376 } else if (MLX5E_HW2SW_MTU(hw_mtu) > sw_mtu) { 2377 err = -EINVAL; 2378 if_printf(ifp, "Port MTU %d is bigger than " 2379 "ifp mtu %d\n", hw_mtu, sw_mtu); 2380 } 2381 priv->params_ethtool.hw_mtu = hw_mtu; 2382 2383 return (err); 2384} 2385 2386int 2387mlx5e_open_locked(struct ifnet *ifp) 2388{ 2389 struct mlx5e_priv *priv = ifp->if_softc; 2390 int err; 2391 u16 set_id; 2392 2393 /* check if already opened */ 2394 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0) 2395 return (0); 2396 2397#ifdef RSS 2398 if (rss_getnumbuckets() > priv->params.num_channels) { 2399 if_printf(ifp, "NOTE: There are more RSS buckets(%u) than " 2400 "channels(%u) available\n", rss_getnumbuckets(), 2401 priv->params.num_channels); 
2402 } 2403#endif 2404 err = mlx5e_open_tises(priv); 2405 if (err) { 2406 if_printf(ifp, "%s: mlx5e_open_tises failed, %d\n", 2407 __func__, err); 2408 return (err); 2409 } 2410 err = mlx5_vport_alloc_q_counter(priv->mdev, 2411 MLX5_INTERFACE_PROTOCOL_ETH, &set_id); 2412 if (err) { 2413 if_printf(priv->ifp, 2414 "%s: mlx5_vport_alloc_q_counter failed: %d\n", 2415 __func__, err); 2416 goto err_close_tises; 2417 } 2418 /* store counter set ID */ 2419 priv->counter_set_id = set_id; 2420 2421 err = mlx5e_open_channels(priv); 2422 if (err) { 2423 if_printf(ifp, "%s: mlx5e_open_channels failed, %d\n", 2424 __func__, err); 2425 goto err_dalloc_q_counter; 2426 } 2427 err = mlx5e_open_rqt(priv); 2428 if (err) { 2429 if_printf(ifp, "%s: mlx5e_open_rqt failed, %d\n", 2430 __func__, err); 2431 goto err_close_channels; 2432 } 2433 err = mlx5e_open_tirs(priv); 2434 if (err) { 2435 if_printf(ifp, "%s: mlx5e_open_tir failed, %d\n", 2436 __func__, err); 2437 goto err_close_rqls; 2438 } 2439 err = mlx5e_open_flow_table(priv); 2440 if (err) { 2441 if_printf(ifp, "%s: mlx5e_open_flow_table failed, %d\n", 2442 __func__, err); 2443 goto err_close_tirs; 2444 } 2445 err = mlx5e_add_all_vlan_rules(priv); 2446 if (err) { 2447 if_printf(ifp, "%s: mlx5e_add_all_vlan_rules failed, %d\n", 2448 __func__, err); 2449 goto err_close_flow_table; 2450 } 2451 set_bit(MLX5E_STATE_OPENED, &priv->state); 2452 2453 mlx5e_update_carrier(priv); 2454 mlx5e_set_rx_mode_core(priv); 2455 2456 return (0); 2457 2458err_close_flow_table: 2459 mlx5e_close_flow_table(priv); 2460 2461err_close_tirs: 2462 mlx5e_close_tirs(priv); 2463 2464err_close_rqls: 2465 mlx5e_close_rqt(priv); 2466 2467err_close_channels: 2468 mlx5e_close_channels(priv); 2469 2470err_dalloc_q_counter: 2471 mlx5_vport_dealloc_q_counter(priv->mdev, 2472 MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id); 2473 2474err_close_tises: 2475 mlx5e_close_tises(priv); 2476 2477 return (err); 2478} 2479 2480static void 2481mlx5e_open(void *arg) 2482{ 2483 struct mlx5e_priv *priv = arg; 2484 2485 PRIV_LOCK(priv); 2486 if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP)) 2487 if_printf(priv->ifp, 2488 "%s: Setting port status to up failed\n", 2489 __func__); 2490 2491 mlx5e_open_locked(priv->ifp); 2492 priv->ifp->if_drv_flags |= IFF_DRV_RUNNING; 2493 PRIV_UNLOCK(priv); 2494} 2495 2496int 2497mlx5e_close_locked(struct ifnet *ifp) 2498{ 2499 struct mlx5e_priv *priv = ifp->if_softc; 2500 2501 /* check if already closed */ 2502 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 2503 return (0); 2504 2505 clear_bit(MLX5E_STATE_OPENED, &priv->state); 2506 2507 mlx5e_set_rx_mode_core(priv); 2508 mlx5e_del_all_vlan_rules(priv); 2509 if_link_state_change(priv->ifp, LINK_STATE_DOWN); 2510 mlx5e_close_flow_table(priv); 2511 mlx5e_close_tirs(priv); 2512 mlx5e_close_rqt(priv); 2513 mlx5e_close_channels(priv); 2514 mlx5_vport_dealloc_q_counter(priv->mdev, 2515 MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id); 2516 mlx5e_close_tises(priv); 2517 2518 return (0); 2519} 2520 2521#if (__FreeBSD_version >= 1100000) 2522static uint64_t 2523mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt) 2524{ 2525 struct mlx5e_priv *priv = ifp->if_softc; 2526 u64 retval; 2527 2528 /* PRIV_LOCK(priv); XXX not allowed */ 2529 switch (cnt) { 2530 case IFCOUNTER_IPACKETS: 2531 retval = priv->stats.vport.rx_packets; 2532 break; 2533 case IFCOUNTER_IERRORS: 2534 retval = priv->stats.vport.rx_error_packets + 2535 priv->stats.pport.alignment_err + 2536 priv->stats.pport.check_seq_err + 2537 
priv->stats.pport.crc_align_errors + 2538 priv->stats.pport.in_range_len_errors + 2539 priv->stats.pport.jabbers + 2540 priv->stats.pport.out_of_range_len + 2541 priv->stats.pport.oversize_pkts + 2542 priv->stats.pport.symbol_err + 2543 priv->stats.pport.too_long_errors + 2544 priv->stats.pport.undersize_pkts + 2545 priv->stats.pport.unsupported_op_rx; 2546 break; 2547 case IFCOUNTER_IQDROPS: 2548 retval = priv->stats.vport.rx_out_of_buffer + 2549 priv->stats.pport.drop_events; 2550 break; 2551 case IFCOUNTER_OPACKETS: 2552 retval = priv->stats.vport.tx_packets; 2553 break; 2554 case IFCOUNTER_OERRORS: 2555 retval = priv->stats.vport.tx_error_packets; 2556 break; 2557 case IFCOUNTER_IBYTES: 2558 retval = priv->stats.vport.rx_bytes; 2559 break; 2560 case IFCOUNTER_OBYTES: 2561 retval = priv->stats.vport.tx_bytes; 2562 break; 2563 case IFCOUNTER_IMCASTS: 2564 retval = priv->stats.vport.rx_multicast_packets; 2565 break; 2566 case IFCOUNTER_OMCASTS: 2567 retval = priv->stats.vport.tx_multicast_packets; 2568 break; 2569 case IFCOUNTER_OQDROPS: 2570 retval = priv->stats.vport.tx_queue_dropped; 2571 break; 2572 case IFCOUNTER_COLLISIONS: 2573 retval = priv->stats.pport.collisions; 2574 break; 2575 default: 2576 retval = if_get_counter_default(ifp, cnt); 2577 break; 2578 } 2579 /* PRIV_UNLOCK(priv); XXX not allowed */ 2580 return (retval); 2581} 2582#endif 2583 2584static void 2585mlx5e_set_rx_mode(struct ifnet *ifp) 2586{ 2587 struct mlx5e_priv *priv = ifp->if_softc; 2588 2589 queue_work(priv->wq, &priv->set_rx_mode_work); 2590} 2591 2592static int 2593mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2594{ 2595 struct mlx5e_priv *priv; 2596 struct ifreq *ifr; 2597 struct ifi2creq i2c; 2598 int error = 0; 2599 int mask = 0; 2600 int size_read = 0; 2601 int module_status; 2602 int module_num; 2603 int max_mtu; 2604 uint8_t read_addr; 2605 2606 priv = ifp->if_softc; 2607 2608 /* check if detaching */ 2609 if (priv == NULL || priv->gone != 0) 2610 return (ENXIO); 2611 2612 switch (command) { 2613 case SIOCSIFMTU: 2614 ifr = (struct ifreq *)data; 2615 2616 PRIV_LOCK(priv); 2617 mlx5_query_port_max_mtu(priv->mdev, &max_mtu); 2618 2619 if (ifr->ifr_mtu >= MLX5E_MTU_MIN && 2620 ifr->ifr_mtu <= MIN(MLX5E_MTU_MAX, max_mtu)) { 2621 int was_opened; 2622 2623 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); 2624 if (was_opened) 2625 mlx5e_close_locked(ifp); 2626 2627 /* set new MTU */ 2628 mlx5e_set_dev_port_mtu(ifp, ifr->ifr_mtu); 2629 2630 if (was_opened) 2631 mlx5e_open_locked(ifp); 2632 } else { 2633 error = EINVAL; 2634 if_printf(ifp, "Invalid MTU value. 
Min val: %d, Max val: %d\n", 2635 MLX5E_MTU_MIN, MIN(MLX5E_MTU_MAX, max_mtu)); 2636 } 2637 PRIV_UNLOCK(priv); 2638 break; 2639 case SIOCSIFFLAGS: 2640 if ((ifp->if_flags & IFF_UP) && 2641 (ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2642 mlx5e_set_rx_mode(ifp); 2643 break; 2644 } 2645 PRIV_LOCK(priv); 2646 if (ifp->if_flags & IFF_UP) { 2647 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2648 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) 2649 mlx5e_open_locked(ifp); 2650 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2651 mlx5_set_port_status(priv->mdev, MLX5_PORT_UP); 2652 } 2653 } else { 2654 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2655 mlx5_set_port_status(priv->mdev, 2656 MLX5_PORT_DOWN); 2657 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0) 2658 mlx5e_close_locked(ifp); 2659 mlx5e_update_carrier(priv); 2660 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2661 } 2662 } 2663 PRIV_UNLOCK(priv); 2664 break; 2665 case SIOCADDMULTI: 2666 case SIOCDELMULTI: 2667 mlx5e_set_rx_mode(ifp); 2668 break; 2669 case SIOCSIFMEDIA: 2670 case SIOCGIFMEDIA: 2671 case SIOCGIFXMEDIA: 2672 ifr = (struct ifreq *)data; 2673 error = ifmedia_ioctl(ifp, ifr, &priv->media, command); 2674 break; 2675 case SIOCSIFCAP: 2676 ifr = (struct ifreq *)data; 2677 PRIV_LOCK(priv); 2678 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2679 2680 if (mask & IFCAP_TXCSUM) { 2681 ifp->if_capenable ^= IFCAP_TXCSUM; 2682 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); 2683 2684 if (IFCAP_TSO4 & ifp->if_capenable && 2685 !(IFCAP_TXCSUM & ifp->if_capenable)) { 2686 ifp->if_capenable &= ~IFCAP_TSO4; 2687 ifp->if_hwassist &= ~CSUM_IP_TSO; 2688 if_printf(ifp, 2689 "tso4 disabled due to -txcsum.\n"); 2690 } 2691 } 2692 if (mask & IFCAP_TXCSUM_IPV6) { 2693 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; 2694 ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 2695 2696 if (IFCAP_TSO6 & ifp->if_capenable && 2697 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { 2698 ifp->if_capenable &= ~IFCAP_TSO6; 2699 ifp->if_hwassist &= ~CSUM_IP6_TSO; 2700 if_printf(ifp, 2701 "tso6 disabled due to -txcsum6.\n"); 2702 } 2703 } 2704 if (mask & IFCAP_RXCSUM) 2705 ifp->if_capenable ^= IFCAP_RXCSUM; 2706 if (mask & IFCAP_RXCSUM_IPV6) 2707 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; 2708 if (mask & IFCAP_TSO4) { 2709 if (!(IFCAP_TSO4 & ifp->if_capenable) && 2710 !(IFCAP_TXCSUM & ifp->if_capenable)) { 2711 if_printf(ifp, "enable txcsum first.\n"); 2712 error = EAGAIN; 2713 goto out; 2714 } 2715 ifp->if_capenable ^= IFCAP_TSO4; 2716 ifp->if_hwassist ^= CSUM_IP_TSO; 2717 } 2718 if (mask & IFCAP_TSO6) { 2719 if (!(IFCAP_TSO6 & ifp->if_capenable) && 2720 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { 2721 if_printf(ifp, "enable txcsum6 first.\n"); 2722 error = EAGAIN; 2723 goto out; 2724 } 2725 ifp->if_capenable ^= IFCAP_TSO6; 2726 ifp->if_hwassist ^= CSUM_IP6_TSO; 2727 } 2728 if (mask & IFCAP_VLAN_HWFILTER) { 2729 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) 2730 mlx5e_disable_vlan_filter(priv); 2731 else 2732 mlx5e_enable_vlan_filter(priv); 2733 2734 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; 2735 } 2736 if (mask & IFCAP_VLAN_HWTAGGING) 2737 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2738 if (mask & IFCAP_WOL_MAGIC) 2739 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 2740 2741 VLAN_CAPABILITIES(ifp); 2742 /* turn off LRO means also turn of HW LRO - if it's on */ 2743 if (mask & IFCAP_LRO) { 2744 int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); 2745 bool need_restart = false; 2746 2747 ifp->if_capenable ^= IFCAP_LRO; 2748 if (!(ifp->if_capenable & IFCAP_LRO)) { 2749 if 
(priv->params.hw_lro_en) { 2750 priv->params.hw_lro_en = false; 2751 need_restart = true; 2752 /* Not sure this is the correct way */ 2753 priv->params_ethtool.hw_lro = priv->params.hw_lro_en; 2754 } 2755 } 2756 if (was_opened && need_restart) { 2757 mlx5e_close_locked(ifp); 2758 mlx5e_open_locked(ifp); 2759 } 2760 } 2761out: 2762 PRIV_UNLOCK(priv); 2763 break; 2764 2765 case SIOCGI2C: 2766 ifr = (struct ifreq *)data; 2767 2768 /* 2769 * Copy from the user-space address ifr_data to the 2770 * kernel-space address i2c 2771 */ 2772 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c)); 2773 if (error) 2774 break; 2775 2776 if (i2c.len > sizeof(i2c.data)) { 2777 error = EINVAL; 2778 break; 2779 } 2780 2781 PRIV_LOCK(priv); 2782 /* Get module_num which is required for the query_eeprom */ 2783 error = mlx5_query_module_num(priv->mdev, &module_num); 2784 if (error) { 2785 if_printf(ifp, "Query module num failed, eeprom " 2786 "reading is not supported\n"); 2787 error = EINVAL; 2788 goto err_i2c; 2789 } 2790 /* Check if module is present before doing an access */ 2791 module_status = mlx5_query_module_status(priv->mdev, module_num); 2792 if (module_status != MLX5_MODULE_STATUS_PLUGGED_ENABLED && 2793 module_status != MLX5_MODULE_STATUS_PLUGGED_DISABLED) { 2794 error = EINVAL; 2795 goto err_i2c; 2796 } 2797 /* 2798 * Currently 0XA0 and 0xA2 are the only addresses permitted. 2799 * The internal conversion is as follows: 2800 */ 2801 if (i2c.dev_addr == 0xA0) 2802 read_addr = MLX5E_I2C_ADDR_LOW; 2803 else if (i2c.dev_addr == 0xA2) 2804 read_addr = MLX5E_I2C_ADDR_HIGH; 2805 else { 2806 if_printf(ifp, "Query eeprom failed, " 2807 "Invalid Address: %X\n", i2c.dev_addr); 2808 error = EINVAL; 2809 goto err_i2c; 2810 } 2811 error = mlx5_query_eeprom(priv->mdev, 2812 read_addr, MLX5E_EEPROM_LOW_PAGE, 2813 (uint32_t)i2c.offset, (uint32_t)i2c.len, module_num, 2814 (uint32_t *)i2c.data, &size_read); 2815 if (error) { 2816 if_printf(ifp, "Query eeprom failed, eeprom " 2817 "reading is not supported\n"); 2818 error = EINVAL; 2819 goto err_i2c; 2820 } 2821 2822 if (i2c.len > MLX5_EEPROM_MAX_BYTES) { 2823 error = mlx5_query_eeprom(priv->mdev, 2824 read_addr, MLX5E_EEPROM_LOW_PAGE, 2825 (uint32_t)(i2c.offset + size_read), 2826 (uint32_t)(i2c.len - size_read), module_num, 2827 (uint32_t *)(i2c.data + size_read), &size_read); 2828 } 2829 if (error) { 2830 if_printf(ifp, "Query eeprom failed, eeprom " 2831 "reading is not supported\n"); 2832 error = EINVAL; 2833 goto err_i2c; 2834 } 2835 2836 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c)); 2837err_i2c: 2838 PRIV_UNLOCK(priv); 2839 break; 2840 2841 default: 2842 error = ether_ioctl(ifp, command, data); 2843 break; 2844 } 2845 return (error); 2846} 2847 2848static int 2849mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) 2850{ 2851 /* 2852 * TODO: uncoment once FW really sets all these bits if 2853 * (!mdev->caps.eth.rss_ind_tbl_cap || !mdev->caps.eth.csum_cap || 2854 * !mdev->caps.eth.max_lso_cap || !mdev->caps.eth.vlan_cap || 2855 * !(mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD)) return 2856 * -ENOTSUPP; 2857 */ 2858 2859 /* TODO: add more must-to-have features */ 2860 2861 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 2862 return (-ENODEV); 2863 2864 return (0); 2865} 2866 2867static void 2868mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev, 2869 struct mlx5e_priv *priv, 2870 int num_comp_vectors) 2871{ 2872 /* 2873 * TODO: Consider link speed for setting "log_sq_size", 2874 * "log_rq_size" and "cq_moderation_xxx": 2875 */ 2876 
priv->params.log_sq_size = 2877 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; 2878 priv->params.log_rq_size = 2879 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; 2880 priv->params.rx_cq_moderation_usec = 2881 MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 2882 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE : 2883 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; 2884 priv->params.rx_cq_moderation_mode = 2885 MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 1 : 0; 2886 priv->params.rx_cq_moderation_pkts = 2887 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; 2888 priv->params.tx_cq_moderation_usec = 2889 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; 2890 priv->params.tx_cq_moderation_pkts = 2891 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; 2892 priv->params.min_rx_wqes = 2893 MLX5E_PARAMS_DEFAULT_MIN_RX_WQES; 2894 priv->params.rx_hash_log_tbl_sz = 2895 (order_base_2(num_comp_vectors) > 2896 MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ? 2897 order_base_2(num_comp_vectors) : 2898 MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ; 2899 priv->params.num_tc = 1; 2900 priv->params.default_vlan_prio = 0; 2901 priv->counter_set_id = -1; 2902 2903 /* 2904 * hw lro is currently defaulted to off. when it won't anymore we 2905 * will consider the HW capability: "!!MLX5_CAP_ETH(mdev, lro_cap)" 2906 */ 2907 priv->params.hw_lro_en = false; 2908 priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; 2909 2910 priv->params.cqe_zipping_en = !!MLX5_CAP_GEN(mdev, cqe_compression); 2911 2912 priv->mdev = mdev; 2913 priv->params.num_channels = num_comp_vectors; 2914 priv->order_base_2_num_channels = order_base_2(num_comp_vectors); 2915 priv->queue_mapping_channel_mask = 2916 roundup_pow_of_two(num_comp_vectors) - 1; 2917 priv->num_tc = priv->params.num_tc; 2918 priv->default_vlan_prio = priv->params.default_vlan_prio; 2919 2920 INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work); 2921 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); 2922 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); 2923} 2924 2925static int 2926mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn, 2927 struct mlx5_core_mr *mkey) 2928{ 2929 struct ifnet *ifp = priv->ifp; 2930 struct mlx5_core_dev *mdev = priv->mdev; 2931 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 2932 void *mkc; 2933 u32 *in; 2934 int err; 2935 2936 in = mlx5_vzalloc(inlen); 2937 if (in == NULL) { 2938 if_printf(ifp, "%s: failed to allocate inbox\n", __func__); 2939 return (-ENOMEM); 2940 } 2941 2942 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 2943 MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA); 2944 MLX5_SET(mkc, mkc, lw, 1); 2945 MLX5_SET(mkc, mkc, lr, 1); 2946 2947 MLX5_SET(mkc, mkc, pd, pdn); 2948 MLX5_SET(mkc, mkc, length64, 1); 2949 MLX5_SET(mkc, mkc, qpn, 0xffffff); 2950 2951 err = mlx5_core_create_mkey(mdev, mkey, in, inlen); 2952 if (err) 2953 if_printf(ifp, "%s: mlx5_core_create_mkey failed, %d\n", 2954 __func__, err); 2955 2956 kvfree(in); 2957 return (err); 2958} 2959 2960static const char *mlx5e_vport_stats_desc[] = { 2961 MLX5E_VPORT_STATS(MLX5E_STATS_DESC) 2962}; 2963 2964static const char *mlx5e_pport_stats_desc[] = { 2965 MLX5E_PPORT_STATS(MLX5E_STATS_DESC) 2966}; 2967 2968static void 2969mlx5e_priv_mtx_init(struct mlx5e_priv *priv) 2970{ 2971 mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF); 2972 sx_init(&priv->state_lock, "mlx5state"); 2973 callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0); 2974 MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock); 2975} 2976 2977static void 2978mlx5e_priv_mtx_destroy(struct 
mlx5e_priv *priv) 2979{ 2980 mtx_destroy(&priv->async_events_mtx); 2981 sx_destroy(&priv->state_lock); 2982} 2983 2984static int 2985sysctl_firmware(SYSCTL_HANDLER_ARGS) 2986{ 2987 /* 2988 * %d.%d%.d the string format. 2989 * fw_rev_{maj,min,sub} return u16, 2^16 = 65536. 2990 * We need at most 5 chars to store that. 2991 * It also has: two "." and NULL at the end, which means we need 18 2992 * (5*3 + 3) chars at most. 2993 */ 2994 char fw[18]; 2995 struct mlx5e_priv *priv = arg1; 2996 int error; 2997 2998 snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev), 2999 fw_rev_sub(priv->mdev)); 3000 error = sysctl_handle_string(oidp, fw, sizeof(fw), req); 3001 return (error); 3002} 3003 3004static void 3005mlx5e_disable_tx_dma(struct mlx5e_channel *ch) 3006{ 3007 int i; 3008 3009 for (i = 0; i < ch->num_tc; i++) 3010 mlx5e_drain_sq(&ch->sq[i]); 3011} 3012 3013static void 3014mlx5e_reset_sq_doorbell_record(struct mlx5e_sq *sq) 3015{ 3016 3017 sq->doorbell.d32[0] = cpu_to_be32(MLX5_OPCODE_NOP); 3018 sq->doorbell.d32[1] = cpu_to_be32(sq->sqn << 8); 3019 mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0); 3020 sq->doorbell.d64 = 0; 3021} 3022 3023void 3024mlx5e_resume_sq(struct mlx5e_sq *sq) 3025{ 3026 int err; 3027 3028 /* check if already enabled */ 3029 if (sq->stopped == 0) 3030 return; 3031 3032 err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_ERR, 3033 MLX5_SQC_STATE_RST); 3034 if (err != 0) { 3035 if_printf(sq->ifp, 3036 "mlx5e_modify_sq() from ERR to RST failed: %d\n", err); 3037 } 3038 3039 sq->cc = 0; 3040 sq->pc = 0; 3041 3042 /* reset doorbell prior to moving from RST to RDY */ 3043 mlx5e_reset_sq_doorbell_record(sq); 3044 3045 err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, 3046 MLX5_SQC_STATE_RDY); 3047 if (err != 0) { 3048 if_printf(sq->ifp, 3049 "mlx5e_modify_sq() from RST to RDY failed: %d\n", err); 3050 } 3051 3052 mtx_lock(&sq->lock); 3053 sq->cev_next_state = MLX5E_CEV_STATE_INITIAL; 3054 sq->stopped = 0; 3055 mtx_unlock(&sq->lock); 3056 3057} 3058 3059static void 3060mlx5e_enable_tx_dma(struct mlx5e_channel *ch) 3061{ 3062 int i; 3063 3064 for (i = 0; i < ch->num_tc; i++) 3065 mlx5e_resume_sq(&ch->sq[i]); 3066} 3067 3068static void 3069mlx5e_disable_rx_dma(struct mlx5e_channel *ch) 3070{ 3071 struct mlx5e_rq *rq = &ch->rq; 3072 int err; 3073 3074 mtx_lock(&rq->mtx); 3075 rq->enabled = 0; 3076 callout_stop(&rq->watchdog); 3077 mtx_unlock(&rq->mtx); 3078 3079 callout_drain(&rq->watchdog); 3080 3081 err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); 3082 if (err != 0) { 3083 if_printf(rq->ifp, 3084 "mlx5e_modify_rq() from RDY to RST failed: %d\n", err); 3085 } 3086 3087 while (!mlx5_wq_ll_is_empty(&rq->wq)) { 3088 msleep(1); 3089 rq->cq.mcq.comp(&rq->cq.mcq); 3090 } 3091 3092 /* 3093 * Transitioning into RST state will allow the FW to track less ERR state queues, 3094 * thus reducing the recv queue flushing time 3095 */ 3096 err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_ERR, MLX5_RQC_STATE_RST); 3097 if (err != 0) { 3098 if_printf(rq->ifp, 3099 "mlx5e_modify_rq() from ERR to RST failed: %d\n", err); 3100 } 3101} 3102 3103static void 3104mlx5e_enable_rx_dma(struct mlx5e_channel *ch) 3105{ 3106 struct mlx5e_rq *rq = &ch->rq; 3107 int err; 3108 3109 rq->wq.wqe_ctr = 0; 3110 mlx5_wq_ll_update_db_record(&rq->wq); 3111 err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); 3112 if (err != 0) { 3113 if_printf(rq->ifp, 3114 "mlx5e_modify_rq() from RST to RDY failed: %d\n", err); 3115 } 3116 3117 rq->enabled = 1; 3118 3119 rq->cq.mcq.comp(&rq->cq.mcq); 
3120} 3121 3122void 3123mlx5e_modify_tx_dma(struct mlx5e_priv *priv, uint8_t value) 3124{ 3125 int i; 3126 3127 if (priv->channel == NULL) 3128 return; 3129 3130 for (i = 0; i < priv->params.num_channels; i++) { 3131 3132 if (!priv->channel[i]) 3133 continue; 3134 3135 if (value) 3136 mlx5e_disable_tx_dma(priv->channel[i]); 3137 else 3138 mlx5e_enable_tx_dma(priv->channel[i]); 3139 } 3140} 3141 3142void 3143mlx5e_modify_rx_dma(struct mlx5e_priv *priv, uint8_t value) 3144{ 3145 int i; 3146 3147 if (priv->channel == NULL) 3148 return; 3149 3150 for (i = 0; i < priv->params.num_channels; i++) { 3151 3152 if (!priv->channel[i]) 3153 continue; 3154 3155 if (value) 3156 mlx5e_disable_rx_dma(priv->channel[i]); 3157 else 3158 mlx5e_enable_rx_dma(priv->channel[i]); 3159 } 3160} 3161 3162static void 3163mlx5e_add_hw_stats(struct mlx5e_priv *priv) 3164{ 3165 SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw), 3166 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, priv, 0, 3167 sysctl_firmware, "A", "HCA firmware version"); 3168 3169 SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw), 3170 OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0, 3171 "Board ID"); 3172} 3173 3174static int 3175mlx5e_sysctl_tx_priority_flow_control(SYSCTL_HANDLER_ARGS) 3176{ 3177 struct mlx5e_priv *priv = arg1; 3178 uint32_t tx_pfc; 3179 uint32_t value; 3180 int error; 3181 3182 PRIV_LOCK(priv); 3183 3184 tx_pfc = priv->params.tx_priority_flow_control; 3185 3186 /* get current value */ 3187 value = (tx_pfc >> arg2) & 1; 3188 3189 error = sysctl_handle_32(oidp, &value, 0, req); 3190 3191 /* range check value */ 3192 if (value != 0) 3193 priv->params.tx_priority_flow_control |= (1 << arg2); 3194 else 3195 priv->params.tx_priority_flow_control &= ~(1 << arg2); 3196 3197 /* check if update is required */ 3198 if (error == 0 && priv->gone == 0 && 3199 tx_pfc != priv->params.tx_priority_flow_control) { 3200 error = -mlx5e_set_port_pfc(priv); 3201 /* restore previous value */ 3202 if (error != 0) 3203 priv->params.tx_priority_flow_control= tx_pfc; 3204 } 3205 PRIV_UNLOCK(priv); 3206 3207 return (error); 3208} 3209 3210static int 3211mlx5e_sysctl_rx_priority_flow_control(SYSCTL_HANDLER_ARGS) 3212{ 3213 struct mlx5e_priv *priv = arg1; 3214 uint32_t rx_pfc; 3215 uint32_t value; 3216 int error; 3217 3218 PRIV_LOCK(priv); 3219 3220 rx_pfc = priv->params.rx_priority_flow_control; 3221 3222 /* get current value */ 3223 value = (rx_pfc >> arg2) & 1; 3224 3225 error = sysctl_handle_32(oidp, &value, 0, req); 3226 3227 /* range check value */ 3228 if (value != 0) 3229 priv->params.rx_priority_flow_control |= (1 << arg2); 3230 else 3231 priv->params.rx_priority_flow_control &= ~(1 << arg2); 3232 3233 /* check if update is required */ 3234 if (error == 0 && priv->gone == 0 && 3235 rx_pfc != priv->params.rx_priority_flow_control) { 3236 error = -mlx5e_set_port_pfc(priv); 3237 /* restore previous value */ 3238 if (error != 0) 3239 priv->params.rx_priority_flow_control= rx_pfc; 3240 } 3241 PRIV_UNLOCK(priv); 3242 3243 return (error); 3244} 3245 3246static void 3247mlx5e_setup_pauseframes(struct mlx5e_priv *priv) 3248{ 3249 unsigned int x; 3250 char path[96]; 3251 int error; 3252 3253 /* Only receiving pauseframes is enabled by default */ 3254 priv->params.tx_pauseframe_control = 0; 3255 priv->params.rx_pauseframe_control = 1; 3256 3257 /* disable ports flow control, PFC, by default */ 3258 priv->params.tx_priority_flow_control = 0; 3259 priv->params.rx_priority_flow_control = 0; 3260 3261#if 
(__FreeBSD_version < 1100000) 3262 /* compute path for sysctl */ 3263 snprintf(path, sizeof(path), "dev.mce.%d.tx_pauseframe_control", 3264 device_get_unit(priv->mdev->pdev->dev.bsddev)); 3265 3266 /* try to fetch tunable, if any */ 3267 TUNABLE_INT_FETCH(path, &priv->params.tx_pauseframe_control); 3268 3269 /* compute path for sysctl */ 3270 snprintf(path, sizeof(path), "dev.mce.%d.rx_pauseframe_control", 3271 device_get_unit(priv->mdev->pdev->dev.bsddev)); 3272 3273 /* try to fetch tunable, if any */ 3274 TUNABLE_INT_FETCH(path, &priv->params.rx_pauseframe_control); 3275 3276 for (x = 0; x != 8; x++) { 3277 3278 /* compute path for sysctl */ 3279 snprintf(path, sizeof(path), "dev.mce.%d.tx_priority_flow_control_%u", 3280 device_get_unit(priv->mdev->pdev->dev.bsddev), x); 3281 3282 /* try to fetch tunable, if any */ 3283 if (TUNABLE_INT_FETCH(path, &value) == 0 && value != 0) 3284 priv->params.tx_priority_flow_control |= 1 << x; 3285 3286 /* compute path for sysctl */ 3287 snprintf(path, sizeof(path), "dev.mce.%d.rx_priority_flow_control_%u", 3288 device_get_unit(priv->mdev->pdev->dev.bsddev), x); 3289 3290 /* try to fetch tunable, if any */ 3291 if (TUNABLE_INT_FETCH(path, &value) == 0 && value != 0) 3292 priv->params.rx_priority_flow_control |= 1 << x; 3293 } 3294#endif 3295 3296 /* register pauseframe SYSCTLs */ 3297 SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3298 OID_AUTO, "tx_pauseframe_control", CTLFLAG_RDTUN, 3299 &priv->params.tx_pauseframe_control, 0, 3300 "Set to enable TX pause frames. Clear to disable."); 3301 3302 SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3303 OID_AUTO, "rx_pauseframe_control", CTLFLAG_RDTUN, 3304 &priv->params.rx_pauseframe_control, 0, 3305 "Set to enable RX pause frames. Clear to disable."); 3306 3307 /* register priority_flow control, PFC, SYSCTLs */ 3308 for (x = 0; x != 8; x++) { 3309 snprintf(path, sizeof(path), "tx_priority_flow_control_%u", x); 3310 3311 SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3312 OID_AUTO, path, CTLTYPE_UINT | CTLFLAG_RWTUN | 3313 CTLFLAG_MPSAFE, priv, x, &mlx5e_sysctl_tx_priority_flow_control, "IU", 3314 "Set to enable TX ports flow control frames for given priority. Clear to disable."); 3315 3316 snprintf(path, sizeof(path), "rx_priority_flow_control_%u", x); 3317 3318 SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3319 OID_AUTO, path, CTLTYPE_UINT | CTLFLAG_RWTUN | 3320 CTLFLAG_MPSAFE, priv, x, &mlx5e_sysctl_rx_priority_flow_control, "IU", 3321 "Set to enable RX ports flow control frames for given priority. Clear to disable."); 3322 } 3323 3324 PRIV_LOCK(priv); 3325 3326 /* range check */ 3327 priv->params.tx_pauseframe_control = 3328 priv->params.tx_pauseframe_control ? 1 : 0; 3329 priv->params.rx_pauseframe_control = 3330 priv->params.rx_pauseframe_control ? 
1 : 0; 3331 3332 /* update firmware */ 3333 error = mlx5e_set_port_pause_and_pfc(priv); 3334 if (error == -EINVAL) { 3335 if_printf(priv->ifp, 3336 "Global pauseframes must be disabled before enabling PFC.\n"); 3337 priv->params.rx_priority_flow_control = 0; 3338 priv->params.tx_priority_flow_control = 0; 3339 3340 /* update firmware */ 3341 (void) mlx5e_set_port_pause_and_pfc(priv); 3342 } 3343 PRIV_UNLOCK(priv); 3344} 3345 3346static void * 3347mlx5e_create_ifp(struct mlx5_core_dev *mdev) 3348{ 3349 struct ifnet *ifp; 3350 struct mlx5e_priv *priv; 3351 u8 dev_addr[ETHER_ADDR_LEN] __aligned(4); 3352 struct sysctl_oid_list *child; 3353 int ncv = mdev->priv.eq_table.num_comp_vectors; 3354 char unit[16]; 3355 int err; 3356 int i; 3357 u32 eth_proto_cap; 3358 3359 if (mlx5e_check_required_hca_cap(mdev)) { 3360 mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n"); 3361 return (NULL); 3362 } 3363 priv = malloc(sizeof(*priv), M_MLX5EN, M_WAITOK | M_ZERO); 3364 mlx5e_priv_mtx_init(priv); 3365 3366 ifp = priv->ifp = if_alloc(IFT_ETHER); 3367 if (ifp == NULL) { 3368 mlx5_core_err(mdev, "if_alloc() failed\n"); 3369 goto err_free_priv; 3370 } 3371 ifp->if_softc = priv; 3372 if_initname(ifp, "mce", device_get_unit(mdev->pdev->dev.bsddev)); 3373 ifp->if_mtu = ETHERMTU; 3374 ifp->if_init = mlx5e_open; 3375 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 3376 ifp->if_ioctl = mlx5e_ioctl; 3377 ifp->if_transmit = mlx5e_xmit; 3378 ifp->if_qflush = if_qflush; 3379#if (__FreeBSD_version >= 1100000) 3380 ifp->if_get_counter = mlx5e_get_counter; 3381#endif 3382 ifp->if_snd.ifq_maxlen = ifqmaxlen; 3383 /* 3384 * Set driver features 3385 */ 3386 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6; 3387 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 3388 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER; 3389 ifp->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU; 3390 ifp->if_capabilities |= IFCAP_LRO; 3391 ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO; 3392 ifp->if_capabilities |= IFCAP_HWSTATS; 3393 3394 /* set TSO limits so that we don't have to drop TX packets */ 3395 ifp->if_hw_tsomax = MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 3396 ifp->if_hw_tsomaxsegcount = MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */; 3397 ifp->if_hw_tsomaxsegsize = MLX5E_MAX_TX_MBUF_SIZE; 3398 3399 ifp->if_capenable = ifp->if_capabilities; 3400 ifp->if_hwassist = 0; 3401 if (ifp->if_capenable & IFCAP_TSO) 3402 ifp->if_hwassist |= CSUM_TSO; 3403 if (ifp->if_capenable & IFCAP_TXCSUM) 3404 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP); 3405 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) 3406 ifp->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 3407 3408 /* ifnet sysctl tree */ 3409 sysctl_ctx_init(&priv->sysctl_ctx); 3410 priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev), 3411 OID_AUTO, ifp->if_dname, CTLFLAG_RD, 0, "MLX5 ethernet - interface name"); 3412 if (priv->sysctl_ifnet == NULL) { 3413 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n"); 3414 goto err_free_sysctl; 3415 } 3416 snprintf(unit, sizeof(unit), "%d", ifp->if_dunit); 3417 priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3418 OID_AUTO, unit, CTLFLAG_RD, 0, "MLX5 ethernet - interface unit"); 3419 if (priv->sysctl_ifnet == NULL) { 3420 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n"); 3421 goto err_free_sysctl; 3422 } 3423 3424 /* HW sysctl tree */ 3425 child = 
SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev)); 3426 priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child, 3427 OID_AUTO, "hw", CTLFLAG_RD, 0, "MLX5 ethernet dev hw"); 3428 if (priv->sysctl_hw == NULL) { 3429 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n"); 3430 goto err_free_sysctl; 3431 } 3432 mlx5e_build_ifp_priv(mdev, priv, ncv); 3433 3434 snprintf(unit, sizeof(unit), "mce%u_wq", 3435 device_get_unit(mdev->pdev->dev.bsddev)); 3436 priv->wq = alloc_workqueue(unit, 0, 1); 3437 if (priv->wq == NULL) { 3438 if_printf(ifp, "%s: alloc_workqueue failed\n", __func__); 3439 goto err_free_sysctl; 3440 } 3441 3442 err = mlx5_alloc_map_uar(mdev, &priv->cq_uar); 3443 if (err) { 3444 if_printf(ifp, "%s: mlx5_alloc_map_uar failed, %d\n", 3445 __func__, err); 3446 goto err_free_wq; 3447 } 3448 err = mlx5_core_alloc_pd(mdev, &priv->pdn); 3449 if (err) { 3450 if_printf(ifp, "%s: mlx5_core_alloc_pd failed, %d\n", 3451 __func__, err); 3452 goto err_unmap_free_uar; 3453 } 3454 err = mlx5_alloc_transport_domain(mdev, &priv->tdn); 3455 if (err) { 3456 if_printf(ifp, "%s: mlx5_alloc_transport_domain failed, %d\n", 3457 __func__, err); 3458 goto err_dealloc_pd; 3459 } 3460 err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr); 3461 if (err) { 3462 if_printf(ifp, "%s: mlx5e_create_mkey failed, %d\n", 3463 __func__, err); 3464 goto err_dealloc_transport_domain; 3465 } 3466 mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr); 3467 3468 /* check if we should generate a random MAC address */ 3469 if (MLX5_CAP_GEN(priv->mdev, vport_group_manager) == 0 && 3470 is_zero_ether_addr(dev_addr)) { 3471 random_ether_addr(dev_addr); 3472 if_printf(ifp, "Assigned random MAC address\n"); 3473 } 3474 3475 /* set default MTU */ 3476 mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu); 3477 3478 /* Set desc */ 3479 device_set_desc(mdev->pdev->dev.bsddev, mlx5e_version); 3480 3481 /* Set default media status */ 3482 priv->media_status_last = IFM_AVALID; 3483 priv->media_active_last = IFM_ETHER | IFM_AUTO | 3484 IFM_ETH_RXPAUSE | IFM_FDX; 3485 3486 /* setup default pauseframes configuration */ 3487 mlx5e_setup_pauseframes(priv); 3488 3489 err = mlx5_query_port_proto_cap(mdev, ð_proto_cap, MLX5_PTYS_EN); 3490 if (err) { 3491 eth_proto_cap = 0; 3492 if_printf(ifp, "%s: Query port media capability failed, %d\n", 3493 __func__, err); 3494 } 3495 3496 /* Setup supported medias */ 3497 ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK, 3498 mlx5e_media_change, mlx5e_media_status); 3499 3500 for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { 3501 if (mlx5e_mode_table[i].baudrate == 0) 3502 continue; 3503 if (MLX5E_PROT_MASK(i) & eth_proto_cap) { 3504 ifmedia_add(&priv->media, 3505 mlx5e_mode_table[i].subtype | 3506 IFM_ETHER, 0, NULL); 3507 ifmedia_add(&priv->media, 3508 mlx5e_mode_table[i].subtype | 3509 IFM_ETHER | IFM_FDX | 3510 IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL); 3511 } 3512 } 3513 3514 ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL); 3515 ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX | 3516 IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL); 3517 3518 /* Set autoselect by default */ 3519 ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX | 3520 IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE); 3521 ether_ifattach(ifp, dev_addr); 3522 3523 /* Register for VLAN events */ 3524 priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, 3525 mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST); 3526 priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, 3527 mlx5e_vlan_rx_kill_vid, priv, 
EVENTHANDLER_PRI_FIRST); 3528 3529 /* Link is down by default */ 3530 if_link_state_change(ifp, LINK_STATE_DOWN); 3531 3532 mlx5e_enable_async_events(priv); 3533 3534 mlx5e_add_hw_stats(priv); 3535 3536 mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3537 "vstats", mlx5e_vport_stats_desc, MLX5E_VPORT_STATS_NUM, 3538 priv->stats.vport.arg); 3539 3540 mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), 3541 "pstats", mlx5e_pport_stats_desc, MLX5E_PPORT_STATS_NUM, 3542 priv->stats.pport.arg); 3543 3544 mlx5e_create_ethtool(priv); 3545 3546 mtx_lock(&priv->async_events_mtx); 3547 mlx5e_update_stats(priv); 3548 mtx_unlock(&priv->async_events_mtx); 3549 3550 return (priv); 3551 3552err_dealloc_transport_domain: 3553 mlx5_dealloc_transport_domain(mdev, priv->tdn); 3554 3555err_dealloc_pd: 3556 mlx5_core_dealloc_pd(mdev, priv->pdn); 3557 3558err_unmap_free_uar: 3559 mlx5_unmap_free_uar(mdev, &priv->cq_uar); 3560 3561err_free_wq: 3562 destroy_workqueue(priv->wq); 3563 3564err_free_sysctl: 3565 sysctl_ctx_free(&priv->sysctl_ctx); 3566 3567 if_free(ifp); 3568 3569err_free_priv: 3570 mlx5e_priv_mtx_destroy(priv); 3571 free(priv, M_MLX5EN); 3572 return (NULL); 3573} 3574 3575static void 3576mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv) 3577{ 3578 struct mlx5e_priv *priv = vpriv; 3579 struct ifnet *ifp = priv->ifp; 3580 3581 /* don't allow more IOCTLs */ 3582 priv->gone = 1; 3583 3584 /* 3585 * Clear the device description to avoid use after free, 3586 * because the bsddev is not destroyed when this module is 3587 * unloaded: 3588 */ 3589 device_set_desc(mdev->pdev->dev.bsddev, NULL); 3590 3591 /* XXX wait a bit to allow IOCTL handlers to complete */ 3592 pause("W", hz); 3593 3594 /* stop watchdog timer */ 3595 callout_drain(&priv->watchdog); 3596 3597 if (priv->vlan_attach != NULL) 3598 EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach); 3599 if (priv->vlan_detach != NULL) 3600 EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach); 3601 3602 /* make sure device gets closed */ 3603 PRIV_LOCK(priv); 3604 mlx5e_close_locked(ifp); 3605 PRIV_UNLOCK(priv); 3606 3607 /* unregister device */ 3608 ifmedia_removeall(&priv->media); 3609 ether_ifdetach(ifp); 3610 if_free(ifp); 3611 3612 /* destroy all remaining sysctl nodes */ 3613 if (priv->sysctl_debug) 3614 sysctl_ctx_free(&priv->stats.port_stats_debug.ctx); 3615 sysctl_ctx_free(&priv->stats.vport.ctx); 3616 sysctl_ctx_free(&priv->stats.pport.ctx); 3617 sysctl_ctx_free(&priv->sysctl_ctx); 3618 3619 mlx5_core_destroy_mkey(priv->mdev, &priv->mr); 3620 mlx5_dealloc_transport_domain(priv->mdev, priv->tdn); 3621 mlx5_core_dealloc_pd(priv->mdev, priv->pdn); 3622 mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); 3623 mlx5e_disable_async_events(priv); 3624 destroy_workqueue(priv->wq); 3625 mlx5e_priv_mtx_destroy(priv); 3626 free(priv, M_MLX5EN); 3627} 3628 3629static void * 3630mlx5e_get_ifp(void *vpriv) 3631{ 3632 struct mlx5e_priv *priv = vpriv; 3633 3634 return (priv->ifp); 3635} 3636 3637static struct mlx5_interface mlx5e_interface = { 3638 .add = mlx5e_create_ifp, 3639 .remove = mlx5e_destroy_ifp, 3640 .event = mlx5e_async_event, 3641 .protocol = MLX5_INTERFACE_PROTOCOL_ETH, 3642 .get_dev = mlx5e_get_ifp, 3643}; 3644 3645void 3646mlx5e_init(void) 3647{ 3648 mlx5_register_interface(&mlx5e_interface); 3649} 3650 3651void 3652mlx5e_cleanup(void) 3653{ 3654 mlx5_unregister_interface(&mlx5e_interface); 3655} 3656 3657module_init_order(mlx5e_init, SI_ORDER_THIRD); 
3658module_exit_order(mlx5e_cleanup, SI_ORDER_THIRD); 3659 3660#if (__FreeBSD_version >= 1100000) 3661MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1); 3662#endif 3663MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1); 3664MODULE_VERSION(mlx5en, 1); 3665