en_netdev.c revision 322531
/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif

#include <linux/list.h>
#include <linux/if_ether.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include <sys/sockio.h>
#include <sys/sysctl.h>

#include "mlx4_en.h"
#include "en_port.h"

static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv);
static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv);

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
	int done;

	if (!priv->port_up)
		return LL_FLUSH_FAILED;

	if (!mlx4_en_cq_lock_poll(cq))
		return LL_FLUSH_BUSY;

	done = mlx4_en_process_rx_cq(dev, cq, 4);
#ifdef LL_EXTENDED_STATS
	if (done)
		rx_ring->cleaned += done;
	else
		rx_ring->misses++;
#endif

	mlx4_en_cq_unlock_poll(cq);

	return done;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */

#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return -EPROTONOSUPPORT;
	}
};

static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id < 0) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}

static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct hlist_node *elem;
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter, elem,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring *rx_ring)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

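	/*
	 * Note: each pass examines at most MLX4_EN_FILTER_EXPIRY_QUOTA
	 * filters; moving the list head just past the last retained entry
	 * above lets the next expiry pass resume where this one stopped.
	 */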
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif

static void mlx4_en_vlan_rx_add_vid(void *arg, struct net_device *dev, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	if (arg != priv)
		return;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_dbg(HW, priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

}

static void mlx4_en_vlan_rx_kill_vid(void *arg, struct net_device *dev, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	if (arg != priv)
		return;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

}

static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}

static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	struct mlx4_mac_entry *entry;
	int index = 0;
	int err = 0;
	u64 reg_id;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev));

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       IF_LLADDR(priv->dev));
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       IF_LLADDR(priv->dev));
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, 0);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		goto qp_err;
	}

	err = mlx4_en_uc_steer_add(priv, IF_LLADDR(priv->dev), qpn, &reg_id);
	if (err)
		goto steer_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}
	memcpy(entry->mac, IF_LLADDR(priv->dev), sizeof(entry->mac));
	entry->reg_id = reg_id;

	hlist_add_head(&entry->hlist,
		       &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	mlx4_en_uc_steer_release(priv, IF_LLADDR(priv->dev), *qpn, reg_id);

steer_err:
	mlx4_qp_release_range(dev, *qpn, 1);

qp_err:
	mlx4_unregister_mac(dev, priv->port, mac);
	return err;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;
	u64 mac;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev));
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       IF_LLADDR(priv->dev));
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		struct mlx4_mac_entry *entry;
		struct hlist_node *n, *tmp;
		struct hlist_head *bucket;
		unsigned int i;

		for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
			bucket = &priv->mac_hash[i];
			hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
				mac = mlx4_mac_to_u64(entry->mac);
				en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
				       entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);

				mlx4_unregister_mac(dev, priv->port, mac);
				hlist_del(&entry->hlist);
				kfree(entry);
			}
		}

		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct ifmultiaddr *ifma;
	struct mlx4_en_mc_list *tmp;
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if_maddr_rlock(dev);
	TAILQ_FOREACH(ifma, &dev->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (((struct sockaddr_dl *)ifma->ifma_addr)->sdl_alen !=
		    ETHER_ADDR_LEN)
			continue;
		/* Make sure the list didn't grow. */
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (tmp == NULL) {
			en_err(priv, "Failed to allocate multicast list\n");
			break;
		}
		memcpy(tmp->addr,
		       LLADDR((struct sockaddr_dl *)ifma->ifma_addr), ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
	if_maddr_runlock(dev);
}

static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
	 * These are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * mark them as need to add
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc) {
				en_err(priv, "Failed to allocate current multicast list\n");
				return;
			}
			memcpy(new_mc, src_tmp,
			       sizeof(struct mlx4_en_mc_list));
			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;
	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	}
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}
}

static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u8 mc_list[16] = {0};
	int err = 0;
	u64 mcast_addr = 0;


	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->if_flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		mlx4_en_cache_mclist(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0,
MLX4_MCAST_CONFIG); 913 } 914 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 915 0, MLX4_MCAST_ENABLE); 916 if (err) 917 en_err(priv, "Failed enabling multicast filter\n"); 918 919 update_mclist_flags(priv, &priv->curr_list, &priv->mc_list); 920 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { 921 if (mclist->action == MCLIST_REM) { 922 /* detach this address and delete from list */ 923 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 924 mc_list[5] = priv->port; 925 err = mlx4_multicast_detach(mdev->dev, 926 &priv->rss_map.indir_qp, 927 mc_list, 928 MLX4_PROT_ETH, 929 mclist->reg_id); 930 if (err) 931 en_err(priv, "Fail to detach multicast address\n"); 932 933 /* remove from list */ 934 list_del(&mclist->list); 935 kfree(mclist); 936 } else if (mclist->action == MCLIST_ADD) { 937 /* attach the address */ 938 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 939 /* needed for B0 steering support */ 940 mc_list[5] = priv->port; 941 err = mlx4_multicast_attach(mdev->dev, 942 &priv->rss_map.indir_qp, 943 mc_list, 944 priv->port, 0, 945 MLX4_PROT_ETH, 946 &mclist->reg_id); 947 if (err) 948 en_err(priv, "Fail to attach multicast address\n"); 949 950 } 951 } 952 } 953} 954 955static void mlx4_en_do_set_rx_mode(struct work_struct *work) 956{ 957 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 958 rx_mode_task); 959 struct mlx4_en_dev *mdev = priv->mdev; 960 struct net_device *dev = priv->dev; 961 962 963 mutex_lock(&mdev->state_lock); 964 if (!mdev->device_up) { 965 en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n"); 966 goto out; 967 } 968 if (!priv->port_up) { 969 en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n"); 970 goto out; 971 } 972 if (!mlx4_en_QUERY_PORT(mdev, priv->port)) { 973 if (priv->port_state.link_state) { 974 priv->last_link_state = MLX4_DEV_EVENT_PORT_UP; 975 /* update netif baudrate */ 976 priv->dev->if_baudrate = 977 IF_Mbps(priv->port_state.link_speed); 978 /* Important note: the following call for if_link_state_change 979 * is needed for interface up scenario (start port, link state 980 * change) */ 981 if_link_state_change(priv->dev, LINK_STATE_UP); 982 en_dbg(HW, priv, "Link Up\n"); 983 } 984 } 985 986 /* Promsicuous mode: disable all filters */ 987 if ((dev->if_flags & IFF_PROMISC) || 988 (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) { 989 mlx4_en_set_promisc_mode(priv, mdev); 990 goto out; 991 } 992 993 /* Not in promiscuous mode */ 994 if (priv->flags & MLX4_EN_FLAG_PROMISC) 995 mlx4_en_clear_promisc_mode(priv, mdev); 996 997 mlx4_en_do_multicast(priv, dev, mdev); 998out: 999 mutex_unlock(&mdev->state_lock); 1000} 1001 1002#ifdef CONFIG_NET_POLL_CONTROLLER 1003static void mlx4_en_netpoll(struct net_device *dev) 1004{ 1005 struct mlx4_en_priv *priv = netdev_priv(dev); 1006 struct mlx4_en_cq *cq; 1007 unsigned long flags; 1008 int i; 1009 1010 for (i = 0; i < priv->rx_ring_num; i++) { 1011 cq = priv->rx_cq[i]; 1012 spin_lock_irqsave(&cq->lock, flags); 1013 napi_synchronize(&cq->napi); 1014 mlx4_en_process_rx_cq(dev, cq, 0); 1015 spin_unlock_irqrestore(&cq->lock, flags); 1016 } 1017} 1018#endif 1019 1020static void mlx4_en_watchdog_timeout(void *arg) 1021{ 1022 struct mlx4_en_priv *priv = arg; 1023 struct mlx4_en_dev *mdev = priv->mdev; 1024 1025 en_dbg(DRV, priv, "Scheduling watchdog\n"); 1026 queue_work(mdev->workqueue, &priv->watchdog_task); 1027 if (priv->port_up) 1028 callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT, 1029 mlx4_en_watchdog_timeout, priv); 1030} 1031 1032 1033 1034static void 
mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) 1035{ 1036 struct mlx4_en_cq *cq; 1037 int i; 1038 1039 /* If we haven't received a specific coalescing setting 1040 * (module param), we set the moderation parameters as follows: 1041 * - moder_cnt is set to the number of mtu sized packets to 1042 * satisfy our coelsing target. 1043 * - moder_time is set to a fixed value. 1044 */ 1045 priv->rx_frames = MLX4_EN_RX_COAL_TARGET / priv->dev->if_mtu + 1; 1046 priv->rx_usecs = MLX4_EN_RX_COAL_TIME; 1047 priv->tx_frames = MLX4_EN_TX_COAL_PKTS; 1048 priv->tx_usecs = MLX4_EN_TX_COAL_TIME; 1049 en_dbg(INTR, priv, "Default coalesing params for mtu: %u - " 1050 "rx_frames:%d rx_usecs:%d\n", 1051 (unsigned)priv->dev->if_mtu, priv->rx_frames, priv->rx_usecs); 1052 1053 /* Setup cq moderation params */ 1054 for (i = 0; i < priv->rx_ring_num; i++) { 1055 cq = priv->rx_cq[i]; 1056 cq->moder_cnt = priv->rx_frames; 1057 cq->moder_time = priv->rx_usecs; 1058 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF; 1059 priv->last_moder_packets[i] = 0; 1060 priv->last_moder_bytes[i] = 0; 1061 } 1062 1063 for (i = 0; i < priv->tx_ring_num; i++) { 1064 cq = priv->tx_cq[i]; 1065 cq->moder_cnt = priv->tx_frames; 1066 cq->moder_time = priv->tx_usecs; 1067 } 1068 1069 /* Reset auto-moderation params */ 1070 priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW; 1071 priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW; 1072 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH; 1073 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH; 1074 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL; 1075 priv->adaptive_rx_coal = 1; 1076 priv->last_moder_jiffies = 0; 1077 priv->last_moder_tx_packets = 0; 1078} 1079 1080static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) 1081{ 1082 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies); 1083 struct mlx4_en_cq *cq; 1084 unsigned long packets; 1085 unsigned long rate; 1086 unsigned long avg_pkt_size; 1087 unsigned long rx_packets; 1088 unsigned long rx_bytes; 1089 unsigned long rx_pkt_diff; 1090 int moder_time; 1091 int ring, err; 1092 1093 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ) 1094 return; 1095 1096 for (ring = 0; ring < priv->rx_ring_num; ring++) { 1097 spin_lock(&priv->stats_lock); 1098 rx_packets = priv->rx_ring[ring]->packets; 1099 rx_bytes = priv->rx_ring[ring]->bytes; 1100 spin_unlock(&priv->stats_lock); 1101 1102 rx_pkt_diff = ((unsigned long) (rx_packets - 1103 priv->last_moder_packets[ring])); 1104 packets = rx_pkt_diff; 1105 rate = packets * HZ / period; 1106 avg_pkt_size = packets ? 
((unsigned long) (rx_bytes - 1107 priv->last_moder_bytes[ring])) / packets : 0; 1108 1109 /* Apply auto-moderation only when packet rate 1110 * exceeds a rate that it matters */ 1111 if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) && 1112 avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) { 1113 if (rate < priv->pkt_rate_low) 1114 moder_time = priv->rx_usecs_low; 1115 else if (rate > priv->pkt_rate_high) 1116 moder_time = priv->rx_usecs_high; 1117 else 1118 moder_time = (rate - priv->pkt_rate_low) * 1119 (priv->rx_usecs_high - priv->rx_usecs_low) / 1120 (priv->pkt_rate_high - priv->pkt_rate_low) + 1121 priv->rx_usecs_low; 1122 } else { 1123 moder_time = priv->rx_usecs_low; 1124 } 1125 1126 if (moder_time != priv->last_moder_time[ring]) { 1127 priv->last_moder_time[ring] = moder_time; 1128 cq = priv->rx_cq[ring]; 1129 cq->moder_time = moder_time; 1130 err = mlx4_en_set_cq_moder(priv, cq); 1131 if (err) 1132 en_err(priv, "Failed modifying moderation for cq:%d\n", 1133 ring); 1134 } 1135 priv->last_moder_packets[ring] = rx_packets; 1136 priv->last_moder_bytes[ring] = rx_bytes; 1137 } 1138 1139 priv->last_moder_jiffies = jiffies; 1140} 1141 1142static void mlx4_en_do_get_stats(struct work_struct *work) 1143{ 1144 struct delayed_work *delay = to_delayed_work(work); 1145 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, 1146 stats_task); 1147 struct mlx4_en_dev *mdev = priv->mdev; 1148 int err; 1149 1150 mutex_lock(&mdev->state_lock); 1151 if (mdev->device_up) { 1152 if (priv->port_up) { 1153 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); 1154 if (err) 1155 en_dbg(HW, priv, "Could not update stats\n"); 1156 1157 mlx4_en_auto_moderation(priv); 1158 } 1159 1160 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 1161 } 1162 mutex_unlock(&mdev->state_lock); 1163} 1164 1165/* mlx4_en_service_task - Run service task for tasks that needed to be done 1166 * periodically 1167 */ 1168static void mlx4_en_service_task(struct work_struct *work) 1169{ 1170 struct delayed_work *delay = to_delayed_work(work); 1171 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, 1172 service_task); 1173 struct mlx4_en_dev *mdev = priv->mdev; 1174 1175 mutex_lock(&mdev->state_lock); 1176 if (mdev->device_up) { 1177 queue_delayed_work(mdev->workqueue, &priv->service_task, 1178 SERVICE_TASK_DELAY); 1179 } 1180 mutex_unlock(&mdev->state_lock); 1181} 1182 1183static void mlx4_en_linkstate(struct work_struct *work) 1184{ 1185 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1186 linkstate_task); 1187 struct mlx4_en_dev *mdev = priv->mdev; 1188 int linkstate = priv->link_state; 1189 1190 mutex_lock(&mdev->state_lock); 1191 /* If observable port state changed set carrier state and 1192 * report to system log */ 1193 if (priv->last_link_state != linkstate) { 1194 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { 1195 en_info(priv, "Link Down\n"); 1196 if_link_state_change(priv->dev, LINK_STATE_DOWN); 1197 /* update netif baudrate */ 1198 priv->dev->if_baudrate = 0; 1199 1200 /* make sure the port is up before notifying the OS. 1201 * This is tricky since we get here on INIT_PORT and 1202 * in such case we can't tell the OS the port is up. 1203 * To solve this there is a call to if_link_state_change 1204 * in set_rx_mode. 
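 * (mlx4_en_do_set_rx_mode() re-queries the port state and reports
 * LINK_STATE_UP once the port is actually running.)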
1205 * */ 1206 } else if (priv->port_up && (linkstate == MLX4_DEV_EVENT_PORT_UP)){ 1207 if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) 1208 en_info(priv, "Query port failed\n"); 1209 priv->dev->if_baudrate = 1210 IF_Mbps(priv->port_state.link_speed); 1211 en_info(priv, "Link Up\n"); 1212 if_link_state_change(priv->dev, LINK_STATE_UP); 1213 } 1214 } 1215 priv->last_link_state = linkstate; 1216 mutex_unlock(&mdev->state_lock); 1217} 1218 1219 1220int mlx4_en_start_port(struct net_device *dev) 1221{ 1222 struct mlx4_en_priv *priv = netdev_priv(dev); 1223 struct mlx4_en_dev *mdev = priv->mdev; 1224 struct mlx4_en_cq *cq; 1225 struct mlx4_en_tx_ring *tx_ring; 1226 int rx_index = 0; 1227 int tx_index = 0; 1228 int err = 0; 1229 int i; 1230 int j; 1231 u8 mc_list[16] = {0}; 1232 1233 1234 if (priv->port_up) { 1235 en_dbg(DRV, priv, "start port called while port already up\n"); 1236 return 0; 1237 } 1238 1239 INIT_LIST_HEAD(&priv->mc_list); 1240 INIT_LIST_HEAD(&priv->curr_list); 1241 INIT_LIST_HEAD(&priv->ethtool_list); 1242 1243 /* Calculate Rx buf size */ 1244 dev->if_mtu = min(dev->if_mtu, priv->max_mtu); 1245 mlx4_en_calc_rx_buf(dev); 1246 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_mb_size); 1247 1248 /* Configure rx cq's and rings */ 1249 err = mlx4_en_activate_rx_rings(priv); 1250 if (err) { 1251 en_err(priv, "Failed to activate RX rings\n"); 1252 return err; 1253 } 1254 for (i = 0; i < priv->rx_ring_num; i++) { 1255 cq = priv->rx_cq[i]; 1256 1257 mlx4_en_cq_init_lock(cq); 1258 err = mlx4_en_activate_cq(priv, cq, i); 1259 if (err) { 1260 en_err(priv, "Failed activating Rx CQ\n"); 1261 goto cq_err; 1262 } 1263 for (j = 0; j < cq->size; j++) 1264 cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK; 1265 err = mlx4_en_set_cq_moder(priv, cq); 1266 if (err) { 1267 en_err(priv, "Failed setting cq moderation parameters"); 1268 mlx4_en_deactivate_cq(priv, cq); 1269 goto cq_err; 1270 } 1271 mlx4_en_arm_cq(priv, cq); 1272 priv->rx_ring[i]->cqn = cq->mcq.cqn; 1273 ++rx_index; 1274 } 1275 1276 /* Set qp number */ 1277 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port); 1278 err = mlx4_en_get_qp(priv); 1279 if (err) { 1280 en_err(priv, "Failed getting eth qp\n"); 1281 goto cq_err; 1282 } 1283 mdev->mac_removed[priv->port] = 0; 1284 1285 /* gets default allocated counter index from func cap */ 1286 /* or sink counter index if no resources */ 1287 priv->counter_index = mdev->dev->caps.def_counter_index[priv->port - 1]; 1288 1289 en_dbg(DRV, priv, "%s: default counter index %d for port %d\n", 1290 __func__, priv->counter_index, priv->port); 1291 1292 err = mlx4_en_config_rss_steer(priv); 1293 if (err) { 1294 en_err(priv, "Failed configuring rss steering\n"); 1295 goto mac_err; 1296 } 1297 1298 err = mlx4_en_create_drop_qp(priv); 1299 if (err) 1300 goto rss_err; 1301 1302 /* Configure tx cq's and rings */ 1303 for (i = 0; i < priv->tx_ring_num; i++) { 1304 /* Configure cq */ 1305 cq = priv->tx_cq[i]; 1306 err = mlx4_en_activate_cq(priv, cq, i); 1307 if (err) { 1308 en_err(priv, "Failed activating Tx CQ\n"); 1309 goto tx_err; 1310 } 1311 err = mlx4_en_set_cq_moder(priv, cq); 1312 if (err) { 1313 en_err(priv, "Failed setting cq moderation parameters"); 1314 mlx4_en_deactivate_cq(priv, cq); 1315 goto tx_err; 1316 } 1317 en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i); 1318 cq->buf->wqe_index = cpu_to_be16(0xffff); 1319 1320 /* Configure ring */ 1321 tx_ring = priv->tx_ring[i]; 1322 1323 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, 1324 i / 
priv->num_tx_rings_p_up); 1325 if (err) { 1326 en_err(priv, "Failed activating Tx ring %d\n", i); 1327 mlx4_en_deactivate_cq(priv, cq); 1328 goto tx_err; 1329 } 1330 1331 /* Arm CQ for TX completions */ 1332 mlx4_en_arm_cq(priv, cq); 1333 1334 /* Set initial ownership of all Tx TXBBs to SW (1) */ 1335 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) 1336 *((u32 *) (tx_ring->buf + j)) = 0xffffffff; 1337 ++tx_index; 1338 } 1339 1340 /* Configure port */ 1341 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 1342 priv->rx_mb_size, 1343 priv->prof->tx_pause, 1344 priv->prof->tx_ppp, 1345 priv->prof->rx_pause, 1346 priv->prof->rx_ppp); 1347 if (err) { 1348 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", 1349 priv->port, err); 1350 goto tx_err; 1351 } 1352 /* Set default qp number */ 1353 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); 1354 if (err) { 1355 en_err(priv, "Failed setting default qp numbers\n"); 1356 goto tx_err; 1357 } 1358 1359 /* Init port */ 1360 en_dbg(HW, priv, "Initializing port\n"); 1361 err = mlx4_INIT_PORT(mdev->dev, priv->port); 1362 if (err) { 1363 en_err(priv, "Failed Initializing port\n"); 1364 goto tx_err; 1365 } 1366 1367 /* Attach rx QP to bradcast address */ 1368 memset(&mc_list[10], 0xff, ETH_ALEN); 1369 mc_list[5] = priv->port; /* needed for B0 steering support */ 1370 if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, 1371 priv->port, 0, MLX4_PROT_ETH, 1372 &priv->broadcast_id)) 1373 mlx4_warn(mdev, "Failed Attaching Broadcast\n"); 1374 1375 /* Must redo promiscuous mode setup. */ 1376 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); 1377 1378 /* Schedule multicast task to populate multicast list */ 1379 queue_work(mdev->workqueue, &priv->rx_mode_task); 1380 1381 priv->port_up = true; 1382 1383 /* Enable the queues. 
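 * (clear IFF_DRV_OACTIVE and set IFF_DRV_RUNNING so the stack resumes
 * handing us packets)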
*/ 1384 dev->if_drv_flags &= ~IFF_DRV_OACTIVE; 1385 dev->if_drv_flags |= IFF_DRV_RUNNING; 1386#ifdef CONFIG_DEBUG_FS 1387 mlx4_en_create_debug_files(priv); 1388#endif 1389 callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT, 1390 mlx4_en_watchdog_timeout, priv); 1391 1392 1393 return 0; 1394 1395tx_err: 1396 while (tx_index--) { 1397 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]); 1398 mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]); 1399 } 1400 mlx4_en_destroy_drop_qp(priv); 1401rss_err: 1402 mlx4_en_release_rss_steer(priv); 1403mac_err: 1404 mlx4_en_put_qp(priv); 1405cq_err: 1406 while (rx_index--) 1407 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); 1408 for (i = 0; i < priv->rx_ring_num; i++) 1409 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1410 1411 return err; /* need to close devices */ 1412} 1413 1414 1415void mlx4_en_stop_port(struct net_device *dev) 1416{ 1417 struct mlx4_en_priv *priv = netdev_priv(dev); 1418 struct mlx4_en_dev *mdev = priv->mdev; 1419 struct mlx4_en_mc_list *mclist, *tmp; 1420 int i; 1421 u8 mc_list[16] = {0}; 1422 1423 if (!priv->port_up) { 1424 en_dbg(DRV, priv, "stop port called while port already down\n"); 1425 return; 1426 } 1427 1428#ifdef CONFIG_DEBUG_FS 1429 mlx4_en_delete_debug_files(priv); 1430#endif 1431 1432 /* close port*/ 1433 mlx4_CLOSE_PORT(mdev->dev, priv->port); 1434 1435 /* Set port as not active */ 1436 priv->port_up = false; 1437 if (priv->counter_index != 0xff) { 1438 mlx4_counter_free(mdev->dev, priv->port, priv->counter_index); 1439 priv->counter_index = 0xff; 1440 } 1441 1442 /* Promsicuous mode */ 1443 if (mdev->dev->caps.steering_mode == 1444 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1445 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | 1446 MLX4_EN_FLAG_MC_PROMISC); 1447 mlx4_flow_steer_promisc_remove(mdev->dev, 1448 priv->port, 1449 MLX4_FS_ALL_DEFAULT); 1450 mlx4_flow_steer_promisc_remove(mdev->dev, 1451 priv->port, 1452 MLX4_FS_MC_DEFAULT); 1453 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) { 1454 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 1455 1456 /* Disable promiscouos mode */ 1457 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, 1458 priv->port); 1459 1460 /* Disable Multicast promisc */ 1461 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 1462 mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, 1463 priv->port); 1464 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 1465 } 1466 } 1467 1468 /* Detach All multicasts */ 1469 memset(&mc_list[10], 0xff, ETH_ALEN); 1470 mc_list[5] = priv->port; /* needed for B0 steering support */ 1471 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, 1472 MLX4_PROT_ETH, priv->broadcast_id); 1473 list_for_each_entry(mclist, &priv->curr_list, list) { 1474 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 1475 mc_list[5] = priv->port; 1476 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, 1477 mc_list, MLX4_PROT_ETH, mclist->reg_id); 1478 } 1479 mlx4_en_clear_list(dev); 1480 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { 1481 list_del(&mclist->list); 1482 kfree(mclist); 1483 } 1484 1485 /* Flush multicast filter */ 1486 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); 1487 mlx4_en_destroy_drop_qp(priv); 1488 1489 /* Free TX Rings */ 1490 for (i = 0; i < priv->tx_ring_num; i++) { 1491 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]); 1492 mlx4_en_deactivate_cq(priv, priv->tx_cq[i]); 1493 } 1494 msleep(10); 1495 1496 for (i = 0; i < priv->tx_ring_num; i++) 1497 mlx4_en_free_tx_buf(dev, priv->tx_ring[i]); 1498 1499 /* Free RSS 
qps */ 1500 mlx4_en_release_rss_steer(priv); 1501 1502 /* Unregister Mac address for the port */ 1503 mlx4_en_put_qp(priv); 1504 mdev->mac_removed[priv->port] = 1; 1505 1506 /* Free RX Rings */ 1507 for (i = 0; i < priv->rx_ring_num; i++) { 1508 struct mlx4_en_cq *cq = priv->rx_cq[i]; 1509 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1510 mlx4_en_deactivate_cq(priv, cq); 1511 } 1512 1513 callout_stop(&priv->watchdog_timer); 1514 1515 dev->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1516} 1517 1518static void mlx4_en_restart(struct work_struct *work) 1519{ 1520 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1521 watchdog_task); 1522 struct mlx4_en_dev *mdev = priv->mdev; 1523 struct net_device *dev = priv->dev; 1524 struct mlx4_en_tx_ring *ring; 1525 int i; 1526 1527 1528 if (priv->blocked == 0 || priv->port_up == 0) 1529 return; 1530 for (i = 0; i < priv->tx_ring_num; i++) { 1531 ring = priv->tx_ring[i]; 1532 if (ring->blocked && 1533 ring->watchdog_time + MLX4_EN_WATCHDOG_TIMEOUT < ticks) 1534 goto reset; 1535 } 1536 return; 1537 1538reset: 1539 priv->port_stats.tx_timeout++; 1540 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); 1541 1542 mutex_lock(&mdev->state_lock); 1543 if (priv->port_up) { 1544 mlx4_en_stop_port(dev); 1545 //for (i = 0; i < priv->tx_ring_num; i++) 1546 // netdev_tx_reset_queue(priv->tx_ring[i]->tx_queue); 1547 if (mlx4_en_start_port(dev)) 1548 en_err(priv, "Failed restarting port %d\n", priv->port); 1549 } 1550 mutex_unlock(&mdev->state_lock); 1551} 1552 1553static void mlx4_en_clear_stats(struct net_device *dev) 1554{ 1555 struct mlx4_en_priv *priv = netdev_priv(dev); 1556 struct mlx4_en_dev *mdev = priv->mdev; 1557 int i; 1558 1559 if (!mlx4_is_slave(mdev->dev)) 1560 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) 1561 en_dbg(HW, priv, "Failed dumping statistics\n"); 1562 1563 memset(&priv->pstats, 0, sizeof(priv->pstats)); 1564 memset(&priv->pkstats, 0, sizeof(priv->pkstats)); 1565 memset(&priv->port_stats, 0, sizeof(priv->port_stats)); 1566 memset(&priv->vport_stats, 0, sizeof(priv->vport_stats)); 1567 1568 for (i = 0; i < priv->tx_ring_num; i++) { 1569 priv->tx_ring[i]->bytes = 0; 1570 priv->tx_ring[i]->packets = 0; 1571 priv->tx_ring[i]->tx_csum = 0; 1572 priv->tx_ring[i]->oversized_packets = 0; 1573 } 1574 for (i = 0; i < priv->rx_ring_num; i++) { 1575 priv->rx_ring[i]->bytes = 0; 1576 priv->rx_ring[i]->packets = 0; 1577 priv->rx_ring[i]->csum_ok = 0; 1578 priv->rx_ring[i]->csum_none = 0; 1579 } 1580} 1581 1582static void mlx4_en_open(void* arg) 1583{ 1584 1585 struct mlx4_en_priv *priv; 1586 struct mlx4_en_dev *mdev; 1587 struct net_device *dev; 1588 int err = 0; 1589 1590 priv = arg; 1591 mdev = priv->mdev; 1592 dev = priv->dev; 1593 1594 1595 mutex_lock(&mdev->state_lock); 1596 1597 if (!mdev->device_up) { 1598 en_err(priv, "Cannot open - device down/disabled\n"); 1599 goto out; 1600 } 1601 1602 /* Reset HW statistics and SW counters */ 1603 mlx4_en_clear_stats(dev); 1604 1605 err = mlx4_en_start_port(dev); 1606 if (err) 1607 en_err(priv, "Failed starting port:%d\n", priv->port); 1608 1609out: 1610 mutex_unlock(&mdev->state_lock); 1611 return; 1612} 1613 1614void mlx4_en_free_resources(struct mlx4_en_priv *priv) 1615{ 1616 int i; 1617 1618#ifdef CONFIG_RFS_ACCEL 1619 if (priv->dev->rx_cpu_rmap) { 1620 free_irq_cpu_rmap(priv->dev->rx_cpu_rmap); 1621 priv->dev->rx_cpu_rmap = NULL; 1622 } 1623#endif 1624 1625 for (i = 0; i < priv->tx_ring_num; i++) { 1626 if (priv->tx_ring && priv->tx_ring[i]) 1627 
mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); 1628 if (priv->tx_cq && priv->tx_cq[i]) 1629 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); 1630 } 1631 1632 for (i = 0; i < priv->rx_ring_num; i++) { 1633 if (priv->rx_ring[i]) 1634 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], 1635 priv->prof->rx_ring_size, priv->stride); 1636 if (priv->rx_cq[i]) 1637 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 1638 } 1639 1640 if (priv->stat_sysctl != NULL) 1641 sysctl_ctx_free(&priv->stat_ctx); 1642} 1643 1644int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) 1645{ 1646 struct mlx4_en_port_profile *prof = priv->prof; 1647 int i; 1648 int node = 0; 1649 1650 /* Create rx Rings */ 1651 for (i = 0; i < priv->rx_ring_num; i++) { 1652 if (mlx4_en_create_cq(priv, &priv->rx_cq[i], 1653 prof->rx_ring_size, i, RX, node)) 1654 goto err; 1655 1656 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], 1657 prof->rx_ring_size, node)) 1658 goto err; 1659 } 1660 1661 /* Create tx Rings */ 1662 for (i = 0; i < priv->tx_ring_num; i++) { 1663 if (mlx4_en_create_cq(priv, &priv->tx_cq[i], 1664 prof->tx_ring_size, i, TX, node)) 1665 goto err; 1666 1667 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], 1668 prof->tx_ring_size, TXBB_SIZE, node, i)) 1669 goto err; 1670 } 1671 1672#ifdef CONFIG_RFS_ACCEL 1673 priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num); 1674 if (!priv->dev->rx_cpu_rmap) 1675 goto err; 1676#endif 1677 /* Re-create stat sysctls in case the number of rings changed. */ 1678 mlx4_en_sysctl_stat(priv); 1679 return 0; 1680 1681err: 1682 en_err(priv, "Failed to allocate NIC resources\n"); 1683 for (i = 0; i < priv->rx_ring_num; i++) { 1684 if (priv->rx_ring[i]) 1685 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], 1686 prof->rx_ring_size, 1687 priv->stride); 1688 if (priv->rx_cq[i]) 1689 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 1690 } 1691 for (i = 0; i < priv->tx_ring_num; i++) { 1692 if (priv->tx_ring[i]) 1693 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); 1694 if (priv->tx_cq[i]) 1695 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); 1696 } 1697 priv->port_up = false; 1698 return -ENOMEM; 1699} 1700 1701struct en_port_attribute { 1702 struct attribute attr; 1703 ssize_t (*show)(struct en_port *, struct en_port_attribute *, char *buf); 1704 ssize_t (*store)(struct en_port *, struct en_port_attribute *, char *buf, size_t count); 1705}; 1706 1707#define PORT_ATTR_RO(_name) \ 1708struct en_port_attribute en_port_attr_##_name = __ATTR_RO(_name) 1709 1710#define EN_PORT_ATTR(_name, _mode, _show, _store) \ 1711struct en_port_attribute en_port_attr_##_name = __ATTR(_name, _mode, _show, _store) 1712 1713void mlx4_en_destroy_netdev(struct net_device *dev) 1714{ 1715 struct mlx4_en_priv *priv = netdev_priv(dev); 1716 struct mlx4_en_dev *mdev = priv->mdev; 1717 1718 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); 1719 1720 if (priv->vlan_attach != NULL) 1721 EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach); 1722 if (priv->vlan_detach != NULL) 1723 EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach); 1724 1725 /* Unregister device - this will close the port if it was up */ 1726 if (priv->registered) { 1727 mutex_lock(&mdev->state_lock); 1728 ether_ifdetach(dev); 1729 mutex_unlock(&mdev->state_lock); 1730 } 1731 1732 mutex_lock(&mdev->state_lock); 1733 mlx4_en_stop_port(dev); 1734 mutex_unlock(&mdev->state_lock); 1735 1736 if (priv->allocated) 1737 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); 1738 1739 cancel_delayed_work(&priv->stats_task); 1740 
cancel_delayed_work(&priv->service_task); 1741 /* flush any pending task for this netdev */ 1742 flush_workqueue(mdev->workqueue); 1743 callout_drain(&priv->watchdog_timer); 1744 1745 /* Detach the netdev so tasks would not attempt to access it */ 1746 mutex_lock(&mdev->state_lock); 1747 mdev->pndev[priv->port] = NULL; 1748 mutex_unlock(&mdev->state_lock); 1749 1750 1751 mlx4_en_free_resources(priv); 1752 1753 /* freeing the sysctl conf cannot be called from within mlx4_en_free_resources */ 1754 if (priv->conf_sysctl != NULL) 1755 sysctl_ctx_free(&priv->conf_ctx); 1756 1757 kfree(priv->tx_ring); 1758 kfree(priv->tx_cq); 1759 1760 kfree(priv); 1761 if_free(dev); 1762 1763} 1764 1765static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) 1766{ 1767 struct mlx4_en_priv *priv = netdev_priv(dev); 1768 struct mlx4_en_dev *mdev = priv->mdev; 1769 int err = 0; 1770 1771 en_dbg(DRV, priv, "Change MTU called - current:%u new:%u\n", 1772 (unsigned)dev->if_mtu, (unsigned)new_mtu); 1773 1774 if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) { 1775 en_err(priv, "Bad MTU size:%d, max %u.\n", new_mtu, 1776 priv->max_mtu); 1777 return -EPERM; 1778 } 1779 mutex_lock(&mdev->state_lock); 1780 dev->if_mtu = new_mtu; 1781 if (dev->if_drv_flags & IFF_DRV_RUNNING) { 1782 if (!mdev->device_up) { 1783 /* NIC is probably restarting - let watchdog task reset 1784 * * the port */ 1785 en_dbg(DRV, priv, "Change MTU called with card down!?\n"); 1786 } else { 1787 mlx4_en_stop_port(dev); 1788 err = mlx4_en_start_port(dev); 1789 if (err) { 1790 en_err(priv, "Failed restarting port:%d\n", 1791 priv->port); 1792 queue_work(mdev->workqueue, &priv->watchdog_task); 1793 } 1794 } 1795 } 1796 mutex_unlock(&mdev->state_lock); 1797 return 0; 1798} 1799 1800static int mlx4_en_calc_media(struct mlx4_en_priv *priv) 1801{ 1802 int trans_type; 1803 int active; 1804 1805 active = IFM_ETHER; 1806 if (priv->last_link_state == MLX4_DEV_EVENT_PORT_DOWN) 1807 return (active); 1808 active |= IFM_FDX; 1809 trans_type = priv->port_state.transciver; 1810 /* XXX I don't know all of the transceiver values. 
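 * Below, 10G transceiver types 0x1-0xC are reported as SR and types 0
 * or 0x80 as CX4; other 10G types leave the media subtype unset.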
*/ 1811 switch (priv->port_state.link_speed) { 1812 case 1000: 1813 active |= IFM_1000_T; 1814 break; 1815 case 10000: 1816 if (trans_type > 0 && trans_type <= 0xC) 1817 active |= IFM_10G_SR; 1818 else if (trans_type == 0x80 || trans_type == 0) 1819 active |= IFM_10G_CX4; 1820 break; 1821 case 40000: 1822 active |= IFM_40G_CR4; 1823 break; 1824 } 1825 if (priv->prof->tx_pause) 1826 active |= IFM_ETH_TXPAUSE; 1827 if (priv->prof->rx_pause) 1828 active |= IFM_ETH_RXPAUSE; 1829 1830 return (active); 1831} 1832 1833static void mlx4_en_media_status(struct ifnet *dev, struct ifmediareq *ifmr) 1834{ 1835 struct mlx4_en_priv *priv; 1836 1837 priv = dev->if_softc; 1838 ifmr->ifm_status = IFM_AVALID; 1839 if (priv->last_link_state != MLX4_DEV_EVENT_PORT_DOWN) 1840 ifmr->ifm_status |= IFM_ACTIVE; 1841 ifmr->ifm_active = mlx4_en_calc_media(priv); 1842 1843 return; 1844} 1845 1846static int mlx4_en_media_change(struct ifnet *dev) 1847{ 1848 struct mlx4_en_priv *priv; 1849 struct ifmedia *ifm; 1850 int rxpause; 1851 int txpause; 1852 int error; 1853 1854 priv = dev->if_softc; 1855 ifm = &priv->media; 1856 rxpause = txpause = 0; 1857 error = 0; 1858 1859 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1860 return (EINVAL); 1861 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1862 case IFM_AUTO: 1863 break; 1864 case IFM_10G_SR: 1865 case IFM_10G_CX4: 1866 case IFM_1000_T: 1867 case IFM_40G_CR4: 1868 if ((IFM_SUBTYPE(ifm->ifm_media) 1869 == IFM_SUBTYPE(mlx4_en_calc_media(priv))) 1870 && (ifm->ifm_media & IFM_FDX)) 1871 break; 1872 /* Fallthrough */ 1873 default: 1874 printf("%s: Only auto media type\n", if_name(dev)); 1875 return (EINVAL); 1876 } 1877 /* Allow user to set/clear pause */ 1878 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE) 1879 rxpause = 1; 1880 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE) 1881 txpause = 1; 1882 if (priv->prof->tx_pause != txpause || priv->prof->rx_pause != rxpause) { 1883 priv->prof->tx_pause = txpause; 1884 priv->prof->rx_pause = rxpause; 1885 error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port, 1886 priv->rx_mb_size + ETHER_CRC_LEN, priv->prof->tx_pause, 1887 priv->prof->tx_ppp, priv->prof->rx_pause, 1888 priv->prof->rx_ppp); 1889 } 1890 return (error); 1891} 1892 1893static int mlx4_en_ioctl(struct ifnet *dev, u_long command, caddr_t data) 1894{ 1895 struct mlx4_en_priv *priv; 1896 struct mlx4_en_dev *mdev; 1897 struct ifreq *ifr; 1898 int error; 1899 int mask; 1900 1901 error = 0; 1902 mask = 0; 1903 priv = dev->if_softc; 1904 mdev = priv->mdev; 1905 ifr = (struct ifreq *) data; 1906 switch (command) { 1907 1908 case SIOCSIFMTU: 1909 error = -mlx4_en_change_mtu(dev, ifr->ifr_mtu); 1910 break; 1911 case SIOCSIFFLAGS: 1912 if (dev->if_flags & IFF_UP) { 1913 if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1914 mutex_lock(&mdev->state_lock); 1915 mlx4_en_start_port(dev); 1916 mutex_unlock(&mdev->state_lock); 1917 } else { 1918 mlx4_en_set_rx_mode(dev); 1919 } 1920 } else { 1921 mutex_lock(&mdev->state_lock); 1922 if (dev->if_drv_flags & IFF_DRV_RUNNING) { 1923 mlx4_en_stop_port(dev); 1924 if_link_state_change(dev, LINK_STATE_DOWN); 1925 } 1926 mutex_unlock(&mdev->state_lock); 1927 } 1928 break; 1929 case SIOCADDMULTI: 1930 case SIOCDELMULTI: 1931 mlx4_en_set_rx_mode(dev); 1932 break; 1933 case SIOCSIFMEDIA: 1934 case SIOCGIFMEDIA: 1935 error = ifmedia_ioctl(dev, ifr, &priv->media, command); 1936 break; 1937 case SIOCSIFCAP: 1938 mutex_lock(&mdev->state_lock); 1939 mask = ifr->ifr_reqcap ^ dev->if_capenable; 1940 if (mask & IFCAP_TXCSUM) { 1941 dev->if_capenable ^= 
IFCAP_TXCSUM; 1942 dev->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); 1943 1944 if (IFCAP_TSO4 & dev->if_capenable && 1945 !(IFCAP_TXCSUM & dev->if_capenable)) { 1946 dev->if_capenable &= ~IFCAP_TSO4; 1947 dev->if_hwassist &= ~CSUM_IP_TSO; 1948 if_printf(dev, 1949 "tso4 disabled due to -txcsum.\n"); 1950 } 1951 } 1952 if (mask & IFCAP_TXCSUM_IPV6) { 1953 dev->if_capenable ^= IFCAP_TXCSUM_IPV6; 1954 dev->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 1955 1956 if (IFCAP_TSO6 & dev->if_capenable && 1957 !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) { 1958 dev->if_capenable &= ~IFCAP_TSO6; 1959 dev->if_hwassist &= ~CSUM_IP6_TSO; 1960 if_printf(dev, 1961 "tso6 disabled due to -txcsum6.\n"); 1962 } 1963 } 1964 if (mask & IFCAP_RXCSUM) 1965 dev->if_capenable ^= IFCAP_RXCSUM; 1966 if (mask & IFCAP_RXCSUM_IPV6) 1967 dev->if_capenable ^= IFCAP_RXCSUM_IPV6; 1968 1969 if (mask & IFCAP_TSO4) { 1970 if (!(IFCAP_TSO4 & dev->if_capenable) && 1971 !(IFCAP_TXCSUM & dev->if_capenable)) { 1972 if_printf(dev, "enable txcsum first.\n"); 1973 error = EAGAIN; 1974 goto out; 1975 } 1976 dev->if_capenable ^= IFCAP_TSO4; 1977 dev->if_hwassist ^= CSUM_IP_TSO; 1978 } 1979 if (mask & IFCAP_TSO6) { 1980 if (!(IFCAP_TSO6 & dev->if_capenable) && 1981 !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) { 1982 if_printf(dev, "enable txcsum6 first.\n"); 1983 error = EAGAIN; 1984 goto out; 1985 } 1986 dev->if_capenable ^= IFCAP_TSO6; 1987 dev->if_hwassist ^= CSUM_IP6_TSO; 1988 } 1989 if (mask & IFCAP_LRO) 1990 dev->if_capenable ^= IFCAP_LRO; 1991 if (mask & IFCAP_VLAN_HWTAGGING) 1992 dev->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1993 if (mask & IFCAP_VLAN_HWFILTER) 1994 dev->if_capenable ^= IFCAP_VLAN_HWFILTER; 1995 if (mask & IFCAP_WOL_MAGIC) 1996 dev->if_capenable ^= IFCAP_WOL_MAGIC; 1997 if (dev->if_drv_flags & IFF_DRV_RUNNING) 1998 mlx4_en_start_port(dev); 1999out: 2000 mutex_unlock(&mdev->state_lock); 2001 VLAN_CAPABILITIES(dev); 2002 break; 2003#if __FreeBSD_version >= 1100036 2004 case SIOCGI2C: { 2005 struct ifi2creq i2c; 2006 2007 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c)); 2008 if (error) 2009 break; 2010 if (i2c.len > sizeof(i2c.data)) { 2011 error = EINVAL; 2012 break; 2013 } 2014 /* 2015 * Note that we ignore i2c.addr here. The driver hardcodes 2016 * the address to 0x50, while standard expects it to be 0xA0. 
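 * (0x50 is simply the 7-bit form of the 8-bit I2C address 0xA0.)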
2017 */ 2018 error = mlx4_get_module_info(mdev->dev, priv->port, 2019 i2c.offset, i2c.len, i2c.data); 2020 if (error < 0) { 2021 error = -error; 2022 break; 2023 } 2024 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c)); 2025 break; 2026 } 2027#endif 2028 default: 2029 error = ether_ioctl(dev, command, data); 2030 break; 2031 } 2032 2033 return (error); 2034} 2035 2036 2037int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 2038 struct mlx4_en_port_profile *prof) 2039{ 2040 struct net_device *dev; 2041 struct mlx4_en_priv *priv; 2042 uint8_t dev_addr[ETHER_ADDR_LEN]; 2043 int err; 2044 int i; 2045 2046 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 2047 dev = priv->dev = if_alloc(IFT_ETHER); 2048 if (dev == NULL) { 2049 en_err(priv, "Net device allocation failed\n"); 2050 kfree(priv); 2051 return -ENOMEM; 2052 } 2053 dev->if_softc = priv; 2054 if_initname(dev, "mlxen", (device_get_unit( 2055 mdev->pdev->dev.bsddev) * MLX4_MAX_PORTS) + port - 1); 2056 dev->if_mtu = ETHERMTU; 2057 dev->if_init = mlx4_en_open; 2058 dev->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2059 dev->if_ioctl = mlx4_en_ioctl; 2060 dev->if_transmit = mlx4_en_transmit; 2061 dev->if_qflush = mlx4_en_qflush; 2062 dev->if_snd.ifq_maxlen = prof->tx_ring_size; 2063 2064 /* 2065 * Initialize driver private data 2066 */ 2067 priv->counter_index = 0xff; 2068 spin_lock_init(&priv->stats_lock); 2069 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); 2070 INIT_WORK(&priv->watchdog_task, mlx4_en_restart); 2071 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 2072 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 2073 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); 2074 callout_init(&priv->watchdog_timer, 1); 2075#ifdef CONFIG_RFS_ACCEL 2076 INIT_LIST_HEAD(&priv->filters); 2077 spin_lock_init(&priv->filters_lock); 2078#endif 2079 2080 priv->msg_enable = MLX4_EN_MSG_LEVEL; 2081 priv->dev = dev; 2082 priv->mdev = mdev; 2083 priv->ddev = &mdev->pdev->dev; 2084 priv->prof = prof; 2085 priv->port = port; 2086 priv->port_up = false; 2087 priv->flags = prof->flags; 2088 2089 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; 2090 priv->tx_ring_num = prof->tx_ring_num; 2091 priv->tx_ring = kcalloc(MAX_TX_RINGS, 2092 sizeof(struct mlx4_en_tx_ring *), GFP_KERNEL); 2093 if (!priv->tx_ring) { 2094 err = -ENOMEM; 2095 goto out; 2096 } 2097 priv->tx_cq = kcalloc(sizeof(struct mlx4_en_cq *), MAX_TX_RINGS, 2098 GFP_KERNEL); 2099 if (!priv->tx_cq) { 2100 err = -ENOMEM; 2101 goto out; 2102 } 2103 2104 priv->rx_ring_num = prof->rx_ring_num; 2105 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 
1 : 0; 2106 priv->mac_index = -1; 2107 priv->last_ifq_jiffies = 0; 2108 priv->if_counters_rx_errors = 0; 2109 priv->if_counters_rx_no_buffer = 0; 2110#ifdef CONFIG_MLX4_EN_DCB 2111 if (!mlx4_is_slave(priv->mdev->dev)) { 2112 priv->dcbx_cap = DCB_CAP_DCBX_HOST; 2113 priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; 2114 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { 2115 dev->dcbnl_ops = &mlx4_en_dcbnl_ops; 2116 } else { 2117 en_info(priv, "QoS disabled - no HW support\n"); 2118 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops; 2119 } 2120 } 2121#endif 2122 2123 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) 2124 INIT_HLIST_HEAD(&priv->mac_hash[i]); 2125 2126 /* Query for default mac and max mtu */ 2127 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; 2128 priv->mac = mdev->dev->caps.def_mac[priv->port]; 2129 if (ILLEGAL_MAC(priv->mac)) { 2130#if BITS_PER_LONG == 64 2131 en_err(priv, "Port: %d, invalid mac burned: 0x%lx, quitting\n", 2132 priv->port, priv->mac); 2133#elif BITS_PER_LONG == 32 2134 en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quitting\n", 2135 priv->port, priv->mac); 2136#endif 2137 err = -EINVAL; 2138 goto out; 2139 } 2140 2141 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + 2142 DS_SIZE); 2143 2144 mlx4_en_sysctl_conf(priv); 2145 2146 err = mlx4_en_alloc_resources(priv); 2147 if (err) 2148 goto out; 2149 2150 /* Allocate page for receive rings */ 2151 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, 2152 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE); 2153 if (err) { 2154 en_err(priv, "Failed to allocate page for rx qps\n"); 2155 goto out; 2156 } 2157 priv->allocated = 1; 2158 2159 /* 2160 * Set driver features 2161 */ 2162 dev->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6; 2163 dev->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 2164 dev->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER; 2165 dev->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU; 2166 dev->if_capabilities |= IFCAP_LRO; 2167 dev->if_capabilities |= IFCAP_HWSTATS; 2168 2169 if (mdev->LSO_support) 2170 dev->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTSO; 2171 2172 /* set TSO limits so that we don't have to drop TX packets */ 2173 dev->if_hw_tsomax = MLX4_EN_TX_MAX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) /* hdr */; 2174 dev->if_hw_tsomaxsegcount = MLX4_EN_TX_MAX_MBUF_FRAGS - 1 /* hdr */; 2175 dev->if_hw_tsomaxsegsize = MLX4_EN_TX_MAX_MBUF_SIZE; 2176 2177 dev->if_capenable = dev->if_capabilities; 2178 2179 dev->if_hwassist = 0; 2180 if (dev->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6)) 2181 dev->if_hwassist |= CSUM_TSO; 2182 if (dev->if_capenable & IFCAP_TXCSUM) 2183 dev->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP); 2184 if (dev->if_capenable & IFCAP_TXCSUM_IPV6) 2185 dev->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 2186 2187 2188 /* Register for VLAN events */ 2189 priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, 2190 mlx4_en_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST); 2191 priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, 2192 mlx4_en_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST); 2193 2194 mdev->pndev[priv->port] = dev; 2195 2196 priv->last_link_state = MLX4_DEV_EVENT_PORT_DOWN; 2197 mlx4_en_set_default_moderation(priv); 2198 2199 /* Set default MAC */ 2200 for (i = 0; i < ETHER_ADDR_LEN; i++) 2201 dev_addr[ETHER_ADDR_LEN - 1 - i] = (u8) (priv->mac >> (8 * i)); 2202 2203 2204 ether_ifattach(dev, dev_addr); 2205 if_link_state_change(dev, LINK_STATE_DOWN); 2206 ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
2207 mlx4_en_media_change, mlx4_en_media_status); 2208 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_1000_T, 0, NULL); 2209 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_SR, 0, NULL); 2210 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_CX4, 0, NULL); 2211 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_40G_CR4, 0, NULL); 2212 ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2213 ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO); 2214 2215 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); 2216 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); 2217 2218 priv->registered = 1; 2219 2222 2223 2224 priv->rx_mb_size = dev->if_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; 2225 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 2226 priv->rx_mb_size, 2227 prof->tx_pause, prof->tx_ppp, 2228 prof->rx_pause, prof->rx_ppp); 2229 if (err) { 2230 en_err(priv, "Failed setting port general configurations " 2231 "for port %d, with error %d\n", priv->port, err); 2232 goto out; 2233 } 2234 2235 /* Init port */ 2236 en_warn(priv, "Initializing port\n"); 2237 err = mlx4_INIT_PORT(mdev->dev, priv->port); 2238 if (err) { 2239 en_err(priv, "Failed Initializing port\n"); 2240 goto out; 2241 } 2242 2243 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 2244 2245 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 2246 queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY); 2247 2248 return 0; 2249 2250out: 2251 mlx4_en_destroy_netdev(dev); 2252 return err; 2253} 2254 2255static int mlx4_en_set_ring_size(struct net_device *dev, 2256 int rx_size, int tx_size) 2257{ 2258 struct mlx4_en_priv *priv = netdev_priv(dev); 2259 struct mlx4_en_dev *mdev = priv->mdev; 2260 int port_up = 0; 2261 int err = 0; 2262 2263 rx_size = roundup_pow_of_two(rx_size); 2264 rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE); 2265 rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE); 2266 tx_size = roundup_pow_of_two(tx_size); 2267 tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE); 2268 tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE); 2269 2270 if (rx_size == (priv->port_up ?
2271 priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size) && 2272 tx_size == priv->tx_ring[0]->size) 2273 return 0; 2274 mutex_lock(&mdev->state_lock); 2275 if (priv->port_up) { 2276 port_up = 1; 2277 mlx4_en_stop_port(dev); 2278 } 2279 mlx4_en_free_resources(priv); 2280 priv->prof->tx_ring_size = tx_size; 2281 priv->prof->rx_ring_size = rx_size; 2282 err = mlx4_en_alloc_resources(priv); 2283 if (err) { 2284 en_err(priv, "Failed reallocating port resources\n"); 2285 goto out; 2286 } 2287 if (port_up) { 2288 err = mlx4_en_start_port(dev); 2289 if (err) 2290 en_err(priv, "Failed starting port\n"); 2291 } 2292out: 2293 mutex_unlock(&mdev->state_lock); 2294 return err; 2295} 2296static int mlx4_en_set_rx_ring_size(SYSCTL_HANDLER_ARGS) 2297{ 2298 struct mlx4_en_priv *priv; 2299 int size; 2300 int error; 2301 2302 priv = arg1; 2303 size = priv->prof->rx_ring_size; 2304 error = sysctl_handle_int(oidp, &size, 0, req); 2305 if (error || !req->newptr) 2306 return (error); 2307 error = -mlx4_en_set_ring_size(priv->dev, size, 2308 priv->prof->tx_ring_size); 2309 return (error); 2310} 2311 2312static int mlx4_en_set_tx_ring_size(SYSCTL_HANDLER_ARGS) 2313{ 2314 struct mlx4_en_priv *priv; 2315 int size; 2316 int error; 2317 2318 priv = arg1; 2319 size = priv->prof->tx_ring_size; 2320 error = sysctl_handle_int(oidp, &size, 0, req); 2321 if (error || !req->newptr) 2322 return (error); 2323 error = -mlx4_en_set_ring_size(priv->dev, priv->prof->rx_ring_size, 2324 size); 2325 2326 return (error); 2327} 2328 2329static int mlx4_en_get_module_info(struct net_device *dev, 2330 struct ethtool_modinfo *modinfo) 2331{ 2332 struct mlx4_en_priv *priv = netdev_priv(dev); 2333 struct mlx4_en_dev *mdev = priv->mdev; 2334 int ret; 2335 u8 data[4]; 2336 2337 /* Read first 2 bytes to get Module & REV ID */ 2338 ret = mlx4_get_module_info(mdev->dev, priv->port, 2339 0/*offset*/, 2/*size*/, data); 2340 2341 if (ret < 2) { 2342 en_err(priv, "Failed to read eeprom module first two bytes, error: 0x%x\n", -ret); 2343 return -EIO; 2344 } 2345 2346 switch (data[0] /* identifier */) { 2347 case MLX4_MODULE_ID_QSFP: 2348 modinfo->type = ETH_MODULE_SFF_8436; 2349 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2350 break; 2351 case MLX4_MODULE_ID_QSFP_PLUS: 2352 if (data[1] >= 0x3) { /* revision id */ 2353 modinfo->type = ETH_MODULE_SFF_8636; 2354 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 2355 } else { 2356 modinfo->type = ETH_MODULE_SFF_8436; 2357 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2358 } 2359 break; 2360 case MLX4_MODULE_ID_QSFP28: 2361 modinfo->type = ETH_MODULE_SFF_8636; 2362 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 2363 break; 2364 case MLX4_MODULE_ID_SFP: 2365 modinfo->type = ETH_MODULE_SFF_8472; 2366 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 2367 break; 2368 default: 2369 en_err(priv, "mlx4_en_get_module_info : Not recognized cable type\n"); 2370 return -EINVAL; 2371 } 2372 2373 return 0; 2374} 2375 2376static int mlx4_en_get_module_eeprom(struct net_device *dev, 2377 struct ethtool_eeprom *ee, 2378 u8 *data) 2379{ 2380 struct mlx4_en_priv *priv = netdev_priv(dev); 2381 struct mlx4_en_dev *mdev = priv->mdev; 2382 int offset = ee->offset; 2383 int i = 0, ret; 2384 2385 if (ee->len == 0) 2386 return -EINVAL; 2387 2388 memset(data, 0, ee->len); 2389 2390 while (i < ee->len) { 2391 en_dbg(DRV, priv, 2392 "mlx4_get_module_info i(%d) offset(%d) len(%d)\n", 2393 i, offset, ee->len - i); 2394 2395 ret = mlx4_get_module_info(mdev->dev, priv->port, 2396 offset, ee->len - i, data + i); 2397 2398 if (!ret) /* 
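mlx4_get_module_info() returns the number of bytes it actually read, so a return of zero means there is nothing left to fetch.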
Done reading */ 2399 return 0; 2400 2401 if (ret < 0) { 2402 en_err(priv, 2403 "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n", 2404 i, offset, ee->len - i, ret); 2405 return -1; 2406 } 2407 2408 i += ret; 2409 offset += ret; 2410 } 2411 return 0; 2412} 2413 2414static void mlx4_en_print_eeprom(u8 *data, __u32 len) 2415{ 2416 int i; 2417 int j = 0; 2418 int row = 0; 2419 const int NUM_OF_BYTES = 16; 2420 2421 printf("\nOffset\t\tValues\n"); 2422 printf("------\t\t------\n"); 2423 while(row < len){ 2424 printf("0x%04x\t\t",row); 2425 for(i=0; i < NUM_OF_BYTES; i++){ 2426 printf("%02x ", data[j]); 2427 row++; 2428 j++; 2429 } 2430 printf("\n"); 2431 } 2432} 2433 2434/* Read cable EEPROM module information by first inspecting the first 2435 * two bytes to get the length and then read the rest of the information. 2436 * The information is printed to dmesg. */ 2437static int mlx4_en_read_eeprom(SYSCTL_HANDLER_ARGS) 2438{ 2439 2440 u8* data; 2441 int error; 2442 int result = 0; 2443 struct mlx4_en_priv *priv; 2444 struct net_device *dev; 2445 struct ethtool_modinfo modinfo; 2446 struct ethtool_eeprom ee; 2447 2448 error = sysctl_handle_int(oidp, &result, 0, req); 2449 if (error || !req->newptr) 2450 return (error); 2451 2452 if (result == 1) { 2453 priv = arg1; 2454 dev = priv->dev; 2455 data = kmalloc(PAGE_SIZE, GFP_KERNEL); 2456 2457 error = mlx4_en_get_module_info(dev, &modinfo); 2458 if (error) { 2459 en_err(priv, 2460 "mlx4_en_get_module_info returned with error - FAILED (0x%x)\n", 2461 -error); 2462 goto out; 2463 } 2464 2465 ee.len = modinfo.eeprom_len; 2466 ee.offset = 0; 2467 2468 error = mlx4_en_get_module_eeprom(dev, &ee, data); 2469 if (error) { 2470 en_err(priv, 2471 "mlx4_en_get_module_eeprom returned with error - FAILED (0x%x)\n", 2472 -error); 2473 /* Continue printing partial information in case of an error */ 2474 } 2475 2476 /* EEPROM information will be printed in dmesg */ 2477 mlx4_en_print_eeprom(data, ee.len); 2478out: 2479 kfree(data); 2480 } 2481 /* Return zero to prevent sysctl failure. */ 2482 return (0); 2483} 2484 2485static int mlx4_en_set_tx_ppp(SYSCTL_HANDLER_ARGS) 2486{ 2487 struct mlx4_en_priv *priv; 2488 int ppp; 2489 int error; 2490 2491 priv = arg1; 2492 ppp = priv->prof->tx_ppp; 2493 error = sysctl_handle_int(oidp, &ppp, 0, req); 2494 if (error || !req->newptr) 2495 return (error); 2496 if (ppp > 0xff || ppp < 0) 2497 return (-EINVAL); 2498 priv->prof->tx_ppp = ppp; 2499 error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port, 2500 priv->rx_mb_size + ETHER_CRC_LEN, 2501 priv->prof->tx_pause, 2502 priv->prof->tx_ppp, 2503 priv->prof->rx_pause, 2504 priv->prof->rx_ppp); 2505 2506 return (error); 2507} 2508 2509static int mlx4_en_set_rx_ppp(SYSCTL_HANDLER_ARGS) 2510{ 2511 struct mlx4_en_priv *priv; 2512 struct mlx4_en_dev *mdev; 2513 int ppp; 2514 int error; 2515 int port_up; 2516 2517 port_up = 0; 2518 priv = arg1; 2519 mdev = priv->mdev; 2520 ppp = priv->prof->rx_ppp; 2521 error = sysctl_handle_int(oidp, &ppp, 0, req); 2522 if (error || !req->newptr) 2523 return (error); 2524 if (ppp > 0xff || ppp < 0) 2525 return (-EINVAL); 2526 /* See if we have to change the number of tx queues. 
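Toggling per-priority pause between off and on means the port resources have to be freed and reallocated under the state lock; otherwise only the SET_PORT command further down needs to be reissued.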
*/ 2527 if (!ppp != !priv->prof->rx_ppp) { 2528 mutex_lock(&mdev->state_lock); 2529 if (priv->port_up) { 2530 port_up = 1; 2531 mlx4_en_stop_port(priv->dev); 2532 } 2533 mlx4_en_free_resources(priv); 2534 priv->prof->rx_ppp = ppp; 2535 error = -mlx4_en_alloc_resources(priv); 2536 if (error) 2537 en_err(priv, "Failed reallocating port resources\n"); 2538 if (error == 0 && port_up) { 2539 error = -mlx4_en_start_port(priv->dev); 2540 if (error) 2541 en_err(priv, "Failed starting port\n"); 2542 } 2543 mutex_unlock(&mdev->state_lock); 2544 return (error); 2545 2546 } 2547 priv->prof->rx_ppp = ppp; 2548 error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port, 2549 priv->rx_mb_size + ETHER_CRC_LEN, 2550 priv->prof->tx_pause, 2551 priv->prof->tx_ppp, 2552 priv->prof->rx_pause, 2553 priv->prof->rx_ppp); 2554 2555 return (error); 2556} 2557 2558static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv) 2559{ 2560 struct net_device *dev; 2561 struct sysctl_ctx_list *ctx; 2562 struct sysctl_oid *node; 2563 struct sysctl_oid_list *node_list; 2564 struct sysctl_oid *coal; 2565 struct sysctl_oid_list *coal_list; 2566 const char *pnameunit; 2567 2568 dev = priv->dev; 2569 ctx = &priv->conf_ctx; 2570 pnameunit = device_get_nameunit(priv->mdev->pdev->dev.bsddev); 2571 2572 sysctl_ctx_init(ctx); 2573 priv->conf_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_hw), 2574 OID_AUTO, dev->if_xname, CTLFLAG_RD, 0, "mlx4 10gig ethernet"); 2575 node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->conf_sysctl), OID_AUTO, 2576 "conf", CTLFLAG_RD, NULL, "Configuration"); 2577 node_list = SYSCTL_CHILDREN(node); 2578 2579 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "msg_enable", 2580 CTLFLAG_RW, &priv->msg_enable, 0, 2581 "Driver message enable bitfield"); 2582 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_rings", 2583 CTLFLAG_RD, &priv->rx_ring_num, 0, 2584 "Number of receive rings"); 2585 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_rings", 2586 CTLFLAG_RD, &priv->tx_ring_num, 0, 2587 "Number of transmit rings"); 2588 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_size", 2589 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2590 mlx4_en_set_rx_ring_size, "I", "Receive ring size"); 2591 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_size", 2592 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2593 mlx4_en_set_tx_ring_size, "I", "Transmit ring size"); 2594 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_ppp", 2595 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2596 mlx4_en_set_tx_ppp, "I", "TX Per-priority pause"); 2597 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_ppp", 2598 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2599 mlx4_en_set_rx_ppp, "I", "RX Per-priority pause"); 2600 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "port_num", 2601 CTLFLAG_RD, &priv->port, 0, 2602 "Port Number"); 2603 SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO, "device_name", 2604 CTLFLAG_RD, __DECONST(void *, pnameunit), 0, 2605 "PCI device name"); 2606 2607 /* Add coalescer configuration. 
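The knobs below feed the adaptive RX interrupt moderation: pkt_rate_low/high bound the observed packet rate, rx_usecs_low/high are the delays applied at those bounds, and sample_interval is how often (in ticks) the rate is re-evaluated while adaptive_rx_coal is non-zero (a summary of the sysctl descriptions that follow; the algorithm itself lives in the moderation code elsewhere in the driver).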
*/ 2608 coal = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, 2609 "coalesce", CTLFLAG_RD, NULL, "Interrupt coalesce configuration"); 2610 coal_list = SYSCTL_CHILDREN(coal); 2611 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_low", 2612 CTLFLAG_RW, &priv->pkt_rate_low, 0, 2613 "Packets per-second for minimum delay"); 2614 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_low", 2615 CTLFLAG_RW, &priv->rx_usecs_low, 0, 2616 "Minimum RX delay in micro-seconds"); 2617 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_high", 2618 CTLFLAG_RW, &priv->pkt_rate_high, 0, 2619 "Packets per-second for maximum delay"); 2620 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_high", 2621 CTLFLAG_RW, &priv->rx_usecs_high, 0, 2622 "Maximum RX delay in micro-seconds"); 2623 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "sample_interval", 2624 CTLFLAG_RW, &priv->sample_interval, 0, 2625 "adaptive frequency in units of HZ ticks"); 2626 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "adaptive_rx_coal", 2627 CTLFLAG_RW, &priv->adaptive_rx_coal, 0, 2628 "Enable adaptive rx coalescing"); 2629 /* EEPROM support */ 2630 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "eeprom_info", 2631 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2632 mlx4_en_read_eeprom, "I", "EEPROM information"); 2633} 2634 2635static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv) 2636{ 2637 struct sysctl_ctx_list *ctx; 2638 struct sysctl_oid_list *node_list; 2639 struct sysctl_oid *ring_node; 2640 struct sysctl_oid_list *ring_list; 2641 struct mlx4_en_tx_ring *tx_ring; 2642 struct mlx4_en_rx_ring *rx_ring; 2643 char namebuf[128]; 2644 int i; 2645 2646 ctx = &priv->stat_ctx; 2647 sysctl_ctx_init(ctx); 2648 priv->stat_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->conf_sysctl), OID_AUTO, 2649 "stat", CTLFLAG_RD, NULL, "Statistics"); 2650 node_list = SYSCTL_CHILDREN(priv->stat_sysctl); 2651 2652#ifdef MLX4_EN_PERF_STAT 2653 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_poll", CTLFLAG_RD, 2654 &priv->pstats.tx_poll, "TX Poll calls"); 2655 SYSCTL_ADD_QUAD(ctx, node_list, OID_AUTO, "tx_pktsz_avg", CTLFLAG_RD, 2656 &priv->pstats.tx_pktsz_avg, "TX average packet size"); 2657 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "inflight_avg", CTLFLAG_RD, 2658 &priv->pstats.inflight_avg, "TX average packets in-flight"); 2659 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_coal_avg", CTLFLAG_RD, 2660 &priv->pstats.tx_coal_avg, "TX average coalesced completions"); 2661 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_coal_avg", CTLFLAG_RD, 2662 &priv->pstats.rx_coal_avg, "RX average coalesced completions"); 2663#endif 2664 2665 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tso_packets", CTLFLAG_RD, 2666 &priv->port_stats.tso_packets, "TSO packets sent"); 2667 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "queue_stopped", CTLFLAG_RD, 2668 &priv->port_stats.queue_stopped, "Queue full"); 2669 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "wake_queue", CTLFLAG_RD, 2670 &priv->port_stats.wake_queue, "Queue resumed after full"); 2671 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_timeout", CTLFLAG_RD, 2672 &priv->port_stats.tx_timeout, "Transmit timeouts"); 2673 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_oversized_packets", CTLFLAG_RD, 2674 &priv->port_stats.oversized_packets, "TX oversized packets, m_defrag failed"); 2675 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_alloc_failed", CTLFLAG_RD, 2676 &priv->port_stats.rx_alloc_failed, "RX failed to allocate mbuf"); 2677 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_chksum_good", CTLFLAG_RD, 2678 
&priv->port_stats.rx_chksum_good, "RX checksum offload success"); 2679 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_chksum_none", CTLFLAG_RD, 2680 &priv->port_stats.rx_chksum_none, "RX without checksum offload"); 2681 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_chksum_offload", 2682 CTLFLAG_RD, &priv->port_stats.tx_chksum_offload, 2683 "TX checksum offloads"); 2684 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "defrag_attempts", CTLFLAG_RD, 2685 &priv->port_stats.defrag_attempts, "Oversized chains defragged"); 2686 2687 /* Could strdup the names and add in a loop. This is simpler. */ 2688 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_bytes", CTLFLAG_RD, 2689 &priv->pkstats.rx_bytes, "RX Bytes"); 2690 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_packets", CTLFLAG_RD, 2691 &priv->pkstats.rx_packets, "RX packets"); 2692 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_multicast_packets", CTLFLAG_RD, 2693 &priv->pkstats.rx_multicast_packets, "RX Multicast Packets"); 2694 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_broadcast_packets", CTLFLAG_RD, 2695 &priv->pkstats.rx_broadcast_packets, "RX Broadcast Packets"); 2696 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_errors", CTLFLAG_RD, 2697 &priv->pkstats.rx_errors, "RX Errors"); 2698 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_dropped", CTLFLAG_RD, 2699 &priv->pkstats.rx_dropped, "RX Dropped"); 2700 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_length_errors", CTLFLAG_RD, 2701 &priv->pkstats.rx_length_errors, "RX Length Errors"); 2702 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_over_errors", CTLFLAG_RD, 2703 &priv->pkstats.rx_over_errors, "RX Over Errors"); 2704 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_crc_errors", CTLFLAG_RD, 2705 &priv->pkstats.rx_crc_errors, "RX CRC Errors"); 2706 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_jabbers", CTLFLAG_RD, 2707 &priv->pkstats.rx_jabbers, "RX Jabbers"); 2708 2709 2710 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_in_range_length_error", CTLFLAG_RD, 2711 &priv->pkstats.rx_in_range_length_error, "RX IN_Range Length Error"); 2712 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_out_range_length_error", 2713 CTLFLAG_RD, &priv->pkstats.rx_out_range_length_error, 2714 "RX Out Range Length Error"); 2715 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_lt_64_bytes_packets", CTLFLAG_RD, 2716 &priv->pkstats.rx_lt_64_bytes_packets, "RX Lt 64 Bytes Packets"); 2717 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_127_bytes_packets", CTLFLAG_RD, 2718 &priv->pkstats.rx_127_bytes_packets, "RX 127 bytes Packets"); 2719 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_255_bytes_packets", CTLFLAG_RD, 2720 &priv->pkstats.rx_255_bytes_packets, "RX 255 bytes Packets"); 2721 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_511_bytes_packets", CTLFLAG_RD, 2722 &priv->pkstats.rx_511_bytes_packets, "RX 511 bytes Packets"); 2723 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1023_bytes_packets", CTLFLAG_RD, 2724 &priv->pkstats.rx_1023_bytes_packets, "RX 1023 bytes Packets"); 2725 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1518_bytes_packets", CTLFLAG_RD, 2726 &priv->pkstats.rx_1518_bytes_packets, "RX 1518 bytes Packets"); 2727 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1522_bytes_packets", CTLFLAG_RD, 2728 &priv->pkstats.rx_1522_bytes_packets, "RX 1522 bytes Packets"); 2729 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1548_bytes_packets", CTLFLAG_RD, 2730 &priv->pkstats.rx_1548_bytes_packets, "RX 1548 bytes Packets"); 2731 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_gt_1548_bytes_packets", CTLFLAG_RD, 
2732 &priv->pkstats.rx_gt_1548_bytes_packets, 2733 "RX Greater Than 1548 bytes Packets"); 2734 2735 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_packets", CTLFLAG_RD, 2736 &priv->pkstats.tx_packets, "TX packets"); 2737 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_bytes", CTLFLAG_RD, 2738 &priv->pkstats.tx_bytes, "TX Bytes"); 2739 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_multicast_packets", CTLFLAG_RD, 2740 &priv->pkstats.tx_multicast_packets, "TX Multicast Packets"); 2741 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_broadcast_packets", CTLFLAG_RD, 2742 &priv->pkstats.tx_broadcast_packets, "TX Broadcast Packets"); 2743 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_errors", CTLFLAG_RD, 2744 &priv->pkstats.tx_errors, "TX Errors"); 2745 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_dropped", CTLFLAG_RD, 2746 &priv->pkstats.tx_dropped, "TX Dropped"); 2747 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_lt_64_bytes_packets", CTLFLAG_RD, 2748 &priv->pkstats.tx_lt_64_bytes_packets, "TX Less Than 64 Bytes Packets"); 2749 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_127_bytes_packets", CTLFLAG_RD, 2750 &priv->pkstats.tx_127_bytes_packets, "TX 127 Bytes Packets"); 2751 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_255_bytes_packets", CTLFLAG_RD, 2752 &priv->pkstats.tx_255_bytes_packets, "TX 255 Bytes Packets"); 2753 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_511_bytes_packets", CTLFLAG_RD, 2754 &priv->pkstats.tx_511_bytes_packets, "TX 511 Bytes Packets"); 2755 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1023_bytes_packets", CTLFLAG_RD, 2756 &priv->pkstats.tx_1023_bytes_packets, "TX 1023 Bytes Packets"); 2757 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1518_bytes_packets", CTLFLAG_RD, 2758 &priv->pkstats.tx_1518_bytes_packets, "TX 1518 Bytes Packets"); 2759 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1522_bytes_packets", CTLFLAG_RD, 2760 &priv->pkstats.tx_1522_bytes_packets, "TX 1522 Bytes Packets"); 2761 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1548_bytes_packets", CTLFLAG_RD, 2762 &priv->pkstats.tx_1548_bytes_packets, "TX 1548 Bytes Packets"); 2763 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_gt_1548_bytes_packets", CTLFLAG_RD, 2764 &priv->pkstats.tx_gt_1548_bytes_packets, 2765 "TX Greater Than 1548 Bytes Packets"); 2766 2767 2768 2769 for (i = 0; i < priv->tx_ring_num; i++) { 2770 tx_ring = priv->tx_ring[i]; 2771 snprintf(namebuf, sizeof(namebuf), "tx_ring%d", i); 2772 ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf, 2773 CTLFLAG_RD, NULL, "TX Ring"); 2774 ring_list = SYSCTL_CHILDREN(ring_node); 2775 SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "packets", 2776 CTLFLAG_RD, &tx_ring->packets, "TX packets"); 2777 SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "bytes", 2778 CTLFLAG_RD, &tx_ring->bytes, "TX bytes"); 2779 SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "tso_packets", 2780 CTLFLAG_RD, &tx_ring->tso_packets, "TSO packets"); 2781 SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "defrag_attempts", 2782 CTLFLAG_RD, &tx_ring->defrag_attempts, "Oversized chains defragged"); 2783 } 2784 2785 for (i = 0; i < priv->rx_ring_num; i++) { 2786 rx_ring = priv->rx_ring[i]; 2787 snprintf(namebuf, sizeof(namebuf), "rx_ring%d", i); 2788 ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf, 2789 CTLFLAG_RD, NULL, "RX Ring"); 2790 ring_list = SYSCTL_CHILDREN(ring_node); 2791 SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "packets", 2792 CTLFLAG_RD, &rx_ring->packets, "RX packets"); 2793 SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "bytes", 2794 CTLFLAG_RD,
&rx_ring->bytes, "RX bytes"); 2795 SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "error", 2796 CTLFLAG_RD, &rx_ring->errors, "RX soft errors"); 2797 } 2798} 2799
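/*
 * Example usage of the sysctl tree registered by mlx4_en_sysctl_conf() and
 * mlx4_en_sysctl_stat() above. This is only a sketch for the first port; the
 * "mlxen0" unit name and the set of OIDs actually present depend on the system:
 *
 *   sysctl hw.mlxen0.conf.rx_size=4096       # resize RX rings via mlx4_en_set_rx_ring_size()
 *   sysctl hw.mlxen0.conf.tx_ppp=0x0f        # set the TX per-priority pause bitmap
 *   sysctl hw.mlxen0.conf.eeprom_info=1      # dump the cable EEPROM to dmesg
 *   sysctl hw.mlxen0.stat.tx_ring0.packets   # read a per-ring TX packet counter
 */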