Lines matching defs:net_dev in drivers/net/ethernet/freescale/dpaa/dpaa_eth.c (numbers are source-file line numbers)

203 static int dpaa_netdev_init(struct net_device *net_dev,
207 struct dpaa_priv *priv = netdev_priv(net_dev);
208 struct device *dev = net_dev->dev.parent;
219 percpu_priv->net_dev = net_dev;
222 net_dev->netdev_ops = dpaa_ops;
225 net_dev->mem_start = (unsigned long)priv->mac_dev->res->start;
226 net_dev->mem_end = (unsigned long)priv->mac_dev->res->end;
228 net_dev->min_mtu = ETH_MIN_MTU;
229 net_dev->max_mtu = dpaa_get_max_mtu();
231 net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
234 net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
238 net_dev->features |= NETIF_F_GSO;
239 net_dev->features |= NETIF_F_RXCSUM;
241 net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
243 net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
245 net_dev->features |= net_dev->hw_features;
246 net_dev->vlan_features = net_dev->features;
248 net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
253 memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
254 eth_hw_addr_set(net_dev, mac_addr);
256 eth_hw_addr_random(net_dev);
258 (const enet_addr_t *)net_dev->dev_addr);
264 net_dev->dev_addr);
267 net_dev->ethtool_ops = &dpaa_ethtool_ops;
269 net_dev->needed_headroom = priv->tx_headroom;
270 net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
273 mac_dev->phylink_config.dev = &net_dev->dev;
287 netif_carrier_off(net_dev);
289 err = register_netdev(net_dev);
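
The matches above (203-289) all fall inside dpaa_netdev_init(), which fills in the net_device before registering it. For orientation, a minimal sketch of that flow, assuming the elided lines only add further feature flags; example_netdev_init() and its parameters are placeholders, not symbols from the driver.

#include <linux/etherdevice.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>

static int example_netdev_init(struct net_device *net_dev,
			       const struct net_device_ops *ops,
			       const u8 *mac_addr, u16 tx_timeout)
{
	net_dev->netdev_ops = ops;
	net_dev->min_mtu = ETH_MIN_MTU;

	/* offloads the hardware supports; the stack may later toggle them */
	net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
	net_dev->features |= net_dev->hw_features | NETIF_F_RXCSUM;

	net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;

	if (is_valid_ether_addr(mac_addr))
		eth_hw_addr_set(net_dev, mac_addr);	/* use the firmware-provided MAC */
	else
		eth_hw_addr_random(net_dev);		/* otherwise pick a random one */

	net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);

	/* carrier state is reported by the link layer once the port is up */
	netif_carrier_off(net_dev);
	return register_netdev(net_dev);
}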
299 static int dpaa_stop(struct net_device *net_dev)
306 priv = netdev_priv(net_dev);
309 netif_tx_stop_all_queues(net_dev);
325 net_dev->phydev = NULL;
332 static void dpaa_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
337 priv = netdev_priv(net_dev);
340 netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
341 jiffies_to_msecs(jiffies - dev_trans_start(net_dev)));
349 static void dpaa_get_stats64(struct net_device *net_dev,
353 struct dpaa_priv *priv = netdev_priv(net_dev);
370 static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
373 struct dpaa_priv *priv = netdev_priv(net_dev);
388 netdev_reset_tc(net_dev);
393 netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
398 netdev_set_num_tc(net_dev, num_tc);
401 netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
406 netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
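
The dpaa_setup_tc() matches (370-406) follow the standard mqprio queue-mapping pattern: reset any previous mapping, validate the class count, map each traffic class onto a contiguous block of TX queues, then resize the real TX queue count. A hedged sketch, with EXAMPLE_TC_TXQ_NUM and EXAMPLE_TC_NUM standing in for the driver's own constants:

#include <linux/netdevice.h>

#define EXAMPLE_TC_TXQ_NUM	8	/* placeholder: TX queues per traffic class */
#define EXAMPLE_TC_NUM		4	/* placeholder: maximum traffic classes */

static int example_setup_tc(struct net_device *net_dev, u8 num_tc)
{
	int i;

	netdev_reset_tc(net_dev);

	if (!num_tc)
		goto out;

	if (num_tc > EXAMPLE_TC_NUM) {
		netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
			   EXAMPLE_TC_NUM);
		return -EINVAL;
	}

	netdev_set_num_tc(net_dev, num_tc);

	/* class i owns queues [i * EXAMPLE_TC_TXQ_NUM, (i + 1) * EXAMPLE_TC_TXQ_NUM) */
	for (i = 0; i < num_tc; i++)
		netdev_set_tc_queue(net_dev, i, EXAMPLE_TC_TXQ_NUM,
				    i * EXAMPLE_TC_TXQ_NUM);

out:
	/* with no classes configured, keep one class worth of queues active */
	return netif_set_real_num_tx_queues(net_dev,
					    (num_tc ? num_tc : 1) * EXAMPLE_TC_TXQ_NUM);
}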
431 static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
438 priv = netdev_priv(net_dev);
440 memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN);
442 err = eth_mac_addr(net_dev, addr);
444 netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
451 (const enet_addr_t *)net_dev->dev_addr);
453 netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
456 eth_mac_addr(net_dev, &old_addr);
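
dpaa_set_mac_address() (431-456) uses the save/try/revert pattern: remember the current address, let eth_mac_addr() validate and install the new one, and roll back if programming it into the MAC fails. A sketch under that assumption; change_addr_hw() is a hypothetical stand-in for the mac_dev change_addr callback:

#include <linux/etherdevice.h>
#include <linux/socket.h>
#include <linux/string.h>

static int change_addr_hw(struct net_device *net_dev, const u8 *addr)
{
	/* placeholder: program the address into the MAC hardware here */
	return 0;
}

static int example_set_mac_address(struct net_device *net_dev, void *addr)
{
	struct sockaddr old_addr;
	int err;

	memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN);

	err = eth_mac_addr(net_dev, addr);	/* validates and updates dev_addr */
	if (err < 0) {
		netdev_err(net_dev, "eth_mac_addr() = %d\n", err);
		return err;
	}

	err = change_addr_hw(net_dev, net_dev->dev_addr);
	if (err < 0) {
		netdev_err(net_dev, "change_addr_hw() = %d\n", err);
		/* restore the old address so software and hardware stay in sync */
		eth_mac_addr(net_dev, &old_addr);
	}

	return err;
}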
464 static void dpaa_set_rx_mode(struct net_device *net_dev)
469 priv = netdev_priv(net_dev);
471 if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
476 netif_err(priv, drv, net_dev,
481 if (!!(net_dev->flags & IFF_ALLMULTI) != priv->mac_dev->allmulti) {
486 netif_err(priv, drv, net_dev,
491 err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
493 netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
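
dpaa_set_rx_mode() (464-493) reconciles the net_device flags with the MAC's cached state: promiscuous and all-multicast modes are only toggled when the requested and current states differ, and the multicast filter is pushed last. A sketch of that reconciliation, with a hypothetical example_rx_priv cache and hw_set_promisc()/hw_set_allmulti()/hw_set_multi() hooks:

#include <linux/netdevice.h>

struct example_rx_priv {
	bool promisc;
	bool allmulti;
};

static int hw_set_promisc(struct net_device *net_dev, bool enable)  { return 0; }
static int hw_set_allmulti(struct net_device *net_dev, bool enable) { return 0; }
static int hw_set_multi(struct net_device *net_dev)                 { return 0; }

static void example_set_rx_mode(struct net_device *net_dev)
{
	struct example_rx_priv *priv = netdev_priv(net_dev);
	bool want;
	int err;

	want = !!(net_dev->flags & IFF_PROMISC);
	if (want != priv->promisc) {		/* only touch the MAC on a real change */
		err = hw_set_promisc(net_dev, want);
		if (err < 0)
			netdev_err(net_dev, "hw_set_promisc() = %d\n", err);
		else
			priv->promisc = want;
	}

	want = !!(net_dev->flags & IFF_ALLMULTI);
	if (want != priv->allmulti) {
		err = hw_set_allmulti(net_dev, want);
		if (err < 0)
			netdev_err(net_dev, "hw_set_allmulti() = %d\n", err);
		else
			priv->allmulti = want;
	}

	err = hw_set_multi(net_dev);		/* rewrite the multicast filter */
	if (err < 0)
		netdev_err(net_dev, "hw_set_multi() = %d\n", err);
}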
821 netif_tx_stop_all_queues(priv->net_dev);
826 netif_tx_wake_all_queues(priv->net_dev);
884 struct net_device *net_dev = to_net_dev(mac_dev->phylink_config.dev);
885 struct dpaa_priv *priv = netdev_priv(net_dev);
904 netdev_err(net_dev, "could not update speed: %d\n", err);
912 fq->net_dev = priv->net_dev;
924 fq->net_dev = priv->net_dev;
947 dev_err(priv->net_dev->dev.parent,
984 dev_warn(priv->net_dev->dev.parent,
1024 priv = netdev_priv(dpaa_fq->net_dev);
1025 dev = dpaa_fq->net_dev->dev.parent;
1156 err = xdp_rxq_info_reg(&dpaa_fq->xdp_rxq, dpaa_fq->net_dev,
1185 priv = netdev_priv(dpaa_fq->net_dev);
1383 static void dpaa_fd_release(const struct net_device *net_dev,
1412 netdev_err(net_dev, "DMA mapping failed\n");
1509 netif_alert(priv, tx_err, priv->net_dev,
1526 netif_alert(priv, tx_err, priv->net_dev,
1552 struct net_device *net_dev = dpaa_bp->priv->net_dev;
1561 netdev_err(net_dev, "dev_alloc_pages() failed\n");
1569 netdev_err(net_dev, "DMA map failed\n");
1672 struct device *dev = priv->net_dev->dev.parent;
1748 if ((priv->net_dev->features & NETIF_F_RXCSUM) &&
1930 struct net_device *net_dev = priv->net_dev;
1956 netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
1970 netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
1983 struct net_device *net_dev = priv->net_dev;
1996 netdev_err(net_dev, "dev_alloc_pages() failed\n");
2010 netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
2024 netdev_err(priv->net_dev, "DMA mapping failed\n");
2038 netdev_err(priv->net_dev, "DMA mapping failed\n");
2064 netdev_err(priv->net_dev, "DMA mapping failed\n");
2100 trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd);
2120 static int dpaa_a050385_wa_skb(struct net_device *net_dev, struct sk_buff **s)
2122 struct dpaa_priv *priv = netdev_priv(net_dev);
2155 new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 +
2277 dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
2289 priv = netdev_priv(net_dev);
2324 if (dpaa_a050385_wa_skb(net_dev, &skb))
2341 txq = netdev_get_tx_queue(net_dev, queue_mapping);
2362 static void dpaa_rx_error(struct net_device *net_dev,
2369 netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
2383 dpaa_fd_release(net_dev, fd);
2386 static void dpaa_tx_error(struct net_device *net_dev,
2395 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2427 static void dpaa_tx_conf(struct net_device *net_dev,
2437 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2473 struct net_device *net_dev;
2477 net_dev = dpaa_fq->net_dev;
2478 priv = netdev_priv(net_dev);
2489 dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2494 static int dpaa_xdp_xmit_frame(struct net_device *net_dev,
2497 struct dpaa_priv *priv = netdev_priv(net_dev);
2549 txq = netdev_get_tx_queue(net_dev, smp_processor_id());
2626 if (dpaa_xdp_xmit_frame(priv->net_dev, xdpf))
2635 err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
2637 trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
2642 bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
2645 trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
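
The block at 2626-2645 is the standard XDP verdict switch: XDP_PASS lets the frame continue to the stack, XDP_TX sends it back out the same device, XDP_REDIRECT hands it to xdp_do_redirect(), and anything else is logged and dropped. A condensed sketch; example_xmit_xdpf() is a placeholder for the driver's own transmit helper (dpaa_xdp_xmit_frame in the listing):

#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <net/xdp.h>

static int example_xmit_xdpf(struct net_device *net_dev, struct xdp_frame *xdpf)
{
	/* placeholder: enqueue xdpf on one of the device's TX queues */
	return 0;
}

static u32 example_run_xdp(struct net_device *net_dev, struct bpf_prog *xdp_prog,
			   struct xdp_buff *xdp)
{
	struct xdp_frame *xdpf;
	u32 xdp_act;

	xdp_act = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (xdp_act) {
	case XDP_PASS:
		break;					/* caller builds an skb from the buffer */
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf) || example_xmit_xdpf(net_dev, xdpf))
			xdp_act = XDP_DROP;		/* could not send it back out */
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(net_dev, xdp, xdp_prog)) {
			trace_xdp_exception(net_dev, xdp_prog, xdp_act);
			xdp_act = XDP_DROP;
		}
		break;
	default:
		bpf_warn_invalid_xdp_action(net_dev, xdp_prog, xdp_act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(net_dev, xdp_prog, xdp_act);
		fallthrough;
	case XDP_DROP:
		break;					/* caller recycles the buffer */
	}

	return xdp_act;
}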
2670 struct net_device *net_dev;
2686 net_dev = dpaa_fq->net_dev;
2687 priv = netdev_priv(net_dev);
2693 trace_dpaa_rx_fd(net_dev, fq, &dq->fd);
2708 dpaa_fd_release(net_dev, &dq->fd);
2714 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2718 dpaa_fd_release(net_dev, fd);
2747 if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
2790 skb->protocol = eth_type_trans(skb, net_dev);
2821 struct net_device *net_dev;
2824 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2825 priv = netdev_priv(net_dev);
2832 dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2843 struct net_device *net_dev;
2846 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2847 priv = netdev_priv(net_dev);
2850 trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd);
2857 dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2869 struct net_device *net_dev;
2872 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2873 priv = netdev_priv(net_dev);
2918 static int dpaa_open(struct net_device *net_dev)
2924 priv = netdev_priv(net_dev);
2941 netif_err(priv, ifup, net_dev, "mac_dev->enable() = %d\n", err);
2946 netif_tx_start_all_queues(net_dev);
2961 static int dpaa_eth_stop(struct net_device *net_dev)
2966 err = dpaa_stop(net_dev);
2968 priv = netdev_priv(net_dev);
2982 dev_warn(priv->net_dev->dev.parent,
2991 static int dpaa_change_mtu(struct net_device *net_dev, int new_mtu)
2993 struct dpaa_priv *priv = netdev_priv(net_dev);
2998 WRITE_ONCE(net_dev->mtu, new_mtu);
3002 static int dpaa_setup_xdp(struct net_device *net_dev, struct netdev_bpf *bpf)
3004 struct dpaa_priv *priv = netdev_priv(net_dev);
3010 if (bpf->prog && !xdp_validate_mtu(priv, net_dev->mtu)) {
3015 up = netif_running(net_dev);
3018 dpaa_eth_stop(net_dev);
3025 err = dpaa_open(net_dev);
3035 static int dpaa_xdp(struct net_device *net_dev, struct netdev_bpf *xdp)
3039 return dpaa_setup_xdp(net_dev, xdp);
3045 static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
3054 if (!netif_running(net_dev))
3059 if (dpaa_xdp_xmit_frame(net_dev, xdpf))
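
dpaa_setup_xdp() (3002-3025) attaches a program with the stop/swap/reopen pattern used by drivers that cannot change buffer layout on the fly: verify the current MTU leaves room for XDP headroom, stop the interface if it is running, exchange the program pointer, then reopen so the data path picks up the new program. A hedged sketch with placeholder example_open()/example_stop()/example_xdp_mtu_valid() helpers:

#include <linux/atomic.h>
#include <linux/bpf.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>

struct example_xdp_priv {
	struct bpf_prog *xdp_prog;
};

static int example_open(struct net_device *net_dev)  { return 0; }	/* placeholder */
static int example_stop(struct net_device *net_dev)  { return 0; }	/* placeholder */

static bool example_xdp_mtu_valid(struct net_device *net_dev, int mtu)
{
	return mtu <= 1500;	/* placeholder bound; the real limit depends on buffer layout */
}

static int example_setup_xdp(struct net_device *net_dev, struct netdev_bpf *bpf)
{
	struct example_xdp_priv *priv = netdev_priv(net_dev);
	struct bpf_prog *old_prog;
	bool up;
	int err;

	if (bpf->prog && !example_xdp_mtu_valid(net_dev, net_dev->mtu)) {
		NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
		return -EINVAL;
	}

	up = netif_running(net_dev);
	if (up)
		example_stop(net_dev);		/* quiesce RX/TX before the swap */

	old_prog = xchg(&priv->xdp_prog, bpf->prog);
	if (old_prog)
		bpf_prog_put(old_prog);		/* drop the reference to the old program */

	if (up) {
		err = example_open(net_dev);
		if (err) {
			NL_SET_ERR_MSG_MOD(bpf->extack, "device reconfiguration failed");
			return err;
		}
	}

	return 0;
}

The companion ndo_xdp_xmit matches (3045-3059) refuse to queue frames while the interface is down and otherwise reuse the same per-frame transmit helper.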
3106 static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
3109 struct dpaa_priv *priv = netdev_priv(net_dev);
3112 if (net_dev->phydev)
3118 return dpaa_ts_ioctl(net_dev, rq, cmd);
3140 static int dpaa_napi_add(struct net_device *net_dev)
3142 struct dpaa_priv *priv = netdev_priv(net_dev);
3149 netif_napi_add(net_dev, &percpu_priv->np.napi, dpaa_eth_poll);
3155 static void dpaa_napi_del(struct net_device *net_dev)
3157 struct dpaa_priv *priv = netdev_priv(net_dev);
3282 struct net_device *net_dev = NULL;
3327 net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
3328 if (!net_dev) {
3334 SET_NETDEV_DEV(net_dev, dev->parent);
3335 dev_set_drvdata(dev, net_dev);
3337 priv = netdev_priv(net_dev);
3338 priv->net_dev = net_dev;
3344 netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
3357 netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n");
3368 net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN);
3370 netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
3371 net_dev->mtu);
3465 netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
3468 err = dpaa_napi_add(net_dev);
3472 err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout);
3476 dpaa_eth_sysfs_init(&net_dev->dev);
3478 netif_info(priv, probe, net_dev, "Probed interface %s\n",
3479 net_dev->name);
3484 dpaa_napi_del(net_dev);
3496 free_netdev(net_dev);
3503 struct net_device *net_dev;
3509 net_dev = dev_get_drvdata(dev);
3511 priv = netdev_priv(net_dev);
3516 unregister_netdev(net_dev);
3529 dpaa_napi_del(net_dev);
3533 free_netdev(net_dev);
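
The probe and remove matches (3282-3533) round out the net_device lifecycle: allocate a multiqueue etherdev sized for the private area, parent it to the platform device, register it, and tear everything down in reverse order on remove. The real driver also registers one NAPI context per CPU (3140-3157); the sketch below keeps a single one and uses hypothetical example_* names throughout:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>

#define EXAMPLE_TXQ_NUM	8	/* placeholder for the driver's TX queue count */

struct example_priv {
	struct net_device *net_dev;
	struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	napi_complete_done(napi, 0);	/* placeholder: no work processed */
	return 0;
}

static int example_probe(struct platform_device *pdev)
{
	struct net_device *net_dev;
	struct example_priv *priv;
	int err;

	net_dev = alloc_etherdev_mq(sizeof(*priv), EXAMPLE_TXQ_NUM);
	if (!net_dev)
		return -ENOMEM;

	SET_NETDEV_DEV(net_dev, &pdev->dev);	/* parent device for sysfs */
	platform_set_drvdata(pdev, net_dev);

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;

	netif_napi_add(net_dev, &priv->napi, example_poll);

	err = register_netdev(net_dev);
	if (err) {
		netif_napi_del(&priv->napi);
		free_netdev(net_dev);
		return err;
	}

	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	struct net_device *net_dev = platform_get_drvdata(pdev);
	struct example_priv *priv = netdev_priv(net_dev);

	unregister_netdev(net_dev);	/* reverse of probe: unregister first */
	netif_napi_del(&priv->napi);
	free_netdev(net_dev);
}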