Lines Matching refs:sb_dev (all hits below are in net/core/dev.c)

2606 		dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
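
Line 2606 sits in the XPS setup path (__netif_set_xps_queue): it uses the GNU C
"? :" extension so that, when a tx queue has a subordinate device bound, the
XPS state is attributed to that device rather than to the lower device. A
minimal sketch of the pattern; the helper name is illustrative, not from the
source:

	/* Resolve the device whose XPS/tc state a given tx queue should
	 * use: the bound subordinate device if any, else the device itself.
	 */
	static struct net_device *xps_effective_dev(struct net_device *dev,
						    unsigned int index)
	{
		return netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
	}
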
2810 		if (txq->sb_dev)
2811 			netdev_unbind_sb_channel(dev, txq->sb_dev);
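
Lines 2810-2811 are the body of a reverse walk over dev->_tx[]. For context, a
hedged reconstruction of the enclosing helper, netdev_unbind_all_sb_channels():

	static void netdev_unbind_all_sb_channels(struct net_device *dev)
	{
		struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];

		/* Walk the tx queues back to front and unbind any
		 * subordinate channel still attached.
		 */
		while (txq-- != &dev->_tx[0]) {
			if (txq->sb_dev)
				netdev_unbind_sb_channel(dev, txq->sb_dev);
		}
	}
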
2859 			      struct net_device *sb_dev)
2864 	netif_reset_xps_queues_gt(sb_dev, 0);
2866 	memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2867 	memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2870 		if (txq->sb_dev == sb_dev)
2871 			txq->sb_dev = NULL;
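
netdev_unbind_sb_channel() (lines 2859-2871) drops the subordinate device's XPS
maps, wipes its tc_to_txq and prio_tc_map tables, and clears every back-pointer
left on the lower device's tx queues. A sketch of a driver teardown path that
might call it; the my_* names are hypothetical, and it assumes the driver handed
out the macvlan itself as the ndo_dfwd_add_station() return value:

	static void my_dfwd_del_station(struct net_device *lower, void *priv)
	{
		struct net_device *vdev = priv;	/* the offloaded macvlan */

		/* Undo the queue bindings made at add-station time ... */
		netdev_unbind_sb_channel(lower, vdev);
		/* ... then return the reserved hardware queues to the pool. */
	}
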
2877 				 struct net_device *sb_dev,
2880 	/* Make certain the sb_dev and dev are already configured */
2881 	if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2889 	sb_dev->tc_to_txq[tc].count = count;
2890 	sb_dev->tc_to_txq[tc].offset = offset;
2896 		netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
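
netdev_bind_sb_channel_queue() (lines 2877-2896) records on sb_dev that its
traffic class tc is backed by the queue slice [offset, offset + count) of the
lower device, then points each queue in that slice back at sb_dev. The
sb_dev->num_tc >= 0 guard on line 2881 works because a subordinate device
stores its channel number as a negative num_tc (set via
netdev_set_sb_channel()), so a non-negative value means the device was never
marked subordinate. A hedged driver-side sketch; queue numbers are
illustrative:

	static int my_dfwd_bind_queues(struct net_device *lower,
				       struct net_device *vdev)
	{
		int err;

		/* Mark vdev as subordinate channel 1 (stores num_tc = -1) */
		err = netdev_set_sb_channel(vdev, 1);
		if (err)
			return err;

		/* Back vdev's tc 0 with queues [32, 40) of the lower device */
		return netdev_bind_sb_channel_queue(lower, vdev, 0, 8, 32);
	}
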
3240 		const struct net_device *sb_dev,
3250 		qoffset = sb_dev->tc_to_txq[tc].offset;
3251 		qcount = sb_dev->tc_to_txq[tc].count;
3254 					     sb_dev->name, qoffset, tc);
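
In skb_tx_hash() (lines 3240-3254) hashing is confined to the queue window that
sb_dev registered for the skb's traffic class; when no subordinate device was
passed, sb_dev is the device itself and the window covers its own queues. The
ratelimited warning on line 3254 fires when a tc maps to an empty (qcount == 0)
slice. The core arithmetic, as a sketch (the rx-queue-recorded shortcut that
skb_tx_hash() also handles is omitted):

	/* Map a flow hash into the [qoffset, qoffset + qcount) window. */
	static u16 tc_scoped_hash(struct sk_buff *skb, u16 qoffset, u16 qcount)
	{
		return (u16)reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
	}
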
4169 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
4184 	dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]);
4195 		dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]);
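
Lines 4184 and 4195 give the XPS lookup order inside get_xps_queue():
receive-queue based maps (XPS_RXQS) are consulted first, CPU based maps
(XPS_CPUS) only as the fallback, and both are read from sb_dev, so a
subordinate device carries its own XPS state. A condensed sketch of that
control flow (static-key fast paths and bounds checks omitted;
__get_xps_queue_idx() is the file-local helper that does the per-tc lookup):

	static int xps_lookup_sketch(struct net_device *dev,
				     struct net_device *sb_dev,
				     struct sk_buff *skb)
	{
		struct xps_dev_maps *dev_maps;
		int queue_index = -1;

		rcu_read_lock();
		/* rx-queue based maps first ... */
		dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]);
		if (dev_maps && sk_rx_queue_get(skb->sk) >= 0)
			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
							  sk_rx_queue_get(skb->sk));
		/* ... CPU based maps as the fallback */
		if (queue_index < 0) {
			dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]);
			if (dev_maps)
				queue_index = __get_xps_queue_idx(dev, skb,
								  dev_maps,
								  skb->sender_cpu - 1);
		}
		rcu_read_unlock();
		return queue_index;
	}
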
4212 			    struct net_device *sb_dev)
4219 			      struct net_device *sb_dev)
4226 		   struct net_device *sb_dev)
4231 	sb_dev = sb_dev ? : dev;
4235 		int new_index = get_xps_queue(dev, sb_dev, skb);
4238 			new_index = skb_tx_hash(dev, sb_dev, skb);
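
netdev_pick_tx() (lines 4226-4238) tries three sources in order: the queue
index cached on the socket, then XPS, then the flow hash. Line 4231 is the key
fallback: with no subordinate device, dev's own maps and tc tables are used. A
condensed sketch (the sk_tx_queue_set() cache update is trimmed):

	static u16 pick_tx_sketch(struct net_device *dev, struct sk_buff *skb,
				  struct net_device *sb_dev)
	{
		int queue_index = sk_tx_queue_get(skb->sk);

		sb_dev = sb_dev ? : dev;	/* no subordinate: use dev itself */

		if (queue_index < 0 || skb->ooo_okay ||
		    queue_index >= dev->real_num_tx_queues) {
			queue_index = get_xps_queue(dev, sb_dev, skb);
			if (queue_index < 0)
				queue_index = skb_tx_hash(dev, sb_dev, skb);
		}
		return queue_index;
	}
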
4254 					 struct net_device *sb_dev)
4269 			queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
4271 			queue_index = netdev_pick_tx(dev, skb, sb_dev);
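
netdev_core_pick_tx() (lines 4254-4271) defers to the driver's
ndo_select_queue() when one is provided, otherwise to netdev_pick_tx(), and
sb_dev is threaded through either way. Since netdev_pick_tx() is exported, a
driver hook can special-case some traffic and delegate the rest; a hypothetical
sketch (my_is_mgmt_frame() is a made-up classifier):

	static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
				   struct net_device *sb_dev)
	{
		/* Steer management traffic to a dedicated last queue ... */
		if (my_is_mgmt_frame(skb))
			return dev->real_num_tx_queues - 1;

		/* ... and use the stack's default policy for everything else. */
		return netdev_pick_tx(dev, skb, sb_dev);
	}
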
4283  * @sb_dev: subordinate device used for L2 forwarding offload
4301 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
4353 	txq = netdev_core_pick_tx(dev, skb, sb_dev);
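
__dev_queue_xmit() (line 4301) is normally reached through inline wrappers:
dev_queue_xmit(skb) passes sb_dev == NULL, while the L2 forwarding offload path
uses dev_queue_xmit_accel(skb, sb_dev). A sketch of the accelerated pattern in
the style of macvlan's transmit path; my_get_lower() is hypothetical:

	static netdev_tx_t my_fwd_xmit(struct sk_buff *skb,
				       struct net_device *vdev)
	{
		skb->dev = my_get_lower(vdev);	/* hypothetical lower lookup */

		/* Transmit on the lower device, but pass vdev as sb_dev so
		 * netdev_core_pick_tx() (line 4353) stays inside vdev's
		 * bound queue slice.
		 */
		dev_queue_xmit_accel(skb, vdev);
		return NETDEV_TX_OK;
	}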