// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"

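/* Report whether a TX ring is currently attached to the notify block
 * that services this queue.
 */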
bool gve_tx_was_added_to_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];

	return block->tx != NULL;
}

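/* Detach the TX ring from the notify block that services this queue. */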
void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];

	block->tx = NULL;
}

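/* Attach the TX ring to its notify block and set an XPS mapping so that
 * transmits from the CPU paired with this block prefer this queue.
 */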
void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
{
	unsigned int active_cpus = min_t(int, priv->num_ntfy_blks / 2,
					 num_online_cpus());
	int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
	struct gve_tx_ring *tx = &priv->tx[queue_idx];

	block->tx = tx;
	tx->ntfy_id = ntfy_idx;
	netif_set_xps_queue(priv->dev, get_cpu_mask(ntfy_idx % active_cpus),
			    queue_idx);
}

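/* Report whether an RX ring is currently attached to the notify block
 * that services this queue.
 */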
bool gve_rx_was_added_to_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];

	return block->rx != NULL;
}

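/* Detach the RX ring from the notify block that services this queue. */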
void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];

	block->rx = NULL;
}

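/* Attach the RX ring to its notify block and record the block's index
 * in the ring.
 */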
void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
{
	u32 ntfy_idx = gve_rx_idx_to_ntfy(priv, queue_idx);
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
	struct gve_rx_ring *rx = &priv->rx[queue_idx];

	block->rx = rx;
	rx->ntfy_id = ntfy_idx;
}

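/* Allocate an skb from the NAPI skb cache and copy @len bytes of packet
 * data into its linear area.
 */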
struct sk_buff *gve_rx_copy_data(struct net_device *dev, struct napi_struct *napi,
				 u8 *data, u16 len)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(napi, len);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, len);
	skb_copy_to_linear_data_offset(skb, 0, data, len);
	skb->protocol = eth_type_trans(skb, dev);

	return skb;
}

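/* Copy the received frame described by @page_info, skipping any packet
 * pad, into a freshly allocated skb.
 */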
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
			    struct gve_rx_slot_page_info *page_info, u16 len)
{
	void *va = page_info->page_address + page_info->page_offset +
		page_info->pad;

	return gve_rx_copy_data(dev, napi, va, len);
}

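/* Consume one reference from the page's bias count. Once the bias runs
 * out, replenish the real page refcount in a single batched
 * page_ref_add() rather than taking a reference per packet.
 */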
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
{
	page_info->pagecnt_bias--;
	if (page_info->pagecnt_bias == 0) {
		int pagecount = page_count(page_info->page);

		/* If we have run out of bias - set it back up to INT_MAX
		 * minus the existing refs.
		 */
		page_info->pagecnt_bias = INT_MAX - pagecount;

		/* Set pagecount back up to max. */
		page_ref_add(page_info->page, INT_MAX - pagecount);
	}
}

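/* Register the NAPI instance that polls this notify block. */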
void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
		  int (*gve_poll)(struct napi_struct *, int))
{
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

	netif_napi_add(priv->dev, &block->napi, gve_poll);
}

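/* Unregister the NAPI instance that polls this notify block. */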
void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
{
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

	netif_napi_del(&block->napi);
}