// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <net/dst_metadata.h>

#include "main.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "./cmsg.h"

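/* Return a pointer to the control message header at the start of the skb. */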
static struct nfp_flower_cmsg_hdr *
nfp_flower_cmsg_get_hdr(struct sk_buff *skb)
{
	return (struct nfp_flower_cmsg_hdr *)skb->data;
}

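/* Allocate an skb for a control message from the app's ctrl vNIC pool,
 * reserve @size bytes of payload on top of the flower cmsg header and
 * fill in the header fields. Returns NULL on allocation failure.
 */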
struct sk_buff *
nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
		      enum nfp_flower_cmsg_type_port type, gfp_t flag)
{
	struct nfp_flower_cmsg_hdr *ch;
	struct sk_buff *skb;

	size += NFP_FLOWER_CMSG_HLEN;

	skb = nfp_app_ctrl_msg_alloc(app, size, flag);
	if (!skb)
		return NULL;

	ch = nfp_flower_cmsg_get_hdr(skb);
	ch->pad = 0;
	ch->version = NFP_FLOWER_CMSG_VER1;
	ch->type = type;
	skb_put(skb, size);

	return skb;
}

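/* Start a MAC_REPR control message sized for @num_ports entries; the
 * individual entries are filled in with nfp_flower_cmsg_mac_repr_add().
 */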
struct sk_buff *
nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports)
{
	struct nfp_flower_cmsg_mac_repr *msg;
	struct sk_buff *skb;

	skb = nfp_flower_cmsg_alloc(app, struct_size(msg, ports, num_ports),
				    NFP_FLOWER_CMSG_TYPE_MAC_REPR, GFP_KERNEL);
	if (!skb)
		return NULL;

	msg = nfp_flower_cmsg_get_data(skb);
	memset(msg->reserved, 0, sizeof(msg->reserved));
	msg->num_ports = num_ports;

	return skb;
}

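/* Fill in one MAC representor entry, mapping entry @idx to its NBI,
 * NBI port and physical port numbers.
 */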
void
nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx,
			     unsigned int nbi, unsigned int nbi_port,
			     unsigned int phys_port)
{
	struct nfp_flower_cmsg_mac_repr *msg;

	msg = nfp_flower_cmsg_get_data(skb);
	msg->ports[idx].idx = idx;
	msg->ports[idx].info = nbi & NFP_FLOWER_CMSG_MAC_REPR_NBI;
	msg->ports[idx].nbi_port = nbi_port;
	msg->ports[idx].phys_port = phys_port;
}

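/* Send a PORT_MOD message reporting a representor's carrier state and
 * MTU to the firmware. With @mtu_only set the message is flagged as an
 * MTU change only, which the firmware echoes back as an ack (see
 * nfp_flower_process_mtu_ack()).
 */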
int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok,
			    unsigned int mtu, bool mtu_only)
{
	struct nfp_flower_cmsg_portmod *msg;
	struct sk_buff *skb;

	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
				    NFP_FLOWER_CMSG_TYPE_PORT_MOD, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
	msg->reserved = 0;
	msg->info = carrier_ok;

	if (mtu_only)
		msg->info |= NFP_FLOWER_CMSG_PORTMOD_MTU_CHANGE_ONLY;

	msg->mtu = cpu_to_be16(mtu);

	nfp_ctrl_tx(repr->app->ctrl, skb);

	return 0;
}

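/* Send a PORT_REIFY message telling the firmware whether a representor
 * for the port exists. The firmware's reply is counted in
 * nfp_flower_cmsg_portreify_rx().
 */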
int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists)
{
	struct nfp_flower_cmsg_portreify *msg;
	struct sk_buff *skb;

	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
				    NFP_FLOWER_CMSG_TYPE_PORT_REIFY,
				    GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
	msg->reserved = 0;
	msg->info = cpu_to_be16(exists);

	nfp_ctrl_tx(repr->app->ctrl, skb);

	return 0;
}

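/* Check whether a PORT_MOD message acks an MTU change we requested.
 * Returns true and wakes the waiter if the port number and MTU match
 * the outstanding request, false otherwise.
 */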
static bool
nfp_flower_process_mtu_ack(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_flower_cmsg_portmod *msg;

	msg = nfp_flower_cmsg_get_data(skb);

	if (!(msg->info & NFP_FLOWER_CMSG_PORTMOD_MTU_CHANGE_ONLY))
		return false;

	spin_lock_bh(&app_priv->mtu_conf.lock);
	if (!app_priv->mtu_conf.requested_val ||
	    app_priv->mtu_conf.portnum != be32_to_cpu(msg->portnum) ||
	    be16_to_cpu(msg->mtu) != app_priv->mtu_conf.requested_val) {
		/* Not an ack for requested MTU change. */
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		return false;
	}

	app_priv->mtu_conf.ack = true;
	app_priv->mtu_conf.requested_val = 0;
	wake_up(&app_priv->mtu_conf.wait_q);
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	return true;
}

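/* Handle a PORT_MOD message from the firmware: update the netdev's
 * carrier state and, when link is up, its MTU.
 */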
static void
nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_portmod *msg;
	struct net_device *netdev;
	bool link;

	msg = nfp_flower_cmsg_get_data(skb);
	link = msg->info & NFP_FLOWER_CMSG_PORTMOD_INFO_LINK;

	rtnl_lock();
	rcu_read_lock();
	netdev = nfp_app_dev_get(app, be32_to_cpu(msg->portnum), NULL);
	rcu_read_unlock();
	if (!netdev) {
		nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
				     be32_to_cpu(msg->portnum));
		rtnl_unlock();
		return;
	}

	if (link) {
		u16 mtu = be16_to_cpu(msg->mtu);

		netif_carrier_on(netdev);

		/* An MTU of 0 from the firmware should be ignored */
		if (mtu)
			dev_set_mtu(netdev, mtu);
	} else {
		netif_carrier_off(netdev);
	}
	rtnl_unlock();
}

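/* Count a PORT_REIFY reply from the firmware and wake anyone waiting
 * for all representor replies to arrive.
 */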
static void
nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_cmsg_portreify *msg;
	bool exists;

	msg = nfp_flower_cmsg_get_data(skb);

	rcu_read_lock();
	exists = !!nfp_app_dev_get(app, be32_to_cpu(msg->portnum), NULL);
	rcu_read_unlock();
	if (!exists) {
		nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
				     be32_to_cpu(msg->portnum));
		return;
	}

	atomic_inc(&priv->reify_replies);
	wake_up(&priv->reify_wait_queue);
}

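/* Handle a merge hint from the firmware: look up the two sub-flows by
 * their host context IDs and try to offload them as one merged flow.
 */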
static void
nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
{
	unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
	struct nfp_flower_cmsg_merge_hint *msg;
	struct nfp_fl_payload *sub_flows[2];
	struct nfp_flower_priv *priv;
	int err, i, flow_cnt;

	msg = nfp_flower_cmsg_get_data(skb);
	/* msg->count starts at 0 and always assumes at least 1 entry. */
	flow_cnt = msg->count + 1;

	if (msg_len < struct_size(msg, flow, flow_cnt)) {
		nfp_flower_cmsg_warn(app, "Merge hint ctrl msg too short - %d bytes but expect %zd\n",
				     msg_len, struct_size(msg, flow, flow_cnt));
		return;
	}

	if (flow_cnt != 2) {
		nfp_flower_cmsg_warn(app, "Merge hint contains %d flows - two are expected\n",
				     flow_cnt);
		return;
	}

	priv = app->priv;
	mutex_lock(&priv->nfp_fl_lock);
	for (i = 0; i < flow_cnt; i++) {
		u32 ctx = be32_to_cpu(msg->flow[i].host_ctx);

		sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx);
		if (!sub_flows[i]) {
			nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n");
			goto err_mutex_unlock;
		}
	}

	err = nfp_flower_merge_offloaded_flows(app, sub_flows[0], sub_flows[1]);
	/* Only warn on memory fail. Hint veto will not break functionality. */
	if (err == -ENOMEM)
		nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n");

err_mutex_unlock:
	mutex_unlock(&priv->nfp_fl_lock);
}

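/* Dispatch one queued control message to its type-specific handler.
 * Messages the handler keeps a reference to (LAG messages stored for
 * reprocessing) must not be freed here; everything else is consumed
 * or, on error, dropped.
 */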
static void
nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_flower_cmsg_hdr *cmsg_hdr;
	enum nfp_flower_cmsg_type_port type;
	bool skb_stored = false;

	cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);

	type = cmsg_hdr->type;
	switch (type) {
	case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
		nfp_flower_cmsg_portmod_rx(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_MERGE_HINT:
		if (app_priv->flower_en_feats & NFP_FL_ENABLE_FLOW_MERGE) {
			nfp_flower_cmsg_merge_hint_rx(app, skb);
			break;
		}
		goto err_default;
	case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
		nfp_tunnel_request_route_v4(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_NO_NEIGH_V6:
		nfp_tunnel_request_route_v6(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
		nfp_tunnel_keep_alive(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS_V6:
		nfp_tunnel_keep_alive_v6(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_QOS_STATS:
		nfp_flower_stats_rlim_reply(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG:
		if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
			skb_stored = nfp_flower_lag_unprocessed_msg(app, skb);
			break;
		}
		fallthrough;
	default:
err_default:
		nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
				     type);
		goto out;
	}

	if (!skb_stored)
		dev_consume_skb_any(skb);
	return;
out:
	dev_kfree_skb_any(skb);
}

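/* Work queue handler: splice the high and low priority queues into a
 * single list, high priority first, and process each message in order.
 */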
void nfp_flower_cmsg_process_rx(struct work_struct *work)
{
	struct sk_buff_head cmsg_joined;
	struct nfp_flower_priv *priv;
	struct sk_buff *skb;

	priv = container_of(work, struct nfp_flower_priv, cmsg_work);
	skb_queue_head_init(&cmsg_joined);

	spin_lock_bh(&priv->cmsg_skbs_high.lock);
	skb_queue_splice_tail_init(&priv->cmsg_skbs_high, &cmsg_joined);
	spin_unlock_bh(&priv->cmsg_skbs_high.lock);

	spin_lock_bh(&priv->cmsg_skbs_low.lock);
	skb_queue_splice_tail_init(&priv->cmsg_skbs_low, &cmsg_joined);
	spin_unlock_bh(&priv->cmsg_skbs_low.lock);

	while ((skb = __skb_dequeue(&cmsg_joined)))
		nfp_flower_cmsg_process_one_rx(priv->app, skb);
}

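/* Queue a control message for deferred processing, PORT_MOD on the
 * high priority queue and everything else on the low priority one.
 * Messages are dropped once a queue holds NFP_FLOWER_WORKQ_MAX_SKBS
 * entries.
 */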
static void
nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
{
	struct nfp_flower_priv *priv = app->priv;
	struct sk_buff_head *skb_head;

	if (type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
		skb_head = &priv->cmsg_skbs_high;
	else
		skb_head = &priv->cmsg_skbs_low;

	if (skb_queue_len(skb_head) >= NFP_FLOWER_WORKQ_MAX_SKBS) {
		nfp_flower_cmsg_warn(app, "Dropping queued control messages\n");
		dev_kfree_skb_any(skb);
		return;
	}

	skb_queue_tail(skb_head, skb);
	schedule_work(&priv->cmsg_work);
}

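/* Entry point for control messages received from the firmware. Flow
 * stats, MTU acks, tunnel neighbour acks and REIFY replies are handled
 * inline; everything else is deferred to the cmsg work queue.
 */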
void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_hdr *cmsg_hdr;

	cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);

	if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) {
		nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n",
				     cmsg_hdr->version);
		dev_kfree_skb_any(skb);
		return;
	}

	if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_FLOW_STATS) {
		/* We need to deal with stats updates from HW asap */
		nfp_flower_rx_flow_stats(app, skb);
		dev_consume_skb_any(skb);
	} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_MOD &&
		   nfp_flower_process_mtu_ack(app, skb)) {
		/* Handle MTU acks outside wq to prevent RTNL conflict. */
		dev_consume_skb_any(skb);
	} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH ||
		   cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6) {
		/* Acks from the NFP that the route is added - ignore. */
		dev_consume_skb_any(skb);
	} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY) {
		/* Handle REIFY acks outside wq to prevent RTNL conflict. */
		nfp_flower_cmsg_portreify_rx(app, skb);
		dev_consume_skb_any(skb);
	} else {
		nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
	}
}
