/*
 **************************************************************************
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 **************************************************************************
 */

#include "nss_tx_rx_common.h"

/*
 * Spinlock protecting updates to the tunnel stats array below.
 */
DEFINE_SPINLOCK(nss_gre_redir_stats_lock);

/*
 * Array to hold tunnel stats along with if_num.
 */
static struct nss_gre_redir_tunnel_stats tun_stats[NSS_GRE_REDIR_MAX_INTERFACES];
/*
 * nss_gre_redir_tunnel_update_stats()
 *	Update gre_redir tunnel stats.
 */
static void nss_gre_redir_tunnel_update_stats(struct nss_ctx_instance *nss_ctx, int if_num, struct nss_gre_redir_stats_sync_msg *ngss)
{
	int i;

	spin_lock_bh(&nss_gre_redir_stats_lock);
	for (i = 0; i < NSS_GRE_REDIR_MAX_INTERFACES; i++) {
		if ((tun_stats[i].if_num == if_num) && (tun_stats[i].valid)) {
			tun_stats[i].node_stats.rx_packets += ngss->node_stats.rx_packets;
			tun_stats[i].node_stats.rx_bytes += ngss->node_stats.rx_bytes;
			tun_stats[i].node_stats.tx_packets += ngss->node_stats.tx_packets;
			tun_stats[i].node_stats.tx_bytes += ngss->node_stats.tx_bytes;
			tun_stats[i].node_stats.rx_dropped += ngss->node_stats.rx_dropped;
			tun_stats[i].tx_dropped += ngss->tx_dropped;
			break;
		}
	}
	spin_unlock_bh(&nss_gre_redir_stats_lock);
}

/*
 * nss_gre_redir_msg_handler()
 *	Handle NSS -> HLOS messages for gre tunnels.
 */
static void nss_gre_redir_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused)) void *app_data)
{
	struct nss_gre_redir_msg *ngrm = (struct nss_gre_redir_msg *)ncm;
	void *ctx;
	nss_gre_redir_msg_callback_t cb;

	/*
	 * The interface should either be a dynamic interface receiving tunnel msgs or the
	 * GRE_REDIR interface receiving base node messages.
	 */
	BUG_ON(((ncm->interface < NSS_DYNAMIC_IF_START) || (ncm->interface >= (NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES))) &&
		ncm->interface != NSS_GRE_REDIR_INTERFACE);

	/*
	 * Is this a valid request/response packet?
	 */
	if (ncm->type >= NSS_GRE_REDIR_MAX_MSG_TYPES) {
		nss_warning("%p: received invalid message %d for gre interface", nss_ctx, ncm->type);
		return;
	}

	if (ncm->len > sizeof(struct nss_gre_redir_msg)) {
		nss_warning("%p: message length is invalid: %d", nss_ctx, ncm->len);
		return;
	}

	/*
	 * Update the callback and app_data for NOTIFY messages; gre sends all notify messages
	 * to the same callback/app_data.
	 */
	if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) {
		ncm->cb = (uint32_t)nss_ctx->nss_top->if_rx_msg_callback[ncm->interface];
	}

	/*
	 * Log failures
	 */
	nss_core_log_msg_failures(nss_ctx, ncm);

	switch (ncm->type) {
	case NSS_GRE_REDIR_RX_STATS_SYNC_MSG:
		/*
		 * Update tunnel statistics.
		 */
		if (!nss_is_dynamic_interface(ncm->interface)) {
			nss_warning("%p: stats received for wrong interface %d\n", nss_ctx, ncm->interface);
			break;
		}

		nss_gre_redir_tunnel_update_stats(nss_ctx, ncm->interface, &ngrm->msg.stats_sync);
		break;
	}

	/*
	 * Do we have a callback?
	 */
	if (!ncm->cb) {
		return;
	}

	/*
	 * Call the gre tunnel callback.
	 */
	cb = (nss_gre_redir_msg_callback_t)ncm->cb;
	ctx = nss_ctx->nss_top->subsys_dp_register[ncm->interface].ndev;
	cb(ctx, ncm);
}

/*
 * nss_gre_redir_get_stats()
 *	Get gre_redir tunnel stats.
 */
bool nss_gre_redir_get_stats(int index, struct nss_gre_redir_tunnel_stats *stats)
{
	/*
	 * Guard against out-of-range indices from the caller before touching the array.
	 */
	if ((index < 0) || (index >= NSS_GRE_REDIR_MAX_INTERFACES)) {
		return false;
	}

	spin_lock_bh(&nss_gre_redir_stats_lock);
	if (!tun_stats[index].valid) {
		spin_unlock_bh(&nss_gre_redir_stats_lock);
		return false;
	}

	if (!nss_is_dynamic_interface(tun_stats[index].if_num)) {
		spin_unlock_bh(&nss_gre_redir_stats_lock);
		return false;
	}

	memcpy(stats, &tun_stats[index], sizeof(struct nss_gre_redir_tunnel_stats));
	spin_unlock_bh(&nss_gre_redir_stats_lock);

	return true;
}
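
/*
 * Example usage (a minimal sketch, not part of this driver): walk every
 * stats slot and print the valid ones, assuming the 32-bit counters of
 * struct nss_cmn_node_stats.
 *
 *	struct nss_gre_redir_tunnel_stats stats;
 *	int i;
 *
 *	for (i = 0; i < NSS_GRE_REDIR_MAX_INTERFACES; i++) {
 *		if (!nss_gre_redir_get_stats(i, &stats)) {
 *			continue;
 *		}
 *		pr_info("if_num %d: rx_packets %u tx_packets %u\n",
 *			stats.if_num, stats.node_stats.rx_packets,
 *			stats.node_stats.tx_packets);
 *	}
 */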

/*
 * nss_gre_redir_tx_msg()
 *	Transmit a gre message to the NSS firmware.
 */
nss_tx_status_t nss_gre_redir_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_gre_redir_msg *msg)
{
	struct nss_gre_redir_msg *nm;
	struct nss_cmn_msg *ncm = &msg->cm;
	struct sk_buff *nbuf;
	int32_t status;

	NSS_VERIFY_CTX_MAGIC(nss_ctx);
	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
		nss_warning("%p: gre msg dropped as core not ready", nss_ctx);
		return NSS_TX_FAILURE_NOT_READY;
	}

	/*
	 * Sanity check the message: the interface should either be a dynamic interface to
	 * transmit tunnel msgs or the GRE_REDIR interface to transmit base node messages.
	 */
	if (((ncm->interface < NSS_DYNAMIC_IF_START) || (ncm->interface >= (NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES))) &&
		ncm->interface != NSS_GRE_REDIR_INTERFACE) {
		nss_warning("%p: tx request for another interface: %d", nss_ctx, ncm->interface);
		return NSS_TX_FAILURE;
	}

	if (ncm->type >= NSS_GRE_REDIR_MAX_MSG_TYPES) {
		nss_warning("%p: message type out of range: %d", nss_ctx, ncm->type);
		return NSS_TX_FAILURE;
	}

	if (ncm->len > sizeof(struct nss_gre_redir_msg)) {
		nss_warning("%p: message length is invalid: %d", nss_ctx, ncm->len);
		return NSS_TX_FAILURE;
	}

	nbuf = dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE);
	if (unlikely(!nbuf)) {
		NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_ctx->nss_top->stats_drv[NSS_STATS_DRV_NBUF_ALLOC_FAILS]);
		nss_warning("%p: msg dropped as command allocation failed", nss_ctx);
		return NSS_TX_FAILURE;
	}

	/*
	 * Copy the message to our skb.
	 */
	nm = (struct nss_gre_redir_msg *)skb_put(nbuf, sizeof(struct nss_gre_redir_msg));
	memcpy(nm, msg, sizeof(struct nss_gre_redir_msg));

	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
	if (status != NSS_CORE_STATUS_SUCCESS) {
		dev_kfree_skb_any(nbuf);
		nss_warning("%p: unable to enqueue 'gre message'\n", nss_ctx);
		return NSS_TX_FAILURE;
	}

	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
				NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);

	NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_ctx->nss_top->stats_drv[NSS_STATS_DRV_TX_CMD_REQ]);
	return NSS_TX_SUCCESS;
}
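
/*
 * Example usage (a minimal sketch, assuming a tunnel configure message and
 * the common nss_cmn_msg_init() helper; "my_msg_cb" is a hypothetical
 * completion callback, "nss_ctx" is the context returned at registration
 * time, and the configure payload fields are elided as they depend on the
 * tunnel being set up):
 *
 *	struct nss_gre_redir_msg ngrm;
 *
 *	memset(&ngrm, 0, sizeof(ngrm));
 *	nss_cmn_msg_init(&ngrm.cm, if_num, NSS_GRE_REDIR_TX_TUNNEL_CONFIGURE_MSG,
 *			sizeof(struct nss_gre_redir_configure_msg), my_msg_cb, NULL);
 *
 *	... fill ngrm.msg.configure here ...
 *
 *	if (nss_gre_redir_tx_msg(nss_ctx, &ngrm) != NSS_TX_SUCCESS) {
 *		... handle the failure (core not ready, bad message, etc.) ...
 *	}
 */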
221
222/*
223 * nss_gre_redir_tx_buf()
224 *	Send packet to gre_redir interface owned by NSS
225 */
226nss_tx_status_t nss_gre_redir_tx_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf, uint32_t if_num)
227{
228	int32_t status;
229
230	nss_trace("%p: gre_redir If Tx packet, id:%d, data=%p", nss_ctx, if_num, os_buf->data);
231
232	NSS_VERIFY_CTX_MAGIC(nss_ctx);
233	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
234		nss_warning("%p: 'Phys If Tx' packet dropped as core not ready", nss_ctx);
235		return NSS_TX_FAILURE_NOT_READY;
236	}
237
238	status = nss_core_send_buffer(nss_ctx, if_num, os_buf, NSS_IF_DATA_QUEUE_0, H2N_BUFFER_PACKET, 0);
239	if (unlikely(status != NSS_CORE_STATUS_SUCCESS)) {
240		nss_warning("%p: Unable to enqueue 'Phys If Tx' packet\n", nss_ctx);
241		if (status == NSS_CORE_STATUS_FAILURE_QUEUE) {
242			return NSS_TX_FAILURE_QUEUE;
243		}
244
245		return NSS_TX_FAILURE;
246	}
247
248	/*
249	 * Kick the NSS awake so it can process our new entry.
250	 */
251	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_DATA_QUEUE_0].desc_ring.int_bit,
252				NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
253
254	NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_ctx->nss_top->stats_drv[NSS_STATS_DRV_TX_PACKET]);
255	return NSS_TX_SUCCESS;
256}
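
/*
 * Example usage (a minimal sketch; "skb" is a packet the caller has already
 * built for this tunnel). Note that this function does not consume the
 * buffer on failure, so the caller must free it:
 *
 *	nss_tx_status_t status;
 *
 *	status = nss_gre_redir_tx_buf(nss_ctx, skb, if_num);
 *	if (status != NSS_TX_SUCCESS) {
 *		dev_kfree_skb_any(skb);
 *		if (status == NSS_TX_FAILURE_QUEUE) {
 *			... queue was full: the caller may choose to retry later ...
 *		}
 *	}
 */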
257
258/*
259 ***********************************
260 * Register/Unregister/Miscellaneous APIs
261 ***********************************
262 */
263
264/*
265 * nss_gre_redir_register_if()
266 */
267struct nss_ctx_instance *nss_gre_redir_register_if(uint32_t if_num, struct net_device *netdev, nss_gre_redir_data_callback_t cb_func_data,
268							nss_gre_redir_msg_callback_t cb_func_msg, uint32_t features)
269{
270	uint32_t status;
271	int i;
272
273	nss_assert((if_num >= NSS_DYNAMIC_IF_START) && (if_num < (NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES)));
274
275	/*
276	 * Registering handler for sending tunnel interface msgs to NSS.
277	 */
278	status = nss_core_register_handler(if_num, nss_gre_redir_msg_handler, NULL);
279	if (status != NSS_CORE_STATUS_SUCCESS) {
280		nss_warning("Not able to register handler for gre_redir interface %d with NSS core\n", if_num);
281		return NULL;
282	}
283
284	nss_top_main.subsys_dp_register[if_num].ndev = netdev;
285        nss_top_main.subsys_dp_register[if_num].cb = cb_func_data;
286	nss_top_main.subsys_dp_register[if_num].app_data = NULL;
287	nss_top_main.subsys_dp_register[if_num].features = features;
288
289        nss_top_main.if_rx_msg_callback[if_num] = cb_func_msg;
290
291	spin_lock_bh(&nss_gre_redir_stats_lock);
292	for (i = 0; i < NSS_GRE_REDIR_MAX_INTERFACES; i++) {
293		if (!(tun_stats[i].valid)) {
294			tun_stats[i].valid = true;
295			tun_stats[i].if_num = if_num;
296			break;
297		}
298	}
299	spin_unlock_bh(&nss_gre_redir_stats_lock);
300
301        return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.gre_redir_handler_id];
302}
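
/*
 * Example usage (a minimal sketch; "if_num" would come from a prior dynamic
 * interface allocation, and "my_data_cb"/"my_msg_cb" are hypothetical
 * callbacks implemented by the caller):
 *
 *	struct nss_ctx_instance *nss_ctx;
 *
 *	nss_ctx = nss_gre_redir_register_if(if_num, netdev, my_data_cb,
 *						my_msg_cb, 0);
 *	if (!nss_ctx) {
 *		... handler registration with the NSS core failed ...
 *	}
 */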
303
304/*
305 * nss_gre_redir_unregister_if()
306 */
307void nss_gre_redir_unregister_if(uint32_t if_num)
308{
309	uint32_t status;
310	int i;
311
312	nss_assert((if_num >= NSS_DYNAMIC_IF_START) && (if_num < (NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES)));
313
314	status = nss_core_unregister_handler(if_num);
315	if (status != NSS_CORE_STATUS_SUCCESS) {
316		nss_warning("Not able to unregister handler for gre_redir interface %d with NSS core\n", if_num);
317		return;
318	}
319
320	nss_top_main.subsys_dp_register[if_num].ndev = NULL;
321        nss_top_main.subsys_dp_register[if_num].cb = NULL;
322	nss_top_main.subsys_dp_register[if_num].app_data = NULL;
323	nss_top_main.subsys_dp_register[if_num].features = 0;
324
325	nss_top_main.if_rx_msg_callback[if_num] = NULL;
326
327	spin_lock_bh(&nss_gre_redir_stats_lock);
328	for (i = 0; i < NSS_GRE_REDIR_MAX_INTERFACES; i++) {
329		if ((tun_stats[i].if_num == if_num) && (tun_stats[i].valid)) {
330			tun_stats[i].valid = false;
331			tun_stats[i].if_num = -1;
332			break;
333		}
334	}
335	spin_unlock_bh(&nss_gre_redir_stats_lock);
336}
337
338/*
339 * nss_gre_redir_register_handler()
340 *	Registering handler for sending msg to base gre_redir node on NSS.
341 */
342void nss_gre_redir_register_handler(void)
343{
344	uint32_t status = nss_core_register_handler(NSS_GRE_REDIR_INTERFACE, nss_gre_redir_msg_handler, NULL);
345	if (status != NSS_CORE_STATUS_SUCCESS) {
346		nss_warning("Not able to register handler for gre_redir base interface with NSS core\n");
347		return;
348	}
349}
350
351EXPORT_SYMBOL(nss_gre_redir_tx_msg);
352EXPORT_SYMBOL(nss_gre_redir_tx_buf);
353EXPORT_SYMBOL(nss_gre_redir_register_if);
354EXPORT_SYMBOL(nss_gre_redir_unregister_if);
355EXPORT_SYMBOL(nss_gre_redir_get_stats);
356