/*
 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
 * RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT
 * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE
 * USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * @file
 * This is the network dependent layer to handle network related functionality.
 * This file is tightly coupled to the networking framework of the Linux kernel.
 * The functionality carried out in this file should be treated as an
 * example only if the underlying operating system is not Linux.
 *
 * @note Many of the functions other than the device specific functions
 *  change for operating systems other than Linux 2.6.xx
 *-----------------------------REVISION HISTORY---------------------------------
 * Qualcomm Atheros		15/Feb/2013			Created
 */

#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/phy.h>
#include <linux/interrupt.h>

#include <nss_gmac_dev.h>
#include <nss_gmac_network_interface.h>

#define NSS_GMAC_NAPI_BUDGET	64
#define dma_int_enable		(dma_ie_normal | dma_int_tx_norm_mask | dma_int_rx_norm_mask)
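
/*
 * dma_int_enable groups the normal-interrupt summary enable (dma_ie_normal)
 * with the Tx- and Rx-complete "normal" bits, so the IRQ and NAPI paths below
 * can arm and disarm exactly the interrupts they service with a single mask.
 */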

#ifdef CONFIG_ETH_TM_ACCURATE_CONTROL
#include <linux/netlink.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <net/sock.h>
#include "ipv6_pssthrgh.h"
#define NETLINK_TM 21

#define TM_ETH_ONE_DIRECTION 0
#define TM_ETH_BOTH_DIRECTION 1
#define TM_ETH_CONTROL_ABOLISH 2
#define TM_ETH_DROP_PACKET 3
#define TM_ETH_MAX_LIMIT 0xffffffffffffffffULL

unsigned long long tm_limit = TM_ETH_MAX_LIMIT;
int msg_received = 0;
int eth_tm_dir;
unsigned long long counter_rx = 0;
unsigned long long counter_tx = 0;
unsigned long long tmp_counter = 0;
struct sock *nl_sk = NULL;
int need_drop = 0;

struct msg_data {
	int backpid;
	u64 leftdata;
	int tm_dir;
};
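
/*
 * For reference, the userspace peer of this ad-hoc NETLINK_TM protocol would
 * look roughly like the sketch below. This is illustrative only, inferred
 * from test_netlink() and nss_gmac_receive(); it is not code shipped with
 * this driver. The daemon sends a struct msg_data wrapped in an nlmsghdr to
 * the kernel (nl_pid 0) and then listens on multicast group 1 for the
 * "limit reached" event:
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_TM);
 *	struct sockaddr_nl addr = { .nl_family = AF_NETLINK,
 *				    .nl_pid = getpid(), .nl_groups = 1 };
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct msg_data msg;
 *	} req = {
 *		.nlh = { .nlmsg_len = NLMSG_LENGTH(sizeof(struct msg_data)) },
 *		.msg = { .backpid = getpid(), .leftdata = limit_in_bytes,
 *			 .tm_dir = TM_ETH_BOTH_DIRECTION },
 *	};
 *	sendto(fd, &req, req.nlh.nlmsg_len, 0, ...);
 */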

void test_netlink(struct sk_buff *__skb)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh = NULL;
	struct msg_data *msgdata;

	skb = skb_get(__skb);
	if (skb == NULL || skb->len < NLMSG_LENGTH(0)) {
		printk("Kernel receive message ERROR ...\n");
		return;
	}

	nlh = nlmsg_hdr(skb);
	msgdata = (struct msg_data *)NLMSG_DATA(nlh);
	if (msgdata->tm_dir == TM_ETH_CONTROL_ABOLISH || msgdata->tm_dir == TM_ETH_DROP_PACKET)
		tm_limit = TM_ETH_MAX_LIMIT;
	else
		tm_limit = msgdata->leftdata;

	eth_tm_dir = msgdata->tm_dir;
	msg_received = 1;
	if (msgdata->tm_dir == TM_ETH_DROP_PACKET)
		need_drop = 1;
	else
		need_drop = 0;

	/* msgdata points into the skb data area, so drop our reference only
	 * after the last access to it.
	 */
	kfree_skb(skb);
	printk("ETH Driver receive message dir is %d ...\n", eth_tm_dir);

	counter_rx = 0;
	counter_tx = 0;
}

void tm_netlink_init(struct module *module)
{
	nl_sk = netlink_kernel_create(&init_net, NETLINK_TM, 1, test_netlink, NULL, module);
	if (nl_sk == NULL)
		printk("netlink_kernel_create error ...\n");
}

void tm_netlink_deinit(void)
{
	if (nl_sk != NULL)
		netlink_kernel_release(nl_sk);
}
#endif

/**
 * This sets up the transmit descriptor queue in ring or chain mode.
 * This function is tightly coupled to the platform and operating system.
 * The device is interested only after the descriptors are set up. Therefore
 * this function is not included in the device driver API. This function should
 * be treated as example code to design the descriptor structures for ring mode
 * or chain mode.
 * This function depends on the device structure for allocation of consistent
 * dma-able memory in case of Linux.
 *	- Allocates the memory for the descriptors.
 *	- Initializes the Busy and Next descriptor indices to 0 (indicating the
 *	  first descriptor).
 *	- Initializes the Busy and Next descriptor pointers to the first
 *	  descriptor address.
 *	- Initializes the last descriptor with the end-of-ring marker in case
 *	  of ring mode.
 *	- Initializes the descriptors in chain mode.
 * @param[in] pointer to nss_gmac_dev.
 * @param[in] pointer to device structure.
 * @param[in] number of descriptors expected in the tx descriptor queue.
 * @param[in] whether descriptors are to be created in RING mode or CHAIN mode.
 * @return 0 upon success. Error code upon failure.
 * @note This function fails if allocation fails for the required number of
 * descriptors in ring mode; in chain mode the function may return -ENOMEM in
 * the process of descriptor chain creation. Once returned from this function
 * the user should check gmacdev->tx_desc_count to see how many descriptors are
 * in the chain, and should continue further only if the number of descriptors
 * in the chain meets the requirements.
 */
static int32_t nss_gmac_setup_tx_desc_queue(struct nss_gmac_dev *gmacdev,
						struct device *dev,
						uint32_t no_of_desc,
						uint32_t desc_mode)
{
	struct dma_desc *first_desc = NULL;
	dma_addr_t dma_addr;

	gmacdev->tx_desc_count = 0;

	BUG_ON(desc_mode != RINGMODE);
	BUG_ON((no_of_desc & (no_of_desc - 1)) != 0);

	netdev_dbg(gmacdev->netdev, "Total size of memory required for Tx Descriptors in Ring Mode = 0x%08x\n"
			, (uint32_t)(sizeof(struct dma_desc) * no_of_desc));

	first_desc = dma_alloc_coherent(dev, sizeof(struct dma_desc) * no_of_desc
					, &dma_addr, GFP_KERNEL);
	if (first_desc == NULL) {
		netdev_dbg(gmacdev->netdev,
				"Error in Tx Descriptors memory allocation\n");
		return -ENOMEM;
	}

	gmacdev->tx_desc_count = no_of_desc;
	gmacdev->tx_desc = first_desc;
	gmacdev->tx_desc_dma = dma_addr;
	netdev_dbg(gmacdev->netdev, "Tx Descriptors in Ring Mode: No. of descriptors = %d base = 0x%08x dma = 0x%08x\n"
			, no_of_desc, (uint32_t)first_desc, dma_addr);

	nss_gmac_tx_desc_init_ring(gmacdev->tx_desc, gmacdev->tx_desc_count);

	gmacdev->tx_next = 0;
	gmacdev->tx_busy = 0;
	gmacdev->tx_next_desc = gmacdev->tx_desc;
	gmacdev->tx_busy_desc = gmacdev->tx_desc;
	gmacdev->busy_tx_desc = 0;

	return 0;
}
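
/*
 * A note on the ring bookkeeping (sketch only; the producer/consumer advance
 * itself happens inside nss_gmac_set_tx_qptr()/nss_gmac_reset_tx_qptr(), which
 * live outside this file): tx_next is where the driver queues the next frame
 * and tx_busy is the oldest descriptor still owned by the DMA. Because
 * no_of_desc is enforced to be a power of two above, an index advance can be
 * a simple mask instead of a modulo, e.g.:
 *
 *	next = (gmacdev->tx_next + 1) & (gmacdev->tx_desc_count - 1);
 */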


/**
 * This sets up the receive descriptor queue in ring or chain mode.
 * This function is tightly coupled to the platform and operating system.
 * The device is interested only after the descriptors are set up. Therefore
 * this function is not included in the device driver API. This function should
 * be treated as example code to design the descriptor structures in ring mode
 * or chain mode.
 * This function depends on the device structure for allocation of
 * consistent dma-able memory in case of Linux.
 *	- Allocates the memory for the descriptors.
 *	- Initializes the Busy and Next descriptor indices to 0 (indicating the
 *	  first descriptor).
 *	- Initializes the Busy and Next descriptor pointers to the first
 *	  descriptor address.
 *	- Initializes the last descriptor with the end-of-ring marker in case
 *	  of ring mode.
 *	- Initializes the descriptors in chain mode.
 * @param[in] pointer to nss_gmac_dev.
 * @param[in] pointer to device structure.
 * @param[in] number of descriptors expected in the rx descriptor queue.
 * @param[in] whether descriptors are to be created in RING mode or CHAIN mode.
 * @return 0 upon success. Error code upon failure.
 * @note This function fails if allocation fails for the required number of
 * descriptors in ring mode; in chain mode the function may return -ENOMEM in
 * the process of descriptor chain creation. Once returned from this function
 * the user should check gmacdev->rx_desc_count to see how many descriptors are
 * in the chain, and should continue further only if the number of descriptors
 * in the chain meets the requirements.
 */
static int32_t nss_gmac_setup_rx_desc_queue(struct nss_gmac_dev *gmacdev,
						struct device *dev,
						uint32_t no_of_desc,
						uint32_t desc_mode)
{
	struct dma_desc *first_desc = NULL;
	dma_addr_t dma_addr;

	gmacdev->rx_desc_count = 0;

	BUG_ON(desc_mode != RINGMODE);
	BUG_ON((no_of_desc & (no_of_desc - 1)) != 0);

	netdev_dbg(gmacdev->netdev, "Total size of memory required for Rx Descriptors in Ring Mode = 0x%08x\n"
			, (uint32_t)(sizeof(struct dma_desc) * no_of_desc));

	first_desc = dma_alloc_coherent(dev, sizeof(struct dma_desc) * no_of_desc
					, &dma_addr, GFP_KERNEL);
	if (first_desc == NULL) {
		netdev_dbg(gmacdev->netdev, "Error in Rx Descriptor Memory allocation in Ring mode\n");
		return -ENOMEM;
	}

	gmacdev->rx_desc_count = no_of_desc;
	gmacdev->rx_desc = first_desc;
	gmacdev->rx_desc_dma = dma_addr;
	netdev_dbg(gmacdev->netdev, "Rx Descriptors in Ring Mode: No. of descriptors = %d base = 0x%08x dma = 0x%08x\n",
			no_of_desc, (uint32_t)first_desc, dma_addr);

	nss_gmac_rx_desc_init_ring(gmacdev->rx_desc, no_of_desc);

	gmacdev->rx_next = 0;
	gmacdev->rx_busy = 0;
	gmacdev->rx_next_desc = gmacdev->rx_desc;
	gmacdev->rx_busy_desc = gmacdev->rx_desc;
	gmacdev->busy_rx_desc = 0;

	return 0;
}

/*
 * nss_gmac_rx_refill()
 *	Refill the RX descriptor ring
 */
static inline void nss_gmac_rx_refill(struct nss_gmac_dev *gmacdev)
{
	int count = NSS_GMAC_RX_DESC_SIZE - gmacdev->busy_rx_desc;
	dma_addr_t dma_addr;
	int i;
	struct sk_buff *skb;

	for (i = 0; i < count; i++) {
		skb = __netdev_alloc_skb(gmacdev->netdev,
				NSS_GMAC_MINI_JUMBO_FRAME_MTU, GFP_ATOMIC);
		if (unlikely(skb == NULL)) {
			netdev_dbg(gmacdev->netdev, "Unable to allocate skb, will try next time\n");
			break;
		}
		skb_reserve(skb, NET_IP_ALIGN);
		dma_addr = dma_map_single(&gmacdev->netdev->dev, skb->data,
				NSS_GMAC_MINI_JUMBO_FRAME_MTU, DMA_FROM_DEVICE);
		nss_gmac_set_rx_qptr(gmacdev, dma_addr,
				NSS_GMAC_MINI_JUMBO_FRAME_MTU, (uint32_t)skb);
	}
}
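
/*
 * Note that an allocation failure above is tolerated: the loop simply stops
 * and the ring runs with fewer posted buffers until the next NAPI poll calls
 * nss_gmac_rx_refill() again.
 */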

/*
 * nss_gmac_rx()
 *	Process RX packets
 */
static inline int nss_gmac_rx(struct nss_gmac_dev *gmacdev, int budget)
{
	struct dma_desc *desc = NULL;
	int frame_length, busy;
	uint32_t status;
	struct sk_buff *rx_skb;

	if (!gmacdev->busy_rx_desc) {
		/* no descriptors are held by the gmac dma, we are done */
		return 0;
	}

	busy = gmacdev->busy_rx_desc;
	if (busy > budget)
		busy = budget;

	do {
		desc = gmacdev->rx_busy_desc;
		if (nss_gmac_is_desc_owned_by_dma(desc)) {
			/* desc still held by the gmac dma, so we are done */
			break;
		}

		status = desc->status;
		rx_skb = (struct sk_buff *)desc->reserved1;
		dma_unmap_single(&gmacdev->netdev->dev, desc->buffer1,
				NSS_GMAC_MINI_JUMBO_FRAME_MTU, DMA_FROM_DEVICE);

		if (likely(nss_gmac_is_rx_desc_valid(status))) {
			/* We have a pkt to process, get the frame length */
			frame_length = nss_gmac_get_rx_desc_frame_length(status);
			/* Get rid of the FCS: 4 bytes */
			frame_length -= ETH_FCS_LEN;

			/* Valid packet, collect stats */
			gmacdev->stats.rx_packets++;
			gmacdev->stats.rx_bytes += frame_length;

			/* type_trans and deliver to linux */
			skb_put(rx_skb, frame_length);
			rx_skb->protocol = eth_type_trans(rx_skb, gmacdev->netdev);
			rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
			napi_gro_receive(&gmacdev->napi, rx_skb);

		} else {
			gmacdev->stats.rx_errors++;
			dev_kfree_skb(rx_skb);

			if (status & (desc_rx_crc | desc_rx_collision |
					desc_rx_damaged | desc_rx_dribbling |
					desc_rx_length_error)) {
				gmacdev->stats.rx_crc_errors += (status & desc_rx_crc) ? 1 : 0;
				gmacdev->stats.collisions += (status & desc_rx_collision) ? 1 : 0;
				gmacdev->stats.rx_over_errors += (status & desc_rx_damaged) ? 1 : 0;
				gmacdev->stats.rx_frame_errors += (status & desc_rx_dribbling) ? 1 : 0;
				gmacdev->stats.rx_length_errors += (status & desc_rx_length_error) ? 1 : 0;
			}
		}

		nss_gmac_reset_rx_qptr(gmacdev);
		busy--;
	} while (busy > 0);
	return budget - busy;
}

/*
 * nss_gmac_process_tx_complete()
 *	Xmit complete, clear descriptor and free the skb
 */
static inline void nss_gmac_process_tx_complete(struct nss_gmac_dev *gmacdev)
{
	int busy, len;
	uint32_t status;
	struct dma_desc *desc = NULL;
	struct sk_buff *skb;

	spin_lock(&gmacdev->slock);
	busy = gmacdev->busy_tx_desc;

	if (!busy) {
		/* No descriptors are held by the gmac dma, we are done */
		spin_unlock(&gmacdev->slock);
		return;
	}

	do {
		desc = gmacdev->tx_busy_desc;
		if (nss_gmac_is_desc_owned_by_dma(desc)) {
			/* desc still held by the gmac dma, so we are done */
			break;
		}
		len = (desc->length & desc_size1_mask) >> desc_size1_shift;
		dma_unmap_single(&gmacdev->netdev->dev, desc->buffer1, len,
								DMA_TO_DEVICE);

		status = desc->status;
		if (status & desc_tx_last) {
			/* TX is done for this whole skb, we can free it */
			skb = (struct sk_buff *)desc->reserved1;
			BUG_ON(!skb);
			dev_kfree_skb(skb);

			if (unlikely(status & desc_error)) {
				/* Some error happened, collect statistics */
				gmacdev->stats.tx_errors++;
				gmacdev->stats.tx_carrier_errors += (status & desc_tx_lost_carrier) ? 1 : 0;
				gmacdev->stats.tx_carrier_errors += (status & desc_tx_no_carrier) ? 1 : 0;
				gmacdev->stats.tx_window_errors += (status & desc_tx_late_collision) ? 1 : 0;
				gmacdev->stats.tx_fifo_errors += (status & desc_tx_underflow) ? 1 : 0;
			} else {
				/* No error, record tx pkts/bytes and
				 * collisions
				 */
				gmacdev->stats.tx_packets++;
				gmacdev->stats.collisions += nss_gmac_get_tx_collision_count(status);
				gmacdev->stats.tx_bytes += len;
			}
		}
		nss_gmac_reset_tx_qptr(gmacdev);
		busy--;
	} while (busy > 0);
	spin_unlock(&gmacdev->slock);
}

/*
 * nss_gmac_poll()
 *	Scheduled by napi to process RX and TX complete
 */
static int nss_gmac_poll(struct napi_struct *napi, int budget)
{
	struct nss_gmac_dev *gmacdev = container_of(napi,
					struct nss_gmac_dev, napi);
	int work_done;

	nss_gmac_process_tx_complete(gmacdev);
	work_done = nss_gmac_rx(gmacdev, budget);
	nss_gmac_rx_refill(gmacdev);

	if (work_done < budget) {
		napi_complete(napi);
		nss_gmac_enable_interrupt(gmacdev, dma_int_enable);
	}
	return work_done;
}
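
/*
 * The standard NAPI contract applies above: while nss_gmac_rx() consumes the
 * whole budget the DMA interrupts stay masked and the poll function keeps
 * getting rescheduled; only when a poll finishes under budget does the driver
 * call napi_complete() and re-arm dma_int_enable.
 */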

/*
 * nss_gmac_handle_irq()
 *	Process IRQ and schedule napi
 */
static irqreturn_t nss_gmac_handle_irq(int irq, void *ctx)
{
	struct nss_gmac_dev *gmacdev = (struct nss_gmac_dev *)ctx;

	nss_gmac_clear_interrupt(gmacdev);
	nss_gmac_disable_interrupt(gmacdev, dma_int_enable);
	napi_schedule(&gmacdev->napi);
	return IRQ_HANDLED;
}

/*
 * nss_gmac_slowpath_if_open()
 *	Do slow path data plane open
 */
static int nss_gmac_slowpath_if_open(void *app_data, uint32_t tx_desc_ring,
					uint32_t rx_desc_ring, uint32_t mode)
{
	return NSS_GMAC_SUCCESS;
}

static int nss_gmac_slowpath_if_close(void *app_data)
{
	return NSS_GMAC_SUCCESS;
}

static int nss_gmac_slowpath_if_link_state(void *app_data, uint32_t link_state)
{
	struct net_device *netdev = (struct net_device *)app_data;
	struct nss_gmac_dev *gmacdev = (struct nss_gmac_dev *)netdev_priv(netdev);

	if (link_state) {
		napi_enable(&gmacdev->napi);
		nss_gmac_enable_dma_rx(gmacdev);
		nss_gmac_enable_dma_tx(gmacdev);
		nss_gmac_enable_interrupt(gmacdev, dma_int_enable);
	} else if (gmacdev->link_state == LINKUP) {
		nss_gmac_disable_interrupt(gmacdev, dma_int_enable);
		napi_disable(&gmacdev->napi);
	}
	return NSS_GMAC_SUCCESS;
}

static int nss_gmac_slowpath_if_mac_addr(void *app_data, uint8_t *addr)
{
	return NSS_GMAC_SUCCESS;
}

static int nss_gmac_slowpath_if_change_mtu(void *app_data, uint32_t mtu)
{
	return NSS_GMAC_SUCCESS;
}

static int nss_gmac_slowpath_if_xmit(void *app_data, struct sk_buff *skb)
{
	struct net_device *netdev = (struct net_device *)app_data;
	struct nss_gmac_dev *gmacdev = (struct nss_gmac_dev *)netdev_priv(netdev);
	unsigned int len = skb_headlen(skb);
	dma_addr_t dma_addr;
	int nfrags = skb_shinfo(skb)->nr_frags;

	/*
	 * We don't have enough tx descriptors for this pkt, return busy
	 */
	if ((NSS_GMAC_TX_DESC_SIZE - gmacdev->busy_tx_desc) < nfrags + 1)
		return NETDEV_TX_BUSY;

	/*
	 * Most likely, it is not a fragmented pkt, optimize for that
	 */
	if (likely(nfrags == 0)) {
		dma_addr = dma_map_single(&netdev->dev, skb->data, len,
						DMA_TO_DEVICE);
		spin_lock_bh(&gmacdev->slock);
		nss_gmac_set_tx_qptr(gmacdev, dma_addr, len, (uint32_t)skb,
				(skb->ip_summed == CHECKSUM_PARTIAL),
				(desc_tx_last | desc_tx_first),
				desc_own_by_dma);
		gmacdev->busy_tx_desc++;
		spin_unlock_bh(&gmacdev->slock);
		nss_gmac_resume_dma_tx(gmacdev);

		return NSS_GMAC_SUCCESS;
	}

	/*
	 * Fragmented packets are not handled by this slow path; the caller
	 * drops them (see nss_gmac_xmit_frames()).
	 */

	return NSS_GMAC_FAILURE;
}
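
/*
 * A sketch of what fragment support in nss_gmac_slowpath_if_xmit() could look
 * like, assuming the descriptor helpers take one buffer per call (illustrative
 * only; skb_frag_dma_map() is the stock kernel helper, everything else mirrors
 * the linear path above):
 *
 *	for (i = 0; i < nfrags; i++) {
 *		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *
 *		dma_addr = skb_frag_dma_map(&netdev->dev, frag, 0,
 *				skb_frag_size(frag), DMA_TO_DEVICE);
 *		... queue one descriptor per piece, setting desc_tx_first only
 *		... on the head buffer and desc_tx_last only on the final
 *		... fragment
 *	}
 */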

/*
 * nss_gmac_slowpath_if_set_features()
 *	Set the supported net_device features
 */
static void nss_gmac_slowpath_if_set_features(struct net_device *netdev)
{
	netdev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	netdev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	netdev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	netdev->wanted_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
}

struct nss_gmac_data_plane_ops nss_gmac_slowpath_ops = {
	.open		= nss_gmac_slowpath_if_open,
	.close		= nss_gmac_slowpath_if_close,
	.link_state	= nss_gmac_slowpath_if_link_state,
	.mac_addr	= nss_gmac_slowpath_if_mac_addr,
	.change_mtu	= nss_gmac_slowpath_if_change_mtu,
	.xmit		= nss_gmac_slowpath_if_xmit,
	.set_features	= nss_gmac_slowpath_if_set_features,
};
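
/*
 * nss_gmac_slowpath_ops is installed as the default data plane by
 * nss_gmac_open() when no offload engine has registered, and is restored by
 * nss_gmac_restore_data_plane(); an offload driver replaces it through
 * nss_gmac_override_data_plane().
 */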

/**
 * @brief Save GMAC statistics
 * @param[in] pointer to gmac context
 * @param[in] pointer to gmac statistics
 * @return Returns void.
 */
static void nss_gmac_copy_stats(struct nss_gmac_dev *gmacdev,
				struct nss_gmac_stats *gstat)
{
	BUG_ON(!spin_is_locked(&gmacdev->stats_lock));

	gmacdev->nss_stats.rx_bytes += gstat->rx_bytes;
	gmacdev->nss_stats.rx_packets += gstat->rx_packets;
	gmacdev->nss_stats.rx_errors += gstat->rx_errors;
	gmacdev->nss_stats.rx_receive_errors += gstat->rx_receive_errors;
	gmacdev->nss_stats.rx_overflow_errors += gstat->rx_overflow_errors;
	gmacdev->nss_stats.rx_descriptor_errors += gstat->rx_descriptor_errors;
	gmacdev->nss_stats.rx_watchdog_timeout_errors +=
		gstat->rx_watchdog_timeout_errors;
	gmacdev->nss_stats.rx_crc_errors += gstat->rx_crc_errors;
	gmacdev->nss_stats.rx_late_collision_errors +=
		gstat->rx_late_collision_errors;
	gmacdev->nss_stats.rx_dribble_bit_errors += gstat->rx_dribble_bit_errors;
	gmacdev->nss_stats.rx_length_errors += gstat->rx_length_errors;
	gmacdev->nss_stats.rx_ip_header_errors += gstat->rx_ip_header_errors;
	gmacdev->nss_stats.rx_ip_payload_errors += gstat->rx_ip_payload_errors;
	gmacdev->nss_stats.rx_no_buffer_errors += gstat->rx_no_buffer_errors;
	gmacdev->nss_stats.rx_transport_csum_bypassed +=
		gstat->rx_transport_csum_bypassed;
	gmacdev->nss_stats.tx_bytes += gstat->tx_bytes;
	gmacdev->nss_stats.tx_packets += gstat->tx_packets;
	gmacdev->nss_stats.tx_collisions += gstat->tx_collisions;
	gmacdev->nss_stats.tx_errors += gstat->tx_errors;
	gmacdev->nss_stats.tx_jabber_timeout_errors +=
		gstat->tx_jabber_timeout_errors;
	gmacdev->nss_stats.tx_frame_flushed_errors +=
		gstat->tx_frame_flushed_errors;
	gmacdev->nss_stats.tx_loss_of_carrier_errors +=
		gstat->tx_loss_of_carrier_errors;
	gmacdev->nss_stats.tx_no_carrier_errors += gstat->tx_no_carrier_errors;
	gmacdev->nss_stats.tx_late_collision_errors +=
		gstat->tx_late_collision_errors;
	gmacdev->nss_stats.tx_excessive_collision_errors +=
		gstat->tx_excessive_collision_errors;
	gmacdev->nss_stats.tx_excessive_deferral_errors +=
		gstat->tx_excessive_deferral_errors;
	gmacdev->nss_stats.tx_underflow_errors += gstat->tx_underflow_errors;
	gmacdev->nss_stats.tx_ip_header_errors += gstat->tx_ip_header_errors;
	gmacdev->nss_stats.tx_ip_payload_errors += gstat->tx_ip_payload_errors;
	gmacdev->nss_stats.tx_dropped += gstat->tx_dropped;
	gmacdev->nss_stats.hw_errs[0] += gstat->hw_errs[0];
	gmacdev->nss_stats.hw_errs[1] += gstat->hw_errs[1];
	gmacdev->nss_stats.hw_errs[2] += gstat->hw_errs[2];
	gmacdev->nss_stats.hw_errs[3] += gstat->hw_errs[3];
	gmacdev->nss_stats.hw_errs[4] += gstat->hw_errs[4];
	gmacdev->nss_stats.hw_errs[5] += gstat->hw_errs[5];
	gmacdev->nss_stats.hw_errs[6] += gstat->hw_errs[6];
	gmacdev->nss_stats.hw_errs[7] += gstat->hw_errs[7];
	gmacdev->nss_stats.hw_errs[8] += gstat->hw_errs[8];
	gmacdev->nss_stats.hw_errs[9] += gstat->hw_errs[9];
	gmacdev->nss_stats.rx_missed += gstat->rx_missed;
	gmacdev->nss_stats.fifo_overflows += gstat->fifo_overflows;
	gmacdev->nss_stats.rx_scatter_errors += gstat->rx_scatter_errors;
	gmacdev->nss_stats.gmac_total_ticks += gstat->gmac_total_ticks;
	gmacdev->nss_stats.gmac_worst_case_ticks += gstat->gmac_worst_case_ticks;
	gmacdev->nss_stats.gmac_iterations += gstat->gmac_iterations;
}

extern void detect_eth_wan_data(void);
static int need_blink_wan_led = 1;
void set_blink_wan_led(int val)
{
	need_blink_wan_led = val;
}

/**
 * @brief Stats Callback to receive statistics from NSS
 * @param[in] pointer to gmac context
 * @param[in] pointer to gmac statistics
 * @return Returns void.
 */
static void nss_gmac_stats_receive(struct nss_gmac_dev *gmacdev,
					struct nss_gmac_stats *gstat)
{
	if (!test_bit(__NSS_GMAC_UP, &gmacdev->flags))
		return;

	if (need_blink_wan_led && gmacdev->phy_base == 4 && (gstat->rx_bytes != 0 || gstat->tx_bytes != 0))
		detect_eth_wan_data();

#ifdef CONFIG_ETH_TM_ACCURATE_CONTROL
	if (gmacdev->phy_base == 4) {
		counter_tx += gstat->tx_bytes;
		counter_rx += gstat->rx_bytes;
	}
#endif
	spin_lock(&gmacdev->stats_lock);

	nss_gmac_copy_stats(gmacdev, gstat);

	gmacdev->stats.rx_packets += gstat->rx_packets;
	gmacdev->stats.rx_bytes += gstat->rx_bytes;
	gmacdev->stats.rx_errors += gstat->rx_errors;
	gmacdev->stats.rx_dropped += gstat->rx_errors;
	gmacdev->stats.rx_length_errors += gstat->rx_length_errors;
	gmacdev->stats.rx_over_errors += gstat->rx_overflow_errors;
	gmacdev->stats.rx_crc_errors += gstat->rx_crc_errors;
	gmacdev->stats.rx_frame_errors += gstat->rx_dribble_bit_errors;
	gmacdev->stats.rx_fifo_errors += gstat->fifo_overflows;
	gmacdev->stats.rx_missed_errors += gstat->rx_missed;
	gmacdev->stats.collisions += gstat->tx_collisions
		+ gstat->rx_late_collision_errors;
	gmacdev->stats.tx_packets += gstat->tx_packets;
	gmacdev->stats.tx_bytes += gstat->tx_bytes;
	gmacdev->stats.tx_errors += gstat->tx_errors;
	gmacdev->stats.tx_dropped += gstat->tx_dropped;
	gmacdev->stats.tx_carrier_errors += gstat->tx_loss_of_carrier_errors
		+ gstat->tx_no_carrier_errors;
	gmacdev->stats.tx_fifo_errors += gstat->tx_underflow_errors;
	gmacdev->stats.tx_window_errors += gstat->tx_late_collision_errors;

	spin_unlock(&gmacdev->stats_lock);
}

/**
 * NSS Driver interface APIs
 */


/**
 * @brief Rx Callback to receive frames from NSS
 * @param[in] pointer to net device context
 * @param[in] pointer to skb
 * @return Returns void
 */
void nss_gmac_receive(struct net_device *netdev, struct sk_buff *skb,
						struct napi_struct *napi)
{
	struct nss_gmac_dev *gmacdev;
#ifdef CONFIG_ETH_TM_ACCURATE_CONTROL
	struct sk_buff *skb_send = NULL;
	struct nlmsghdr *nlh = NULL;
	struct msg_data sendata;
	int nt_ret = 0;
#endif

	BUG_ON(netdev == NULL);

	gmacdev = netdev_priv(netdev);

	BUG_ON(gmacdev->netdev != netdev);

	skb->dev = netdev;
	skb->protocol = eth_type_trans(skb, netdev);
	netdev_dbg(netdev,
			"%s: Rx on gmac%d, packet len %d, CSUM %d\n",
			__func__, gmacdev->macid, skb->len, skb->ip_summed);

	if (need_blink_wan_led && gmacdev->phy_base == 4)
		detect_eth_wan_data();

#ifdef CONFIG_ETH_TM_ACCURATE_CONTROL
	if (gmacdev->phy_base != 4)
		goto skip_tm;

	if (need_drop == 1 && ((skb->data[0] & 0xf) == 0x00) && (skb->data[1] == 0x02)) {
		kfree_skb(skb);
		return;
	}
	if (tm_limit != TM_ETH_MAX_LIMIT) {
		if (((skb->data[0] & 0xf) == 0x00) && (skb->data[1] == 0x02))
			counter_rx += skb->len;

		if (eth_tm_dir == TM_ETH_BOTH_DIRECTION)
			tmp_counter = counter_rx + counter_tx;
		else
			tmp_counter = counter_rx;

		if (tmp_counter >= tm_limit && msg_received == 1) {
			sendata.backpid = 0;
			sendata.leftdata = 0;
			sendata.tm_dir = 0;
			msg_received = 0;

			/* This runs in NAPI (softirq) context, so atomic
			 * allocations are required.
			 */
			skb_send = alloc_skb(NLMSG_SPACE(1024), GFP_ATOMIC);
			if (skb_send == NULL) {
				printk("alloc skb failed\n");
				return;
			}

			NETLINK_CB(skb_send).pid = 0;
			NETLINK_CB(skb_send).dst_group = 1;

			nlh = NLMSG_PUT(skb_send, 0, 0, 0, 1024);
			memcpy(NLMSG_DATA(nlh), &sendata, sizeof(sendata));

			if (eth_tm_dir != TM_ETH_CONTROL_ABOLISH) {
				/* netlink_broadcast() consumes skb_send */
				nt_ret = netlink_broadcast(nl_sk, skb_send, 0, 1, GFP_ATOMIC);
				need_drop = 1;
				printk("Need Drop ...............\n");
				if (nt_ret < 0)
					printk("KERNEL broadcast failed %d ...\n", nt_ret);
			} else {
				kfree_skb(skb_send);
			}

			goto reset_counters;

nlmsg_failure:
			/* NLMSG_PUT() jumps here if the skb runs out of room */
			kfree_skb(skb_send);
reset_counters:
			counter_rx = 0;
			counter_tx = 0;
			tmp_counter = 0;
			tm_limit = TM_ETH_MAX_LIMIT;
		}
	}
skip_tm:
#endif

#ifdef CONFIG_PSSTHRGH
	if (ipv6_pssthrgh_enable) {
		/* packet from the WAN side and it's IPv6 */
		if (ntohs(skb->protocol) == ETH_P_IPV6 &&
				!strcmp(skb->dev->name, "ethwan")) {
			struct net_device_stats *pstats;
			struct net_device *passdev;

			passdev = dev_get_by_name(&init_net, "pas0");
			if (passdev == NULL) {
				printk("*** Can't find the virtual device 'pas0' ***\n");
				goto CTN;
			}
			skb->dev = passdev;
			pstats = &(DNI_ENET_INFO(passdev)->dev_stats);
			passdev->last_rx = jiffies;

			pstats->rx_packets++;
			pstats->rx_bytes += skb->len;
		}
	}
CTN:
#endif
	napi_gro_receive(napi, skb);
}
EXPORT_SYMBOL(nss_gmac_receive);


/**
 * @brief Receive events from nss-drv
 * @param[in] pointer to net device context
 * @param[in] event type
 * @param[in] pointer to buffer
 * @param[in] length of buffer
 * @return Returns void
 */
void nss_gmac_event_receive(void *if_ctx, int ev_type,
				void *os_buf, uint32_t len)
{
	struct net_device *netdev = NULL;
	struct nss_gmac_dev *gmacdev = NULL;

	netdev = (struct net_device *)if_ctx;
	gmacdev = (struct nss_gmac_dev *)netdev_priv(netdev);
	BUG_ON(!gmacdev);

	switch (ev_type) {
	case NSS_GMAC_EVENT_STATS:
		nss_gmac_stats_receive(gmacdev,
				(struct nss_gmac_stats *)os_buf);
		break;

	default:
		netdev_dbg(netdev, "%s: Unknown Event from NSS\n", __func__);
		break;
	}
}
EXPORT_SYMBOL(nss_gmac_event_receive);

/**
 * @brief Notify linkup event to NSS
 * @param[in] pointer to gmac context
 * @return Returns void.
 */
static void nss_notify_linkup(struct nss_gmac_dev *gmacdev)
{
	uint32_t link = 0;

	if (!test_bit(__NSS_GMAC_UP, &gmacdev->flags))
		return;

	link = 0x1;
	if (gmacdev->speed == SPEED_1000)
		link |= 0x4;
	else if (gmacdev->speed == SPEED_100)
		link |= 0x2;

	gmacdev->data_plane_ops->link_state(gmacdev->data_plane_ctx, link);
}
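
/*
 * The link word passed to the data plane above encodes bit 0 as "link up",
 * bit 1 as 100 Mbps and bit 2 as 1000 Mbps; 10 Mbps is implied when only
 * bit 0 is set.
 */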

/**
 * This function checks for completion of PHY init
 * and proceeds to initialize the mac based on parameters
 * read from PHY registers. It indicates presence of carrier to the OS.
 * @param[in] pointer to gmac context
 * @return Returns void.
 */
void nss_gmac_linkup(struct nss_gmac_dev *gmacdev)
{
	struct net_device *netdev = gmacdev->netdev;
	uint32_t gmac_tx_desc = 0, gmac_rx_desc = 0;
	uint32_t mode = NSS_GMAC_MODE0;

#ifdef RUMI_EMULATION_SUPPORT
	nss_gmac_spare_ctl(gmacdev);
#endif

	if (nss_gmac_check_phy_init(gmacdev) != 0) {
		gmacdev->link_state = LINKDOWN;
		return;
	}

	gmacdev->link_state = LINKUP;
	if (nss_gmac_dev_set_speed(gmacdev) != 0)
		return;

	if (gmacdev->first_linkup_done == 0) {
		nss_gmac_reset(gmacdev);
		nss_gmac_disable_interrupt_all(gmacdev);
		nss_gmac_clear_interrupt(gmacdev);

		/* Program Tx/Rx descriptor base addresses */
		nss_gmac_init_tx_desc_base(gmacdev);
		nss_gmac_init_rx_desc_base(gmacdev);
		nss_gmac_dma_bus_mode_init(gmacdev, dma_bus_mode_val);
		nss_gmac_dma_axi_bus_mode_init(gmacdev, dma_axi_bus_mode_val);
		nss_gmac_dma_control_init(gmacdev, dma_omr);
		nss_gmac_disable_mmc_tx_interrupt(gmacdev, 0xFFFFFFFF);
		nss_gmac_disable_mmc_rx_interrupt(gmacdev, 0xFFFFFFFF);
		nss_gmac_disable_mmc_ipc_rx_interrupt(gmacdev, 0xFFFFFFFF);

		/* Restore the Jumbo support settings as per the corresponding
		 * interface mtu
		 */
		nss_gmac_change_mtu(gmacdev->netdev, gmacdev->netdev->mtu);
		gmacdev->first_linkup_done = 1;
	}

	nss_gmac_mac_init(gmacdev);

	if (gmacdev->data_plane_ops->open(gmacdev->data_plane_ctx, gmac_tx_desc,
				gmac_rx_desc, mode) != NSS_GMAC_SUCCESS) {
		netdev_dbg(netdev, "%s: data plane open command un-successful\n",
								__func__);
		gmacdev->link_state = LINKDOWN;
		return;
	}
	netdev_dbg(netdev, "%s: data plane open command successfully issued\n",
								__func__);

	nss_notify_linkup(gmacdev);

	netif_carrier_on(netdev);
}


/**
 * Save current state of link and
 * indicate absence of carrier to the OS.
 * @param[in] nss_gmac_dev *
 * @return Returns void.
 */
void nss_gmac_linkdown(struct nss_gmac_dev *gmacdev)
{
	struct net_device *netdev = gmacdev->netdev;

	netdev_info(netdev, "Link down\n");

	if (test_bit(__NSS_GMAC_UP, &gmacdev->flags)) {
		netif_carrier_off(netdev);

		gmacdev->data_plane_ops->link_state(gmacdev->data_plane_ctx, 0);
	}
	gmacdev->link_state = LINKDOWN;
	gmacdev->duplex_mode = 0;
	gmacdev->speed = 0;
}


/**
 * @brief Link state change callback
 * @param[in] struct net_device *
 * @return Returns void.
 */
void nss_gmac_adjust_link(struct net_device *netdev)
{
	int32_t status = 0;
	struct nss_gmac_dev *gmacdev = NULL;

	gmacdev = netdev_priv(netdev);

	if (!test_bit(__NSS_GMAC_UP, &gmacdev->flags))
		return;

	status = nss_gmac_check_link(gmacdev);
	mutex_lock(&gmacdev->link_mutex);
	if (status == LINKUP && gmacdev->link_state == LINKDOWN)
		nss_gmac_linkup(gmacdev);
	else if (status == LINKDOWN && gmacdev->link_state == LINKUP) {
		/*
		 * Process a link down notification only if
		 * link polling is enabled via private flags.
		 */
		if (gmacdev->drv_flags & NSS_GMAC_PRIV_FLAG(LINKPOLL)) {
			nss_gmac_linkdown(gmacdev);
		}
	}
	mutex_unlock(&gmacdev->link_mutex);
}

void nss_gmac_start_up(struct nss_gmac_dev *gmacdev)
{
	if (test_bit(__NSS_GMAC_LINKPOLL, &gmacdev->flags)) {
		if (!IS_ERR(gmacdev->phydev)) {
			netdev_dbg(gmacdev->netdev, "%s: start phy 0x%x\n",
					__func__, gmacdev->phydev->phy_id);
			phy_start(gmacdev->phydev);
			phy_start_aneg(gmacdev->phydev);
		} else {
			netdev_dbg(gmacdev->netdev, "%s: Invalid PHY device for a link polled interface\n",
								__func__);
		}
		return;
	}
	netdev_dbg(gmacdev->netdev, "%s: Force link up\n", __func__);
	/*
	 * Force link up if link polling is disabled
	 */
	mutex_lock(&gmacdev->link_mutex);
	nss_gmac_linkup(gmacdev);
	mutex_unlock(&gmacdev->link_mutex);
}

/**
 * @brief Function to transmit a given packet on the wire.
 *
 * Whenever the Linux kernel has a packet ready to be transmitted, this
 * function is called. It prepares the packet and the descriptor, and
 * enables/resumes the transmission.
 * @param[in] pointer to sk_buff structure.
 * @param[in] pointer to net_device structure.
 * @return NETDEV_TX_xxx
 */
int32_t nss_gmac_xmit_frames(struct sk_buff *skb, struct net_device *netdev)
{
	int msg_status = 0;
	struct nss_gmac_dev *gmacdev = NULL;

	BUG_ON(skb == NULL);
	if (skb->len < ETH_HLEN) {
		netdev_dbg(netdev, "%s: skb->len < ETH_HLEN\n", __func__);
		goto drop;
	}

	gmacdev = (struct nss_gmac_dev *)netdev_priv(netdev);
	BUG_ON(gmacdev == NULL);
	BUG_ON(gmacdev->netdev != netdev);

	netdev_dbg(netdev, "%s: Tx packet, len %d, CSUM %d\n",
			__func__, skb->len, skb->ip_summed);

	if (need_blink_wan_led && gmacdev->phy_base == 4)
		detect_eth_wan_data();

#ifdef CONFIG_ETH_TM_ACCURATE_CONTROL
	if (gmacdev->phy_base != 4)
		goto skip_tm;
	if (need_drop == 1 && ((skb->data[14] & 0xf) == 0x00) && (skb->data[15] == 0x02))
		goto drop;
	if (tm_limit != TM_ETH_MAX_LIMIT)
		if (((skb->data[14] & 0xf) == 0x00) && (skb->data[15] == 0x02))
			counter_tx += skb->len;
skip_tm:
#endif

	msg_status = gmacdev->data_plane_ops->xmit(gmacdev->data_plane_ctx, skb);

	if (likely(msg_status == NSS_GMAC_SUCCESS))
		return NETDEV_TX_OK;

drop:
	netdev_dbg(netdev, "%s: dropping skb\n", __func__);
	dev_kfree_skb_any(skb);
	netdev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}

/**
 * @brief Function used when the interface is opened for use.
 *
 * We register the nss_gmac_open function to the Linux open(). Basically this
 * function prepares the device for operation. It is called whenever ifconfig
 * (in Linux) activates the device (for example "ifconfig eth0 up"). This
 * function registers the system resources needed.
 *	- Disables interrupts
 *	- Starts the Linux network queue interface
 *	- Checks for NSS init completion and determines initial link status
 *	- Starts the timer to detect cable plug/unplug
 * @param[in] pointer to net_device structure.
 * @return Returns 0 on success and error status upon failure.
 */
int nss_gmac_open(struct net_device *netdev)
{
	struct device *dev = NULL;
	struct nss_gmac_dev *gmacdev = (struct nss_gmac_dev *)netdev_priv(netdev);
	struct nss_gmac_global_ctx *ctx = NULL;
	int err;

	if (!gmacdev)
		return -EINVAL;

	dev = &netdev->dev;
	ctx = gmacdev->ctx;

	netif_carrier_off(netdev);

	/* Disable interrupts */
	nss_gmac_disable_interrupt_all(gmacdev);

	if (!gmacdev->data_plane_ops) {
		netdev_dbg(netdev, "%s: offload is not enabled, bring up gmac with slowpath\n",
								__func__);

		netif_napi_add(netdev, &gmacdev->napi, nss_gmac_poll,
							NSS_GMAC_NAPI_BUDGET);
		/* Initialize the RX/TX rings */
		dma_set_coherent_mask(dev, 0xffffffff);
		nss_gmac_setup_rx_desc_queue(gmacdev, dev,
					NSS_GMAC_RX_DESC_SIZE, RINGMODE);
		nss_gmac_setup_tx_desc_queue(gmacdev, dev,
					NSS_GMAC_TX_DESC_SIZE, RINGMODE);
		nss_gmac_rx_refill(gmacdev);

		/* Register IRQ */
		err = request_irq(netdev->irq, nss_gmac_handle_irq,
					IRQF_DISABLED, "nss-gmac", gmacdev);
		if (err) {
			netdev_dbg(netdev, "Mac %d IRQ %d request failed\n",
						gmacdev->macid, netdev->irq);
			return err;
		}

		gmacdev->data_plane_ops = &nss_gmac_slowpath_ops;
		gmacdev->data_plane_ctx = gmacdev->netdev;
	}

	/**
	 * Now platform dependent initialization.
	 */
	gmacdev->speed = SPEED_100;
	gmacdev->duplex_mode = DUPLEX_FULL;

	/**
	 * Let's read the version of the IP into the device structure
	 */
	nss_gmac_read_version(gmacdev);

	/*
	 * Inform the Linux networking stack about the hardware capability of
	 * checksum offloading and other features. Each data plane is
	 * responsible for maintaining the feature set it supports.
	 */
	gmacdev->data_plane_ops->set_features(netdev);

	/**
	 * Set GMAC state to UP before link state is checked
	 */
	set_bit(__NSS_GMAC_UP, &gmacdev->flags);
	netif_start_queue(netdev);

	gmacdev->link_state = LINKDOWN;

	nss_gmac_start_up(gmacdev);

	gmacdev->data_plane_ops->mac_addr(gmacdev->data_plane_ctx,
					(uint8_t *)gmacdev->netdev->dev_addr);

	return 0;
}

/**
 * @brief Function used when the interface is closed.
 *
 * This function is registered to the Linux stop() function. It is called
 * whenever ifconfig (in Linux) closes the device (for example
 * "ifconfig eth0 down"). This releases all the system resources allocated
 * during the open call.
 *	- Disable the device interrupts
 *	- Send a link change event to the NSS GMAC driver.
 *	- Stop the Linux network queue interface
 *	- Cancel the timer registered for cable plug/unplug tracking
 * @param[in] pointer to net_device structure.
 * @return Returns 0 on success and error status upon failure.
 */
int nss_gmac_close(struct net_device *netdev)
{
	struct nss_gmac_dev *gmacdev = (struct nss_gmac_dev *)netdev_priv(netdev);

	if (!gmacdev)
		return -EINVAL;

	WARN_ON(!test_bit(__NSS_GMAC_UP, &gmacdev->flags));

	set_bit(__NSS_GMAC_CLOSING, &gmacdev->flags);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);

	nss_gmac_rx_disable(gmacdev);
	nss_gmac_tx_disable(gmacdev);

	nss_gmac_disable_interrupt_all(gmacdev);
	gmacdev->data_plane_ops->link_state(gmacdev->data_plane_ctx, 0);

	if (!IS_ERR(gmacdev->phydev))
		phy_stop(gmacdev->phydev);

	clear_bit(__NSS_GMAC_UP, &gmacdev->flags);
	clear_bit(__NSS_GMAC_CLOSING, &gmacdev->flags);

	gmacdev->data_plane_ops->close(gmacdev->data_plane_ctx);

	return 0;
}

/**
 * @brief Function to handle a Tx Hang.
 * This is a software hook (Linux) to handle transmitter hang if any.
 * @param[in] pointer to net_device structure
 * @return void.
 */
void nss_gmac_tx_timeout(struct net_device *netdev)
{
	struct nss_gmac_dev *gmacdev = NULL;

	gmacdev = (struct nss_gmac_dev *)netdev_priv(netdev);
	BUG_ON(gmacdev == NULL);

	netif_carrier_off(netdev);
	nss_gmac_disable_dma_tx(gmacdev);
	nss_gmac_flush_tx_fifo(gmacdev);
	nss_gmac_enable_dma_tx(gmacdev);
	netif_carrier_on(netdev);
	netif_start_queue(netdev);
}


/**
 * @brief Function to change the Maximum Transfer Unit.
 * @param[in] pointer to net_device structure.
 * @param[in] New value for maximum frame size.
 * @return Returns 0 on success, error code on failure.
 */
int32_t nss_gmac_change_mtu(struct net_device *netdev, int32_t newmtu)
{
	struct nss_gmac_dev *gmacdev = NULL;

	gmacdev = (struct nss_gmac_dev *)netdev_priv(netdev);
	if (!gmacdev)
		return -EINVAL;

	if (newmtu > NSS_GMAC_JUMBO_MTU)
		return -EINVAL;

	if (gmacdev->data_plane_ops->change_mtu(gmacdev->data_plane_ctx, newmtu)
							 != NSS_GMAC_SUCCESS)
		return -EAGAIN;

	if (newmtu <= NSS_GMAC_NORMAL_FRAME_MTU) {
		nss_gmac_jumbo_frame_disable(gmacdev);
		nss_gmac_twokpe_frame_disable(gmacdev);
	} else if (newmtu <= NSS_GMAC_MINI_JUMBO_FRAME_MTU) {
		nss_gmac_jumbo_frame_disable(gmacdev);
		nss_gmac_twokpe_frame_enable(gmacdev);
	} else if (newmtu <= NSS_GMAC_FULL_JUMBO_FRAME_MTU) {
		nss_gmac_jumbo_frame_enable(gmacdev);
	}

	netdev->mtu = newmtu;
	return 0;
}
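
/*
 * Summary of the frame-size programming done above, as a function of the
 * requested MTU:
 *
 *	newmtu <= NSS_GMAC_NORMAL_FRAME_MTU     : jumbo off, twokpe (2K frame) off
 *	newmtu <= NSS_GMAC_MINI_JUMBO_FRAME_MTU : jumbo off, twokpe (2K frame) on
 *	newmtu <= NSS_GMAC_FULL_JUMBO_FRAME_MTU : jumbo on
 */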

/*
 * nss_gmac_is_in_open_state()
 *	Return whether a gmac is opened or not
 */
bool nss_gmac_is_in_open_state(struct net_device *netdev)
{
	struct nss_gmac_dev *gmacdev = (struct nss_gmac_dev *)netdev_priv(netdev);

	if (test_bit(__NSS_GMAC_UP, &gmacdev->flags))
		return true;
	return false;
}
EXPORT_SYMBOL(nss_gmac_is_in_open_state);

/*
 * nss_gmac_reset_netdev_features()
 *	Resets the netdev features
 */
static inline void nss_gmac_reset_netdev_features(struct net_device *netdev)
{
	netdev->features = 0;
	netdev->hw_features = 0;
	netdev->vlan_features = 0;
	netdev->wanted_features = 0;
}

/*
 * nss_gmac_override_data_plane()
 * @param[netdev] netdev instance that is going to register
 * @param[dp_ops] data plane ops for changing MAC address/MTU/link state
 * @param[ctx] passing the ctx of this nss_phy_if to the gmac
 *
 * @return Return SUCCESS or FAILURE
 */
int nss_gmac_override_data_plane(struct net_device *netdev,
				struct nss_gmac_data_plane_ops *dp_ops,
				void *ctx)
{
	struct nss_gmac_dev *gmacdev = (struct nss_gmac_dev *)netdev_priv(netdev);

	BUG_ON(!gmacdev);

	if (!dp_ops->open || !dp_ops->close || !dp_ops->link_state
		|| !dp_ops->mac_addr || !dp_ops->change_mtu || !dp_ops->xmit || !dp_ops->set_features) {
		netdev_dbg(netdev, "%s: All the op functions must be present, reject this registration\n",
								__func__);
		return NSS_GMAC_FAILURE;
	}

	/*
	 * If this gmac is up, close the netdev to force TX/RX to stop, and
	 * also reset the features
	 */
	if (test_bit(__NSS_GMAC_UP, &gmacdev->flags)) {
		nss_gmac_close(netdev);
		nss_gmac_reset_netdev_features(netdev);
	}

	/* Record the data_plane_ctx, data_plane_ops */
	gmacdev->data_plane_ctx = ctx;
	gmacdev->data_plane_ops = dp_ops;
	gmacdev->first_linkup_done = 0;

	return NSS_GMAC_SUCCESS;
}
EXPORT_SYMBOL(nss_gmac_override_data_plane);

/*
 * nss_gmac_start_data_plane()
 *	Data plane informs the netdev that it is ready to start
 * @param[netdev] net_device context
 * @param[ctx] context of the data plane
 */
void nss_gmac_start_data_plane(struct net_device *netdev, void *ctx)
{
	struct nss_gmac_dev *gmacdev = (struct nss_gmac_dev *)netdev_priv(netdev);
	struct nss_gmac_global_ctx *global_ctx = gmacdev->ctx;

	if (test_bit(__NSS_GMAC_UP, &gmacdev->flags)) {
		netdev_dbg(netdev, "This netdev is already up, something is wrong\n");
		return;
	}
	if (gmacdev->data_plane_ctx == ctx) {
		netdev_dbg(netdev, "Data plane cookie matches, let's start the netdev again\n");
		queue_delayed_work(global_ctx->gmac_workqueue,
				&gmacdev->gmacwork, NSS_GMAC_LINK_CHECK_TIME);
	}
}
EXPORT_SYMBOL(nss_gmac_start_data_plane);

/*
 * nss_gmac_restore_data_plane()
 *	Restore the netdev to the slowpath data plane
 * @param[netdev] The netdev to be restored to slowpath
 */
void nss_gmac_restore_data_plane(struct net_device *netdev)
{
	struct nss_gmac_dev *gmacdev = (struct nss_gmac_dev *)netdev_priv(netdev);

	/*
	 * If this gmac is up, close the netdev to force TX/RX to stop, and
	 * also reset the features
	 */
	if (test_bit(__NSS_GMAC_UP, &gmacdev->flags)) {
		nss_gmac_close(netdev);
		nss_gmac_reset_netdev_features(netdev);
	}
	gmacdev->data_plane_ctx = netdev;
	gmacdev->data_plane_ops = &nss_gmac_slowpath_ops;
}
EXPORT_SYMBOL(nss_gmac_restore_data_plane);

/*
 * nss_gmac_get_netdev_by_macid()
 *	Return the net device of the corresponding macid if it exists
 */
struct net_device *nss_gmac_get_netdev_by_macid(int macid)
{
	struct nss_gmac_dev *gmacdev = ctx.nss_gmac[macid];

	if (!gmacdev)
		return NULL;
	return gmacdev->netdev;
}
EXPORT_SYMBOL(nss_gmac_get_netdev_by_macid);

/*
 * nss_gmac_open_work()
 *	Schedule delayed work to open the netdev again
 */
void nss_gmac_open_work(struct work_struct *work)
{
	struct nss_gmac_dev *gmacdev = container_of(to_delayed_work(work),
						struct nss_gmac_dev, gmacwork);

	netdev_dbg(gmacdev->netdev, "Do the network up in delayed queue %s\n",
							gmacdev->netdev->name);
	nss_gmac_open(gmacdev->netdev);
}
1399