/*******************************************************************************
  This contains the functions to handle the enhanced descriptors.

  Copyright (C) 2007-2009  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include "common.h"

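/* Decode the TX status word (DES0) of an enhanced descriptor.
 * Error counters in @x and @stats are bumped for every error bit that
 * is set, the TX FIFO is flushed on flush/underflow/payload errors,
 * and -1 is returned when the error summary bit is set (0 otherwise).
 */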
static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p, unsigned long ioaddr)
{
	int ret = 0;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.etx.error_summary)) {
		CHIP_DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
		if (unlikely(p->des01.etx.jabber_timeout)) {
			CHIP_DBG(KERN_ERR "\tjabber_timeout error\n");
			x->tx_jabber++;
		}

		if (unlikely(p->des01.etx.frame_flushed)) {
			CHIP_DBG(KERN_ERR "\tframe_flushed error\n");
			x->tx_frame_flushed++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		if (unlikely(p->des01.etx.loss_carrier)) {
			CHIP_DBG(KERN_ERR "\tloss_carrier error\n");
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(p->des01.etx.no_carrier)) {
			CHIP_DBG(KERN_ERR "\tno_carrier error\n");
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(p->des01.etx.late_collision)) {
			CHIP_DBG(KERN_ERR "\tlate_collision error\n");
			stats->collisions += p->des01.etx.collision_count;
		}
		if (unlikely(p->des01.etx.excessive_collisions)) {
			CHIP_DBG(KERN_ERR "\texcessive_collisions\n");
			stats->collisions += p->des01.etx.collision_count;
		}
		if (unlikely(p->des01.etx.excessive_deferral)) {
			CHIP_DBG(KERN_INFO "\texcessive tx_deferral\n");
			x->tx_deferred++;
		}

		if (unlikely(p->des01.etx.underflow_error)) {
			CHIP_DBG(KERN_ERR "\tunderflow error\n");
			dwmac_dma_flush_tx_fifo(ioaddr);
			x->tx_underflow++;
		}

		if (unlikely(p->des01.etx.ip_header_error)) {
			CHIP_DBG(KERN_ERR "\tTX IP header csum error\n");
			x->tx_ip_header_error++;
		}

		if (unlikely(p->des01.etx.payload_error)) {
			CHIP_DBG(KERN_ERR "\tAddr/Payload csum error\n");
			x->tx_payload_error++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		ret = -1;
	}

	if (unlikely(p->des01.etx.deferred)) {
		CHIP_DBG(KERN_INFO "GMAC TX status: tx deferred\n");
		x->tx_deferred++;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (p->des01.etx.vlan_frame) {
		CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
		x->tx_vlan++;
	}
#endif

	return ret;
}

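/* Length programmed into buffer 1 of the descriptor. */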
static int enh_desc_get_tx_len(struct dma_desc *p)
{
	return p->des01.etx.buffer1_size;
}

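/* Fold the frame type, IP header error and payload error bits of RDES0
 * into a 3-bit status and map it onto the checksum outcome expected by
 * the core (see the table below).
 */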
static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
{
	int ret = good_frame;
	u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;

	/* bits 5 7 0 | Frame status
	 * ----------------------------------------------------------
	 *      0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
	 *      1 0 0 | IPv4/6 No CSUM errors.
	 *      1 0 1 | IPv4/6 CSUM PAYLOAD error
	 *      1 1 0 | IPv4/6 CSUM IP HEADER error
	 *      1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errors
	 *      0 0 1 | IPv4/6 unsupported IP PAYLOAD
	 *      0 1 1 | COE bypassed: no IPv4/6 frame
	 *      0 1 0 | Reserved.
	 */
	if (status == 0x0) {
		CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
		ret = llc_snap;
	} else if (status == 0x4) {
		CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errors.\n");
		ret = good_frame;
	} else if (status == 0x5) {
		CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
		ret = csum_none;
	} else if (status == 0x6) {
		CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
		ret = csum_none;
	} else if (status == 0x7) {
		CHIP_DBG(KERN_ERR
		    "RX Des0 status: IPv4/6 Header and Payload Error.\n");
		ret = csum_none;
	} else if (status == 0x1) {
		CHIP_DBG(KERN_ERR
		    "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
		ret = discard_frame;
	} else if (status == 0x3) {
		CHIP_DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
		ret = discard_frame;
	}
	return ret;
}

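/* Decode the RX status word (DES0) of an enhanced descriptor.
 * Returns good_frame, csum_none, llc_snap or discard_frame and updates
 * the error counters on the way.
 */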
static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p)
{
	int ret = good_frame;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.erx.error_summary)) {
		CHIP_DBG(KERN_ERR "GMAC RX Error Summary 0x%08x\n",
				  p->des01.erx);
		if (unlikely(p->des01.erx.descriptor_error)) {
			CHIP_DBG(KERN_ERR "\tdescriptor error\n");
			x->rx_desc++;
			stats->rx_length_errors++;
		}
		if (unlikely(p->des01.erx.overflow_error)) {
			CHIP_DBG(KERN_ERR "\toverflow error\n");
			x->rx_gmac_overflow++;
		}

		if (unlikely(p->des01.erx.ipc_csum_error))
			CHIP_DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");

		if (unlikely(p->des01.erx.late_collision)) {
			CHIP_DBG(KERN_ERR "\tlate_collision error\n");
			stats->collisions++;
		}
		if (unlikely(p->des01.erx.receive_watchdog)) {
			CHIP_DBG(KERN_ERR "\treceive_watchdog error\n");
			x->rx_watchdog++;
		}
		if (unlikely(p->des01.erx.error_gmii)) {
			CHIP_DBG(KERN_ERR "\tReceive Error\n");
			x->rx_mii++;
		}
		if (unlikely(p->des01.erx.crc_error)) {
			CHIP_DBG(KERN_ERR "\tCRC error\n");
			x->rx_crc++;
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}

	/* After a payload csum error, the ES bit is set.
	 * This does not match the information reported in the databook.
	 * Either way, we need to determine whether the hw CSUM computation
	 * succeeded and report this info to the upper layers. */
	ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
		p->des01.erx.frame_type, p->des01.erx.payload_csum_error);

	if (unlikely(p->des01.erx.dribbling)) {
		CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.sa_filter_fail)) {
		CHIP_DBG(KERN_ERR "GMAC RX: Source Address filter fail\n");
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.da_filter_fail)) {
		CHIP_DBG(KERN_ERR "GMAC RX: Dest Address filter fail\n");
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.length_error)) {
		CHIP_DBG(KERN_ERR "GMAC RX: length error\n");
		x->rx_length++;
		ret = discard_frame;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (p->des01.erx.vlan_tag) {
		CHIP_DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
		x->rx_vlan++;
	}
#endif
	return ret;
}

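/* Hand every RX descriptor in the ring to the DMA, with both buffers
 * sized just under 8 KiB so jumbo frames fit. The last descriptor
 * closes the ring; disable_rx_ic suppresses per-descriptor RX
 * interrupts.
 */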
static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
				  int disable_rx_ic)
{
	int i;
	for (i = 0; i < ring_size; i++) {
		p->des01.erx.own = 1;
		p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
		/* To support jumbo frames */
		p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
		if (i == ring_size - 1)
			p->des01.erx.end_ring = 1;
		if (disable_rx_ic)
			p->des01.erx.disable_ic = 1;
		p++;
	}
}

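/* Initialize the TX ring: every descriptor starts out owned by the
 * host, and the last one closes the ring.
 */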
static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
{
	int i;

	for (i = 0; i < ring_size; i++) {
		p->des01.etx.own = 0;
		if (i == ring_size - 1)
			p->des01.etx.end_ring = 1;
		p++;
	}
}

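/* The OWN bit arbitrates each descriptor between the DMA engine (1)
 * and the host (0); the helpers below read and set it for both rings.
 */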
static int enh_desc_get_tx_owner(struct dma_desc *p)
{
	return p->des01.etx.own;
}

static int enh_desc_get_rx_owner(struct dma_desc *p)
{
	return p->des01.erx.own;
}

static void enh_desc_set_tx_owner(struct dma_desc *p)
{
	p->des01.etx.own = 1;
}

static void enh_desc_set_rx_owner(struct dma_desc *p)
{
	p->des01.erx.own = 1;
}

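/* Non-zero if the descriptor holds the last segment of a frame. */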
static int enh_desc_get_tx_ls(struct dma_desc *p)
{
	return p->des01.etx.last_segment;
}

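/* Recycle a TX descriptor after transmission: wipe it but keep the
 * end-of-ring flag so the ring geometry is preserved.
 */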
static void enh_desc_release_tx_desc(struct dma_desc *p)
{
	int ter = p->des01.etx.end_ring;

	memset(p, 0, sizeof(struct dma_desc));
	p->des01.etx.end_ring = ter;
}

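/* Fill a TX descriptor for transmission: mark the first segment,
 * split lengths above 4 KiB across the two buffers, and request
 * hardware checksum insertion (cic_full) when csum_flag is set.
 */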
static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				     int csum_flag)
{
	p->des01.etx.first_segment = is_fs;
	if (unlikely(len > BUF_SIZE_4KiB)) {
		p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
		p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
	} else {
		p->des01.etx.buffer1_size = len;
	}
	if (likely(csum_flag))
		p->des01.etx.checksum_insertion = cic_full;
}

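/* Suppress the completion interrupt for this descriptor. */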
static void enh_desc_clear_tx_ic(struct dma_desc *p)
{
	p->des01.etx.interrupt = 0;
}

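/* Close a frame on this descriptor: mark the last segment and request
 * a completion interrupt.
 */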
static void enh_desc_close_tx_desc(struct dma_desc *p)
{
	p->des01.etx.last_segment = 1;
	p->des01.etx.interrupt = 1;
}

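/* Frame length reported by the hardware in RDES0. */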
static int enh_desc_get_rx_frame_len(struct dma_desc *p)
{
	return p->des01.erx.frame_length;
}

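/* Callback table for the enhanced descriptor format. The core driver
 * presumably picks this table instead of the normal-descriptor ops
 * when the MAC uses enhanced descriptors, along the lines of:
 *
 *	if (priv->enh_desc)
 *		device->desc = &enh_desc_ops;
 *
 * (a sketch only; the priv/device field names here are assumptions,
 * not taken from this file).
 */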
struct stmmac_desc_ops enh_desc_ops = {
	.tx_status = enh_desc_get_tx_status,
	.rx_status = enh_desc_get_rx_status,
	.get_tx_len = enh_desc_get_tx_len,
	.init_rx_desc = enh_desc_init_rx_desc,
	.init_tx_desc = enh_desc_init_tx_desc,
	.get_tx_owner = enh_desc_get_tx_owner,
	.get_rx_owner = enh_desc_get_rx_owner,
	.release_tx_desc = enh_desc_release_tx_desc,
	.prepare_tx_desc = enh_desc_prepare_tx_desc,
	.clear_tx_ic = enh_desc_clear_tx_ic,
	.close_tx_desc = enh_desc_close_tx_desc,
	.get_tx_ls = enh_desc_get_tx_ls,
	.set_tx_owner = enh_desc_set_tx_owner,
	.set_rx_owner = enh_desc_set_rx_owner,
	.get_rx_frame_len = enh_desc_get_rx_frame_len,
};