• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/arch/alpha/lib/
1/*
2 * arch/alpha/lib/checksum.c
3 *
4 * This file contains network checksum routines that are better done
5 * in an architecture-specific manner due to speed..
6 * Comments in other versions indicate that the algorithms are from RFC1071
7 *
8 * accelerated versions (and 21264 assembly versions ) contributed by
9 *	Rick Gorton	<rick.gorton@alpha-processor.com>
10 */
11
12#include <linux/module.h>
13#include <linux/string.h>
14
15#include <asm/byteorder.h>
16
static inline unsigned short from64to16(unsigned long x)
{
	/* Fold a 64-bit one's-complement sum down to 16 bits.
	   Going through unions lets the compiler use extract
	   instructions, which beat the classic shift/mask sequence
	   on Alpha.  */

	union {
		unsigned long	ul;
		unsigned int	ui[2];
		unsigned short	us[4];
	} stage1, stage2, stage3;

	/* 64 -> 33 bits: add the two 32-bit halves.  */
	stage1.ul = x;
	stage2.ul = (unsigned long) stage1.ui[0]
		  + (unsigned long) stage1.ui[1];

	/* 33 -> 18 bits: sum the three live 16-bit pieces; the top
	   quarter (stage2.us[3]) is known to be zero, so it is not
	   added in.  */
	stage3.ul = (unsigned long) stage2.us[0]
		  + (unsigned long) stage2.us[1]
		  + (unsigned long) stage2.us[2];

	/* 18 -> 16 bits: stage3.us[2] is likewise always zero for
	   this final add.  */
	return stage3.us[0] + stage3.us[1];
}
39
40/*
41 * computes the checksum of the TCP/UDP pseudo-header
42 * returns a 16-bit checksum, already complemented.
43 */
44__sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
45				   unsigned short len,
46				   unsigned short proto,
47				   __wsum sum)
48{
49	return (__force __sum16)~from64to16(
50		(__force u64)saddr + (__force u64)daddr +
51		(__force u64)sum + ((len + proto) << 8));
52}
53
54__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
55				   unsigned short len,
56				   unsigned short proto,
57				   __wsum sum)
58{
59	unsigned long result;
60
61	result = (__force u64)saddr + (__force u64)daddr +
62		 (__force u64)sum + ((len + proto) << 8);
63
64	/* Fold down to 32-bits so we don't lose in the typedef-less
65	   network stack.  */
66	/* 64 to 33 */
67	result = (result & 0xffffffff) + (result >> 32);
68	/* 33 to 32 */
69	result = (result & 0xffffffff) + (result >> 32);
70	return (__force __wsum)result;
71}
72EXPORT_SYMBOL(csum_tcpudp_nofold);
73
/*
 * Do a 64-bit checksum on an arbitrary memory area..
 *
 * This isn't a great routine, but it's not _horrible_ either. The
 * inner loop could be unrolled a bit further, and there are better
 * ways to do the carry, but this is reasonable.
 *
 * Strategy: align buff up through byte, 16-bit and 32-bit
 * boundaries, then run the main loop on 64-bit words, then mop up
 * the 4-, 2- and 1-byte tails, and finally fold everything down to
 * 16 bits with from64to16().
 */
static inline unsigned long do_csum(const unsigned char * buff, int len)
{
	int odd, count;
	unsigned long result = 0;

	if (len <= 0)
		goto out;
	/* If buff starts on an odd address, consume one byte so the
	   word loads below are aligned.  All subsequent partial sums
	   are then byte-swapped relative to an even-aligned checksum;
	   the swap at the end undoes that.  */
	odd = 1 & (unsigned long) buff;
	if (odd) {
		result = *buff << 8;
		len--;
		buff++;
	}
	count = len >> 1;		/* nr of 16-bit words.. */
	if (count) {
		/* Consume one 16-bit word if needed to reach 32-bit
		   alignment.  */
		if (2 & (unsigned long) buff) {
			result += *(unsigned short *) buff;
			count--;
			len -= 2;
			buff += 2;
		}
		count >>= 1;		/* nr of 32-bit words.. */
		if (count) {
			/* Consume one 32-bit word if needed to reach
			   64-bit alignment.  */
			if (4 & (unsigned long) buff) {
				result += *(unsigned int *) buff;
				count--;
				len -= 4;
				buff += 4;
			}
			count >>= 1;	/* nr of 64-bit words.. */
			if (count) {
				/* Main loop: 64-bit adds with manual
				   end-around carry.  If result wrapped,
				   it is now smaller than the word just
				   added, so (w > result) yields the
				   carry bit for the next iteration.  */
				unsigned long carry = 0;
				do {
					unsigned long w = *(unsigned long *) buff;
					count--;
					buff += 8;
					result += carry;
					result += w;
					carry = (w > result);
				} while (count);
				result += carry;
				/* Fold to 33 bits so the tail adds
				   below cannot overflow 64 bits.  */
				result = (result & 0xffffffff) + (result >> 32);
			}
			/* Tail: at most one 32-bit word remains.  */
			if (len & 4) {
				result += *(unsigned int *) buff;
				buff += 4;
			}
		}
		/* Tail: at most one 16-bit word remains.  */
		if (len & 2) {
			result += *(unsigned short *) buff;
			buff += 2;
		}
	}
	/* Tail: at most one byte remains.  */
	if (len & 1)
		result += *buff;
	result = from64to16(result);
	/* Undo the byte swap introduced by the odd start address.  */
	if (odd)
		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out:
	return result;
}
142
143/*
144 *	This is a version of ip_compute_csum() optimized for IP headers,
145 *	which always checksum on 4 octet boundaries.
146 */
147__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
148{
149	return (__force __sum16)~do_csum(iph,ihl*4);
150}
151
152/*
153 * computes the checksum of a memory block at buff, length len,
154 * and adds in "sum" (32-bit)
155 *
156 * returns a 32-bit number suitable for feeding into itself
157 * or csum_tcpudp_magic
158 *
159 * this function must be called with even lengths, except
160 * for the last fragment, which may be odd
161 *
162 * it's best to have buff aligned on a 32-bit boundary
163 */
164__wsum csum_partial(const void *buff, int len, __wsum sum)
165{
166	unsigned long result = do_csum(buff, len);
167
168	/* add in old sum, and carry.. */
169	result += (__force u32)sum;
170	/* 32+c bits -> 32 bits */
171	result = (result & 0xffffffff) + (result >> 32);
172	return (__force __wsum)result;
173}
174
175EXPORT_SYMBOL(csum_partial);
176
177/*
178 * this routine is used for miscellaneous IP-like checksums, mainly
179 * in icmp.c
180 */
181__sum16 ip_compute_csum(const void *buff, int len)
182{
183	return (__force __sum16)~from64to16(do_csum(buff,len));
184}
185