1#ifndef _S390_CHECKSUM_H
2#define _S390_CHECKSUM_H
3
4/*
5 *  include/asm-s390/checksum.h
6 *    S390 fast network checksum routines
 *    see also arch/s390/lib/checksum.c
8 *
9 *  S390 version
10 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
11 *    Author(s): Ulrich Hild        (first version)
12 *               Martin Schwidefsky (heavily optimized CKSM version)
13 *               D.J. Barrow        (third attempt)
14 */
15
16#include <asm/uaccess.h>
17
18/*
19 * computes the checksum of a memory block at buff, length len,
20 * and adds in "sum" (32-bit)
21 *
22 * returns a 32-bit number suitable for feeding into itself
23 * or csum_tcpudp_magic
24 *
25 * this function must be called with even lengths, except
26 * for the last fragment, which may be odd
27 *
28 * it's best to have buff aligned on a 32-bit boundary
29 */
/* Out-of-line implementation — presumably lives in arch/s390/lib/checksum.c
 * (per the file header above); same contract as csum_partial_inline below. */
unsigned int
csum_partial(const unsigned char * buff, int len, unsigned int sum);
32
33/*
34 * csum_partial as an inline function
35 */
/*
 * Checksum [buff, buff+len) with the CKSM instruction, accumulating
 * into 'sum'.  CKSM takes an even/odd register pair: address in the
 * even register, remaining byte count in the odd one (hence the "a"
 * constraint on 'rp').
 */
extern inline unsigned int
csum_partial_inline(const unsigned char * buff, int len, unsigned int sum)
{
	register_pair rp;

	rp.subreg.even = (unsigned long) buff;	/* operand address */
	rp.subreg.odd = (unsigned long) len;	/* operand length */
	__asm__ __volatile__ (
		"0:  cksm %0,%1\n"    /* do checksum on longs */
		"    jo   0b\n"       /* loop while CC=3 (instruction was interrupted) */
                : "+&d" (sum), "+&a" (rp) : : "cc" );
	return sum;
}
49
50/*
51 * the same as csum_partial, but copies from src while it
52 * checksums
53 *
54 * here even more important to align src and dst on a 32-bit (or even
55 * better 64-bit) boundary
56 */
57
/*
 * Copy len bytes from src to dst, then checksum the freshly copied
 * data in dst.  Copy and checksum are two separate passes; the
 * accumulated 'sum' is carried into the checksum step.
 */
extern inline unsigned int
csum_partial_copy(const char *src, char *dst, int len,unsigned int sum)
{
	const unsigned char *copied = (const unsigned char *) dst;

	memcpy(dst, src, len);
	return csum_partial_inline(copied, len, sum);
}
64
65/*
66 * the same as csum_partial_copy, but copies from user space.
67 *
68 * here even more important to align src and dst on a 32-bit (or even
69 * better 64-bit) boundary
70 *
71 * Copy from userspace and compute checksum.  If we catch an exception
72 * then zero the rest of the buffer.
73 */
74extern inline unsigned int
75csum_partial_copy_from_user (const char *src, char *dst,
76                                          int len, unsigned int sum,
77                                          int *err_ptr)
78{
79	int missing;
80
81	missing = copy_from_user(dst, src, len);
82	if (missing) {
83		memset(dst + len - missing, 0, missing);
84		*err_ptr = -EFAULT;
85	}
86
87	return csum_partial(dst, len, sum);
88}
89
90
/*
 * Kernel-to-kernel variant of csum_partial_copy: no access checking,
 * just a plain copy followed by a checksum pass over the destination.
 */
extern inline unsigned int
csum_partial_copy_nocheck (const char *src, char *dst, int len, unsigned int sum)
{
	memcpy(dst, src, len);

	return csum_partial_inline((const unsigned char *) dst, len, sum);
}
97
/*
 *      Fold a partial checksum without adding pseudo headers
 *      (reduces the 32-bit running sum to a 16-bit result; defined
 *      out of line, presumably in arch/s390/lib/checksum.c)
 */
unsigned short csum_fold(unsigned int sum);
102
103/*
104 *	This is a version of ip_compute_csum() optimized for IP headers,
105 *	which always checksum on 4 octet boundaries.
106 *
107 */
extern inline unsigned short
ip_fast_csum(unsigned char *iph, unsigned int ihl)
{
	register_pair rp;
	unsigned long sum;

	/* even reg = header address, odd reg = length in bytes
	 * (ihl counts 32-bit words, hence the *4) */
	rp.subreg.even = (unsigned long) iph;
	rp.subreg.odd = (unsigned long) ihl*4;
        __asm__ __volatile__ (
		"    sr   %0,%0\n"   /* set sum to zero */
                "0:  cksm %0,%1\n"   /* do checksum on longs */
                "    jo   0b\n"      /* loop while CC=3 (CKSM interrupted) */
                : "=&d" (sum), "+&a" (rp) : : "cc" );
        return csum_fold(sum);       /* fold 32-bit sum down to 16 bits */
}
123
124/*
125 * computes the checksum of the TCP/UDP pseudo-header
126 * returns a 32-bit checksum
127 */
extern inline unsigned int
csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
                   unsigned short len, unsigned short proto,
                   unsigned int sum)
{
	/* Three one's-complement 32-bit adds.  Each step: ALR does the
	 * logical add, BRC 12 skips the fixup when the add produced no
	 * carry (mask 12 — presumably CC 0 or 1; confirm against the
	 * Principles of Operation), and AHI wraps the carry back in. */
	__asm__ __volatile__ (
                "    alr   %0,%1\n"  /* sum += saddr */
                "    brc   12,0f\n"
		"    ahi   %0,1\n"   /* add carry */
		"0:"
		: "+&d" (sum) : "d" (saddr) : "cc" );
	__asm__ __volatile__ (
                "    alr   %0,%1\n"  /* sum += daddr */
                "    brc   12,1f\n"
                "    ahi   %0,1\n"   /* add carry */
		"1:"
		: "+&d" (sum) : "d" (daddr) : "cc" );
	__asm__ __volatile__ (
                "    alr   %0,%1\n"  /* sum += (len<<16) + proto */
		"    brc   12,2f\n"
		"    ahi   %0,1\n"   /* add carry */
		"2:"
		: "+&d" (sum)
		: "d" (((unsigned int) len<<16) + (unsigned int) proto)
		: "cc" );
	return sum;
}
155
156/*
157 * computes the checksum of the TCP/UDP pseudo-header
158 * returns a 16-bit checksum, already complemented
159 */
160
/*
 * Full TCP/UDP pseudo-header checksum: accumulate with
 * csum_tcpudp_nofold, then fold and complement via csum_fold.
 */
extern inline unsigned short int
csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
                  unsigned short len, unsigned short proto,
                  unsigned int sum)
{
	unsigned int unfolded;

	unfolded = csum_tcpudp_nofold(saddr, daddr, len, proto, sum);
	return csum_fold(unfolded);
}
168
169/*
170 * this routine is used for miscellaneous IP-like checksums, mainly
171 * in icmp.c
172 */
173
/*
 * Checksum an arbitrary buffer from scratch (initial sum of 0),
 * returning the folded 16-bit result.
 */
extern inline unsigned short
ip_compute_csum(unsigned char * buff, int len)
{
	unsigned int partial;

	partial = csum_partial(buff, len, 0);
	return csum_fold(partial);
}
179
180#endif /* _S390_CHECKSUM_H */
181
182
183