/*
 * BK Id: SCCS/s.checksum.h 1.8 05/17/01 18:14:24 cort
 */
#ifdef __KERNEL__
#ifndef _PPC_CHECKSUM_H
#define _PPC_CHECKSUM_H

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
extern unsigned int csum_partial(const unsigned char * buff, int len,
				 unsigned int sum);

/*
 * Computes the checksum of a memory block at src, length len,
 * and adds in "sum" (32-bit), while copying the block to dst.
 * If an access exception occurs on src or dst, it stores -EFAULT
 * to *src_err or *dst_err respectively (if that pointer is not
 * NULL), and, for an error on src, zeroes the rest of dst.
 *
 * Like csum_partial, this must be called with even lengths,
 * except for the last fragment.
 */
extern unsigned int csum_partial_copy_generic(const char *src, char *dst,
					      int len, unsigned int sum,
					      int *src_err, int *dst_err);

/* Copy from userspace, reporting faults via *errp (src side only). */
#define csum_partial_copy_from_user(src, dst, len, sum, errp)	\
	csum_partial_copy_generic((src), (dst), (len), (sum), (errp), 0)

/* Copy without any fault checking (both error pointers NULL). */
#define csum_partial_copy_nocheck(src, dst, len, sum)	\
	csum_partial_copy_generic((src), (dst), (len), (sum), 0, 0)

/*
 * Old versions which ignore errors.
 */
#define csum_partial_copy(src, dst, len, sum)	\
	csum_partial_copy_generic((src), (dst), (len), (sum), 0, 0)
#define csum_partial_copy_fromuser(src, dst, len, sum)	\
	csum_partial_copy_generic((src), (dst), (len), (sum), 0, 0)

/*
 * turns a 32-bit partial checksum (e.g. from csum_partial) into a
 * 1's complement 16-bit checksum.
 */
static inline unsigned int csum_fold(unsigned int sum)
{
	unsigned int tmp;

	/* swap the two 16-bit halves of sum (rotate left by 16) */
	__asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
	/* if there is a carry from adding the two 16-bit halves,
	   it will carry from the lower half into the upper half,
	   giving us the correct sum in the upper half. */
	sum = ~(sum + tmp) >> 16;
	return sum;
}

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}

/*
 * computes the checksum of the TCP/UDP pseudo-header without folding:
 * sums saddr, daddr, the proto/len word and the incoming sum with
 * end-around carry (addc/adde/addze keep the carry in the total).
 */
static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
					       unsigned long daddr,
					       unsigned short len,
					       unsigned short proto,
					       unsigned int sum)
{
	__asm__("\n\
	addc %0,%0,%1 \n\
	adde %0,%0,%2 \n\
	adde %0,%0,%3 \n\
	addze %0,%0 \n\
	"
	: "=r" (sum)
	: "r" (daddr), "r"(saddr), "r"((proto<<16)+len), "0"(sum));
	return sum;
}

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.  ihl is the number
 * of 32-bit words and is always >= 5.
 */
extern unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl);

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
extern unsigned short csum_tcpudp_magic(unsigned long saddr,
					unsigned long daddr,
					unsigned short len,
					unsigned short proto,
					unsigned int sum);

#endif /* _PPC_CHECKSUM_H */
#endif /* __KERNEL__ */