/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_CHECKSUM_H
#define _PARISC_CHECKSUM_H

#include <linux/in6.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
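
/*
 * A minimal reference sketch (not used by the kernel proper) of the sum
 * csum_partial() computes, assuming a 2-byte-aligned buffer; the name
 * csum_partial_ref is hypothetical.  It accumulates 16-bit words and
 * folds the carries back in, which yields a value equivalent (modulo
 * the final 16-bit fold) to what the optimized implementation returns.
 */
static inline __wsum csum_partial_ref(const void *buff, int len, __wsum sum)
{
	const u16 *p = buff;
	u64 acc = (__force u32)sum;

	while (len > 1) {			/* whole 16-bit words */
		acc += *p++;
		len -= 2;
	}
	if (len)				/* trailing odd byte is the */
		acc += (u64)*(const u8 *)p << 8;	/* high octet (big-endian) */
	while (acc >> 32)			/* fold carries back in */
		acc = (acc & 0xffffffffULL) + (acc >> 32);
	return (__force __wsum)acc;
}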

/*
 *	Optimized for IP headers, which always checksum on 4 octet boundaries.
 *
 *	Written by Randolph Chung <tausq@debian.org>, and then mucked with by
 *	LaMont Jones <lamont@debian.org>
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum;
	unsigned long t0, t1, t2;

	__asm__ __volatile__ (
33"	ldws,ma		4(%1), %0\n"
34"	addib,<=	-4, %2, 2f\n"
35"\n"
36"	ldws		4(%1), %4\n"
37"	ldws		8(%1), %5\n"
38"	add		%0, %4, %0\n"
39"	ldws,ma		12(%1), %3\n"
40"	addc		%0, %5, %0\n"
41"	addc		%0, %3, %0\n"
42"1:	ldws,ma		4(%1), %3\n"
43"	addib,<		0, %2, 1b\n"
44"	addc		%0, %3, %0\n"
45"\n"
46"	extru		%0, 31, 16, %4\n"
47"	extru		%0, 15, 16, %5\n"
48"	addc		%4, %5, %0\n"
49"	extru		%0, 15, 16, %5\n"
50"	add		%0, %5, %0\n"
51"	subi		-1, %0, %0\n"
52"2:\n"
	: "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (t0), "=r" (t1), "=r" (t2)
	: "1" (iph), "2" (ihl)
	: "memory");

	return (__force __sum16)sum;
}
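
/*
 * A plain C sketch of what the assembly above computes, for exposition
 * only; the name ip_fast_csum_ref is hypothetical.  ihl counts 32-bit
 * words and is at least 5 for a valid IP header.
 */
static inline __sum16 ip_fast_csum_ref(const void *iph, unsigned int ihl)
{
	const u32 *word = iph;
	u64 acc = 0;
	unsigned int i;

	for (i = 0; i < ihl; i++)	/* sum the header words */
		acc += word[i];
	while (acc >> 16)		/* end-around-carry fold to 16 bits */
		acc = (acc & 0xffff) + (acc >> 16);
	return (__force __sum16)~acc;	/* ones' complement */
}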

/*
 *	Fold a partial checksum
 */
static inline __sum16 csum_fold(__wsum csum)
{
	u32 sum = (__force u32)csum;
	/* Add the two 16-bit halves of sum into its upper half; any
	   carry out of the low half propagates up, so the upper half
	   ends up holding the end-around-carry fold of the two. */
	sum += (sum << 16) + (sum >> 16);
	return (__force __sum16)(~sum >> 16);
}
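
/*
 * A quick worked example of the fold (illustrative arithmetic): for
 * csum = 0xffff0001, sum + (sum << 16) + (sum >> 16) gives
 * 0xffff0001 + 0x00010000 + 0x0000ffff = 0x00010000 (mod 2^32), and
 * ~0x00010000 >> 16 = 0xfffe, matching the direct end-around fold:
 * 0xffff + 0x0001 = 0x10000 -> 0x0000 + 1 = 0x0001, ~0x0001 = 0xfffe.
 */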

static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
	__asm__(
	"	add  %1, %0, %0\n"	/* sum += daddr */
	"	addc %2, %0, %0\n"	/* sum += saddr, plus carry */
	"	addc %3, %0, %0\n"	/* sum += proto + len, plus carry */
	"	addc %%r0, %0, %0\n"	/* pick up the final carry */
		: "=r" (sum)
		: "r" (daddr), "r" (saddr), "r" (proto + len), "0" (sum));
	return sum;
}
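
/*
 * A plain C sketch of the pseudo-header sum above, for exposition only;
 * csum_tcpudp_nofold_ref is a hypothetical name.  Because PA-RISC is
 * big-endian, proto + len can be added as an ordinary integer, just as
 * the asm adds the precomputed proto+len operand.
 */
static inline __wsum csum_tcpudp_nofold_ref(__be32 saddr, __be32 daddr,
					    __u32 len, __u8 proto,
					    __wsum sum)
{
	u64 s = (__force u32)sum;

	s += (__force u32)saddr;
	s += (__force u32)daddr;
	s += proto + len;
	while (s >> 32)		/* fold carries, as the addc chain does */
		s = (s & 0xffffffffULL) + (s >> 32);
	return (__force __wsum)s;
}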

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
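
/*
 * Typical use, sketched for a received TCP segment where th points at
 * the TCP header and len covers header plus payload; a non-zero result
 * means the checksum is bad:
 *
 *	__wsum csum = csum_partial(th, len, 0);
 *	if (csum_tcpudp_magic(iph->saddr, iph->daddr, len, IPPROTO_TCP, csum))
 *		goto csum_error;
 */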

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buf, int len)
{
	return csum_fold(csum_partial(buf, len, 0));
}

#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
				      const struct in6_addr *daddr,
				      __u32 len, __u8 proto,
				      __wsum sum)
{
	unsigned long t0, t1, t2, t3;

	len += proto;	/* proto fills a 16-bit field in the pseudo-header */

	__asm__ __volatile__ (

#if BITS_PER_LONG > 32

	/*
	** We can execute two loads and two adds per cycle on PA 8000.
	** But add insn's get serialized waiting for the carry bit.
	** Try to keep 4 registers with "live" values ahead of the ALU.
	*/

"	depdi		0, 31, 32, %0\n"/* clear upper half of incoming sum */
"	ldd,ma		8(%1), %4\n"	/* get 1st saddr word */
"	ldd,ma		8(%2), %5\n"	/* get 1st daddr word */
"	add		%4, %0, %0\n"
"	ldd,ma		8(%1), %6\n"	/* 2nd saddr */
"	ldd,ma		8(%2), %7\n"	/* 2nd daddr */
"	add,dc		%5, %0, %0\n"
"	add,dc		%6, %0, %0\n"
"	add,dc		%7, %0, %0\n"
"	add,dc		%3, %0, %0\n"	/* fold in proto+len, plus carry */
"	extrd,u		%0, 31, 32, %4\n"/* copy upper half down */
"	depdi		0, 31, 32, %0\n"/* clear upper half */
"	add,dc		%4, %0, %0\n"	/* fold into 32 bits, plus carry */
"	addc		0, %0, %0\n"	/* add final carry */

#else

	/*
	** For PA 1.x the insn order doesn't matter as much, but the
	** stream is still serialized on the carry bit: each addc must
	** wait for the result of the previous operation.
	*/
"	ldw,ma		4(%1), %4\n"	/* get 1st saddr word */
"	ldw,ma		4(%2), %5\n"	/* get 1st daddr word */
"	add		%4, %0, %0\n"
"	ldw,ma		4(%1), %6\n"	/* 2nd saddr */
"	addc		%5, %0, %0\n"
"	ldw,ma		4(%2), %7\n"	/* 2nd daddr */
"	addc		%6, %0, %0\n"
"	ldw,ma		4(%1), %4\n"	/* 3rd saddr */
"	addc		%7, %0, %0\n"
"	ldw,ma		4(%2), %5\n"	/* 3rd daddr */
"	addc		%4, %0, %0\n"
"	ldw,ma		4(%1), %6\n"	/* 4th saddr */
"	addc		%5, %0, %0\n"
"	ldw,ma		4(%2), %7\n"	/* 4th daddr */
"	addc		%6, %0, %0\n"
"	addc		%7, %0, %0\n"
"	addc		%3, %0, %0\n"	/* fold in proto+len, catch carry */
"	addc		0, %0, %0\n"	/* add final carry */

#endif
	: "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len),
	  "=r" (t0), "=r" (t1), "=r" (t2), "=r" (t3)
	: "0" (sum), "1" (saddr), "2" (daddr), "3" (len)
	: "memory");
	return csum_fold(sum);
}
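
/*
 * A plain C equivalent of the assembly above, for exposition only;
 * csum_ipv6_magic_ref is a hypothetical name.  It sums the four 32-bit
 * words of each address plus proto + len into a 64-bit accumulator and
 * folds, which is what the carry chains above do with addc/add,dc.
 */
static inline __sum16 csum_ipv6_magic_ref(const struct in6_addr *saddr,
					  const struct in6_addr *daddr,
					  __u32 len, __u8 proto,
					  __wsum sum)
{
	u64 acc = (__force u32)sum;
	int i;

	for (i = 0; i < 4; i++) {	/* 4 words per IPv6 address */
		acc += (__force u32)saddr->s6_addr32[i];
		acc += (__force u32)daddr->s6_addr32[i];
	}
	acc += proto + len;		/* big-endian, as in the asm */
	while (acc >> 32)		/* fold 64-bit carries back in */
		acc = (acc & 0xffffffffULL) + (acc >> 32);
	return csum_fold((__force __wsum)acc);
}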

#endif