/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from tahoe:	in_cksum.c	1.2	86/01/05
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <machine/in_cksum.h>

/*
 * Checksum routine for Internet Protocol family headers.
 *
 * This routine is very heavily used in the network
 * code and should be modified for each CPU to be as fast as possible.
 *
 * This implementation is the i386 version.
 */
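
/*
 * Per RFC 1071, the Internet checksum is the one's complement of the one's
 * complement sum of the data taken as 16-bit words.  Carries out of bit 15
 * are folded back into the low 16 bits, so a wider accumulator can be used
 * and folded down at the end, which is what the ADDCARRY and REDUCE macros
 * below do.  For example, 0xffff + 0x0002 = 0x10001, and folding the carry
 * gives 0x0001 + 0x0001 = 0x0002.
 */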

#undef	ADDCARRY
#define ADDCARRY(x)     if ((x) > 0xffff) (x) -= 0xffff
#define REDUCE          {sum = (sum & 0xffff) + (sum >> 16); ADDCARRY(sum);}
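
/*
 * Illustrative only (not used by the code below): a portable sketch of what
 * REDUCE/ADDCARRY accomplish, folding a 32-bit partial sum down to a 16-bit
 * one's complement sum.  The helper name is made up for illustration.
 */
#if 0
static __inline u_short
cksum_fold_sketch(u_int32_t sum)
{

	sum = (sum & 0xffff) + (sum >> 16);	/* fold high half into low half */
	sum = (sum & 0xffff) + (sum >> 16);	/* fold any remaining carry */
	return ((u_short)sum);
}
#endif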

/*
 * These asm statements require __volatile because they pass information
 * via the condition codes.  GCC does not currently provide a way to specify
 * the condition codes as an input or output operand.
 *
 * The otherwise unused load into %eax in the main loop below is effectively
 * a prefetch into cache: the value is loaded into a register but never
 * used.  Since modern CPUs reorder operations, this will generally take
 * place in parallel with other calculations.
 */
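
/*
 * Illustrative only (not used by the code below): a portable sketch of the
 * add-with-carry step that the "addl ... adcl $0" sequences perform, adding
 * a 32-bit word into the running sum and folding the carry out of bit 31
 * back into bit 0 so no part of the one's complement sum is lost.  The
 * helper name is made up for illustration.
 */
#if 0
static __inline u_int32_t
cksum_add32_sketch(u_int32_t sum, u_int32_t word)
{
	u_int32_t t;

	t = sum + word;			/* addl %1, %0 */
	return (t + (t < sum));		/* adcl $0, %0 */
}
#endif
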
u_short
in_cksum_skip(struct mbuf *m, int len, int skip)
{
	u_short *w;
	unsigned sum = 0;
	int mlen = 0;
	int byte_swapped = 0;
	union { char	c[2]; u_short	s; } su;

	len -= skip;
	for (; skip && m; m = m->m_next) {
		if (m->m_len > skip) {
			mlen = m->m_len - skip;
			w = (u_short *)(mtod(m, u_char *) + skip);
			goto skip_start;
		} else {
			skip -= m->m_len;
		}
	}

	for (; m && len; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		w = mtod(m, u_short *);
		if (mlen == -1) {
			/*
			 * The first byte of this mbuf is the continuation
			 * of a word spanning between this mbuf and the
			 * last mbuf.
			 */

			/*
			 * su.c[0] was already saved when scanning the
			 * previous mbuf.  sum was REDUCEd when we found
			 * mlen == -1.
			 */
			su.c[1] = *(u_char *)w;
			sum += su.s;
			w = (u_short *)((char *)w + 1);
			mlen = m->m_len - 1;
			len--;
		} else
			mlen = m->m_len;
skip_start:
		if (len < mlen)
			mlen = len;
		len -= mlen;
		/*
		 * Force to a longword boundary so we do longword aligned
		 * memory operations.
		 */
		if (3 & (int) w) {
			REDUCE;
			if ((1 & (int) w) && (mlen > 0)) {
				sum <<= 8;
				su.c[0] = *(char *)w;
				w = (u_short *)((char *)w + 1);
				mlen--;
				byte_swapped = 1;
			}
			if ((2 & (int) w) && (mlen >= 2)) {
				sum += *w++;
				mlen -= 2;
			}
		}
		/*
		 * Advance to a 486 cache line boundary.
		 */
		if (4 & (int) w && mlen >= 4) {
			__asm __volatile (
				"addl %1, %0\n"
				"adcl $0, %0"
				: "+r" (sum)
				: "g" (((const u_int32_t *)w)[0])
			);
			w += 2;
			mlen -= 4;
		}
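		/*
		 * Continue advancing to a 16 byte (486 cache line) boundary.
		 */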
		if (8 & (int) w && mlen >= 8) {
			__asm __volatile (
				"addl %1, %0\n"
				"adcl %2, %0\n"
				"adcl $0, %0"
				: "+r" (sum)
				: "g" (((const u_int32_t *)w)[0]),
				  "g" (((const u_int32_t *)w)[1])
			);
			w += 4;
			mlen -= 8;
		}
		/*
		 * Do as much of the checksum as possible 32 bits at a time.
		 * In fact, this loop is unrolled to keep the overhead from
		 * branches etc. small.
		 */
		mlen -= 1;
		while ((mlen -= 32) >= 0) {
			/*
			 * Add with carry 16 words and fold in the last
			 * carry by adding a 0 with carry.
			 *
			 * The early add of the word at byte offset 16
			 * (operand %1) and the discarded load of the word
			 * at byte offset 32 (operand %6, moved into %eax)
			 * are there to load the next 2 cache lines in
			 * advance on 486's.  The 486 has a penalty of 2
			 * clock cycles for loading a cache line, plus
			 * whatever time the external memory takes to load
			 * the first word(s) addressed.  These penalties
			 * are unavoidable.  Subsequent accesses to a cache
			 * line being loaded (and to other external memory?)
			 * are delayed until the whole load finishes.  These
			 * penalties are mostly avoided by not accessing
			 * external memory for 8 cycles after the early add
			 * and 12 cycles after the prefetching load.  The 1
			 * subtracted from mlen before the loop makes the
			 * body run only while at least 33 (not 32) bytes
			 * remain, which guarantees that the load at byte
			 * offset 32 is within bounds.
			 */
			__asm __volatile (
				"addl %1, %0\n"
				"adcl %2, %0\n"
				"adcl %3, %0\n"
				"adcl %4, %0\n"
				"adcl %5, %0\n"
				"mov  %6, %%eax\n"
				"adcl %7, %0\n"
				"adcl %8, %0\n"
				"adcl %9, %0\n"
				"adcl $0, %0"
				: "+r" (sum)
				: "g" (((const u_int32_t *)w)[4]),
				  "g" (((const u_int32_t *)w)[0]),
				  "g" (((const u_int32_t *)w)[1]),
				  "g" (((const u_int32_t *)w)[2]),
				  "g" (((const u_int32_t *)w)[3]),
				  "g" (((const u_int32_t *)w)[8]),
				  "g" (((const u_int32_t *)w)[5]),
				  "g" (((const u_int32_t *)w)[6]),
				  "g" (((const u_int32_t *)w)[7])
				: "eax"
			);
			w += 16;
		}
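		/*
		 * Undo the final subtraction of 32 that ended the loop and
		 * the extra 1 subtracted before it.
		 */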
		mlen += 32 + 1;
		if (mlen >= 32) {
			__asm __volatile (
				"addl %1, %0\n"
				"adcl %2, %0\n"
				"adcl %3, %0\n"
				"adcl %4, %0\n"
				"adcl %5, %0\n"
				"adcl %6, %0\n"
				"adcl %7, %0\n"
				"adcl %8, %0\n"
				"adcl $0, %0"
				: "+r" (sum)
				: "g" (((const u_int32_t *)w)[4]),
				  "g" (((const u_int32_t *)w)[0]),
				  "g" (((const u_int32_t *)w)[1]),
				  "g" (((const u_int32_t *)w)[2]),
				  "g" (((const u_int32_t *)w)[3]),
				  "g" (((const u_int32_t *)w)[5]),
				  "g" (((const u_int32_t *)w)[6]),
				  "g" (((const u_int32_t *)w)[7])
			);
			w += 16;
			mlen -= 32;
		}
		if (mlen >= 16) {
			__asm __volatile (
				"addl %1, %0\n"
				"adcl %2, %0\n"
				"adcl %3, %0\n"
				"adcl %4, %0\n"
				"adcl $0, %0"
				: "+r" (sum)
				: "g" (((const u_int32_t *)w)[0]),
				  "g" (((const u_int32_t *)w)[1]),
				  "g" (((const u_int32_t *)w)[2]),
				  "g" (((const u_int32_t *)w)[3])
			);
			w += 8;
			mlen -= 16;
		}
		if (mlen >= 8) {
			__asm __volatile (
				"addl %1, %0\n"
				"adcl %2, %0\n"
				"adcl $0, %0"
				: "+r" (sum)
				: "g" (((const u_int32_t *)w)[0]),
				  "g" (((const u_int32_t *)w)[1])
			);
			w += 4;
			mlen -= 8;
		}
		if (mlen == 0 && byte_swapped == 0)
			continue;       /* worth 1% maybe ?? */
		REDUCE;
		while ((mlen -= 2) >= 0) {
			sum += *w++;
		}
		if (byte_swapped) {
			sum <<= 8;
			byte_swapped = 0;
			if (mlen == -1) {
				su.c[1] = *(char *)w;
				sum += su.s;
				mlen = 0;
			} else
				mlen = -1;
		} else if (mlen == -1)
			/*
			 * This mbuf has an odd number of bytes.
			 * There could be a word split between
			 * this mbuf and the next mbuf.
			 * Save the last byte (to prepend to the next mbuf).
			 */
			su.c[0] = *(char *)w;
	}

	if (len)
		printf("%s: out of data by %d\n", __func__, len);
	if (mlen == -1) {
		/*
		 * The last mbuf has an odd number of bytes.  Follow the
		 * standard (the odd byte is shifted left by 8 bits).
		 */
		su.c[1] = 0;
		sum += su.s;
	}
	REDUCE;
	return (~sum & 0xffff);
}
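
/*
 * Illustrative only: one way a caller might use in_cksum_skip() to verify
 * an IPv4 header checksum.  Here "skip" is the offset of the IP header
 * within the mbuf chain and "hlen" is the header length in bytes; the len
 * argument to in_cksum_skip() counts from the start of the chain, so it is
 * skip + hlen.  A correct header (with its ip_sum field included) sums to
 * zero.  The function and variable names are made up for illustration.
 */
#if 0
static int
ip_header_cksum_ok_sketch(struct mbuf *m, int skip, int hlen)
{

	return (in_cksum_skip(m, skip + hlen, skip) == 0);
}
#endif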