11541Srgrimes/* SPDX-License-Identifier: GPL-2.0 */
21541Srgrimes#ifndef __ASM_SH_CHECKSUM_H
31541Srgrimes#define __ASM_SH_CHECKSUM_H
41541Srgrimes
51541Srgrimes/*
61541Srgrimes * Copyright (C) 1999 by Kaz Kojima & Niibe Yutaka
71541Srgrimes */
81541Srgrimes
91541Srgrimes#include <linux/in6.h>
101541Srgrimes#include <linux/uaccess.h>
111541Srgrimes
121541Srgrimes/*
131541Srgrimes * computes the checksum of a memory block at buff, length len,
141541Srgrimes * and adds in "sum" (32-bit)
151541Srgrimes *
161541Srgrimes * returns a 32-bit number suitable for feeding into itself
171541Srgrimes * or csum_tcpudp_magic
181541Srgrimes *
191541Srgrimes * this function must be called with even lengths, except
201541Srgrimes * for the last fragment, which may be odd
211541Srgrimes *
221541Srgrimes * it's best to have buff aligned on a 32-bit boundary
231541Srgrimes */
241541Srgrimesasmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
251541Srgrimes
261541Srgrimes/*
271541Srgrimes * the same as csum_partial, but copies from src while it
281541Srgrimes * checksums, and handles user-space pointer exceptions correctly, when needed.
291541Srgrimes *
301541Srgrimes * here even more important to align src and dst on a 32-bit (or even
311541Srgrimes * better 64-bit) boundary
321541Srgrimes */
331541Srgrimes
3414195Speterasmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
351541Srgrimes
361541Srgrimes#define _HAVE_ARCH_CSUM_AND_COPY
372169Spaul/*
382169Spaul *	Note: when you get a NULL pointer exception here this means someone
392169Spaul *	passed in an incorrect kernel address to one of these functions.
401541Srgrimes *
411541Srgrimes *	If you use these functions directly please don't forget the
421541Srgrimes *	access_ok().
431541Srgrimes */
static inline
__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
	/*
	 * Kernel-to-kernel copy-and-checksum: both pointers are trusted
	 * kernel addresses, so no access_ok() check is performed here.
	 * Returns the 32-bit partial checksum of the copied data.
	 */
	return csum_partial_copy_generic(src, dst, len);
}
491541Srgrimes
501541Srgrimes#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
511541Srgrimesstatic inline
529209Swollman__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
531541Srgrimes{
541541Srgrimes	if (!access_ok(src, len))
551541Srgrimes		return 0;
561541Srgrimes	return csum_partial_copy_generic((__force const void *)src, dst, len);
571541Srgrimes}
581541Srgrimes
592531Swollman/*
601541Srgrimes *	Fold a partial checksum
611541Srgrimes */
621541Srgrimes
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int __dummy;
	/*
	 * Add the two 16-bit halves of the 32-bit sum together (twice,
	 * so the carry out of the first add is folded back in), then
	 * take the one's complement for the final Internet checksum.
	 * swap.w/extu.w are SH halfword-swap and zero-extend insns.
	 */
	__asm__("swap.w %0, %1\n\t"	/* __dummy = halves of sum swapped */
		"extu.w	%0, %0\n\t"	/* sum     = low 16 bits */
		"extu.w	%1, %1\n\t"	/* __dummy = high 16 bits */
		"add	%1, %0\n\t"	/* sum = low + high (up to 17 bits) */
		"swap.w	%0, %1\n\t"	/* fold the carry back into the low half */
		"add	%1, %0\n\t"
		"not	%0, %0\n\t"	/* one's complement */
		: "=r" (sum), "=&r" (__dummy)
		: "0" (sum)
		: "t");			/* clobbers the T (carry/test) bit */
	return (__force __sum16)sum;
}
7814195Speter
7914195Speter/*
8014195Speter *	This is a version of ip_compute_csum() optimized for IP headers,
8114195Speter *	which always checksum on 4 octet boundaries.
8214195Speter *
8314195Speter *      i386 version by Jorge Cwik <jorge@laser.satlink.net>, adapted
8414195Speter *      for linux by * Arnt Gulbrandsen.
8514195Speter */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	__wsum sum;
	unsigned int __dummy0, __dummy1;

	/*
	 * Sum the IP header as 32-bit words with end-around carry
	 * (addc accumulates through the T bit).  The first two words
	 * are loaded before the loop, which then runs ihl-2 times;
	 * the load in the delay-slot region overreads by one word past
	 * the header, which is harmless for in-kernel IP headers.
	 * Assumes ihl >= 2 (any valid IP header has ihl >= 5).
	 */
	__asm__ __volatile__(
		"mov.l	@%1+, %0\n\t"	/* sum = first word */
		"mov.l	@%1+, %3\n\t"	/* prime the loop with word 2 */
		"add	#-2, %2\n\t"	/* loop count = ihl - 2 */
		"clrt\n\t"		/* clear carry (T bit) */
		"1:\t"
		"addc	%3, %0\n\t"	/* sum += word, with carry */
		"movt	%4\n\t"		/* save carry across the dt below */
		"mov.l	@%1+, %3\n\t"	/* fetch next word */
		"dt	%2\n\t"		/* decrement-and-test clobbers T... */
		"bf/s	1b\n\t"
		" cmp/eq #1, %4\n\t"	/* ...so restore the saved carry */
		"addc	%3, %0\n\t"	/* add the final word */
		"addc	%2, %0"	    /* Here %2 is 0, add carry-bit */
	/* Since the input registers which are loaded with iph and ihl
	   are modified, we must also specify them as outputs, or gcc
	   will assume they contain their original values. */
	: "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (__dummy0), "=&z" (__dummy1)
	: "1" (iph), "2" (ihl)
	: "t", "memory");

	return	csum_fold(sum);
}
11414195Speter
/*
 * Accumulate the TCP/UDP pseudo-header (saddr, daddr, len, proto) into
 * "sum" as a 32-bit one's-complement sum, without the final fold.
 */
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
#ifdef __LITTLE_ENDIAN__
	/* Position proto+len to match network byte order in memory. */
	unsigned long len_proto = (proto + len) << 8;
#else
	unsigned long len_proto = proto + len;
#endif
	__asm__("clrt\n\t"		/* clear carry (T bit) */
		"addc	%0, %1\n\t"	/* len_proto += sum */
		"addc	%2, %1\n\t"	/* len_proto += daddr */
		"addc	%3, %1\n\t"	/* len_proto += saddr */
		"movt	%0\n\t"		/* capture the last carry... */
		"add	%1, %0"		/* ...and fold it into the result */
		: "=r" (sum), "=r" (len_proto)
		: "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum)
		: "t");

	return sum;
}
1361541Srgrimes
1371541Srgrimes/*
1381541Srgrimes * computes the checksum of the TCP/UDP pseudo-header
1391541Srgrimes * returns a 16-bit checksum, already complemented
1401541Srgrimes */
1411541Srgrimesstatic inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
1421541Srgrimes					__u32 len, __u8 proto,
1431541Srgrimes					__wsum sum)
1441541Srgrimes{
1451541Srgrimes	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
1461541Srgrimes}
1471541Srgrimes
1481541Srgrimes/*
1491541Srgrimes * this routine is used for miscellaneous IP-like checksums, mainly
1501541Srgrimes * in icmp.c
1511541Srgrimes */
1521541Srgrimesstatic inline __sum16 ip_compute_csum(const void *buff, int len)
1531541Srgrimes{
1541541Srgrimes    return csum_fold(csum_partial(buff, len, 0));
1551541Srgrimes}
1561541Srgrimes
1571541Srgrimes#define _HAVE_ARCH_IPV6_CSUM
/*
 * IPv6 pseudo-header checksum: sums the four 32-bit words of each
 * 128-bit address, plus the (network-order) length and protocol, with
 * end-around carry, then folds to the final 16-bit complemented form.
 */
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
				      const struct in6_addr *daddr,
				      __u32 len, __u8 proto, __wsum sum)
{
	unsigned int __dummy;
	__asm__("clrt\n\t"		/* clear carry (T bit) */
		"mov.l	@(0,%2), %1\n\t"	/* saddr word 0 */
		"addc	%1, %0\n\t"
		"mov.l	@(4,%2), %1\n\t"	/* saddr word 1 */
		"addc	%1, %0\n\t"
		"mov.l	@(8,%2), %1\n\t"	/* saddr word 2 */
		"addc	%1, %0\n\t"
		"mov.l	@(12,%2), %1\n\t"	/* saddr word 3 */
		"addc	%1, %0\n\t"
		"mov.l	@(0,%3), %1\n\t"	/* daddr word 0 */
		"addc	%1, %0\n\t"
		"mov.l	@(4,%3), %1\n\t"	/* daddr word 1 */
		"addc	%1, %0\n\t"
		"mov.l	@(8,%3), %1\n\t"	/* daddr word 2 */
		"addc	%1, %0\n\t"
		"mov.l	@(12,%3), %1\n\t"	/* daddr word 3 */
		"addc	%1, %0\n\t"
		"addc	%4, %0\n\t"	/* + htonl(len) */
		"addc	%5, %0\n\t"	/* + htonl(proto) */
		"movt	%1\n\t"		/* capture the final carry... */
		"add	%1, %0\n"	/* ...and fold it in */
		: "=r" (sum), "=&r" (__dummy)
		: "r" (saddr), "r" (daddr),
		  "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
		: "t");

	return csum_fold(sum);
}
1911541Srgrimes
1921541Srgrimes/*
1931541Srgrimes *	Copy and checksum to user
1941541Srgrimes */
1951541Srgrimes#define HAVE_CSUM_COPY_USER
1961541Srgrimesstatic inline __wsum csum_and_copy_to_user(const void *src,
1971541Srgrimes					   void __user *dst,
1981541Srgrimes					   int len)
1991541Srgrimes{
2001541Srgrimes	if (!access_ok(dst, len))
2011541Srgrimes		return 0;
2021541Srgrimes	return csum_partial_copy_generic(src, (__force void *)dst, len);
2031541Srgrimes}
2041541Srgrimes#endif /* __ASM_SH_CHECKSUM_H */
2051541Srgrimes