/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Even with the __builtin_ versions the compiler may decide to call
   the out-of-line function. */
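
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * with a length that is not a compile-time constant, the builtin
 * typically falls back to calling the out-of-line memcpy() declared
 * below:
 *
 *	static void fill(void *dst, const void *src, size_t len)
 *	{
 *		memcpy(dst, src, len);
 *	}
 *
 * whereas a small constant length is usually expanded inline.
 */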

#if defined(__SANITIZE_MEMORY__) && defined(__NO_FORTIFY)
#include <linux/kmsan_string.h>
#endif

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);

/*
 * KMSAN needs to instrument as much code as possible. Use C versions of
 * memsetXX() from lib/string.c under KMSAN.
 */
#if !defined(CONFIG_KMSAN)
#define __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	long d0, d1;
	/*
	 * rep stosw stores %ax to (%rdi), n times: "0"/"=&c" place the
	 * count in %rcx, "1"/"=&D" the destination in %rdi, and "a" the
	 * fill value in %ax.
	 */
	asm volatile("rep\n\t"
		     "stosw"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	long d0, d1;
	/* Same as memset16(), but with 32-bit stosl stores. */
	asm volatile("rep\n\t"
		     "stosl"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	long d0, d1;
	/* Same as memset16(), but with 64-bit stosq stores. */
	asm volatile("rep\n\t"
		     "stosq"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}
#endif /* !CONFIG_KMSAN */
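
/*
 * Illustrative usage (hypothetical caller, not part of this header):
 * filling one scanline of a 32bpp framebuffer with a solid colour,
 * assuming "line" points at "width" pixels:
 *
 *	memset32(line, 0xff00ff00, width);
 *
 * which compiles down to a single rep stosl on x86-64.
 */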

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void __memcpy_flushcache(void *dst, const void *src, size_t cnt);
static __always_inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
	if (__builtin_constant_p(cnt)) {
		/*
		 * Open-code small constant-size copies with movnti
		 * non-temporal stores, which bypass the cache hierarchy.
		 */
		switch (cnt) {
			case 4:
				asm ("movntil %1, %0" : "=m"(*(u32 *)dst) : "r"(*(u32 *)src));
				return;
			case 8:
				asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
				return;
			case 16:
				asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
				asm ("movntiq %1, %0" : "=m"(*(u64 *)(dst + 8)) : "r"(*(u64 *)(src + 8)));
				return;
		}
	}
	__memcpy_flushcache(dst, src, cnt);
}
#endif /* CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE */
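
/*
 * Illustrative usage (hypothetical caller; pmem_dst and entry are made
 * up for the example): persistent-memory code copies small records with
 * memcpy_flushcache() so the data is not left dirty in the CPU caches:
 *
 *	memcpy_flushcache(pmem_dst, &entry, sizeof(entry));
 *
 * When sizeof(entry) is a known constant of 4, 8 or 16 bytes this
 * reduces to the movnti stores above instead of a call to
 * __memcpy_flushcache().
 */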

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */