/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Only used for special circumstances. Stolen from i386/string.h */
static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
{
	unsigned long d0, d1, d2;
	asm volatile("rep ; movsl\n\t"		/* copy n/4 dwords */
		     "testb $2,%b4\n\t"		/* word remaining in the tail? */
		     "je 1f\n\t"
		     "movsw\n"
		     "1:\ttestb $1,%b4\n\t"	/* byte remaining in the tail? */
		     "je 2f\n\t"
		     "movsb\n"
		     "2:"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
		     : "memory");
	return to;
}
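
/*
 * Worked example (illustrative only): for n = 7 the sequence above
 * copies n/4 = 1 dword with "rep movsl", then tests bit 1 of n
 * (7 & 2 != 0) and copies one word with "movsw", then tests bit 0
 * (7 & 1 != 0) and copies the final byte with "movsb" -- 4 + 2 + 1
 * bytes in total.
 */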

/* Even with __builtin_, the compiler may still decide to use the
   out-of-line function. */

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#ifndef CONFIG_FORTIFY_SOURCE
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len)					\
({								\
	size_t __len = (len);					\
	void *__ret;						\
	if (__builtin_constant_p(len) && __len >= 64)		\
		__ret = __memcpy((dst), (src), __len);		\
	else							\
		__ret = __builtin_memcpy((dst), (src), __len);	\
	__ret;							\
})
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */
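
/*
 * Illustrative only: with the old-GCC macro above, a constant-size
 * copy such as memcpy(dst, src, 128) resolves to the out-of-line
 * __memcpy(), while memcpy(dst, src, 16) stays with
 * __builtin_memcpy() and is typically expanded inline by the
 * compiler.
 */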

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);

#define __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosw"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosl"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosq"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}
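
/*
 * Usage sketch (buffer name hypothetical): @n counts elements of the
 * given width, not bytes, so clearing a 64-entry u32 table looks like:
 *
 *	u32 table[64];
 *	memset32(table, 0, ARRAY_SIZE(table));
 *
 * The 16- and 64-bit variants behave the same way with "rep stosw"
 * and "rep stosq" respectively.
 */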

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);
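
/*
 * Unlike memcpy(), memmove() is defined for overlapping regions, e.g.
 * shifting a buffer down by one byte (illustrative only):
 *
 *	memmove(buf, buf + 1, len - 1);
 */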

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

/*
 * For files that are not instrumented (e.g. mm/slub.c) we
 * should use the not-instrumented versions of the mem* functions.
 */

#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif

#endif /* CONFIG_KASAN && !__SANITIZE_ADDRESS__ */

#define __HAVE_ARCH_MEMCPY_MCSAFE 1
__must_check unsigned long __memcpy_mcsafe(void *dst, const void *src,
		size_t cnt);
DECLARE_STATIC_KEY_FALSE(mcsafe_key);

/**
 * memcpy_mcsafe - copy memory with indication if a machine check happened
 *
 * @dst:	destination address
 * @src:	source address
 * @cnt:	number of bytes to copy
 *
 * Low level memory copy function that catches machine checks.
 * We only call into the "safe" function on systems that can
 * actually do machine check recovery. Everyone else can just
 * use memcpy().
 *
 * Return: 0 for success, or the number of bytes not copied if there
 * was an exception.
 */
static __always_inline __must_check unsigned long
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	if (static_branch_unlikely(&mcsafe_key))
		return __memcpy_mcsafe(dst, src, cnt);
	else
#endif
		memcpy(dst, src, cnt);
	return 0;
}
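
/*
 * Usage sketch (names hypothetical): because the return value is the
 * number of bytes *not* copied, callers on machine-check-recovery
 * capable systems check for a non-zero residue rather than assuming
 * success:
 *
 *	unsigned long rem = memcpy_mcsafe(dst, src, len);
 *	if (rem)
 *		return -EIO;	(rem tail bytes were left uncopied)
 */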

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void memcpy_flushcache(void *dst, const void *src, size_t cnt);
#endif
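
/*
 * A minimal sketch (pmem-style destination assumed, illustrative
 * only): unlike memcpy(), memcpy_flushcache() also ensures the
 * written destination lines are flushed from the CPU cache, which
 * persistent-memory users pair with a write barrier before treating
 * the data as durable:
 *
 *	memcpy_flushcache(pmem_dst, src, len);
 *	wmb();
 */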

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */