#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

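/*
 * Small inline memcpy for special cases: copy n/4 dwords with rep movsl,
 * then handle the remaining 2-byte and 1-byte tail with movsw/movsb.
 */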
static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
{
	unsigned long d0, d1, d2;
	asm volatile("rep ; movsl\n\t"
		     "testb $2,%b4\n\t"
		     "je 1f\n\t"
		     "movsw\n"
		     "1:\ttestb $1,%b4\n\t"
		     "je 2f\n\t"
		     "movsb\n"
		     "2:"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
		     : "memory");
	return to;
}
#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

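/*
 * With older GCC (< 4.3) and without FORTIFY_SOURCE, route large
 * constant-size copies to the out-of-line __memcpy() and let
 * __builtin_memcpy() handle everything else.
 */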
#ifndef CONFIG_FORTIFY_SOURCE
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len)					\
({								\
	size_t __len = (len);					\
	void *__ret;						\
	if (__builtin_constant_p(len) && __len >= 64)		\
		__ret = __memcpy((dst), (src), __len);		\
	else							\
		__ret = __builtin_memcpy((dst), (src), __len);	\
	__ret;							\
})
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);

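/*
 * 16-, 32- and 64-bit wide memset variants: @n is the number of elements
 * (not bytes) stored with rep stosw/stosl/stosq.
 */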
#define __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosw"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosl"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosq"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

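/*
 * For files built without KASAN instrumentation, map the mem*() calls
 * to their non-instrumented __mem*() counterparts and disable the
 * fortified inline wrappers.
 */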
#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY
#endif

#endif /* CONFIG_KASAN && !__SANITIZE_ADDRESS__ */

#define __HAVE_ARCH_MEMCPY_MCSAFE 1
__must_check int memcpy_mcsafe_unrolled(void *dst, const void *src, size_t cnt);
DECLARE_STATIC_KEY_FALSE(mcsafe_key);

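/**
 * memcpy_mcsafe - copy memory with indication of error
 * @dst:	destination address
 * @src:	source address
 * @cnt:	number of bytes to copy
 *
 * When CONFIG_X86_MCE is enabled and the mcsafe_key static branch is set,
 * the copy is done by the exception-aware memcpy_mcsafe_unrolled() and its
 * result is returned; otherwise a plain memcpy() is used and 0 is returned.
 */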
static __always_inline __must_check int
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	if (static_branch_unlikely(&mcsafe_key))
		return memcpy_mcsafe_unrolled(dst, src, cnt);
	else
#endif
		memcpy(dst, src, cnt);
	return 0;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void memcpy_flushcache(void *dst, const void *src, size_t cnt);
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */