linux/arch/x86/include/asm/string_64.h
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Only used for special circumstances. Stolen from i386/string.h */
static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
{
        unsigned long d0, d1, d2;
        asm volatile("rep ; movsl\n\t"          /* copy n / 4 dwords */
                     "testb $2,%b4\n\t"         /* bit 1 of n set? */
                     "je 1f\n\t"
                     "movsw\n"                  /* then copy a trailing word */
                     "1:\ttestb $1,%b4\n\t"     /* bit 0 of n set? */
                     "je 2f\n\t"
                     "movsb\n"                  /* then copy a trailing byte */
                     "2:"
                     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
                     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
                     : "memory");
        return to;
}
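
/*
 * For illustration only: the copy above moves n / 4 dwords with "rep movsl"
 * and then uses bits 1 and 0 of n to mop up the tail, so a call such as
 *
 *      __inline_memcpy(to, from, 7);
 *
 * copies one dword, one word and one byte (7 = 4 + 2 + 1).
 */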

/*
 * Even with __builtin_ the compiler may decide to use the out-of-line
 * function.
 */

#define __HAVE_ARCH_MEMCPY 1
#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
extern void *memcpy(void *to, const void *from, size_t len);
#else
extern void *__memcpy(void *to, const void *from, size_t len);
#define memcpy(dst, src, len)                                   \
({                                                              \
        size_t __len = (len);                                   \
        void *__ret;                                            \
        if (__builtin_constant_p(len) && __len >= 64)           \
                __ret = __memcpy((dst), (src), __len);          \
        else                                                    \
                __ret = __builtin_memcpy((dst), (src), __len);  \
        __ret;                                                  \
})
#endif
#else
/*
 * kmemcheck becomes very happy if we use the REP instructions unconditionally,
 * because it means that we know both memory operands in advance.
 */
#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
#endif
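
/*
 * For illustration only (dst, src and nbytes are placeholder names, not part
 * of this header): with the pre-4.3 GCC macro above, a copy whose size is a
 * compile-time constant of 64 bytes or more, e.g.
 *
 *      memcpy(dst, src, 128);
 *
 * dispatches to the out-of-line __memcpy(), while a size only known at run
 * time, e.g.
 *
 *      memcpy(dst, src, nbytes);
 *
 * falls through to __builtin_memcpy() and is left to the compiler.
 */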

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#define __HAVE_ARCH_MEMCPY_MCSAFE 1
extern struct static_key mcsafe_key;
__must_check unsigned long __memcpy_mcsafe(void *dst, const void *src,
                size_t cnt);
/**
 * memcpy_mcsafe - copy memory with indication if a machine check happened
 *
 * @dst:        destination address
 * @src:        source address
 * @cnt:        number of bytes to copy
 *
 * Low level memory copy function that catches machine checks.
 * We only call into the "safe" function on systems that can
 * actually do machine check recovery. Everyone else can just
 * use memcpy().
 *
 * Return 0 for success, or number of bytes not copied if there was an
 * exception.
 */
static __always_inline __must_check unsigned long
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
        if (static_key_false(&mcsafe_key))
                return __memcpy_mcsafe(dst, src, cnt);
        else
#endif
                memcpy(dst, src, cnt);
        return 0;
}
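
/*
 * Sketch of a typical caller (buf, src and len are placeholder names): code
 * reading from memory that may contain poison checks the return value and
 * treats a short copy as an I/O error, e.g.
 *
 *      unsigned long rem = memcpy_mcsafe(buf, src, len);
 *
 *      if (rem)
 *              return -EIO;
 *
 * where a non-zero rem is the number of bytes left uncopied. Without
 * CONFIG_X86_MCE, or while mcsafe_key is false, the inline above is just a
 * plain memcpy() that always returns 0.
 */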

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void memcpy_flushcache(void *dst, const void *src, size_t cnt);
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */