linux/arch/x86/include/asm/string_64.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Even with the __builtin_ variants the compiler may decide to use the
   out-of-line function. */

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);

#define __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosw"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosl"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosq"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

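/*
 * Illustrative usage, a hedged sketch rather than part of this header:
 * note that @n counts elements (2/4/8 bytes each), not bytes, so each
 * "rep stos" above stores @n values of the element width. The
 * fill_pixels() helper below is hypothetical.
 *
 *	static void fill_pixels(u32 *fb, size_t npixels)
 *	{
 *		memset32(fb, 0xff00ff00, npixels);
 *	}
 */
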
#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

/*
 * Files that are not instrumented (e.g. mm/slub.c) should use the
 * uninstrumented versions of the mem* functions.
 */

#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif

#endif

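/*
 * A minimal sketch of how the remapping above is used: a file opted out
 * of KASAN instrumentation in its Makefile, e.g.
 *
 *	KASAN_SANITIZE_slub.o := n
 *
 * builds without __SANITIZE_ADDRESS__ defined, so its memcpy()/memset()/
 * memmove() calls resolve to the uninstrumented __memcpy()/__memset()/
 * __memmove() via the macros above.
 */
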
#define __HAVE_ARCH_MEMCPY_MCSAFE 1
__must_check unsigned long __memcpy_mcsafe(void *dst, const void *src,
		size_t cnt);
DECLARE_STATIC_KEY_FALSE(mcsafe_key);

/**
 * memcpy_mcsafe - copy memory with indication if a machine check happened
 *
 * @dst:	destination address
 * @src:	source address
 * @cnt:	number of bytes to copy
 *
 * Low-level memory copy function that catches machine checks.
 * We only call into the "safe" function on systems that can
 * actually do machine check recovery. Everyone else can just
 * use memcpy().
 *
 * Return: 0 for success, or the number of bytes not copied if there was
 * an exception.
 */
static __always_inline __must_check unsigned long
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	if (static_branch_unlikely(&mcsafe_key))
		return __memcpy_mcsafe(dst, src, cnt);
	else
#endif
		memcpy(dst, src, cnt);
	return 0;
}

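/*
 * Illustrative caller, a hedged sketch (read_from_pmem() is hypothetical):
 * a nonzero return is the number of bytes left uncopied after a machine
 * check, so 0 means the whole range was copied.
 *
 *	static int read_from_pmem(void *dst, const void *src, size_t len)
 *	{
 *		unsigned long rem = memcpy_mcsafe(dst, src, len);
 *
 *		return rem ? -EIO : 0;
 *	}
 */
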
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void __memcpy_flushcache(void *dst, const void *src, size_t cnt);
static __always_inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
	if (__builtin_constant_p(cnt)) {
		switch (cnt) {
		case 4:
			asm ("movntil %1, %0" : "=m"(*(u32 *)dst) : "r"(*(u32 *)src));
			return;
		case 8:
			asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
			return;
		case 16:
			asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
			asm ("movntiq %1, %0" : "=m"(*(u64 *)(dst + 8)) : "r"(*(u64 *)(src + 8)));
			return;
		}
	}
	__memcpy_flushcache(dst, src, cnt);
}
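
/*
 * Illustrative usage, a hedged sketch (write_entry() and its 8-byte slot
 * are hypothetical): a constant cnt of 4, 8 or 16 takes the inlined
 * non-temporal movnti path above, compiling the cnt == 8 case down to a
 * single movntiq; any other size falls back to __memcpy_flushcache().
 *
 *	static void write_entry(u64 *slot, u64 val)
 *	{
 *		memcpy_flushcache(slot, &val, sizeof(val));
 *	}
 */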
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */