linux/arch/x86/lib/usercopy_64.c
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/module.h>
#include <asm/uaccess.h>

/*
 * Zero Userspace
 */

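/**
 * __clear_user - zero a block of memory in user space, with less checking
 * @addr: destination address, in user space
 * @size: number of bytes to zero
 *
 * Zero a block of memory in user space.  The caller must have validated
 * the range with access_ok() beforehand.
 *
 * Returns the number of bytes that could not be cleared; 0 on success.
 */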
unsigned long __clear_user(void __user *addr, unsigned long size)
{
        long __d0;
        might_fault();
        /*
         * No memory constraint is needed because the asm below does not
         * change any memory that gcc knows about.
         */
        stac();
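        /*
         * The loop at label 0 stores zero eight bytes at a time (%rcx
         * starts at size/8); labels 4 and 1 then clear the remaining
         * size%8 bytes one byte at a time.  On a fault in the qword loop,
         * the .fixup code at label 3 recomputes the number of bytes still
         * uncleared from the leftover qword count; on a fault in the byte
         * loop, %rcx already holds that count.  Either way the uncleared
         * byte count ends up in "size" and is returned below.
         */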
        asm volatile(
                "       testq  %[size8],%[size8]\n"
                "       jz     4f\n"
                "0:     movq %[zero],(%[dst])\n"
                "       addq   %[eight],%[dst]\n"
                "       decl %%ecx ; jnz   0b\n"
                "4:     movq  %[size1],%%rcx\n"
                "       testl %%ecx,%%ecx\n"
                "       jz     2f\n"
                "1:     movb   %b[zero],(%[dst])\n"
                "       incq   %[dst]\n"
                "       decl %%ecx ; jnz  1b\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:     lea 0(%[size1],%[size8],8),%[size8]\n"
                "       jmp 2b\n"
                ".previous\n"
                _ASM_EXTABLE(0b,3b)
                _ASM_EXTABLE(1b,2b)
                : [size8] "=&c"(size), [dst] "=&D" (__d0)
                : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
                  [zero] "r" (0UL), [eight] "r" (8UL));
        clac();
        return size;
}
EXPORT_SYMBOL(__clear_user);

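/**
 * clear_user - zero a block of memory in user space
 * @to: destination address, in user space
 * @n: number of bytes to zero
 *
 * Zero a block of memory in user space after verifying the range with
 * access_ok().  If the check fails, nothing is cleared and @n is returned.
 *
 * Returns the number of bytes that could not be cleared; 0 on success.
 */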
unsigned long clear_user(void __user *to, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                return __clear_user(to, n);
        return n;
}
EXPORT_SYMBOL(clear_user);
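
/*
 * Illustrative sketch of a typical caller: a short clear is mapped to
 * -EFAULT.  The function and parameter names here are hypothetical, not
 * part of the kernel API.
 */
static inline int example_zero_user_range(void __user *ubuf, unsigned long len)
{
        /* clear_user() returns the number of bytes it could NOT zero */
        if (clear_user(ubuf, len))
                return -EFAULT;
        return 0;
}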

unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
{
        if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
                return copy_user_generic((__force void *)to, (__force void *)from, len);
        }
        return len;
}
EXPORT_SYMBOL(copy_in_user);
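
/*
 * Illustrative sketch: copy_in_user() copies directly between two buffers
 * that both live in user space (as compat code often needs to do) and
 * returns the number of bytes left uncopied.  The function and parameter
 * names here are hypothetical.
 */
static inline int example_copy_between_users(void __user *dst,
                                             const void __user *src,
                                             unsigned len)
{
        if (copy_in_user(dst, src, len))
                return -EFAULT;
        return 0;
}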

/*
 * Try to copy the last bytes and clear the rest if needed.
 * Since a protection fault in copy_from/to_user is not a normal situation,
 * it is not necessary to optimize tail handling.
 */
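/*
 * This helper is reached from the exception-fixup paths of the
 * copy_user_generic() variants (see copy_user_64.S); the value it returns
 * becomes the "number of bytes not copied" that copy_to_user() and
 * copy_from_user() report to their callers.
 */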
__visible unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len)
{
        for (; len; --len, to++) {
                char c;

                if (__get_user_nocheck(c, from++, sizeof(char)))
                        break;
                if (__put_user_nocheck(c, to, sizeof(char)))
                        break;
        }
        clac();

        /*
         * If the destination is a kernel buffer (__addr_ok() fails because
         * "to" lies above the user address limit), always clear the
         * uncopied tail so that no stale kernel data is left in it.
         */
        if (!__addr_ok(to))
                memset(to, 0, len);
        return len;
}