/* linux/arch/avr32/lib/copy_user.S */
/*
 * Copy to/from userspace with optional address space checking.
 *
 * Copyright 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm.h>

        /*
         * __kernel_size_t
         * __copy_user(void *to, const void *from, __kernel_size_t n)
         *
         * Returns the number of bytes not copied. Might be off by
         * max 3 bytes if we get a fault in the main loop.
         *
         * The address-space checking functions simply fall through to
         * the non-checking version.
         */
        .text
        .align  1
        /*
         * __kernel_size_t copy_from_user(void *to, const void *from,
         *                                __kernel_size_t n)
         *
         * AVR32 calling convention as used throughout this file:
         * r12 = to, r11 = from, r10 = n; result (bytes not copied)
         * comes back in the return-value register.
         *
         * Checked copy from userspace: validates the *source* range
         * (r11, length r10) before handing off to __copy_user.
         * branch_if_kernel / ret_if_privileged are macros provided by
         * the asm/ headers included above (r8 is their scratch reg) --
         * NOTE(review): exact macro semantics live in those headers.
         */
        .global copy_from_user
        .type   copy_from_user, @function
copy_from_user:
        /* Kernel-mode callers may access anything: skip the check. */
        branch_if_kernel r8, __copy_user
        /* Privileged source range: return r10 (= n), nothing copied. */
        ret_if_privileged r8, r11, r10, r10
        rjmp    __copy_user
        .size   copy_from_user, . - copy_from_user

        /*
         * __kernel_size_t copy_to_user(void *to, const void *from,
         *                              __kernel_size_t n)
         *
         * Checked copy to userspace: validates the *destination*
         * range (r12, length r10), then falls straight through into
         * __copy_user, which must immediately follow this function
         * (note: no rjmp here, unlike copy_from_user).
         */
        .global copy_to_user
        .type   copy_to_user, @function
copy_to_user:
        /* Kernel-mode callers may access anything: skip the check. */
        branch_if_kernel r8, __copy_user
        /* Privileged destination range: return r10 (= n), nothing
         * copied; otherwise fall through to __copy_user below. */
        ret_if_privileged r8, r12, r10, r10
        .size   copy_to_user, . - copy_to_user

        /*
         * __kernel_size_t __copy_user(void *to, const void *from,
         *                             __kernel_size_t n)
         *
         * Unchecked copy. r12 = to (advanced as the copy proceeds),
         * r11 = from (likewise), r10 = remaining byte count.
         * Returns the number of bytes NOT copied: 0 on success.
         *
         * The numeric labels 10,11 / 20-25 / 30-35 mark every load
         * and store that may fault; they are paired with fixup
         * handlers in the __ex_table section at the end of this file.
         */
        .global __copy_user
        .type   __copy_user, @function
__copy_user:
        mov     r9, r11
        andl    r9, 3, COH              /* r9 = from & 3 (COH clears the upper halfword) */
        brne    6f                      /* source not word-aligned */

        /* At this point, from is word-aligned */
1:      sub     r10, 4                  /* pre-decrement count for the word loop */
        brlt    3f                      /* fewer than 4 bytes left */

2:
10:     ld.w    r8, r11++               /* faultable word load */
11:     st.w    r12++, r8               /* faultable word store */
        sub     r10, 4
        brge    2b                      /* keep going while >= 4 bytes remain */

3:      sub     r10, -4                 /* undo pre-decrement: r10 = tail bytes (0..3) */
        reteq   0                       /* count hit zero: return 0, all copied */

        /*
         * Handle unaligned count. Need to be careful with r10 here so
         * that we return the correct value even if we get a fault
         */
4:
20:     ld.ub   r8, r11++
21:     st.b    r12++, r8
        sub     r10, 1
        reteq   0                       /* tail done after 1 byte */
22:     ld.ub   r8, r11++
23:     st.b    r12++, r8
        sub     r10, 1
        reteq   0                       /* tail done after 2 bytes */
24:     ld.ub   r8, r11++
25:     st.b    r12++, r8
        retal   0                       /* tail is at most 3 bytes, so we are done */

        /* Handle unaligned from-pointer */
6:      cp.w    r10, 4
        brlt    4b                      /* total count < 4: plain byte copy suffices */
        rsub    r9, r9, 4               /* r9 = 4 - (from & 3) = bytes until word-aligned */

30:     ld.ub   r8, r11++
31:     st.b    r12++, r8
        sub     r10, 1
        sub     r9, 1
        breq    1b                      /* aligned after 1 byte: enter word loop */
32:     ld.ub   r8, r11++
33:     st.b    r12++, r8
        sub     r10, 1
        sub     r9, 1
        breq    1b                      /* aligned after 2 bytes */
34:     ld.ub   r8, r11++
35:     st.b    r12++, r8
        sub     r10, 1
        rjmp    1b                      /* r9 started at 1..3, so aligned after 3 bytes */
        .size   __copy_user, . - __copy_user

        /*
         * Fault fixup handlers: return the number of bytes left to
         * copy. Faults in the word loop (labels 10/11) land on 19,
         * which adds back the 4 that the loop pre-subtracted from
         * r10 -- this is why the result can overstate the remainder
         * by up to 3 bytes on a word-loop fault. All byte-sized
         * accesses fault directly to 29.
         */
        .section .fixup,"ax"
        .align  1
19:     sub     r10, -4                 /* undo word-loop pre-decrement */
29:     retal   r10                     /* return remaining byte count */

        /*
         * Exception table: (faulting insn, fixup handler) address
         * pairs consumed by the kernel page-fault handler. The word
         * accesses (10, 11) route via 19b for the count correction;
         * every byte access routes straight to 29b.
         */
        .section __ex_table,"a"
        .align  2
        .long   10b, 19b
        .long   11b, 19b
        .long   20b, 29b
        .long   21b, 29b
        .long   22b, 29b
        .long   23b, 29b
        .long   24b, 29b
        .long   25b, 29b
        .long   30b, 29b
        .long   31b, 29b
        .long   32b, 29b
        .long   33b, 29b
        .long   34b, 29b
        .long   35b, 29b
