/* linux/arch/arm64/kernel/kuser32.S */
/*
 * Low-level user helpers placed in the vectors page for AArch32.
 * Based on the kuser helpers in arch/arm/kernel/entry-armv.S.
 *
 * Copyright (C) 2005-2011 Nicolas Pitre <nico@fluxnic.net>
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 *
 * AArch32 user helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */

#include <asm/unistd.h>
  32
  33        .align  5
  34        .globl  __kuser_helper_start
  35__kuser_helper_start:
  36
  37__kuser_cmpxchg64:                      // 0xffff0f60
  38        .inst   0xe92d00f0              //      push            {r4, r5, r6, r7}
  39        .inst   0xe1c040d0              //      ldrd            r4, r5, [r0]
  40        .inst   0xe1c160d0              //      ldrd            r6, r7, [r1]
  41        .inst   0xe1b20f9f              // 1:   ldrexd          r0, r1, [r2]
  42        .inst   0xe0303004              //      eors            r3, r0, r4
  43        .inst   0x00313005              //      eoreqs          r3, r1, r5
  44        .inst   0x01a23e96              //      stlexdeq        r3, r6, [r2]
  45        .inst   0x03330001              //      teqeq           r3, #1
  46        .inst   0x0afffff9              //      beq             1b
  47        .inst   0xf57ff05b              //      dmb             ish
  48        .inst   0xe2730000              //      rsbs            r0, r3, #0
  49        .inst   0xe8bd00f0              //      pop             {r4, r5, r6, r7}
  50        .inst   0xe12fff1e              //      bx              lr
  51
  52        .align  5
  53__kuser_memory_barrier:                 // 0xffff0fa0
  54        .inst   0xf57ff05b              //      dmb             ish
  55        .inst   0xe12fff1e              //      bx              lr
  56
  57        .align  5
  58__kuser_cmpxchg:                        // 0xffff0fc0
  59        .inst   0xe1923f9f              // 1:   ldrex           r3, [r2]
  60        .inst   0xe0533000              //      subs            r3, r3, r0
  61        .inst   0x01823e91              //      stlexeq         r3, r1, [r2]
  62        .inst   0x03330001              //      teqeq           r3, #1
  63        .inst   0x0afffffa              //      beq             1b
  64        .inst   0xf57ff05b              //      dmb             ish
  65        .inst   0xe2730000              //      rsbs            r0, r3, #0
  66        .inst   0xe12fff1e              //      bx              lr
  67
  68        .align  5
  69__kuser_get_tls:                        // 0xffff0fe0
  70        .inst   0xee1d0f70              //      mrc             p15, 0, r0, c13, c0, 3
  71        .inst   0xe12fff1e              //      bx              lr
  72        .rep    5
  73        .word   0
  74        .endr
  75
  76__kuser_helper_version:                 // 0xffff0ffc
  77        .word   ((__kuser_helper_end - __kuser_helper_start) >> 5)
  78        .globl  __kuser_helper_end
  79__kuser_helper_end:
  80
  81/*
  82 * AArch32 sigreturn code
  83 *
  84 * For ARM syscalls, the syscall number has to be loaded into r7.
  85 * We do not support an OABI userspace.
  86 *
  87 * For Thumb syscalls, we also pass the syscall number via r7. We therefore
  88 * need two 16-bit instructions.
  89 */
  90        .globl __aarch32_sigret_code_start
  91__aarch32_sigret_code_start:
  92
  93        /*
  94         * ARM Code
  95         */
  96        .byte   __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov  r7, #__NR_compat_sigreturn
  97        .byte   __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc  #__NR_compat_sigreturn
  98
  99        /*
 100         * Thumb code
 101         */
 102        .byte   __NR_compat_sigreturn, 0x27                     // svc  #__NR_compat_sigreturn
 103        .byte   __NR_compat_sigreturn, 0xdf                     // mov  r7, #__NR_compat_sigreturn
 104
 105        /*
 106         * ARM code
 107         */
 108        .byte   __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3      // mov  r7, #__NR_compat_rt_sigreturn
 109        .byte   __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef      // svc  #__NR_compat_rt_sigreturn
 110
 111        /*
 112         * Thumb code
 113         */
 114        .byte   __NR_compat_rt_sigreturn, 0x27                  // svc  #__NR_compat_rt_sigreturn
 115        .byte   __NR_compat_rt_sigreturn, 0xdf                  // mov  r7, #__NR_compat_rt_sigreturn
 116
 117        .globl __aarch32_sigret_code_end
 118__aarch32_sigret_code_end:
 119