linux/arch/arm64/kernel/sys_compat.c
<<
>>
Prefs
   1/*
   2 * Based on arch/arm/kernel/sys_arm.c
   3 *
   4 * Copyright (C) People who wrote linux/arch/i386/kernel/sys_i386.c
   5 * Copyright (C) 1995, 1996 Russell King.
   6 * Copyright (C) 2012 ARM Ltd.
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License version 2 as
  10 * published by the Free Software Foundation.
  11 *
  12 * This program is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 * GNU General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU General Public License
  18 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  19 */
  20
  21#include <linux/compat.h>
  22#include <linux/personality.h>
  23#include <linux/sched.h>
  24#include <linux/slab.h>
  25#include <linux/syscalls.h>
  26#include <linux/uaccess.h>
  27
  28#include <asm/cacheflush.h>
  29#include <asm/unistd32.h>
  30
  31static inline void
  32do_compat_cache_op(unsigned long start, unsigned long end, int flags)
  33{
  34        struct mm_struct *mm = current->active_mm;
  35        struct vm_area_struct *vma;
  36
  37        if (end < start || flags)
  38                return;
  39
  40        down_read(&mm->mmap_sem);
  41        vma = find_vma(mm, start);
  42        if (vma && vma->vm_start < end) {
  43                if (start < vma->vm_start)
  44                        start = vma->vm_start;
  45                if (end > vma->vm_end)
  46                        end = vma->vm_end;
  47                up_read(&mm->mmap_sem);
  48                __flush_cache_user_range(start & PAGE_MASK, PAGE_ALIGN(end));
  49                return;
  50        }
  51        up_read(&mm->mmap_sem);
  52}
  53
  54/*
  55 * Handle all unrecognised system calls.
  56 */
  57long compat_arm_syscall(struct pt_regs *regs)
  58{
  59        unsigned int no = regs->regs[7];
  60
  61        switch (no) {
  62        /*
  63         * Flush a region from virtual address 'r0' to virtual address 'r1'
  64         * _exclusive_.  There is no alignment requirement on either address;
  65         * user space does not need to know the hardware cache layout.
  66         *
  67         * r2 contains flags.  It should ALWAYS be passed as ZERO until it
  68         * is defined to be something else.  For now we ignore it, but may
  69         * the fires of hell burn in your belly if you break this rule. ;)
  70         *
  71         * (at a later date, we may want to allow this call to not flush
  72         * various aspects of the cache.  Passing '0' will guarantee that
  73         * everything necessary gets flushed to maintain consistency in
  74         * the specified region).
  75         */
  76        case __ARM_NR_compat_cacheflush:
  77                do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]);
  78                return 0;
  79
  80        case __ARM_NR_compat_set_tls:
  81                current->thread.tp_value = regs->regs[0];
  82                asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0]));
  83                return 0;
  84
  85        default:
  86                return -ENOSYS;
  87        }
  88}
  89