linux/arch/mips/kernel/syscall.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995, 1996, 1997, 2000, 2001, 05 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/mman.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/unistd.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/ipc.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/elf.h>

#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
#include <asm/asm-offsets.h>
#include <asm/signal.h>
#include <asm/sim.h>
#include <asm/shmparam.h>
#include <asm/sysmips.h>
#include <asm/uaccess.h>

/*
 * For historic reasons the pipe(2) syscall on MIPS has an unusual calling
 * convention.  It returns results in registers $v0 / $v1 which means there
 * is no need for it to verify the validity of a userspace pointer
 * argument.  Historically that used to be expensive in Linux.  These days
 * the performance advantage is negligible.
 */
asmlinkage int sysm_pipe(nabi_no_regargs volatile struct pt_regs regs)
{
        int fd[2];
        int error, res;

        error = do_pipe_flags(fd, 0);
        if (error) {
                res = error;
                goto out;
        }
        regs.regs[3] = fd[1];
        res = fd[0];
out:
        return res;
}
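
/*
 * Illustrative sketch (not part of the kernel): a userspace wrapper for
 * the convention above has to copy $v1 out by hand.  Roughly, for o32,
 * where __NR_pipe is 4042 and $a3 is the kernel's error flag:
 *
 *	int pipe(int fd[2])
 *	{
 *		register long v0 asm("$2") = 4042;
 *		register long v1 asm("$3");
 *		register long a3 asm("$7");
 *
 *		__asm__ volatile("syscall"
 *			: "+r" (v0), "=r" (v1), "=r" (a3)
 *			: : "memory");		-- clobber list trimmed
 *
 *		if (a3)
 *			return -1;		-- errno value is in v0
 *		fd[0] = v0;
 *		fd[1] = v1;
 *		return 0;
 *	}
 */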

unsigned long shm_align_mask = PAGE_SIZE - 1;   /* Sane caches */

EXPORT_SYMBOL(shm_align_mask);

#define COLOUR_ALIGN(addr,pgoff)                                \
        ((((addr) + shm_align_mask) & ~shm_align_mask) +        \
         (((pgoff) << PAGE_SHIFT) & shm_align_mask))
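
/*
 * Example, assuming 4K pages and shm_align_mask == 0x3fff (a 16K alias
 * span): COLOUR_ALIGN(0x20001234, 3) rounds the hint up to 0x20004000
 * and adds the colour of the file offset, (3 << PAGE_SHIFT) & 0x3fff ==
 * 0x3000, giving 0x20007000.  Address and file offset now agree in the
 * bits that select the cache colour, so aliases land in the same set.
 */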

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct vm_area_struct *vmm;
        int do_color_align;
        unsigned long task_size;

#ifdef CONFIG_32BIT
        task_size = TASK_SIZE;
#else /* Must be CONFIG_64BIT */
        task_size = test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE;
#endif

        if (len > task_size)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                /* Even MAP_FIXED mappings must reside within task_size.  */
                if (task_size - len < addr)
                        return -EINVAL;

                /*
                 * We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
                        return -EINVAL;
                return addr;
        }

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;
        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
                vmm = find_vma(current->mm, addr);
                if (task_size - len >= addr &&
                    (!vmm || addr + len <= vmm->vm_start))
                        return addr;
        }
        addr = current->mm->mmap_base;
        if (do_color_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
                /* At this point:  (!vmm || addr < vmm->vm_end). */
                if (task_size - len < addr)
                        return -ENOMEM;
                if (!vmm || addr + len <= vmm->vm_start)
                        return addr;
                addr = vmm->vm_end;
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}

void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE) {
                random_factor = get_random_int();
                random_factor = random_factor << PAGE_SHIFT;
                if (TASK_IS_32BIT_ADDR)
                        random_factor &= 0xfffffful;
                else
                        random_factor &= 0xffffffful;
        }

        mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
        mm->get_unmapped_area = arch_get_unmapped_area;
        mm->unmap_area = arch_unmap_area;
}
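
/*
 * With 4K pages the masks above yield up to 16MB (32-bit) or 256MB
 * (64-bit) of page-aligned randomization: the shift clears the low
 * PAGE_SHIFT bits first, so only whole pages of entropy survive.
 */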

static inline unsigned long brk_rnd(void)
{
        unsigned long rnd = get_random_int();

        rnd = rnd << PAGE_SHIFT;
        /* 8MB for 32bit, 256MB for 64bit */
        if (TASK_IS_32BIT_ADDR)
                rnd = rnd & 0x7ffffful;
        else
                rnd = rnd & 0xffffffful;

        return rnd;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long base = mm->brk;
        unsigned long ret;

        ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < mm->brk)
                return mm->brk;

        return ret;
}

SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
        unsigned long, prot, unsigned long, flags, unsigned long,
        fd, off_t, offset)
{
        unsigned long result;

        result = -EINVAL;
        if (offset & ~PAGE_MASK)
                goto out;

        result = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);

out:
        return result;
}

SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
        unsigned long, prot, unsigned long, flags, unsigned long, fd,
        unsigned long, pgoff)
{
        if (pgoff & (~PAGE_MASK >> 12))
                return -EINVAL;

        return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
}
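
/*
 * mmap2 takes its offset in fixed 4096-byte units, independent of the
 * kernel page size.  Assuming 16K pages (PAGE_SHIFT == 14), for example,
 * ~PAGE_MASK >> 12 == 3, so only pgoff values that are a multiple of 4,
 * i.e. page-aligned file offsets, pass the check, and the shift by
 * PAGE_SHIFT - 12 == 2 converts 4K units into 16K page numbers.
 */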

save_static_function(sys_fork);
static int __used noinline
_sys_fork(nabi_no_regargs struct pt_regs regs)
{
        return do_fork(SIGCHLD, regs.regs[29], &regs, 0, NULL, NULL);
}

save_static_function(sys_clone);
static int __used noinline
_sys_clone(nabi_no_regargs struct pt_regs regs)
{
        unsigned long clone_flags;
        unsigned long newsp;
        int __user *parent_tidptr, *child_tidptr;

        clone_flags = regs.regs[4];
        newsp = regs.regs[5];
        if (!newsp)
                newsp = regs.regs[29];
        parent_tidptr = (int __user *) regs.regs[6];
#ifdef CONFIG_32BIT
        /* We need to fetch the fifth argument off the stack.  */
        child_tidptr = NULL;
        if (clone_flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) {
                int __user *__user *usp = (int __user *__user *) regs.regs[29];
                if (regs.regs[2] == __NR_syscall) {
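                        /*
                         * Entered via the indirect syscall(2) wrapper:
                         * the real arguments sit one stack slot higher,
                         * so the fifth clone argument is at usp[5]
                         * rather than usp[4].
                         */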
                        if (get_user(child_tidptr, &usp[5]))
                                return -EFAULT;
                } else if (get_user(child_tidptr, &usp[4]))
                        return -EFAULT;
        }
#else
        child_tidptr = (int __user *) regs.regs[8];
#endif
        return do_fork(clone_flags, newsp, &regs, 0,
                       parent_tidptr, child_tidptr);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs)
{
        int error;
        char *filename;

        filename = getname((const char __user *) (long)regs.regs[4]);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        error = do_execve(filename,
                          (const char __user *const __user *) (long)regs.regs[5],
                          (const char __user *const __user *) (long)regs.regs[6],
                          &regs);
        putname(filename);

out:
        return error;
}

SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
{
        struct thread_info *ti = task_thread_info(current);

        ti->tp_value = addr;
        if (cpu_has_userlocal)
                write_c0_userlocal(addr);

        return 0;
}
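
/*
 * On CPUs with a UserLocal register the TLS pointer stored above is
 * readable from userspace with rdhwr $reg, $29; on older cores the
 * kernel traps and emulates that instruction from ti->tp_value, which
 * is why both copies are kept in sync.
 */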
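
/*
 * Atomically replace the user word at @addr with @new; the old value is
 * returned to userspace in $v0.  This backs sysmips(MIPS_ATOMIC_SET),
 * which MIPS userspace historically used for locking on CPUs that may
 * lack LL/SC.
 */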
static inline int mips_atomic_set(struct pt_regs *regs,
        unsigned long addr, unsigned long new)
{
        unsigned long old, tmp;
        unsigned int err;

        if (unlikely(addr & 3))
                return -EINVAL;

        if (unlikely(!access_ok(VERIFY_WRITE, addr, 4)))
                return -EINVAL;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__ (
                "       .set    mips3                                   \n"
                "       li      %[err], 0                               \n"
                "1:     ll      %[old], (%[addr])                       \n"
                "       move    %[tmp], %[new]                          \n"
                "2:     sc      %[tmp], (%[addr])                       \n"
                "       beqzl   %[tmp], 1b                              \n"
                "3:                                                     \n"
                "       .section .fixup,\"ax\"                          \n"
                "4:     li      %[err], %[efault]                       \n"
                "       j       3b                                      \n"
                "       .previous                                       \n"
                "       .section __ex_table,\"a\"                       \n"
                "       "STR(PTR)"      1b, 4b                          \n"
                "       "STR(PTR)"      2b, 4b                          \n"
                "       .previous                                       \n"
                "       .set    mips0                                   \n"
                : [old] "=&r" (old),
                  [err] "=&r" (err),
                  [tmp] "=&r" (tmp)
                : [addr] "r" (addr),
                  [new] "r" (new),
                  [efault] "i" (-EFAULT)
                : "memory");
        } else if (cpu_has_llsc) {
                __asm__ __volatile__ (
                "       .set    mips3                                   \n"
                "       li      %[err], 0                               \n"
                "1:     ll      %[old], (%[addr])                       \n"
                "       move    %[tmp], %[new]                          \n"
                "2:     sc      %[tmp], (%[addr])                       \n"
                "       bnez    %[tmp], 4f                              \n"
                "3:                                                     \n"
                "       .subsection 2                                   \n"
                "4:     b       1b                                      \n"
                "       .previous                                       \n"
                "                                                       \n"
                "       .section .fixup,\"ax\"                          \n"
                "5:     li      %[err], %[efault]                       \n"
                "       j       3b                                      \n"
                "       .previous                                       \n"
                "       .section __ex_table,\"a\"                       \n"
                "       "STR(PTR)"      1b, 5b                          \n"
                "       "STR(PTR)"      2b, 5b                          \n"
                "       .previous                                       \n"
                "       .set    mips0                                   \n"
                : [old] "=&r" (old),
                  [err] "=&r" (err),
                  [tmp] "=&r" (tmp)
                : [addr] "r" (addr),
                  [new] "r" (new),
                  [efault] "i" (-EFAULT)
                : "memory");
        } else {
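                /*
                 * No LL/SC at all: fall back to the kernel's software
                 * emulation.  ll_bit is cleared whenever a competing
                 * emulated ll/sc runs, so retry until our own
                 * read-modify-write completes undisturbed.
                 */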
                do {
                        preempt_disable();
                        ll_bit = 1;
                        ll_task = current;
                        preempt_enable();

                        err = __get_user(old, (unsigned int *) addr);
                        err |= __put_user(new, (unsigned int *) addr);
                        if (err)
                                break;
                        rmb();
                } while (!ll_bit);
        }

        if (unlikely(err))
                return err;

        regs->regs[2] = old;
        regs->regs[7] = 0;      /* No error */
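
        /*
         * MIPS syscall return convention: $v0 (regs[2]) carries the
         * result and $a3 (regs[7]) the error flag, so the old value in
         * $v0 plus a clear $a3 reports success directly to the caller.
         */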

        /*
         * Don't let your children do this ...
         */
        __asm__ __volatile__(
        "       move    $29, %0                                         \n"
        "       j       syscall_exit                                    \n"
        : /* no outputs */
        : "r" (regs));

        /* unreached.  Honestly.  */
        while (1);
}

save_static_function(sys_sysmips);
static int __used noinline
_sys_sysmips(nabi_no_regargs struct pt_regs regs)
{
        long cmd, arg1, arg2;

        cmd = regs.regs[4];
        arg1 = regs.regs[5];
        arg2 = regs.regs[6];

        switch (cmd) {
        case MIPS_ATOMIC_SET:
                return mips_atomic_set(&regs, arg1, arg2);

        case MIPS_FIXADE:
                if (arg1 & ~3)
                        return -EINVAL;

                if (arg1 & 1)
                        set_thread_flag(TIF_FIXADE);
                else
                        clear_thread_flag(TIF_FIXADE);
                if (arg1 & 2)
                        set_thread_flag(TIF_LOGADE);
                else
                        clear_thread_flag(TIF_LOGADE);

                return 0;

        case FLUSH_CACHE:
                __flush_cache_all();
                return 0;
        }

        return -EINVAL;
}

/*
 * Not implemented yet ...
 */
SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op)
{
        return -ENOSYS;
}

/*
 * If we ever come here the user sp is bad.  Zap the process right away.
 * Due to the bad stack, signaling wouldn't work.
 */
asmlinkage void bad_stack(void)
{
        do_exit(SIGSEGV);
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename,
                  const char *const argv[],
                  const char *const envp[])
{
        register unsigned long __a0 asm("$4") = (unsigned long) filename;
        register unsigned long __a1 asm("$5") = (unsigned long) argv;
        register unsigned long __a2 asm("$6") = (unsigned long) envp;
        register unsigned long __a3 asm("$7");
        unsigned long __v0;

        __asm__ volatile ("                                     \n"
        "       .set    noreorder                               \n"
        "       li      $2, %5          # __NR_execve           \n"
        "       syscall                                         \n"
        "       move    %0, $2                                  \n"
        "       .set    reorder                                 \n"
        : "=&r" (__v0), "=r" (__a3)
        : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_execve)
        : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24",
          "memory");

        if (__a3 == 0)
                return __v0;

        return -__v0;
}
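
/*
 * $a3 is the kernel's error flag on return from a syscall: zero means
 * success with the result in $v0, non-zero means failure with the
 * (positive) errno value in $v0, hence the negation above.
 */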