/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <linux/elf.h>
#include <asm/tlb.h>

asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
                             unsigned long error_code);

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags,
        unsigned long fd, unsigned long pgoff)
{
        /*
         * This is wrong for sun3, where PAGE_SIZE is 8Kb: there the
         * argument would need to be shifted down by 1, because m68k
         * mmap64(3) (in libc) passes the last argument of mmap2 in
         * 4Kb units.
         */
        return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}

/* Convert virtual (user) address VADDR to physical address PADDR */
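/*
 * ptestr probes the MMU tables for a read access to the given user
 * address and latches the result in the MMUSR register; the R
 * (resident) bit is set when a valid translation exists, in which
 * case the upper bits of MMUSR hold the physical page address.
 */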
#define virt_to_phys_040(vaddr)                                         \
({                                                                      \
  unsigned long _mmusr, _paddr;                                         \
                                                                        \
  __asm__ __volatile__ (".chip 68040\n\t"                               \
                        "ptestr (%1)\n\t"                               \
                        "movec %%mmusr,%0\n\t"                          \
                        ".chip 68k"                                     \
                        : "=r" (_mmusr)                                 \
                        : "a" (vaddr));                                 \
  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;             \
  _paddr;                                                               \
})

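/*
 * The 68040 CPUSH instructions push dirty lines to memory and
 * invalidate them in the selected cache(s): cpusha operates on the
 * entire cache, cpushl on the single 16-byte line containing the
 * physical address in the register, and cpushp on a whole page.
 * %dc, %ic and %bc select the data, instruction, or both caches.
 */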
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          /* This nop is needed for some broken versions of the 68040.  */
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %dc\n\t"
                                ".chip 68k");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %ic\n\t"
                                ".chip 68k");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %bc\n\t"
                                ".chip 68k");
          break;
        }
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
         address range.  */
      if ((paddr = virt_to_phys_040(addr))) {
        paddr += addr & ~(PAGE_MASK | 15);
        len = (len + (addr & 15) + 15) >> 4;
      } else {
        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

        if (len <= tmp)
          return 0;
        addr += tmp;
        len -= tmp;
        tmp = PAGE_SIZE;
        for (;;)
          {
            if ((paddr = virt_to_phys_040(addr)))
              break;
            if (len <= tmp)
              return 0;
            addr += tmp;
            len -= tmp;
          }
        len = (len + 15) >> 4;
      }
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
        {
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
          if (!--i && len)
            {
              /*
               * No need to page align here since it is done by
               * virt_to_phys_040().
               */
              addr += PAGE_SIZE;
              i = PAGE_SIZE / 16;
              /* Recompute physical address when crossing a page
                 boundary. */
              for (;;)
                {
                  if ((paddr = virt_to_phys_040(addr)))
                    break;
                  if (len <= i)
                    return 0;
                  len -= i;
                  addr += PAGE_SIZE;
                }
            }
          else
            paddr += 16;
        }
      break;

    default:
    case FLUSH_SCOPE_PAGE:
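      /* Round the byte length up to the number of whole pages
         spanned by the range.  */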
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          if (!(paddr = virt_to_phys_040(addr)))
            continue;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
        }
      break;
    }
  return 0;
}

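/*
 * plpar translates the logical address held in the address register
 * into the corresponding physical address, leaving the result in the
 * same register.
 */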
#define virt_to_phys_060(vaddr)                         \
({                                                      \
  unsigned long paddr;                                  \
  __asm__ __volatile__ (".chip 68060\n\t"               \
                        "plpar (%0)\n\t"                \
                        ".chip 68k"                     \
                        : "=a" (paddr)                  \
                        : "0" (vaddr));                 \
  (paddr); /* XXX */                                    \
})

static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  /*
   * 68060 manual says:
   *  cpush %dc : flush DC, remains valid (with our %cacr setup)
   *  cpush %ic : invalidate IC
   *  cpush %bc : flush DC + invalidate IC
   */
  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %dc\n\t"
                                ".chip 68k");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %ic\n\t"
                                ".chip 68k");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %bc\n\t"
                                ".chip 68k");
          break;
        }
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
         address range.  */
      len += addr & 15;
      addr &= -16;
      if (!(paddr = virt_to_phys_060(addr))) {
        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

        if (len <= tmp)
          return 0;
        addr += tmp;
        len -= tmp;
        tmp = PAGE_SIZE;
        for (;;)
          {
            if ((paddr = virt_to_phys_060(addr)))
              break;
            if (len <= tmp)
              return 0;
            addr += tmp;
            len -= tmp;
          }
      }
      len = (len + 15) >> 4;
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
        {
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
          if (!--i && len)
            {
              /*
               * We just want to jump to the first cache line
               * in the next page.
               */
              addr += PAGE_SIZE;
              addr &= PAGE_MASK;

              i = PAGE_SIZE / 16;
              /* Recompute physical address when crossing a page
                 boundary. */
              for (;;)
                {
                  if ((paddr = virt_to_phys_060(addr)))
                    break;
                  if (len <= i)
                    return 0;
                  len -= i;
                  addr += PAGE_SIZE;
                }
            }
          else
            paddr += 16;
        }
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      addr &= PAGE_MASK;        /* Workaround for bug in some
                                   revisions of the 68060 */
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          if (!(paddr = virt_to_phys_060(addr)))
            continue;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
        }
      break;
    }
  return 0;
}

/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
        struct vm_area_struct *vma;
        int ret = -EINVAL;

        if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
            cache & ~FLUSH_CACHE_BOTH)
                goto out;

        if (scope == FLUSH_SCOPE_ALL) {
                /* Only the superuser may explicitly flush the whole cache. */
                ret = -EPERM;
                if (!capable(CAP_SYS_ADMIN))
                        goto out;
        } else {
                /*
                 * Verify that the specified address region actually belongs
                 * to this process.
                 */
                vma = find_vma (current->mm, addr);
                ret = -EINVAL;
                /* Check for overflow.  */
                if (addr + len < addr)
                        goto out;
                if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
                        goto out;
        }

        if (CPU_IS_020_OR_030) {
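                /*
                 * 68020/030 caches are controlled through the CACR
                 * register: bit 2 (CEI) clears the instruction-cache
                 * entry addressed by CAAR and bit 10 (CED) the
                 * data-cache entry (68030 only); bit 3 (CI) and bit 11
                 * (CD) clear the whole instruction/data cache.
                 */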
                if (scope == FLUSH_SCOPE_LINE && len < 256) {
                        unsigned long cacr;
                        __asm__ ("movec %%cacr, %0" : "=r" (cacr));
                        if (cache & FLUSH_CACHE_INSN)
                                cacr |= 4;
                        if (cache & FLUSH_CACHE_DATA)
                                cacr |= 0x400;
                        len >>= 2;
                        while (len--) {
                                __asm__ __volatile__ ("movec %1, %%caar\n\t"
                                                      "movec %0, %%cacr"
                                                      : /* no outputs */
                                                      : "r" (cacr), "r" (addr));
                                addr += 4;
                        }
                } else {
                        /* Flush the whole cache, even if page granularity requested. */
                        unsigned long cacr;
                        __asm__ ("movec %%cacr, %0" : "=r" (cacr));
                        if (cache & FLUSH_CACHE_INSN)
                                cacr |= 8;
                        if (cache & FLUSH_CACHE_DATA)
                                cacr |= 0x800;
                        __asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
                }
                ret = 0;
                goto out;
        } else {
            /*
             * 040 or 060: don't blindly trust 'scope', someone could
             * try to flush a few megs of memory.
             */

            if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
                scope = FLUSH_SCOPE_PAGE;
            if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
                scope = FLUSH_SCOPE_ALL;
            if (CPU_IS_040) {
                ret = cache_flush_040 (addr, scope, cache, len);
            } else if (CPU_IS_060) {
                ret = cache_flush_060 (addr, scope, cache, len);
            }
        }
out:
        return ret;
}

asmlinkage int sys_getpagesize(void)
{
        return PAGE_SIZE;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
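/*
 * m68k syscall convention: the syscall number is passed in %d0 and
 * the return value comes back in %d0; the arguments go in %d1-%d5,
 * and "trap #0" enters the kernel.
 */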
int kernel_execve(const char *filename,
                  const char *const argv[],
                  const char *const envp[])
{
        register long __res asm ("%d0") = __NR_execve;
        register long __a asm ("%d1") = (long)(filename);
        register long __b asm ("%d2") = (long)(argv);
        register long __c asm ("%d3") = (long)(envp);
        asm volatile ("trap  #0" : "+d" (__res)
                        : "d" (__a), "d" (__b), "d" (__c));
        return __res;
}

asmlinkage unsigned long sys_get_thread_area(void)
{
        return current_thread_info()->tp_value;
}

asmlinkage int sys_set_thread_area(unsigned long tp)
{
        current_thread_info()->tp_value = tp;
        return 0;
}

/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
                      unsigned long __user * mem)
{
        /* This was borrowed from ARM's implementation.  */
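        /* Walk the page tables by hand: the compare-and-exchange may
           only touch the page directly when it is already mapped
           present, writable and dirty; otherwise we simulate a write
           fault below and retry.  */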
        for (;;) {
                struct mm_struct *mm = current->mm;
                pgd_t *pgd;
                pmd_t *pmd;
                pte_t *pte;
                spinlock_t *ptl;
                unsigned long mem_value;

                down_read(&mm->mmap_sem);
                pgd = pgd_offset(mm, (unsigned long)mem);
                if (!pgd_present(*pgd))
                        goto bad_access;
                pmd = pmd_offset(pgd, (unsigned long)mem);
                if (!pmd_present(*pmd))
                        goto bad_access;
                pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
                if (!pte_present(*pte) || !pte_dirty(*pte)
                    || !pte_write(*pte)) {
                        pte_unmap_unlock(pte, ptl);
                        goto bad_access;
                }

                mem_value = *mem;
                if (mem_value == oldval)
                        *mem = newval;

                pte_unmap_unlock(pte, ptl);
                up_read(&mm->mmap_sem);
                return mem_value;

              bad_access:
                up_read(&mm->mmap_sem);
                /* This is not necessarily a bad access: we can also get
                   here if the memory we are trying to write to is marked
                   copy-on-write.  Make the kernel do the necessary page
                   handling, then re-iterate.  Simulate a write access
                   fault to do that.  */
                {
                        /* The first argument of the function corresponds to
                           D1, which is the first field of struct pt_regs.  */
                        struct pt_regs *fp = (struct pt_regs *)&newval;

                        /* '3' is an RMW flag.  */
                        if (do_page_fault(fp, (unsigned long)mem, 3))
                                /* If the do_page_fault() failed, we don't
                                   have anything meaningful to return.
                                   There should be a SIGSEGV pending for
                                   the process.  */
                                return 0xdeadbeef;
                }
        }
}

asmlinkage int sys_atomic_barrier(void)
{
        /* no code needed for uniprocessors */
        return 0;
}