/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_MMU

#include <asm/tlb.h>

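/* do_page_fault() lives in arch/m68k/mm/fault.c; it is declared here so
   that sys_atomic_cmpxchg_32() below can simulate a write fault to force
   copy-on-write handling.  */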
asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
                             unsigned long error_code);

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags,
        unsigned long fd, unsigned long pgoff)
{
        /*
         * This is wrong for sun3, where PAGE_SIZE is 8 KiB: there we
         * would need to shift the argument down by 1, because m68k
         * mmap64(3) in libc passes the last argument of mmap2 in
         * 4 KiB units.
         */
        return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
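
/*
 * Illustrative arithmetic (not from the original source): with 4 KiB
 * pgoff units, a file offset of 64 KiB arrives here as pgoff == 16.
 * On sun3's 8 KiB pages, sys_mmap_pgoff() would interpret that as a
 * 128 KiB offset unless pgoff were first shifted down by 1.
 */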

/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)                                         \
({                                                                      \
  unsigned long _mmusr, _paddr;                                         \
                                                                        \
  __asm__ __volatile__ (".chip 68040\n\t"                               \
                        "ptestr (%1)\n\t"                               \
                        "movec %%mmusr,%0\n\t"                          \
                        ".chip 68k"                                     \
                        : "=r" (_mmusr)                                 \
                        : "a" (vaddr));                                 \
  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;             \
  _paddr;                                                               \
})
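
/* How the macro above works: PTESTR asks the 68040 MMU to translate
   VADDR for a read access and latches the result in the MMUSR register.
   If the resident bit (MMU_R_040) is set, the upper bits of MMUSR hold
   the physical page address; otherwise the page is unmapped and the
   macro evaluates to 0.  */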

static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          /* This nop is needed for some broken versions of the 68040.  */
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %dc\n\t"
                                ".chip 68k");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %ic\n\t"
                                ".chip 68k");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %bc\n\t"
                                ".chip 68k");
          break;
        }
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
         address range.  */
      if ((paddr = virt_to_phys_040(addr))) {
        paddr += addr & ~(PAGE_MASK | 15);
        len = (len + (addr & 15) + 15) >> 4;
      } else {
        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

        if (len <= tmp)
          return 0;
        addr += tmp;
        len -= tmp;
        tmp = PAGE_SIZE;
        for (;;)
          {
            if ((paddr = virt_to_phys_040(addr)))
              break;
            if (len <= tmp)
              return 0;
            addr += tmp;
            len -= tmp;
          }
        len = (len + 15) >> 4;
      }
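      /* 'i' counts the 16-byte cache lines remaining in the current
         page; at this point 'len' is a count of cache lines, not
         bytes.  */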
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
        {
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
          if (!--i && len)
            {
              /*
               * No need to page align here since it is done by
               * virt_to_phys_040().
               */
              addr += PAGE_SIZE;
              i = PAGE_SIZE / 16;
              /* Recompute physical address when crossing a page
                 boundary. */
              for (;;)
                {
                  if ((paddr = virt_to_phys_040(addr)))
                    break;
                  if (len <= i)
                    return 0;
                  len -= i;
                  addr += PAGE_SIZE;
                }
            }
          else
            paddr += 16;
        }
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          if (!(paddr = virt_to_phys_040(addr)))
            continue;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
        }
      break;
    }
  return 0;
}

#define virt_to_phys_060(vaddr)                         \
({                                                      \
  unsigned long paddr;                                  \
  __asm__ __volatile__ (".chip 68060\n\t"               \
                        "plpar (%0)\n\t"                \
                        ".chip 68k"                     \
                        : "=a" (paddr)                  \
                        : "0" (vaddr));                 \
  (paddr); /* XXX */                                    \
})
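
/* PLPAR translates the logical address in an address register in place
   on the 68060.  Note the XXX above: unlike virt_to_phys_040() there is
   no explicit residency check here, and an unmapped address is
   presumably resolved through the access-error exception path rather
   than by returning 0.  */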

static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  /*
   * 68060 manual says:
   *  cpush %dc : flush DC, remains valid (with our %cacr setup)
   *  cpush %ic : invalidate IC
   *  cpush %bc : flush DC + invalidate IC
   */
  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %dc\n\t"
                                ".chip 68k");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %ic\n\t"
                                ".chip 68k");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %bc\n\t"
                                ".chip 68k");
          break;
        }
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
         address range.  */
      len += addr & 15;
      addr &= -16;
      if (!(paddr = virt_to_phys_060(addr))) {
        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

        if (len <= tmp)
          return 0;
        addr += tmp;
        len -= tmp;
        tmp = PAGE_SIZE;
        for (;;)
          {
            if ((paddr = virt_to_phys_060(addr)))
              break;
            if (len <= tmp)
              return 0;
            addr += tmp;
            len -= tmp;
          }
      }
      len = (len + 15) >> 4;
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
        {
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
          if (!--i && len)
            {
              /*
               * We just want to jump to the first cache line
               * in the next page.
               */
              addr += PAGE_SIZE;
              addr &= PAGE_MASK;

              i = PAGE_SIZE / 16;
              /* Recompute physical address when crossing a page
                 boundary. */
              for (;;)
                {
                  if ((paddr = virt_to_phys_060(addr)))
                    break;
                  if (len <= i)
                    return 0;
                  len -= i;
                  addr += PAGE_SIZE;
                }
            }
          else
            paddr += 16;
        }
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      addr &= PAGE_MASK;        /* Workaround for bug in some
                                   revisions of the 68060 */
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          if (!(paddr = virt_to_phys_060(addr)))
            continue;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
        }
      break;
    }
  return 0;
}

/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
        int ret = -EINVAL;

        if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
            cache & ~FLUSH_CACHE_BOTH)
                goto out;

        if (scope == FLUSH_SCOPE_ALL) {
                /* Only the superuser may explicitly flush the whole cache. */
                ret = -EPERM;
                if (!capable(CAP_SYS_ADMIN))
                        goto out;
                /* Take mmap_sem here as well, so that the up_read() at
                   out_unlock below is balanced on every path.  */
                down_read(&current->mm->mmap_sem);
        } else {
                struct vm_area_struct *vma;

                /* Check for overflow.  */
                if (addr + len < addr)
                        goto out;

                /*
                 * Verify that the specified address region actually belongs
                 * to this process.
                 */
                ret = -EINVAL;
                down_read(&current->mm->mmap_sem);
                vma = find_vma(current->mm, addr);
                if (!vma || addr < vma->vm_start || addr + len > vma->vm_end)
                        goto out_unlock;
        }

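        /*
         * On the 68020/030 the flush is driven through CACR: bit 2 (CE)
         * and bit 10 (CED) clear the single instruction/data cache entry
         * addressed by CAAR, while bit 3 (CI) and bit 11 (CD) clear the
         * entire instruction/data cache.
         */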
        if (CPU_IS_020_OR_030) {
                if (scope == FLUSH_SCOPE_LINE && len < 256) {
                        unsigned long cacr;
                        __asm__ ("movec %%cacr, %0" : "=r" (cacr));
                        if (cache & FLUSH_CACHE_INSN)
                                cacr |= 4;
                        if (cache & FLUSH_CACHE_DATA)
                                cacr |= 0x400;
                        len >>= 2;
                        while (len--) {
                                __asm__ __volatile__ ("movec %1, %%caar\n\t"
                                                      "movec %0, %%cacr"
                                                      : /* no outputs */
                                                      : "r" (cacr), "r" (addr));
                                addr += 4;
                        }
                } else {
                        /* Flush the whole cache, even if page granularity requested. */
                        unsigned long cacr;
                        __asm__ ("movec %%cacr, %0" : "=r" (cacr));
                        if (cache & FLUSH_CACHE_INSN)
                                cacr |= 8;
                        if (cache & FLUSH_CACHE_DATA)
                                cacr |= 0x800;
                        __asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
                }
                ret = 0;
                goto out_unlock;
        } else {
                /*
                 * 040 or 060: don't blindly trust 'scope', someone could
                 * try to flush a few megs of memory.
                 */
                if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
                        scope = FLUSH_SCOPE_PAGE;
                if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
                        scope = FLUSH_SCOPE_ALL;
                if (CPU_IS_040) {
                        ret = cache_flush_040 (addr, scope, cache, len);
                } else if (CPU_IS_060) {
                        ret = cache_flush_060 (addr, scope, cache, len);
                }
        }
out_unlock:
        up_read(&current->mm->mmap_sem);
out:
        return ret;
}
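
/*
 * Illustrative user-space call (a sketch, not from this file): flushing
 * both caches for a freshly generated code buffer before jumping into
 * it.  'buf' and 'buflen' are hypothetical names.
 *
 *      #include <unistd.h>
 *      #include <asm/cachectl.h>
 *      #include <asm/unistd.h>
 *
 *      syscall(__NR_cacheflush, (unsigned long) buf,
 *              FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, buflen);
 */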

/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
                      unsigned long __user * mem)
{
        /* This was borrowed from ARM's implementation.  */
        for (;;) {
                struct mm_struct *mm = current->mm;
                pgd_t *pgd;
                pmd_t *pmd;
                pte_t *pte;
                spinlock_t *ptl;
                unsigned long mem_value;

                down_read(&mm->mmap_sem);
                pgd = pgd_offset(mm, (unsigned long)mem);
                if (!pgd_present(*pgd))
                        goto bad_access;
                pmd = pmd_offset(pgd, (unsigned long)mem);
                if (!pmd_present(*pmd))
                        goto bad_access;
                pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
                if (!pte_present(*pte) || !pte_dirty(*pte)
                    || !pte_write(*pte)) {
                        pte_unmap_unlock(pte, ptl);
                        goto bad_access;
                }

                /*
                 * No need to check for EFAULT; we know that the page is
                 * present and writable.
                 */
                __get_user(mem_value, mem);
                if (mem_value == oldval)
                        __put_user(newval, mem);

                pte_unmap_unlock(pte, ptl);
                up_read(&mm->mmap_sem);
                return mem_value;

              bad_access:
                up_read(&mm->mmap_sem);
                /* This is not necessarily a bad access: we can get here
                   if the memory we are trying to write to is marked
                   copy-on-write.  Make the kernel do the necessary page
                   handling, then retry.  Simulate a write access fault
                   to do that.  */
                {
                        /* The first argument of the function corresponds to
                           D1, which is the first field of struct pt_regs.  */
                        struct pt_regs *fp = (struct pt_regs *)&newval;

                        /* '3' is an RMW flag.  */
                        if (do_page_fault(fp, (unsigned long)mem, 3))
                                /* If do_page_fault() failed, we don't have
                                   anything meaningful to return.  There
                                   should be a SIGSEGV pending for the
                                   process.  */
                                return 0xdeadbeef;
                }
        }
}
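
/*
 * Design note: the pte must be present, dirty, and writable before the
 * compare-and-exchange runs, so the __get_user()/__put_user() pair
 * cannot fault and the operation is atomic with respect to other
 * threads under the page-table lock (m68k is uniprocessor).  A hedged
 * sketch of how user space might invoke it, given the non-standard
 * A0/D1/D2 argument registers noted above (register setup assumed, not
 * taken from this file):
 *
 *      register unsigned long *a0 asm ("a0") = mem;
 *      register int d0 asm ("d0") = __NR_atomic_cmpxchg_32;
 *      register unsigned long d1 asm ("d1") = newval;
 *      register int d2 asm ("d2") = oldval;
 *      asm volatile ("trap #0"
 *                    : "+d" (d0)
 *                    : "a" (a0), "d" (d1), "d" (d2)
 *                    : "memory");
 */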

#else

/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
        flush_cache_all();
        return 0;
}

/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
                      unsigned long __user * mem)
{
        struct mm_struct *mm = current->mm;
        unsigned long mem_value;

        down_read(&mm->mmap_sem);

        mem_value = *mem;
        if (mem_value == oldval)
                *mem = newval;

        up_read(&mm->mmap_sem);
        return mem_value;
}

#endif /* CONFIG_MMU */

asmlinkage int sys_getpagesize(void)
{
        return PAGE_SIZE;
}

asmlinkage unsigned long sys_get_thread_area(void)
{
        return current_thread_info()->tp_value;
}

asmlinkage int sys_set_thread_area(unsigned long tp)
{
        current_thread_info()->tp_value = tp;
        return 0;
}
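
/* tp_value is the per-thread TLS pointer; the get/set pair above is how
   m68k user space (e.g. the threading library) reads and installs the
   thread pointer, since the CPU has no dedicated TLS register.  */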

asmlinkage int sys_atomic_barrier(void)
{
        /* no code needed for uniprocs */
        return 0;
}
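
/* On a uniprocessor the syscall itself already serializes the calling
   thread, so no explicit barrier instruction is needed; an SMP port
   would have to issue a real memory barrier here.  */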