/* linux/arch/mn10300/mm/fault.c */
   1/* MN10300 MMU Fault handler
   2 *
   3 * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
   4 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
   5 * Modified by David Howells (dhowells@redhat.com)
   6 *
   7 * This program is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU General Public Licence
   9 * as published by the Free Software Foundation; either version
  10 * 2 of the Licence, or (at your option) any later version.
  11 */
  12
  13#include <linux/signal.h>
  14#include <linux/sched.h>
  15#include <linux/kernel.h>
  16#include <linux/errno.h>
  17#include <linux/string.h>
  18#include <linux/types.h>
  19#include <linux/ptrace.h>
  20#include <linux/mman.h>
  21#include <linux/mm.h>
  22#include <linux/smp.h>
  23#include <linux/interrupt.h>
  24#include <linux/init.h>
  25#include <linux/vt_kern.h>              /* For unblank_screen() */
  26
  27#include <asm/uaccess.h>
  28#include <asm/pgalloc.h>
  29#include <asm/hardirq.h>
  30#include <asm/cpu-regs.h>
  31#include <asm/debugger.h>
  32#include <asm/gdb-stub.h>
  33
  34/*
  35 * Unlock any spinlocks which will prevent us from getting the
  36 * message out
  37 */
  38void bust_spinlocks(int yes)
  39{
  40        if (yes) {
  41                oops_in_progress = 1;
  42        } else {
  43                int loglevel_save = console_loglevel;
  44#ifdef CONFIG_VT
  45                unblank_screen();
  46#endif
  47                oops_in_progress = 0;
  48                /*
  49                 * OK, the message is on the console.  Now we call printk()
  50                 * without oops_in_progress set so that printk will give klogd
  51                 * a poke.  Hold onto your hats...
  52                 */
  53                console_loglevel = 15;  /* NMI oopser may have shut the console
  54                                         * up */
  55                printk(" ");
  56                console_loglevel = loglevel_save;
  57        }
  58}
  59
  60void do_BUG(const char *file, int line)
  61{
  62        bust_spinlocks(1);
  63        printk(KERN_EMERG "------------[ cut here ]------------\n");
  64        printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
  65}
  66
#if 0
/*
 * Disabled debugging helper: dump the pgd/pmd/pte entries that map
 * 'address' in the given top-level page directory, stopping at the
 * first level that is not present.
 *
 * NOTE(review): this uses the old-style walkers (__pgd_offset,
 * pmd_offset() taking a pgd_t *, pte_offset) rather than the
 * pgd/pud/pmd/pte_offset_* chain used by do_page_fault() below —
 * it would need updating before it could be re-enabled.
 */
static void print_pagetable_entries(pgd_t *pgdir, unsigned long address)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgdir + __pgd_offset(address);
        printk(KERN_DEBUG "pgd entry %p: %016Lx\n",
               pgd, (long long) pgd_val(*pgd));

        if (!pgd_present(*pgd)) {
                printk(KERN_DEBUG "... pgd not present!\n");
                return;
        }
        pmd = pmd_offset(pgd, address);
        printk(KERN_DEBUG "pmd entry %p: %016Lx\n",
               pmd, (long long)pmd_val(*pmd));

        if (!pmd_present(*pmd)) {
                printk(KERN_DEBUG "... pmd not present!\n");
                return;
        }
        pte = pte_offset(pmd, address);
        printk(KERN_DEBUG "pte entry %p: %016Lx\n",
               pte, (long long) pte_val(*pte));

        if (!pte_present(*pte))
                printk(KERN_DEBUG "... pte not present!\n");
}
#endif
  98
  99/*
 100 * This routine handles page faults.  It determines the address,
 101 * and the problem, and then passes it off to one of the appropriate
 102 * routines.
 103 *
 104 * fault_code:
 105 * - LSW: either MMUFCR_IFC or MMUFCR_DFC as appropriate
 106 * - MSW: 0 if data access, 1 if instruction access
 107 * - bit 0: TLB miss flag
 108 * - bit 1: initial write
 109 * - bit 2: page invalid
 110 * - bit 3: protection violation
 111 * - bit 4: accessor (0=user 1=kernel)
 112 * - bit 5: 0=read 1=write
 113 * - bit 6-8: page protection spec
 114 * - bit 9: illegal address
 115 * - bit 16: 0=data 1=ins
 116 *
 117 */
/*
 * Main page-fault entry point, called from the exception vector.
 *
 * regs       - trapped register state
 * fault_code - MMUFCR_IFC/MMUFCR_DFC contents plus bit 16 set for an
 *              instruction fetch (see the bit layout documented above)
 * address    - faulting virtual address
 *
 * Resolves the fault by growing the stack, populating the page via
 * handle_mm_fault(), syncing a kernel vmalloc mapping, or delivering
 * SIGSEGV/SIGBUS; kernel-mode faults with no fixup end in die().
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
                              unsigned long address)
{
        struct vm_area_struct *vma;
        struct task_struct *tsk;
        struct mm_struct *mm;
        unsigned long page;
        siginfo_t info;
        int fault;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

#ifdef CONFIG_GDBSTUB
        /* handle GDB stub causing a fault */
        if (gdbstub_busy) {
                gdbstub_exception(regs, TBR & TBR_INT_CODE);
                return;
        }
#endif

#if 0
        printk(KERN_DEBUG "--- do_page_fault(%p,%s:%04lx,%08lx)\n",
               regs,
               fault_code & 0x10000 ? "ins" : "data",
               fault_code & 0xffff, address);
#endif

        tsk = current;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         *
         * This verifies that the fault happens in kernel space
         * and that the fault was a page not present (invalid) error
         */
        if (address >= VMALLOC_START && address < VMALLOC_END &&
            (fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR &&
            (fault_code & MMUFCR_xFC_PGINVAL) == MMUFCR_xFC_PGINVAL
            )
                goto vmalloc_fault;

        mm = tsk->mm;
        info.si_code = SEGV_MAPERR;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;

        if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
                flags |= FAULT_FLAG_USER;
retry:
        /* taken for read: we only walk/extend the VMA tree and fault pages */
        down_read(&mm->mmap_sem);

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        /* address is below the nearest VMA: only valid for a growable stack */
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;

        if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
                /* accessing the stack below the stack pointer is always a
                 * bug; the 2*PAGE_SIZE slack tolerates instructions that
                 * push below SP before adjusting it */
                if ((address & PAGE_MASK) + 2 * PAGE_SIZE < regs->sp) {
#if 0
                        printk(KERN_WARNING
                               "[%d] ### Access below stack @%lx (sp=%lx)\n",
                               current->pid, address, regs->sp);
                        printk(KERN_WARNING
                               "vma [%08x - %08x]\n",
                               vma->vm_start, vma->vm_end);
                        show_registers(regs);
                        printk(KERN_WARNING
                               "[%d] ### Code: [%08lx]"
                               " %02x %02x %02x %02x %02x %02x %02x %02x\n",
                               current->pid,
                               regs->pc,
                               ((u8 *) regs->pc)[0],
                               ((u8 *) regs->pc)[1],
                               ((u8 *) regs->pc)[2],
                               ((u8 *) regs->pc)[3],
                               ((u8 *) regs->pc)[4],
                               ((u8 *) regs->pc)[5],
                               ((u8 *) regs->pc)[6],
                               ((u8 *) regs->pc)[7]
                               );
#endif
                        goto bad_area;
                }
        }

        if (expand_stack(vma, address))
                goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        info.si_code = SEGV_ACCERR;
        /* decode invalid-vs-protection and read-vs-write from the MMUFCR */
        switch (fault_code & (MMUFCR_xFC_PGINVAL|MMUFCR_xFC_TYPE)) {
        default:        /* 3: write, present */
        case MMUFCR_xFC_TYPE_WRITE:
#ifdef TEST_VERIFY_AREA
                if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR)
                        printk(KERN_DEBUG "WP fault at %08lx\n", regs->pc);
#endif
                /* write to absent page */
                /* fallthrough: both write cases need VM_WRITE */
        case MMUFCR_xFC_PGINVAL | MMUFCR_xFC_TYPE_WRITE:
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                flags |= FAULT_FLAG_WRITE;
                break;

                /* read from protected page */
        case MMUFCR_xFC_TYPE_READ:
                goto bad_area;

                /* read from absent page present */
        case MMUFCR_xFC_PGINVAL | MMUFCR_xFC_TYPE_READ:
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
                break;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address, flags);

        /* mmap_sem was already dropped inside handle_mm_fault() on this
         * path, so a bare return is safe lock-wise.
         * NOTE(review): a kernel-mode fault returning here skips the
         * no_context fixup; other arches branch to no_context for
         * non-user faults instead -- confirm against upstream history. */
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                /* accounting is only done on the first attempt */
                if (fault & VM_FAULT_MAJOR)
                        current->maj_flt++;
                else
                        current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        /* second attempt must not drop the lock mid-fault */
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;

                         /* No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */

                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        up_read(&mm->mmap_sem);

        /* User mode accesses just cause a SIGSEGV */
        if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                /* info.si_code has been set above */
                info.si_addr = (void *)address;
                force_sig_info(SIGSEGV, &info, tsk);
                return;
        }

no_context:
        /* Are we prepared to handle this kernel fault?  */
        if (fixup_exception(regs))
                return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

        bust_spinlocks(1);

        if (address < PAGE_SIZE)
                printk(KERN_ALERT
                       "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT
                       "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n", address);
        /* NOTE(review): the next printk has no KERN_* level prefix */
        printk(" printing pc:\n");
        printk(KERN_ALERT "%08lx\n", regs->pc);

        /* bit 16 of fault_code distinguishes instruction from data access */
        debugger_intercept(fault_code & 0x00010000 ? EXCEP_IAERROR : EXCEP_DAERROR,
                           SIGSEGV, SEGV_ACCERR, regs);

        /* walk the hardware page table (PTBR) by hand to show the
         * offending pde/pte; address >> 22 indexes the top-level table */
        page = PTBR;
        page = ((unsigned long *) __va(page))[address >> 22];
        printk(KERN_ALERT "*pde = %08lx\n", page);
        if (page & 1) {
                page &= PAGE_MASK;
                address &= 0x003ff000;
                page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
                printk(KERN_ALERT "*pte = %08lx\n", page);
        }

        die("Oops", regs, fault_code);
        do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
        if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
                pagefault_out_of_memory();
                return;
        }
        /* kernel-mode OOM: fall back to exception fixup / oops */
        goto no_context;

do_sigbus:
        up_read(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void *)address;
        force_sig_info(SIGBUS, &info, tsk);

        /* Kernel mode? Handle exceptions or die */
        if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR)
                goto no_context;
        return;

vmalloc_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Do _not_ use "tsk" here. We might be inside
                 * an interrupt in the middle of a task switch..
                 */
                int index = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;

                /* walk the master (init_mm) tables first; if the mapping
                 * is absent there too, this is a genuine bad access */
                pgd_k = init_mm.pgd + index;

                if (!pgd_present(*pgd_k))
                        goto no_context;

                pud_k = pud_offset(pgd_k, address);
                if (!pud_present(*pud_k))
                        goto no_context;

                pmd_k = pmd_offset(pud_k, address);
                if (!pmd_present(*pmd_k))
                        goto no_context;

                /* copy just the pmd entry into the live table (PTBR);
                 * the pte level is shared with the master table */
                pgd = (pgd_t *) PTBR + index;
                pud = pud_offset(pgd, address);
                pmd = pmd_offset(pud, address);
                set_pmd(pmd, *pmd_k);

                pte_k = pte_offset_kernel(pmd_k, address);
                if (!pte_present(*pte_k))
                        goto no_context;
                return;
        }
}
 413