linux/arch/microblaze/mm/fault.c
/*
 *  arch/microblaze/mm/fault.c
 *
 *    Copyright (C) 2007 Xilinx, Inc.  All rights reserved.
 *
 *  Derived from "arch/ppc/mm/fault.c"
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Modified by Cort Dougan and Paul Mackerras.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file COPYING in the main directory of this
 * archive for more details.
 *
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/mmu_context.h>
#include <linux/uaccess.h>
#include <asm/exceptions.h>

static unsigned long pte_misses;        /* updated by do_page_fault() */
static unsigned long pte_errors;        /* updated by do_page_fault() */

/*
 * Check whether the instruction at regs->pc is a store using
 * an update addressing form which will update r1.
 */
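/*
 * Decoding sketch, assuming the standard MicroBlaze encoding: the major
 * opcode occupies the top six bits of the instruction word and rD the next
 * five, so (inst >> 21) & 0x1f below extracts rD.  The 0xd0000000 test is
 * a loose match for the store opcode groups sb/sh/sw (0x34-0x36) and
 * sbi/shi/swi (0x3c-0x3e), which all have those three opcode bits set;
 * a couple of neighbouring encodings would match as well.
 */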
static int store_updates_sp(struct pt_regs *regs)
{
        unsigned int inst;

        if (get_user(inst, (unsigned int __user *)regs->pc))
                return 0;
        /* check for 1 in the rD field */
        if (((inst >> 21) & 0x1f) != 1)
                return 0;
        /* check for store opcodes */
        if ((inst & 0xd0000000) == 0xd0000000)
                return 1;
        return 0;
}


/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from do_page_fault below and from some of the procedures
 * in traps.c.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
        const struct exception_table_entry *fixup;
/* MS: no context */
        /* Are we prepared to handle this fault?  */
        fixup = search_exception_tables(regs->pc);
        if (fixup) {
                regs->pc = fixup->fixup;
                return;
        }

        /* kernel has accessed a bad area */
        die("kernel access of bad area", regs, sig);
}

/*
 * The error_code parameter is ESR for a data fault,
 * 0 for an instruction fault.
 */
void do_page_fault(struct pt_regs *regs, unsigned long address,
                   unsigned long error_code)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        siginfo_t info;
        int code = SEGV_MAPERR;
        int is_write = error_code & ESR_S;
        int fault;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
                                         (is_write ? FAULT_FLAG_WRITE : 0);
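        /*
         * Let the first handle_mm_fault() attempt drop mmap_sem and ask for
         * a retry, let a fatal signal interrupt the wait for the page, and
         * mark write faults so the mm code makes the page writable (e.g. by
         * copy-on-write) rather than merely present.
         */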

        regs->ear = address;
        regs->esr = error_code;

        /* On a kernel SLB miss we can only check for a valid exception entry */
        if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
                pr_warn("kernel task_size exceed");
                _exception(SIGSEGV, regs, code, address);
        }
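        /*
         * A kernel-mode fault on an address above TASK_SIZE has no user vma
         * to validate against; _exception() is not expected to return for
         * kernel-mode regs, which is presumably why there is no explicit
         * return here.
         */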

        /* for instr TLB miss and instr storage exception ESR_S is undefined */
        if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
                is_write = 0;

        if (unlikely(in_atomic() || !mm)) {
                if (kernel_mode(regs))
                        goto bad_area_nosemaphore;

                /* in_atomic() in user mode is really bad,
                   as is current->mm == NULL. */
                pr_emerg("Page fault in user mode with in_atomic(), mm = %p\n",
                                                                        mm);
                pr_emerg("r15 = %lx  MSR = %lx\n",
                       regs->r15, regs->msr);
                die("Weird page fault", regs, SIGSEGV);
        }
        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
         * erroneous fault occurring in a code path which already holds mmap_sem
         * we will deadlock attempting to validate the fault against the
         * address space.  Luckily the kernel only validly references user
         * space from well-defined areas of code, which are listed in the
         * exceptions table.
         *
         * As the vast majority of faults will be valid we will only perform
         * the source reference check when there is a possibility of a deadlock.
         * Attempt to lock the address space; if we cannot, we then validate the
         * source.  If this is invalid we can skip the address space check,
         * thus avoiding the deadlock.
         */
        if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
                if (kernel_mode(regs) && !search_exception_tables(regs->pc))
                        goto bad_area_nosemaphore;

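                /*
                 * When handle_mm_fault() below returns VM_FAULT_RETRY it has
                 * already dropped mmap_sem, so the retry path jumps back here
                 * and takes the semaphore again before repeating the lookup.
                 */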
retry:
                down_read(&mm->mmap_sem);
        }

        vma = find_vma(mm, address);
        if (unlikely(!vma))
                goto bad_area;

        if (vma->vm_start <= address)
                goto good_area;

        if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
                goto bad_area;

        if (unlikely(!is_write))
                goto bad_area;

        /*
         * N.B. The ABI allows programs to access up to
         * a few hundred bytes below the stack pointer (TBD).
         * The kernel signal delivery code writes up to about 1.5kB
         * below the stack pointer (r1) before decrementing it.
         * The exec code can write slightly over 640kB to the stack
         * before setting the user r1.  Thus we allow the stack to
         * expand to 1MB without further checks.
         */
        if (unlikely(address + 0x100000 < vma->vm_end)) {

                /* get user regs even if this fault is in kernel mode */
                struct pt_regs *uregs = current->thread.regs;
                if (uregs == NULL)
                        goto bad_area;

                /*
                 * A user-mode access to an address a long way below
                 * the stack pointer is only valid if the instruction
                 * is one which would update the stack pointer to the
                 * address accessed if the instruction completed,
                 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
                 * (or the byte, halfword, float or double forms).
                 *
                 * If we don't check this then any write to the area
                 * between the last mapped region and the stack will
                 * expand the stack rather than segfaulting.
                 */
                if (address + 2048 < uregs->r1
                        && (kernel_mode(regs) || !store_updates_sp(regs)))
                                goto bad_area;
        }
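        /*
         * Grow the VM_GROWSDOWN vma down so that it covers the faulting
         * address; if expand_stack() fails, treat this like any other
         * access to an unmapped area.
         */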
        if (expand_stack(vma, address))
                goto bad_area;

good_area:
        code = SEGV_ACCERR;

        /* a write */
        if (unlikely(is_write)) {
                if (unlikely(!(vma->vm_flags & VM_WRITE)))
                        goto bad_area;
        /* a read */
        } else {
                /* protection fault */
                if (unlikely(error_code & 0x08000000))
                        goto bad_area;
                if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
                        goto bad_area;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address, flags);

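        /*
         * If the mm code asked for a retry but a fatal signal is pending,
         * just return: on the VM_FAULT_RETRY path mmap_sem has already been
         * dropped, so there is nothing left to release here.
         */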
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }

        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (unlikely(fault & VM_FAULT_MAJOR))
                        current->maj_flt++;
                else
                        current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;

                        /*
                         * No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
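                        /*
                         * The retry runs with FAULT_FLAG_TRIED set and
                         * FAULT_FLAG_ALLOW_RETRY cleared, so the second
                         * attempt will wait for the page rather than
                         * returning VM_FAULT_RETRY again.
                         */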

                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);

        /*
         * keep track of tlb+htab misses that are good addrs but
         * just need pte's created via handle_mm_fault()
         * -- Cort
         */
        pte_misses++;
        return;

bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        pte_errors++;

        /* User mode accesses cause a SIGSEGV */
        if (user_mode(regs)) {
                _exception(SIGSEGV, regs, code, address);
/*              info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code = code;
                info.si_addr = (void *) address;
                force_sig_info(SIGSEGV, &info, current);*/
                return;
        }

        bad_page_fault(regs, address, SIGSEGV);
        return;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (!user_mode(regs))
                bad_page_fault(regs, address, SIGKILL);
        else
                pagefault_out_of_memory();
        return;

do_sigbus:
        up_read(&mm->mmap_sem);
        if (user_mode(regs)) {
                info.si_signo = SIGBUS;
                info.si_errno = 0;
                info.si_code = BUS_ADRERR;
                info.si_addr = (void __user *)address;
                force_sig_info(SIGBUS, &info, current);
                return;
        }
        bad_page_fault(regs, address, SIGBUS);
}