linux/arch/powerpc/mm/fault.c
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Modified by Cort Dougan and Paul Mackerras.
 *
 *  Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/magic.h>
#include <linux/ratelimit.h>

#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/siginfo.h>
#include <mm/mmu_decl.h>

#include "icswx.h"

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
{
        int ret = 0;

        /* kprobe_running() needs smp_processor_id() */
        if (!user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 11))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs)
{
        return 0;
}
#endif

/*
 * Check whether the instruction at regs->nip is a store using
 * an update addressing form which will update r1.
 */
static int store_updates_sp(struct pt_regs *regs)
{
        unsigned int inst;

        if (get_user(inst, (unsigned int __user *)regs->nip))
                return 0;
        /* check for 1 in the rA field */
        if (((inst >> 16) & 0x1f) != 1)
                return 0;
        /* check major opcode */
        switch (inst >> 26) {
        case 37:        /* stwu */
        case 39:        /* stbu */
        case 45:        /* sthu */
        case 53:        /* stfsu */
        case 55:        /* stfdu */
                return 1;
        case 62:        /* std or stdu */
                return (inst & 3) == 1;
        case 31:
                /* check minor opcode */
                switch ((inst >> 1) & 0x3ff) {
                case 181:       /* stdux */
                case 183:       /* stwux */
                case 247:       /* stbux */
                case 439:       /* sthux */
                case 695:       /* stfsux */
                case 759:       /* stfdux */
                        return 1;
                }
        }
        return 0;
}
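
/*
 * Worked example (illustration only, not part of the build): the classic
 * stack-frame prologue store "stwu r1,-16(r1)" encodes as 0x9421fff0.
 * A minimal user-space sketch of the field tests above, assuming that
 * encoding; the opcode and rA values come straight from the checks in
 * store_updates_sp().
 */
#if 0
#include <assert.h>

int main(void)
{
        unsigned int inst = 0x9421fff0;         /* stwu r1,-16(r1) */

        assert((inst >> 26) == 37);             /* major opcode 37: stwu */
        assert(((inst >> 16) & 0x1f) == 1);     /* rA field names r1 */
        return 0;
}
#endif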

/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault. For 400-family processors
 * the error_code parameter is ESR for a data fault, 0 for an instruction
 * fault.
 * For 64-bit processors, the error_code parameter is
 *  - DSISR for a non-SLB data access fault,
 *  - SRR1 & 0x08000000 for a non-SLB instruction access fault
 *  - 0 for any SLB fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
 */
int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
                            unsigned long error_code)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        siginfo_t info;
        int code = SEGV_MAPERR;
        int is_write = 0, ret;
        int trap = TRAP(regs);
        int is_exec = trap == 0x400;

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
        /*
         * Fortunately the bit assignments in SRR1 for an instruction
         * fault and DSISR for a data fault are mostly the same for the
         * bits we are interested in.  But there are some bits which
         * indicate errors in DSISR but can validly be set in SRR1.
         */
        if (trap == 0x400)
                error_code &= 0x48200000;
        else
                is_write = error_code & DSISR_ISSTORE;
#else
        is_write = error_code & ESR_DST;
#endif /* CONFIG_4xx || CONFIG_BOOKE */

#ifdef CONFIG_PPC_ICSWX
        /*
         * We need to do this early because this "data storage
         * interrupt" does not update the DAR/DEAR, so we don't
         * want to look at it.
         */
        if (error_code & ICSWX_DSI_UCT) {
                int ret;

                ret = acop_handle_fault(regs, address, error_code);
                if (ret)
                        return ret;
        }
#endif

        if (notify_page_fault(regs))
                return 0;

        if (unlikely(debugger_fault_handler(regs)))
                return 0;

        /* On a kernel SLB miss we can only check for a valid exception entry */
        if (!user_mode(regs) && (address >= TASK_SIZE))
                return SIGSEGV;

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
                             defined(CONFIG_PPC_BOOK3S_64))
        if (error_code & DSISR_DABRMATCH) {
                /* DABR match */
                do_dabr(regs, address, error_code);
                return 0;
        }
#endif

        if (in_atomic() || mm == NULL) {
                if (!user_mode(regs))
                        return SIGSEGV;
                /*
                 * in_atomic() in user mode is really bad, as is
                 * current->mm == NULL.
                 */
                printk(KERN_EMERG "Page fault in user mode with "
                       "in_atomic() = %d mm = %p\n", in_atomic(), mm);
                printk(KERN_EMERG "NIP = %lx  MSR = %lx\n",
                       regs->nip, regs->msr);
                die("Weird page fault", regs, SIGSEGV);
        }

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
         * erroneous fault occurring in a code path which already holds mmap_sem
         * we will deadlock attempting to validate the fault against the
         * address space.  Luckily the kernel only validly references user
         * space from well defined areas of code, which are listed in the
         * exceptions table.
         *
         * As the vast majority of faults will be valid we will only perform
         * the source reference check when there is a possibility of a deadlock.
         * Attempt to lock the address space; if we cannot, we then validate
         * the source.  If this is invalid we can skip the address space check,
         * thus avoiding the deadlock.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                if (!user_mode(regs) && !search_exception_tables(regs->nip))
                        goto bad_area_nosemaphore;

                down_read(&mm->mmap_sem);
        }

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;

        /*
         * N.B. The POWER/Open ABI allows programs to access up to
         * 288 bytes below the stack pointer.
         * The kernel signal delivery code writes up to about 1.5kB
         * below the stack pointer (r1) before decrementing it.
         * The exec code can write slightly over 640kB to the stack
         * before setting the user r1.  Thus we allow the stack to
         * expand to 1MB without further checks.
         */
        if (address + 0x100000 < vma->vm_end) {
                /* get user regs even if this fault is in kernel mode */
                struct pt_regs *uregs = current->thread.regs;
                if (uregs == NULL)
                        goto bad_area;

                /*
                 * A user-mode access to an address a long way below
                 * the stack pointer is only valid if the instruction
                 * is one which would update the stack pointer to the
                 * address accessed if the instruction completed,
                 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
                 * (or the byte, halfword, float or double forms).
                 *
                 * If we don't check this then any write to the area
                 * between the last mapped region and the stack will
                 * expand the stack rather than segfaulting.
                 */
                if (address + 2048 < uregs->gpr[1]
                    && (!user_mode(regs) || !store_updates_sp(regs)))
                        goto bad_area;
        }
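        /*
         * Worked example of the two bounds above (hypothetical numbers,
         * added for illustration): with the user stack pointer at
         * r1 = 0xbfff0000, a store to 0xbffe0000 (64kB below r1) is
         * within 1MB of vm_end but more than 2048 bytes below r1, so it
         * is allowed only when the faulting instruction is a
         * store-with-update that would move r1 down to that address.
         * A store to 0xbffef900 (under 2kB below r1) is always allowed
         * to grow the stack, since the signal-delivery window extends
         * that far below r1.
         */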
        if (expand_stack(vma, address))
                goto bad_area;

good_area:
        code = SEGV_ACCERR;
#if defined(CONFIG_6xx)
        if (error_code & 0x95700000)
                /* an error such as lwarx to I/O controller space,
                   address matching DABR, eciwx, etc. */
                goto bad_area;
#endif /* CONFIG_6xx */
#if defined(CONFIG_8xx)
        /* The 8xx sometimes needs to load invalid/non-present TLB
         * entries.  These must be invalidated separately, as the
         * Linux mm code doesn't do it.
         */
        if (error_code & 0x40000000) /* no translation? */
                _tlbil_va(address, 0, 0, 0);

        /* The MPC8xx seems to always set 0x80000000, which is
         * "undefined".  Of those that can be set, this is the only
         * one which seems bad.
         */
        if (error_code & 0x10000000)
                /* Guarded storage error. */
                goto bad_area;
#endif /* CONFIG_8xx */

        if (is_exec) {
#ifdef CONFIG_PPC_STD_MMU
                /* A protection fault on exec goes straight to failure on
                 * hash-based MMUs, as they either don't support per-page
                 * execute permission, or if they do, it's handled already
                 * at the hash level.  This test would probably have to
                 * be removed if we change the way this works to make hash
                 * processors use the same I/D cache coherency mechanism
                 * as embedded.
                 */
                if (error_code & DSISR_PROTFAULT)
                        goto bad_area;
#endif /* CONFIG_PPC_STD_MMU */

                /*
                 * Allow execution from readable areas if the MMU does not
                 * provide separate controls over reading and executing.
                 *
                 * Note: this code used not to be enabled for 4xx/BookE.
                 * It is now, as I/D cache coherency for these is done at
                 * set_pte_at() time and I see no reason why the test
                 * below wouldn't be valid on those processors.  This -may-
                 * break programs compiled with a really old ABI though.
                 */
                if (!(vma->vm_flags & VM_EXEC) &&
                    (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
                     !(vma->vm_flags & (VM_READ | VM_WRITE))))
                        goto bad_area;
        /* a write */
        } else if (is_write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        /* a read */
        } else {
                /* protection fault */
                if (error_code & 0x08000000)
                        goto bad_area;
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto bad_area;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
        if (unlikely(ret & VM_FAULT_ERROR)) {
                if (ret & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (ret & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
        if (ret & VM_FAULT_MAJOR) {
                current->maj_flt++;
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                     regs, address);
#ifdef CONFIG_PPC_SMLPAR
                if (firmware_has_feature(FW_FEATURE_CMO)) {
                        preempt_disable();
                        get_lppaca()->page_ins += (1 << PAGE_FACTOR);
                        preempt_enable();
                }
#endif
        } else {
                current->min_flt++;
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                     regs, address);
        }
        up_read(&mm->mmap_sem);
        return 0;

bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses cause a SIGSEGV */
        if (user_mode(regs)) {
                _exception(SIGSEGV, regs, code, address);
                return 0;
        }

        if (is_exec && (error_code & DSISR_PROTFAULT))
                printk_ratelimited(KERN_CRIT "kernel tried to execute NX-protected"
                                   " page (%lx) - exploit attempt? (uid: %d)\n",
                                   address, current_uid());

        return SIGSEGV;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (!user_mode(regs))
                return SIGKILL;
        pagefault_out_of_memory();
        return 0;

do_sigbus:
        up_read(&mm->mmap_sem);
        if (user_mode(regs)) {
                info.si_signo = SIGBUS;
                info.si_errno = 0;
                info.si_code = BUS_ADRERR;
                info.si_addr = (void __user *)address;
                force_sig_info(SIGBUS, &info, current);
                return 0;
        }
        return SIGBUS;
}
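
/*
 * A hedged sketch of the caller contract documented above (illustration
 * only, not part of the build): the low-level exception glue is assumed
 * to call do_page_fault() and, on a nonzero return, to hand the signal
 * number on to bad_page_fault().  The function name here is hypothetical;
 * the real dispatch lives in assembly.
 */
#if 0
static void handle_page_fault_sketch(struct pt_regs *regs,
                                     unsigned long dar,
                                     unsigned long dsisr)
{
        int sig = do_page_fault(regs, dar, dsisr);

        if (sig)                /* a kernel fault we couldn't fix up */
                bad_page_fault(regs, dar, sig);
}
#endif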

/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from the DSI and ISI handlers in head.S and from some
 * of the procedures in traps.c.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
        const struct exception_table_entry *entry;
        unsigned long *stackend;

        /* Are we prepared to handle this fault? */
        if ((entry = search_exception_tables(regs->nip)) != NULL) {
                regs->nip = entry->fixup;
                return;
        }

        /* kernel has accessed a bad area */

        switch (regs->trap) {
        case 0x300:
        case 0x380:
                printk(KERN_ALERT "Unable to handle kernel paging request for "
                        "data at address 0x%08lx\n", regs->dar);
                break;
        case 0x400:
        case 0x480:
                printk(KERN_ALERT "Unable to handle kernel paging request for "
                        "instruction fetch\n");
                break;
        default:
                printk(KERN_ALERT "Unable to handle kernel paging request for "
                        "unknown fault\n");
                break;
        }
        printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
                regs->nip);

        stackend = end_of_stack(current);
        if (current != &init_task && *stackend != STACK_END_MAGIC)
                printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

        die("Kernel access of bad area", regs, sig);
}
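
/*
 * Conceptual sketch of the exception-table fixup used above (illustration
 * only, not part of the build): each entry pairs the address of an
 * instruction that may legitimately fault with the address to resume at.
 * The real kernel keeps the table sorted and searches it with a binary
 * search; a linear scan is shown here purely for clarity, and the names
 * are hypothetical.
 */
#if 0
struct ex_entry_sketch {
        unsigned long insn;     /* address of the faulting instruction */
        unsigned long fixup;    /* address of the recovery code */
};

static const struct ex_entry_sketch *
search_ex_table_sketch(const struct ex_entry_sketch *table,
                       unsigned long n, unsigned long nip)
{
        unsigned long i;

        for (i = 0; i < n; i++)
                if (table[i].insn == nip)
                        return &table[i];
        return NULL;    /* no fixup: the kernel access is a real bug */
}
#endif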