linux/arch/powerpc/mm/fault.c
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Modified by Cort Dougan and Paul Mackerras.
 *
 *  Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>

#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/siginfo.h>

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
{
        int ret = 0;

        /* kprobe_running() needs smp_processor_id() */
        if (!user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 11))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs)
{
        return 0;
}
#endif

/*
 * Check whether the instruction at regs->nip is a store using
 * an update addressing form which will update r1.
 */
static int store_updates_sp(struct pt_regs *regs)
{
        unsigned int inst;

        if (get_user(inst, (unsigned int __user *)regs->nip))
                return 0;
        /* check for 1 in the rA field */
        if (((inst >> 16) & 0x1f) != 1)
                return 0;
        /* check major opcode */
        switch (inst >> 26) {
        case 37:        /* stwu */
        case 39:        /* stbu */
        case 45:        /* sthu */
        case 53:        /* stfsu */
        case 55:        /* stfdu */
                return 1;
        case 62:        /* std or stdu */
                return (inst & 3) == 1;
        case 31:
                /* check minor opcode */
                switch ((inst >> 1) & 0x3ff) {
                case 181:       /* stdux */
                case 183:       /* stwux */
                case 247:       /* stbux */
                case 439:       /* sthux */
                case 695:       /* stfsux */
                case 759:       /* stfdux */
                        return 1;
                }
        }
        return 0;
}
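
/*
 * Worked example (annotation, not part of the original file): the
 * common function-prologue store "stwu r1,-16(r1)" encodes as
 * 0x9421fff0, so for a fault on that instruction:
 *
 *      0x9421fff0 >> 26          = 37  (major opcode: stwu)
 *      (0x9421fff0 >> 16) & 0x1f = 1   (rA is r1, the stack pointer)
 *
 * and store_updates_sp() returns 1, allowing the stack to grow down
 * to the accessed address.
 */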

/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault. For 400-family processors
 * the error_code parameter is ESR for a data fault, 0 for an instruction
 * fault.
 * For 64-bit processors, the error_code parameter is
 *  - DSISR for a non-SLB data access fault,
 *  - SRR1 & 0x08000000 for a non-SLB instruction access fault,
 *  - 0 for any SLB fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
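 *
 * regs->trap holds the exception vector that brought us here: on
 * classic/server processors 0x300 is a data storage interrupt, 0x380
 * a data SLB miss, 0x400 an instruction storage interrupt and 0x480
 * an instruction SLB miss, hence the trap == 0x400 tests below to
 * recognise instruction faults.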
 */
int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
                            unsigned long error_code)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        siginfo_t info;
        int code = SEGV_MAPERR;
        int is_write = 0, ret;
        int trap = TRAP(regs);
        int is_exec = trap == 0x400;

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
        /*
         * Fortunately the bit assignments in SRR1 for an instruction
         * fault and DSISR for a data fault are mostly the same for the
         * bits we are interested in.  But there are some bits which
         * indicate errors in DSISR but can validly be set in SRR1.
         */
        if (trap == 0x400)
                error_code &= 0x48200000;
        else
                is_write = error_code & DSISR_ISSTORE;
#else
        is_write = error_code & ESR_DST;
#endif /* CONFIG_4xx || CONFIG_BOOKE */
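
        /*
         * Example (annotation, not part of the original file): on a
         * 64-bit server processor, a store to an unmapped user page
         * arrives here as trap 0x300 with DSISR_ISSTORE set in
         * error_code, so is_write is non-zero and is_exec is 0; a
         * fetch from a no-execute page arrives as trap 0x400, making
         * is_exec 1.
         */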

        if (notify_page_fault(regs))
                return 0;

        if (unlikely(debugger_fault_handler(regs)))
                return 0;

        /* On a kernel SLB miss we can only check for a valid exception entry */
        if (!user_mode(regs) && (address >= TASK_SIZE))
                return SIGSEGV;

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
        if (error_code & DSISR_DABRMATCH) {
                /* DABR match: the access hit the data address breakpoint */
                do_dabr(regs, address, error_code);
                return 0;
        }
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

        if (in_atomic() || mm == NULL) {
                if (!user_mode(regs))
                        return SIGSEGV;
                /* in_atomic() in user mode is really bad,
                   as is current->mm == NULL. */
                printk(KERN_EMERG "Page fault in user mode with "
                       "in_atomic() = %d mm = %p\n", in_atomic(), mm);
                printk(KERN_EMERG "NIP = %lx  MSR = %lx\n",
                       regs->nip, regs->msr);
                die("Weird page fault", regs, SIGSEGV);
        }

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
         * erroneous fault occurring in a code path which already holds mmap_sem
         * we will deadlock attempting to validate the fault against the
         * address space.  Luckily the kernel only validly references user
         * space from well defined areas of code, which are listed in the
         * exceptions table.
         *
         * As the vast majority of faults will be valid we will only perform
         * the source reference check when there is a possibility of a deadlock.
         * Attempt to lock the address space; if we cannot, then validate the
         * source.  If the source is invalid we can skip the address space
         * check, thus avoiding the deadlock.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                if (!user_mode(regs) && !search_exception_tables(regs->nip))
                        goto bad_area_nosemaphore;

                down_read(&mm->mmap_sem);
        }

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;

        /*
         * N.B. The POWER/Open ABI allows programs to access up to
         * 288 bytes below the stack pointer.
         * The kernel signal delivery code writes up to about 1.5kB
         * below the stack pointer (r1) before decrementing it.
         * The exec code can write slightly over 640kB to the stack
         * before setting the user r1.  Thus we allow the stack to
         * expand to 1MB without further checks.
         */
        if (address + 0x100000 < vma->vm_end) {
                /* get user regs even if this fault is in kernel mode */
                struct pt_regs *uregs = current->thread.regs;
                if (uregs == NULL)
                        goto bad_area;

                /*
                 * A user-mode access to an address a long way below
                 * the stack pointer is only valid if the instruction
                 * is one which would update the stack pointer to the
                 * address accessed if the instruction completed,
                 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
                 * (or the byte, halfword, float or double forms).
                 *
                 * If we don't check this then any write to the area
                 * between the last mapped region and the stack will
                 * expand the stack rather than segfaulting.
                 */
                if (address + 2048 < uregs->gpr[1]
                    && (!user_mode(regs) || !store_updates_sp(regs)))
                        goto bad_area;
        }
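
        /*
         * Illustration of the check above (annotation, not part of the
         * original file): any access less than 2048 bytes below r1 is
         * allowed unconditionally, which covers the 288-byte red zone
         * and the signal frame.  Farther below r1, e.g. a store at
         * r1 - 0x8000, the stack only grows if the faulting instruction
         * is an SP-updating store such as "stwu r1,-0x8000(r1)"; a
         * plain stw there takes the bad_area path instead.
         */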
        if (expand_stack(vma, address))
                goto bad_area;

good_area:
        code = SEGV_ACCERR;
#if defined(CONFIG_6xx)
        if (error_code & 0x95700000)
                /* an error such as lwarx to I/O controller space,
                   address matching DABR, eciwx, etc. */
                goto bad_area;
#endif /* CONFIG_6xx */
#if defined(CONFIG_8xx)
        /* The MPC8xx seems to always set 0x80000000, which is
         * "undefined".  Of those that can be set, this is the only
         * one which seems bad.
         */
        if (error_code & 0x10000000)
                /* Guarded storage error. */
                goto bad_area;
#endif /* CONFIG_8xx */

        if (is_exec) {
#ifdef CONFIG_PPC_STD_MMU
                /* Protection faults on exec go straight to failure on
                 * hash-based MMUs, as they either don't support per-page
                 * execute permission, or if they do, it's handled already
                 * at the hash level.  This test would probably have to
                 * be removed if we changed the way this works to make hash
                 * processors use the same I/D cache coherency mechanism
                 * as embedded.
                 */
                if (error_code & DSISR_PROTFAULT)
                        goto bad_area;
#endif /* CONFIG_PPC_STD_MMU */

                /*
                 * Allow execution from readable areas if the MMU does not
                 * provide separate controls over reading and executing.
                 *
                 * Note: this code used not to be enabled for 4xx/BookE.
                 * It is now, as I/D cache coherency for these is done at
                 * set_pte_at() time and I see no reason why the test
                 * below wouldn't be valid on those processors.  This -may-
                 * break programs compiled with a really old ABI though.
                 */
                if (!(vma->vm_flags & VM_EXEC) &&
                    (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
                     !(vma->vm_flags & (VM_READ | VM_WRITE))))
                        goto bad_area;
        /* a write */
        } else if (is_write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        /* a read */
        } else {
                /* protection fault (0x08000000 is DSISR_PROTFAULT) */
                if (error_code & 0x08000000)
                        goto bad_area;
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto bad_area;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
 survive:
        ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
        if (unlikely(ret & VM_FAULT_ERROR)) {
                if (ret & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (ret & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
        if (ret & VM_FAULT_MAJOR) {
                current->maj_flt++;
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
                                     regs, address);
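                /*
                 * Annotation (not part of the original file): with the
                 * Cooperative Memory Overcommitment (CMO) firmware
                 * feature, page-in activity is reported to the
                 * hypervisor through the per-cpu lppaca, hence the
                 * preempt_disable() around the counter update below.
                 */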
#ifdef CONFIG_PPC_SMLPAR
                if (firmware_has_feature(FW_FEATURE_CMO)) {
                        preempt_disable();
                        get_lppaca()->page_ins += (1 << PAGE_FACTOR);
                        preempt_enable();
                }
#endif
        } else {
                current->min_flt++;
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
                                     regs, address);
        }
        up_read(&mm->mmap_sem);
        return 0;

bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses cause a SIGSEGV */
        if (user_mode(regs)) {
                _exception(SIGSEGV, regs, code, address);
                return 0;
        }

        if (is_exec && (error_code & DSISR_PROTFAULT)
            && printk_ratelimit())
                printk(KERN_CRIT "kernel tried to execute NX-protected"
                       " page (%lx) - exploit attempt? (uid: %d)\n",
                       address, current_uid());

        return SIGSEGV;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (is_global_init(current)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        printk(KERN_ERR "VM: killing process %s\n", current->comm);
        if (user_mode(regs))
                do_group_exit(SIGKILL);
        return SIGKILL;

do_sigbus:
        up_read(&mm->mmap_sem);
        if (user_mode(regs)) {
                info.si_signo = SIGBUS;
                info.si_errno = 0;
                info.si_code = BUS_ADRERR;
                info.si_addr = (void __user *)address;
                force_sig_info(SIGBUS, &info, current);
                return 0;
        }
        return SIGBUS;
}
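
/*
 * How the return value is consumed (sketch added for clarity, not part
 * of the original file): the low-level exception glue that calls
 * do_page_fault() is assembly, but in rough C terms it behaves like:
 *
 *      long ret = do_page_fault(regs, dar, dsisr);
 *      if (ret)
 *              bad_page_fault(regs, dar, ret);
 *
 * i.e. a non-zero return is a kernel-mode fault that could not be
 * handled, and bad_page_fault() below fixes it up via the exception
 * tables or oopses.
 */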

/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from the DSI and ISI handlers in head.S and from some
 * of the procedures in traps.c.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
        const struct exception_table_entry *entry;

        /* Are we prepared to handle this fault?  */
        if ((entry = search_exception_tables(regs->nip)) != NULL) {
                regs->nip = entry->fixup;
                return;
        }

        /* kernel has accessed a bad area */

        switch (regs->trap) {
        case 0x300:
        case 0x380:
                printk(KERN_ALERT "Unable to handle kernel paging request for "
                        "data at address 0x%08lx\n", regs->dar);
                break;
        case 0x400:
        case 0x480:
                printk(KERN_ALERT "Unable to handle kernel paging request for "
                        "instruction fetch\n");
                break;
        default:
                printk(KERN_ALERT "Unable to handle kernel paging request for "
                        "unknown fault\n");
                break;
        }
        printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
                regs->nip);

        die("Kernel access of bad area", regs, sig);
}