linux/arch/powerpc/mm/fault.c
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Modified by Cort Dougan and Paul Mackerras.
 *
 *  Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>

#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/siginfo.h>
#include <asm/debug.h>
#include <mm/mmu_decl.h>

#include "icswx.h"

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 11))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs)
{
	return 0;
}
#endif

/*
 * Check whether the instruction at regs->nip is a store using
 * an update addressing form which will update r1.
 */
static int store_updates_sp(struct pt_regs *regs)
{
	unsigned int inst;

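	/*
	 * PowerPC instruction layout: the primary opcode lives in the top
	 * six bits of the word (inst >> 26) and the rA base register in
	 * bits 11:15 ((inst >> 16) & 0x1f).  r1 is the ABI stack pointer,
	 * so an update-form store with rA == 1 modifies the stack pointer.
	 */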
	if (get_user(inst, (unsigned int __user *)regs->nip))
		return 0;
	/* check for 1 in the rA field */
	if (((inst >> 16) & 0x1f) != 1)
		return 0;
	/* check major opcode */
	switch (inst >> 26) {
	case 37:	/* stwu */
	case 39:	/* stbu */
	case 45:	/* sthu */
	case 53:	/* stfsu */
	case 55:	/* stfdu */
		return 1;
	case 62:	/* std or stdu */
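		/* DS-form: the low two bits select the variant, 01 is stdu */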
		return (inst & 3) == 1;
	case 31:
		/* check minor opcode */
		switch ((inst >> 1) & 0x3ff) {
		case 181:	/* stdux */
		case 183:	/* stwux */
		case 247:	/* stbux */
		case 439:	/* sthux */
		case 695:	/* stfsux */
		case 759:	/* stfdux */
			return 1;
		}
	}
	return 0;
}

/*
 * do_page_fault error handling helpers
 */

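/*
 * Return conventions used below: MM_FAULT_CONTINUE tells do_page_fault()
 * to carry on with normal fault accounting, MM_FAULT_RETURN means the
 * fault has been fully dealt with (bail out with rc == 0), and
 * MM_FAULT_ERR(sig) propagates a signal number for an unhandled
 * kernel-mode fault.
 */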
#define MM_FAULT_RETURN		0
#define MM_FAULT_CONTINUE	-1
#define MM_FAULT_ERR(sig)	(sig)

static int do_sigbus(struct pt_regs *regs, unsigned long address,
		     unsigned int fault)
{
	siginfo_t info;
	unsigned int lsb = 0;

	up_read(&current->mm->mmap_sem);

	if (!user_mode(regs))
		return MM_FAULT_ERR(SIGBUS);

	current->thread.trap_nr = BUS_ADRERR;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *)address;
#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			current->comm, current->pid, address);
		info.si_code = BUS_MCEERR_AR;
	}

	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
#endif
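	/*
	 * si_addr_lsb reports the least significant bit of the address
	 * that is still meaningful, i.e. the log2 size of the poisoned
	 * region (0 when there was no memory failure).
	 */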
	info.si_addr_lsb = lsb;
	force_sig_info(SIGBUS, &info, current);
	return MM_FAULT_RETURN;
}

static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
{
	/*
	 * Pagefault was interrupted by SIGKILL. We have no reason to
	 * continue the pagefault.
	 */
	if (fatal_signal_pending(current)) {
		/*
		 * If we have retry set, the mmap semaphore will have
		 * already been released in __lock_page_or_retry(). Else
		 * we release it now.
		 */
		if (!(fault & VM_FAULT_RETRY))
			up_read(&current->mm->mmap_sem);
		/* Coming from kernel, we need to deal with uaccess fixups */
		if (user_mode(regs))
			return MM_FAULT_RETURN;
		return MM_FAULT_ERR(SIGKILL);
	}

	/* No fault: be happy */
	if (!(fault & VM_FAULT_ERROR))
		return MM_FAULT_CONTINUE;

	/* Out of memory */
	if (fault & VM_FAULT_OOM) {
		up_read(&current->mm->mmap_sem);

		/*
		 * We ran out of memory, or some other thing happened to us that
		 * made us unable to handle the page fault gracefully.
		 */
		if (!user_mode(regs))
			return MM_FAULT_ERR(SIGKILL);
		pagefault_out_of_memory();
		return MM_FAULT_RETURN;
	}

	if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE))
		return do_sigbus(regs, addr, fault);

	/* We don't understand the fault code, this is fatal */
	BUG();
	return MM_FAULT_CONTINUE;
}

/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault. For 400-family processors
 * the error_code parameter is ESR for a data fault, 0 for an instruction
 * fault.
 * For 64-bit processors, the error_code parameter is
 *  - DSISR for a non-SLB data access fault,
 *  - SRR1 & 0x08000000 for a non-SLB instruction access fault
 *  - 0 any SLB fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
 */
int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
			    unsigned long error_code)
{
	enum ctx_state prev_state = exception_enter();
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int code = SEGV_MAPERR;
	int is_write = 0;
	int trap = TRAP(regs);
	int is_exec = trap == 0x400;
	int fault;
	int rc = 0, store_update_sp = 0;

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	/*
	 * Fortunately the bit assignments in SRR1 for an instruction
	 * fault and DSISR for a data fault are mostly the same for the
	 * bits we are interested in.  But there are some bits which
	 * indicate errors in DSISR but can validly be set in SRR1.
	 */
	if (trap == 0x400)
		error_code &= 0x48200000;
	else
		is_write = error_code & DSISR_ISSTORE;
#else
	is_write = error_code & ESR_DST;
#endif /* CONFIG_4xx || CONFIG_BOOKE */

#ifdef CONFIG_PPC_ICSWX
	/*
	 * We need to handle this early because this "data storage
	 * interrupt" does not update the DAR/DEAR, so we don't want to
	 * look at it.
	 */
	if (error_code & ICSWX_DSI_UCT) {
		rc = acop_handle_fault(regs, address, error_code);
		if (rc)
			goto bail;
	}
#endif /* CONFIG_PPC_ICSWX */

	if (notify_page_fault(regs))
		goto bail;

	if (unlikely(debugger_fault_handler(regs)))
		goto bail;

	/* On a kernel SLB miss we can only check for a valid exception entry */
	if (!user_mode(regs) && (address >= TASK_SIZE)) {
		rc = SIGSEGV;
		goto bail;
	}

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
			     defined(CONFIG_PPC_BOOK3S_64))
	if (error_code & DSISR_DABRMATCH) {
		/* breakpoint match */
		do_break(regs, address, error_code);
		goto bail;
	}
#endif

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (in_atomic() || mm == NULL) {
		if (!user_mode(regs)) {
			rc = SIGSEGV;
			goto bail;
		}
		/* in_atomic() in user mode is really bad,
		   as is current->mm == NULL. */
		printk(KERN_EMERG "Page fault in user mode with "
		       "in_atomic() = %d mm = %p\n", in_atomic(), mm);
		printk(KERN_EMERG "NIP = %lx  MSR = %lx\n",
		       regs->nip, regs->msr);
		die("Weird page fault", regs, SIGSEGV);
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * We want to do this outside mmap_sem, because reading the code
	 * around nip can itself fault, which would deadlock if done with
	 * mmap_sem held.
	 */
	if (user_mode(regs))
		store_update_sp = store_updates_sp(regs);

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->nip))
			goto bad_area_nosemaphore;

retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	/*
	 * N.B. The POWER/Open ABI allows programs to access up to
	 * 288 bytes below the stack pointer.
	 * The kernel signal delivery code writes up to about 1.5kB
	 * below the stack pointer (r1) before decrementing it.
	 * The exec code can write slightly over 640kB to the stack
	 * before setting the user r1.  Thus we allow the stack to
	 * expand to 1MB without further checks.
	 */
	if (address + 0x100000 < vma->vm_end) {
		/* get user regs even if this fault is in kernel mode */
		struct pt_regs *uregs = current->thread.regs;
		if (uregs == NULL)
			goto bad_area;

		/*
		 * A user-mode access to an address a long way below
		 * the stack pointer is only valid if the instruction
		 * is one which would update the stack pointer to the
		 * address accessed if the instruction completed,
		 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
		 * (or the byte, halfword, float or double forms).
		 *
		 * If we don't check this then any write to the area
		 * between the last mapped region and the stack will
		 * expand the stack rather than segfaulting.
		 */
		if (address + 2048 < uregs->gpr[1] && !store_update_sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

good_area:
	code = SEGV_ACCERR;
#if defined(CONFIG_6xx)
	if (error_code & 0x95700000)
		/* an error such as lwarx to I/O controller space,
		   address matching DABR, eciwx, etc. */
		goto bad_area;
#endif /* CONFIG_6xx */
#if defined(CONFIG_8xx)
	/* The 8xx sometimes needs to load invalid/non-present TLB entries.
	 * These must be invalidated separately, as the Linux mm does not
	 * do it.
	 */
	if (error_code & 0x40000000) /* no translation? */
		_tlbil_va(address, 0, 0, 0);

	/* The MPC8xx seems to always set 0x80000000, which is
	 * "undefined".  Of those that can be set, this is the only
	 * one which seems bad.
	 */
	if (error_code & 0x10000000)
		/* Guarded storage error. */
		goto bad_area;
#endif /* CONFIG_8xx */

	if (is_exec) {
#ifdef CONFIG_PPC_STD_MMU
		/* Protection faults on exec go straight to failure on
		 * hash-based MMUs as they either don't support per-page
		 * execute permission, or if they do, it's handled already
		 * at the hash level. This test would probably have to
		 * be removed if we change the way this works to make hash
		 * processors use the same I/D cache coherency mechanism
		 * as embedded.
		 */
		if (error_code & DSISR_PROTFAULT)
			goto bad_area;
#endif /* CONFIG_PPC_STD_MMU */

		/*
		 * Allow execution from readable areas if the MMU does not
		 * provide separate controls over reading and executing.
		 *
		 * Note: That code used to not be enabled for 4xx/BookE.
		 * It is now as I/D cache coherency for these is done at
		 * set_pte_at() time and I see no reason why the test
		 * below wouldn't be valid on those processors. This -may-
		 * break programs compiled with a really old ABI though.
		 */
		if (!(vma->vm_flags & VM_EXEC) &&
		    (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
		     !(vma->vm_flags & (VM_READ | VM_WRITE))))
			goto bad_area;
	/* a write */
	} else if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	/* a read */
	} else {
		/* protection fault */
		if (error_code & 0x08000000)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
		rc = mm_fault_error(regs, address, fault);
		if (rc >= MM_FAULT_RETURN)
			goto bail;
		else
			rc = 0;
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
#ifdef CONFIG_PPC_SMLPAR
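			/*
			 * On shared-processor LPARs, report the page-in to
			 * the hypervisor via the lppaca so that Cooperative
			 * Memory Overcommit (CMO) accounting stays accurate.
			 */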
			if (firmware_has_feature(FW_FEATURE_CMO)) {
				u32 page_ins;

				preempt_disable();
				page_ins = be32_to_cpu(get_lppaca()->page_ins);
				page_ins += 1 << PAGE_FACTOR;
				get_lppaca()->page_ins = cpu_to_be32(page_ins);
				preempt_enable();
			}
#endif /* CONFIG_PPC_SMLPAR */
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	goto bail;

bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses cause a SIGSEGV */
	if (user_mode(regs)) {
		_exception(SIGSEGV, regs, code, address);
		goto bail;
	}

	if (is_exec && (error_code & DSISR_PROTFAULT))
		printk_ratelimited(KERN_CRIT "kernel tried to execute NX-protected"
				   " page (%lx) - exploit attempt? (uid: %d)\n",
				   address, from_kuid(&init_user_ns, current_uid()));

	rc = SIGSEGV;

bail:
	exception_exit(prev_state);
	return rc;
}

/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from the DSI and ISI handlers in head.S and from some
 * of the procedures in traps.c.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this fault?  */
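	/* If the faulting instruction has a fixup entry in __ex_table
	 * (as the uaccess helpers do), branch to the fixup handler
	 * instead of oopsing. */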
	if ((entry = search_exception_tables(regs->nip)) != NULL) {
		regs->nip = entry->fixup;
		return;
	}

	/* kernel has accessed a bad area */

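	/* regs->trap holds the exception vector: 0x300/0x380 are data
	 * storage/SLB faults, 0x400/0x480 are instruction storage/SLB
	 * faults. */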
	switch (regs->trap) {
	case 0x300:
	case 0x380:
		printk(KERN_ALERT "Unable to handle kernel paging request for "
			"data at address 0x%08lx\n", regs->dar);
		break;
	case 0x400:
	case 0x480:
		printk(KERN_ALERT "Unable to handle kernel paging request for "
			"instruction fetch\n");
		break;
	default:
		printk(KERN_ALERT "Unable to handle kernel paging request for "
			"unknown fault\n");
		break;
	}
	printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
		regs->nip);

	if (task_stack_end_corrupted(current))
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	die("Kernel access of bad area", regs, sig);
}