linux/arch/powerpc/mm/fault.c
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Modified by Cort Dougan and Paul Mackerras.
 *
 *  Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>

#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/siginfo.h>
#include <asm/debug.h>

#include "icswx.h"

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
{
        int ret = 0;

        /* kprobe_running() needs smp_processor_id() */
        if (!user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 11))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs)
{
        return 0;
}
#endif

/*
 * Check whether the instruction at regs->nip is a store using
 * an update addressing form which will update r1.
 */
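/*
 * For example, the common prologue instruction "stwu r1,-16(r1)"
 * encodes as 0x9421fff0: primary opcode 37 in the top six bits and
 * rA = 1 in bits 11-15 (i.e. (inst >> 16) & 0x1f), so both checks
 * below match it.
 */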
static int store_updates_sp(struct pt_regs *regs)
{
        unsigned int inst;

        if (get_user(inst, (unsigned int __user *)regs->nip))
                return 0;
        /* check for 1 in the rA field */
        if (((inst >> 16) & 0x1f) != 1)
                return 0;
        /* check major opcode */
        switch (inst >> 26) {
        case 37:        /* stwu */
        case 39:        /* stbu */
        case 45:        /* sthu */
        case 53:        /* stfsu */
        case 55:        /* stfdu */
                return 1;
        case 62:        /* std or stdu */
                return (inst & 3) == 1;
        case 31:
                /* check minor opcode */
                switch ((inst >> 1) & 0x3ff) {
                case 181:       /* stdux */
                case 183:       /* stwux */
                case 247:       /* stbux */
                case 439:       /* sthux */
                case 695:       /* stfsux */
                case 759:       /* stfdux */
                        return 1;
                }
        }
        return 0;
}

/*
 * do_page_fault error handling helpers
 */

#define MM_FAULT_RETURN         0
#define MM_FAULT_CONTINUE       -1
#define MM_FAULT_ERR(sig)       (sig)
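/*
 * Convention: mm_fault_error() returns MM_FAULT_CONTINUE when
 * do_page_fault() should carry on, MM_FAULT_RETURN once the fault
 * has been fully dealt with (including any signal delivery), or a
 * positive signal number for a kernel fault the caller must
 * propagate.
 */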

static int do_sigbus(struct pt_regs *regs, unsigned long address,
                     unsigned int fault)
{
        siginfo_t info;
        unsigned int lsb = 0;

        up_read(&current->mm->mmap_sem);

        if (!user_mode(regs))
                return MM_FAULT_ERR(SIGBUS);

        current->thread.trap_nr = BUS_ADRERR;
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void __user *)address;
#ifdef CONFIG_MEMORY_FAILURE
        if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
                pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
                        current->comm, current->pid, address);
                info.si_code = BUS_MCEERR_AR;
        }

        if (fault & VM_FAULT_HWPOISON_LARGE)
                lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
        if (fault & VM_FAULT_HWPOISON)
                lsb = PAGE_SHIFT;
#endif
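        /*
         * si_addr_lsb tells userspace how much of the range around
         * si_addr is affected: PAGE_SHIFT for a poisoned base page,
         * or the huge page shift for a poisoned huge page.
         */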
        info.si_addr_lsb = lsb;
        force_sig_info(SIGBUS, &info, current);
        return MM_FAULT_RETURN;
}

static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
{
        /*
         * Pagefault was interrupted by SIGKILL. We have no reason to
         * continue the pagefault.
         */
        if (fatal_signal_pending(current)) {
                /*
                 * If we have retry set, the mmap semaphore will have
                 * already been released in __lock_page_or_retry(). Else
                 * we release it now.
                 */
                if (!(fault & VM_FAULT_RETRY))
                        up_read(&current->mm->mmap_sem);
                /* Coming from kernel, we need to deal with uaccess fixups */
                if (user_mode(regs))
                        return MM_FAULT_RETURN;
                return MM_FAULT_ERR(SIGKILL);
        }

        /* No fault: be happy */
        if (!(fault & VM_FAULT_ERROR))
                return MM_FAULT_CONTINUE;

        /* Out of memory */
        if (fault & VM_FAULT_OOM) {
                up_read(&current->mm->mmap_sem);

                /*
                 * We ran out of memory, or some other thing happened to us that
                 * made us unable to handle the page fault gracefully.
                 */
                if (!user_mode(regs))
                        return MM_FAULT_ERR(SIGKILL);
                pagefault_out_of_memory();
                return MM_FAULT_RETURN;
        }

        if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE))
                return do_sigbus(regs, addr, fault);

        /* We don't understand the fault code, this is fatal */
        BUG();
        return MM_FAULT_CONTINUE;
}

/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault. For 400-family processors
 * the error_code parameter is ESR for a data fault, 0 for an instruction
 * fault.
 * For 64-bit processors, the error_code parameter is
 *  - DSISR for a non-SLB data access fault,
 *  - SRR1 & 0x08000000 for a non-SLB instruction access fault
 *  - 0 for any SLB fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
 */
int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
                            unsigned long error_code)
{
        enum ctx_state prev_state = exception_enter();
        struct vm_area_struct * vma;
        struct mm_struct *mm = current->mm;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
        int code = SEGV_MAPERR;
        int is_write = 0;
        int trap = TRAP(regs);
        int is_exec = trap == 0x400;    /* instruction storage interrupt */
        int fault;
        int rc = 0, store_update_sp = 0;

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
        /*
         * Fortunately the bit assignments in SRR1 for an instruction
         * fault and DSISR for a data fault are mostly the same for the
         * bits we are interested in.  But there are some bits which
         * indicate errors in DSISR but can validly be set in SRR1.
         */
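        /*
         * The 0x48200000 mask below keeps only the SRR1 bits that
         * line up with DSISR: 0x40000000 (translation not found) plus
         * what appear to be the protection (0x08000000) and storage
         * key (0x00200000) fault bits.
         */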
        if (trap == 0x400)
                error_code &= 0x48200000;
        else
                is_write = error_code & DSISR_ISSTORE;
#else
        is_write = error_code & ESR_DST;
#endif /* CONFIG_4xx || CONFIG_BOOKE */

#ifdef CONFIG_PPC_ICSWX
        /*
         * We need to do this early because this "data storage
         * interrupt" does not update the DAR/DEAR, so we don't want
         * to look at it.
         */
        if (error_code & ICSWX_DSI_UCT) {
                rc = acop_handle_fault(regs, address, error_code);
                if (rc)
                        goto bail;
        }
#endif /* CONFIG_PPC_ICSWX */

        if (notify_page_fault(regs))
                goto bail;

        if (unlikely(debugger_fault_handler(regs)))
                goto bail;

        /* On a kernel SLB miss we can only check for a valid exception entry */
        if (!user_mode(regs) && (address >= TASK_SIZE)) {
                rc = SIGSEGV;
                goto bail;
        }

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
                             defined(CONFIG_PPC_BOOK3S_64))
        if (error_code & DSISR_DABRMATCH) {
                /* breakpoint match */
                do_break(regs, address, error_code);
                goto bail;
        }
#endif

        /* We restore the interrupt state now */
        if (!arch_irq_disabled_regs(regs))
                local_irq_enable();

        if (faulthandler_disabled() || mm == NULL) {
                if (!user_mode(regs)) {
                        rc = SIGSEGV;
                        goto bail;
                }
                /* faulthandler_disabled() in user mode is really bad,
                   as is current->mm == NULL. */
                printk(KERN_EMERG "Page fault in user mode with "
                       "faulthandler_disabled() = %d mm = %p\n",
                       faulthandler_disabled(), mm);
                printk(KERN_EMERG "NIP = %lx  MSR = %lx\n",
                       regs->nip, regs->msr);
                die("Weird page fault", regs, SIGSEGV);
        }

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

        /*
         * We want to do this outside mmap_sem, because reading code around nip
         * can result in a fault, which will cause a deadlock when called with
         * mmap_sem held
         */
        if (user_mode(regs))
                store_update_sp = store_updates_sp(regs);

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;

        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
         * erroneous fault occurring in a code path which already holds mmap_sem
         * we will deadlock attempting to validate the fault against the
         * address space.  Luckily the kernel only validly references user
         * space from well defined areas of code, which are listed in the
         * exceptions table.
         *
         * As the vast majority of faults will be valid we will only perform
         * the source reference check when there is a possibility of a deadlock.
         * Attempt to lock the address space; if we cannot, we then validate
         * the source.  If this is invalid we can skip the address space check,
         * thus avoiding the deadlock.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                if (!user_mode(regs) && !search_exception_tables(regs->nip))
                        goto bad_area_nosemaphore;

retry:
                down_read(&mm->mmap_sem);
        } else {
                /*
                 * The above down_read_trylock() might have succeeded in
                 * which case we'll have missed the might_sleep() from
                 * down_read():
                 */
                might_sleep();
        }

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;

        /*
         * N.B. The POWER/Open ABI allows programs to access up to
         * 288 bytes below the stack pointer.
         * The kernel signal delivery code writes up to about 1.5kB
         * below the stack pointer (r1) before decrementing it.
         * The exec code can write slightly over 640kB to the stack
         * before setting the user r1.  Thus we allow the stack to
         * expand to 1MB without further checks.
         */
        if (address + 0x100000 < vma->vm_end) {
                /* get user regs even if this fault is in kernel mode */
                struct pt_regs *uregs = current->thread.regs;
                if (uregs == NULL)
                        goto bad_area;

                /*
                 * A user-mode access to an address a long way below
                 * the stack pointer is only valid if the instruction
                 * is one which would update the stack pointer to the
                 * address accessed if the instruction completed,
                 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
                 * (or the byte, halfword, float or double forms).
                 *
                 * If we don't check this then any write to the area
                 * between the last mapped region and the stack will
                 * expand the stack rather than segfaulting.
                 */
                if (address + 2048 < uregs->gpr[1] && !store_update_sp)
                        goto bad_area;
        }
        if (expand_stack(vma, address))
                goto bad_area;

good_area:
        code = SEGV_ACCERR;
#if defined(CONFIG_6xx)
        if (error_code & 0x95700000)
                /* an error such as lwarx to I/O controller space,
                   address matching DABR, eciwx, etc. */
                goto bad_area;
#endif /* CONFIG_6xx */
#if defined(CONFIG_8xx)
        /* The MPC8xx seems to always set 0x80000000, which is
         * "undefined".  Of those that can be set, this is the only
         * one which seems bad.
         */
        if (error_code & 0x10000000)
                /* Guarded storage error. */
                goto bad_area;
#endif /* CONFIG_8xx */

        if (is_exec) {
                /*
                 * Allow execution from readable areas if the MMU does not
                 * provide separate controls over reading and executing.
                 *
                 * Note: That code used not to be enabled for 4xx/BookE.
                 * It is now, as I/D cache coherency for these is done at
                 * set_pte_at() time and I see no reason why the test
                 * below wouldn't be valid on those processors. This -may-
                 * break programs compiled with a really old ABI though.
                 */
                if (!(vma->vm_flags & VM_EXEC) &&
                    (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
                     !(vma->vm_flags & (VM_READ | VM_WRITE))))
                        goto bad_area;
#ifdef CONFIG_PPC_STD_MMU
                /*
                 * A protfault should only happen due to us
                 * mapping a region readonly temporarily. PROT_NONE
                 * is also covered by the VMA check above.
                 */
                WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
#endif /* CONFIG_PPC_STD_MMU */
        /* a write */
        } else if (is_write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                flags |= FAULT_FLAG_WRITE;
        /* a read */
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto bad_area;
                WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address, flags);
        if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
                if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
                rc = mm_fault_error(regs, address, fault);
                if (rc >= MM_FAULT_RETURN)
                        goto bail;
                else
                        rc = 0;
        }

        /*
         * Major/minor page fault accounting is only done on the
         * initial attempt. If we go through a retry, it is extremely
         * likely that the page will be found in page cache at that point.
         */
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        current->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                      regs, address);
#ifdef CONFIG_PPC_SMLPAR
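                        /*
                         * On shared-memory LPARs, firmware meters
                         * page-ins under Cooperative Memory Overcommit
                         * (CMO), so account this major fault in the
                         * lppaca.  Preemption is disabled so we stay
                         * on one CPU while updating our lppaca.
                         */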
                        if (firmware_has_feature(FW_FEATURE_CMO)) {
                                u32 page_ins;

                                preempt_disable();
                                page_ins = be32_to_cpu(get_lppaca()->page_ins);
                                page_ins += 1 << PAGE_FACTOR;
                                get_lppaca()->page_ins = cpu_to_be32(page_ins);
                                preempt_enable();
                        }
#endif /* CONFIG_PPC_SMLPAR */
                } else {
                        current->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                      regs, address);
                }
                if (fault & VM_FAULT_RETRY) {
                        /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
                         * of starvation. */
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;
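                        /*
                         * handle_mm_fault() has already dropped mmap_sem
                         * for us when returning VM_FAULT_RETRY, so jump
                         * back and retake it before retrying.
                         */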
                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);
        goto bail;

bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses cause a SIGSEGV */
        if (user_mode(regs)) {
                _exception(SIGSEGV, regs, code, address);
                goto bail;
        }

        if (is_exec && (error_code & DSISR_PROTFAULT))
                printk_ratelimited(KERN_CRIT "kernel tried to execute NX-protected"
                                   " page (%lx) - exploit attempt? (uid: %d)\n",
                                   address, from_kuid(&init_user_ns, current_uid()));

        rc = SIGSEGV;

bail:
        exception_exit(prev_state);
        return rc;
}

/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from the DSI and ISI handlers in head.S and from some
 * of the procedures in traps.c.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
        const struct exception_table_entry *entry;

        /* Are we prepared to handle this fault?  */
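        /*
         * If the faulting instruction has an entry in the exception
         * table (registered by the uaccess/copy helpers), resume at
         * its fixup handler instead of oopsing.
         */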
        if ((entry = search_exception_tables(regs->nip)) != NULL) {
                regs->nip = entry->fixup;
                return;
        }

        /* kernel has accessed a bad area */

        switch (regs->trap) {
        case 0x300:     /* data storage interrupt */
        case 0x380:     /* data segment (SLB) miss */
                printk(KERN_ALERT "Unable to handle kernel paging request for "
                        "data at address 0x%08lx\n", regs->dar);
                break;
        case 0x400:     /* instruction storage interrupt */
        case 0x480:     /* instruction segment (SLB) miss */
                printk(KERN_ALERT "Unable to handle kernel paging request for "
                        "instruction fetch\n");
                break;
        case 0x600:     /* alignment interrupt */
                printk(KERN_ALERT "Unable to handle kernel paging request for "
                        "unaligned access at address 0x%08lx\n", regs->dar);
                break;
        default:
                printk(KERN_ALERT "Unable to handle kernel paging request for "
                        "unknown fault\n");
                break;
        }
        printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
                regs->nip);

        if (task_stack_end_corrupted(current))
                printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

        die("Kernel access of bad area", regs, sig);
}