linux/arch/powerpc/mm/fault.c
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Modified by Cort Dougan and Paul Mackerras.
 *
 *  Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/magic.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>

#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/siginfo.h>
#include <asm/debug.h>
#include <mm/mmu_decl.h>

#include "icswx.h"

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
{
        int ret = 0;

        /* kprobe_running() needs smp_processor_id() */
        if (!user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 11))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs)
{
        return 0;
}
#endif

/*
 * Check whether the instruction at regs->nip is a store using
 * an update addressing form which will update r1.
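 *
 * For example, "stwu r1,-16(r1)" encodes as 0x9421fff0: major opcode 37
 * with 1 in the rA field, so it is accepted below. (Illustrative
 * encoding worked out by hand: the opcode is in the top 6 bits, then
 * the 5-bit rS and rA fields, then a 16-bit displacement.)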
 */
static int store_updates_sp(struct pt_regs *regs)
{
        unsigned int inst;

        if (get_user(inst, (unsigned int __user *)regs->nip))
                return 0;
        /* check for 1 in the rA field */
        if (((inst >> 16) & 0x1f) != 1)
                return 0;
        /* check major opcode */
        switch (inst >> 26) {
        case 37:        /* stwu */
        case 39:        /* stbu */
        case 45:        /* sthu */
        case 53:        /* stfsu */
        case 55:        /* stfdu */
                return 1;
        case 62:        /* std or stdu */
                return (inst & 3) == 1;
        case 31:
                /* check minor opcode */
                switch ((inst >> 1) & 0x3ff) {
                case 181:       /* stdux */
                case 183:       /* stwux */
                case 247:       /* stbux */
                case 439:       /* sthux */
                case 695:       /* stfsux */
                case 759:       /* stfdux */
                        return 1;
                }
        }
        return 0;
}

/*
 * do_page_fault error handling helpers
 */

#define MM_FAULT_RETURN         0
#define MM_FAULT_CONTINUE       -1
#define MM_FAULT_ERR(sig)       (sig)
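
/*
 * Convention, as used by the callers below: MM_FAULT_RETURN means the
 * fault has been fully handled (do_page_fault() will return 0),
 * MM_FAULT_CONTINUE means normal fault processing should continue, and
 * MM_FAULT_ERR(sig) carries the signal number for a kernel fault that
 * cannot be handled here.
 */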

static int do_sigbus(struct pt_regs *regs, unsigned long address,
                     unsigned int fault)
{
        siginfo_t info;
        unsigned int lsb = 0;

        up_read(&current->mm->mmap_sem);

        if (user_mode(regs)) {
                current->thread.trap_nr = BUS_ADRERR;
                info.si_signo = SIGBUS;
                info.si_errno = 0;
                info.si_code = BUS_ADRERR;
                info.si_addr = (void __user *)address;
#ifdef CONFIG_MEMORY_FAILURE
                if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
                        pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
                                current->comm, current->pid, address);
                        info.si_code = BUS_MCEERR_AR;
                }

                if (fault & VM_FAULT_HWPOISON_LARGE)
                        lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
                if (fault & VM_FAULT_HWPOISON)
                        lsb = PAGE_SHIFT;
#endif
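                /*
                 * si_addr_lsb tells userspace the least significant bit
                 * of the reported address, i.e. the granularity of the
                 * poisoned region.
                 */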
                info.si_addr_lsb = lsb;
                force_sig_info(SIGBUS, &info, current);
                return MM_FAULT_RETURN;
        }
        return MM_FAULT_ERR(SIGBUS);
}

static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
{
        /*
         * The page fault was interrupted by SIGKILL. We have no reason
         * to continue the page fault.
         */
        if (fatal_signal_pending(current)) {
                /*
                 * If we have retry set, the mmap semaphore will have
                 * already been released in __lock_page_or_retry(). Else
                 * we release it now.
                 */
                if (!(fault & VM_FAULT_RETRY))
                        up_read(&current->mm->mmap_sem);
                /* Coming from kernel, we need to deal with uaccess fixups */
                if (user_mode(regs))
                        return MM_FAULT_RETURN;
                return MM_FAULT_ERR(SIGKILL);
        }

        /* No fault: be happy */
        if (!(fault & VM_FAULT_ERROR))
                return MM_FAULT_CONTINUE;

        /* Out of memory */
        if (fault & VM_FAULT_OOM) {
                up_read(&current->mm->mmap_sem);

                /*
                 * We ran out of memory, or some other thing happened to us that
                 * made us unable to handle the page fault gracefully.
                 */
                if (!user_mode(regs))
                        return MM_FAULT_ERR(SIGKILL);
                pagefault_out_of_memory();
                return MM_FAULT_RETURN;
        }

        if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE))
                return do_sigbus(regs, addr, fault);

        /* We don't understand the fault code, this is fatal */
        BUG();
        return MM_FAULT_CONTINUE;
}

/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault. For 400-family processors
 * the error_code parameter is ESR for a data fault, 0 for an instruction
 * fault.
 * For 64-bit processors, the error_code parameter is
 *  - DSISR for a non-SLB data access fault,
 *  - SRR1 & 0x08000000 for a non-SLB instruction access fault,
 *  - 0 for any SLB fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
 */
int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
                            unsigned long error_code)
{
        enum ctx_state prev_state = exception_enter();
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
        int code = SEGV_MAPERR;
        int is_write = 0;
        int trap = TRAP(regs);
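        /* trap vector 0x400 is the instruction storage interrupt (ISI) */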
        int is_exec = trap == 0x400;
        int fault;
        int rc = 0, store_update_sp = 0;

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
        /*
         * Fortunately the bit assignments in SRR1 for an instruction
         * fault and DSISR for a data fault are mostly the same for the
         * bits we are interested in.  But there are some bits which
         * indicate errors in DSISR but can validly be set in SRR1.
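         *
         * (The 0x48200000 mask below keeps the translation-miss,
         * protection-fault and key-fault bits; in DSISR terms this is
         * believed to be DSISR_NOHPTE | DSISR_PROTFAULT | DSISR_KEYFAULT.)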
         */
        if (trap == 0x400)
                error_code &= 0x48200000;
        else
                is_write = error_code & DSISR_ISSTORE;
#else
        is_write = error_code & ESR_DST;
#endif /* CONFIG_4xx || CONFIG_BOOKE */

#ifdef CONFIG_PPC_ICSWX
        /*
         * We need to do this early because this "data storage
         * interrupt" does not update the DAR/DEAR, so we don't want
         * to look at it.
         */
        if (error_code & ICSWX_DSI_UCT) {
                rc = acop_handle_fault(regs, address, error_code);
                if (rc)
                        goto bail;
        }
#endif /* CONFIG_PPC_ICSWX */

        if (notify_page_fault(regs))
                goto bail;

        if (unlikely(debugger_fault_handler(regs)))
                goto bail;

        /* On a kernel SLB miss we can only check for a valid exception entry */
        if (!user_mode(regs) && (address >= TASK_SIZE)) {
                rc = SIGSEGV;
                goto bail;
        }

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
                             defined(CONFIG_PPC_BOOK3S_64))
        if (error_code & DSISR_DABRMATCH) {
                /* breakpoint match */
                do_break(regs, address, error_code);
                goto bail;
        }
#endif

        /* We restore the interrupt state now */
        if (!arch_irq_disabled_regs(regs))
                local_irq_enable();

        if (in_atomic() || mm == NULL) {
                if (!user_mode(regs)) {
                        rc = SIGSEGV;
                        goto bail;
                }
                /*
                 * in_atomic() in user mode is really bad,
                 * as is current->mm == NULL.
                 */
                printk(KERN_EMERG "Page fault in user mode with "
                       "in_atomic() = %d mm = %p\n", in_atomic(), mm);
                printk(KERN_EMERG "NIP = %lx  MSR = %lx\n",
                       regs->nip, regs->msr);
                die("Weird page fault", regs, SIGSEGV);
        }

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

        /*
         * We want to do this outside mmap_sem, because reading code
         * around nip can result in a fault, which will cause a deadlock
         * when called with mmap_sem held.
         */
        if (user_mode(regs)) {
                store_update_sp = store_updates_sp(regs);
                flags |= FAULT_FLAG_USER;
        }

        /*
         * When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
         * erroneous fault occurring in a code path which already holds mmap_sem
         * we will deadlock attempting to validate the fault against the
         * address space.  Luckily the kernel only validly references user
         * space from well-defined areas of code, which are listed in the
         * exceptions table.
         *
         * As the vast majority of faults will be valid we will only perform
         * the source reference check when there is a possibility of a deadlock.
         * Attempt to lock the address space; if we cannot, we then validate
         * the source.  If this is invalid we can skip the address space check,
         * thus avoiding the deadlock.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                if (!user_mode(regs) && !search_exception_tables(regs->nip))
                        goto bad_area_nosemaphore;

retry:
                down_read(&mm->mmap_sem);
        } else {
                /*
                 * The above down_read_trylock() might have succeeded in
                 * which case we'll have missed the might_sleep() from
                 * down_read():
                 */
                might_sleep();
        }

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;

        /*
         * N.B. The POWER/Open ABI allows programs to access up to
         * 288 bytes below the stack pointer.
         * The kernel signal delivery code writes up to about 1.5kB
         * below the stack pointer (r1) before decrementing it.
         * The exec code can write slightly over 640kB to the stack
         * before setting the user r1.  Thus we allow the stack to
         * expand to 1MB without further checks.
         */
        if (address + 0x100000 < vma->vm_end) {
                /* get user regs even if this fault is in kernel mode */
                struct pt_regs *uregs = current->thread.regs;
                if (uregs == NULL)
                        goto bad_area;

                /*
                 * A user-mode access to an address a long way below
                 * the stack pointer is only valid if the instruction
                 * is one which would update the stack pointer to the
                 * address accessed if the instruction completed,
                 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
                 * (or the byte, halfword, float or double forms).
                 *
                 * If we don't check this then any write to the area
                 * between the last mapped region and the stack will
                 * expand the stack rather than segfaulting.
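                 *
                 * E.g. "stwu r1,-4096(r1)" touching r1 - 4096 is allowed
                 * to grow the stack here, while a plain "stw r3,-4096(r1)"
                 * to the same address is not.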
                 */
                if (address + 2048 < uregs->gpr[1] && !store_update_sp)
                        goto bad_area;
        }
        if (expand_stack(vma, address))
                goto bad_area;

good_area:
        code = SEGV_ACCERR;
#if defined(CONFIG_6xx)
        if (error_code & 0x95700000)
                /* an error such as lwarx to I/O controller space,
                   address matching DABR, eciwx, etc. */
                goto bad_area;
#endif /* CONFIG_6xx */
#if defined(CONFIG_8xx)
        /*
         * The 8xx sometimes needs to load invalid/non-present TLB entries.
         * These must be invalidated separately, as the Linux mm doesn't
         * do it.
         */
        if (error_code & 0x40000000) /* no translation? */
                _tlbil_va(address, 0, 0, 0);

        /*
         * The MPC8xx seems to always set 0x80000000, which is
         * "undefined".  Of the bits that can be set, this is the only
         * one which seems bad.
         */
        if (error_code & 0x10000000)
                /* Guarded storage error. */
                goto bad_area;
#endif /* CONFIG_8xx */

        if (is_exec) {
#ifdef CONFIG_PPC_STD_MMU
                /*
                 * A protection fault on exec goes straight to failure on
                 * hash-based MMUs, as they either don't support per-page
                 * execute permission or, if they do, it's handled already
                 * at the hash level.  This test would probably have to
                 * be removed if we changed the way this works to make hash
                 * processors use the same I/D cache coherency mechanism
                 * as embedded.
                 */
                if (error_code & DSISR_PROTFAULT)
                        goto bad_area;
#endif /* CONFIG_PPC_STD_MMU */

                /*
                 * Allow execution from readable areas if the MMU does not
                 * provide separate controls over reading and executing.
                 *
                 * Note: this code used not to be enabled for 4xx/BookE.
                 * It is now, as I/D cache coherency for these is done at
                 * set_pte_at() time and I see no reason why the test
                 * below wouldn't be valid on those processors.  This -may-
                 * break programs compiled with a really old ABI though.
                 */
                if (!(vma->vm_flags & VM_EXEC) &&
                    (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
                     !(vma->vm_flags & (VM_READ | VM_WRITE))))
                        goto bad_area;
        /* a write */
        } else if (is_write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                flags |= FAULT_FLAG_WRITE;
        /* a read */
        } else {
                /* protection fault */
                if (error_code & 0x08000000)
                        goto bad_area;
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto bad_area;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
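         *
         * Note that when FAULT_FLAG_ALLOW_RETRY is set, handle_mm_fault()
         * may drop mmap_sem and report that via VM_FAULT_RETRY.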
         */
        fault = handle_mm_fault(vma, address, flags);
        if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
                if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
                rc = mm_fault_error(regs, address, fault);
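                /*
                 * MM_FAULT_CONTINUE (-1) is the only negative value, so
                 * anything >= MM_FAULT_RETURN means we are done here.
                 */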
                if (rc >= MM_FAULT_RETURN)
                        goto bail;
                else
                        rc = 0;
        }

        /*
         * Major/minor page fault accounting is only done on the
         * initial attempt.  If we go through a retry, it is extremely
         * likely that the page will be found in the page cache at that
         * point.
         */
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        current->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                      regs, address);
#ifdef CONFIG_PPC_SMLPAR
                        if (firmware_has_feature(FW_FEATURE_CMO)) {
                                u32 page_ins;

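                                /*
                                 * The lppaca is a per-cpu area shared with
                                 * the hypervisor; keep preemption off across
                                 * the read-modify-write of page_ins.
                                 */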
                                preempt_disable();
                                page_ins = be32_to_cpu(get_lppaca()->page_ins);
                                page_ins += 1 << PAGE_FACTOR;
                                get_lppaca()->page_ins = cpu_to_be32(page_ins);
                                preempt_enable();
                        }
#endif /* CONFIG_PPC_SMLPAR */
                } else {
                        current->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                      regs, address);
                }
                if (fault & VM_FAULT_RETRY) {
                        /*
                         * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
                         * of starvation.
                         */
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;
                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);
        goto bail;

bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses cause a SIGSEGV */
        if (user_mode(regs)) {
                _exception(SIGSEGV, regs, code, address);
                goto bail;
        }

        if (is_exec && (error_code & DSISR_PROTFAULT))
                printk_ratelimited(KERN_CRIT "kernel tried to execute NX-protected"
                                   " page (%lx) - exploit attempt? (uid: %d)\n",
                                   address, from_kuid(&init_user_ns, current_uid()));

        rc = SIGSEGV;

bail:
        exception_exit(prev_state);
        return rc;
}

/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from the DSI and ISI handlers in head.S and from some
 * of the procedures in traps.c.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
        const struct exception_table_entry *entry;
        unsigned long *stackend;

        /* Are we prepared to handle this fault?  */
        entry = search_exception_tables(regs->nip);
        if (entry != NULL) {
                regs->nip = entry->fixup;
                return;
        }

        /* kernel has accessed a bad area */

        switch (regs->trap) {
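        /*
         * 0x300/0x380 are the data storage / data segment (SLB) exception
         * vectors; 0x400/0x480 are the instruction-side equivalents.
         */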
        case 0x300:
        case 0x380:
                printk(KERN_ALERT "Unable to handle kernel paging request for "
                        "data at address 0x%08lx\n", regs->dar);
                break;
        case 0x400:
        case 0x480:
                printk(KERN_ALERT "Unable to handle kernel paging request for "
                        "instruction fetch\n");
                break;
        default:
                printk(KERN_ALERT "Unable to handle kernel paging request for "
                        "unknown fault\n");
                break;
        }
        printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
                regs->nip);

        stackend = end_of_stack(current);
        if (current != &init_task && *stackend != STACK_END_MAGIC)
                printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

        die("Kernel access of bad area", regs, sig);
}