linux/arch/arm/mm/fault.c
/*
 *  linux/arch/arm/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/highmem.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "fault.h"

/*
 * Fault status register encodings.  We steal bit 31 for our own purposes.
 */
#define FSR_LNX_PF              (1 << 31)
#define FSR_WRITE               (1 << 11)
#define FSR_FS4                 (1 << 10)
#define FSR_FS3_0               (15)

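/*
 * fsr_fs() folds the fault status field into a single table index:
 * FS[3:0] live in bits 3:0 of the FSR and FS[4] in bit 10.  For example,
 * an FSR of 0x405 (FS[4] set, FS[3:0] = 5) decodes to index 21.
 */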
static inline int fsr_fs(unsigned int fsr)
{
        return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
}

#ifdef CONFIG_MMU

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
        int ret = 0;

        if (!user_mode(regs)) {
                /* kprobe_running() needs smp_processor_id() */
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, fsr))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
        return 0;
}
#endif

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;

        if (!mm)
                mm = &init_mm;

        printk(KERN_ALERT "pgd = %p\n", mm->pgd);
        pgd = pgd_offset(mm, addr);
        printk(KERN_ALERT "[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));

        do {
                pmd_t *pmd;
                pte_t *pte;

                if (pgd_none(*pgd))
                        break;

                if (pgd_bad(*pgd)) {
                        printk("(bad)");
                        break;
                }

                pmd = pmd_offset(pgd, addr);
                if (PTRS_PER_PMD != 1)
                        printk(", *pmd=%08lx", pmd_val(*pmd));

                if (pmd_none(*pmd))
                        break;

                if (pmd_bad(*pmd)) {
                        printk("(bad)");
                        break;
                }

                /* We must not map this if we have highmem enabled */
                if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
                        break;

                pte = pte_offset_map(pmd, addr);
                printk(", *pte=%08lx", pte_val(*pte));
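                /*
                 * Each ARM PTE page carries two copies of every entry (the
                 * Linux view and the hardware view); pte[-PTRS_PER_PTE]
                 * below prints the companion copy of this PTE.
                 */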
                printk(", *ppte=%08lx", pte_val(pte[-PTRS_PER_PTE]));
                pte_unmap(pte);
        } while(0);

        printk("\n");
}
#else                                   /* CONFIG_MMU */
void show_pte(struct mm_struct *mm, unsigned long addr)
{ }
#endif                                  /* CONFIG_MMU */

/*
 * Oops.  The kernel tried to access some page that wasn't present.
 */
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
                  struct pt_regs *regs)
{
        /*
         * Are we prepared to handle this kernel fault?
         */
        if (fixup_exception(regs))
                return;

        /*
         * No handler, we'll have to terminate things with extreme prejudice.
         */
        bust_spinlocks(1);
        printk(KERN_ALERT
                "Unable to handle kernel %s at virtual address %08lx\n",
                (addr < PAGE_SIZE) ? "NULL pointer dereference" :
                "paging request", addr);

        show_pte(mm, addr);
        die("Oops", regs, fsr);
        bust_spinlocks(0);
        do_exit(SIGKILL);
}

/*
 * Something tried to access memory that isn't in our memory map..
 * User mode accesses just cause a SIGSEGV
 */
static void
__do_user_fault(struct task_struct *tsk, unsigned long addr,
                unsigned int fsr, unsigned int sig, int code,
                struct pt_regs *regs)
{
        struct siginfo si;

#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_SEGV) {
                printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
                       tsk->comm, sig, addr, fsr);
                show_pte(tsk->mm, addr);
                show_regs(regs);
        }
#endif

        tsk->thread.address = addr;
        tsk->thread.error_code = fsr;
        tsk->thread.trap_no = 14;
        si.si_signo = sig;
        si.si_errno = 0;
        si.si_code = code;
        si.si_addr = (void __user *)addr;
        force_sig_info(sig, &si, tsk);
}

void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->active_mm;

        /*
         * If we are in kernel mode at this point, we
         * have no context to handle this fault with.
         */
        if (user_mode(regs))
                __do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
        else
                __do_kernel_fault(mm, addr, fsr, regs);
}

#ifdef CONFIG_MMU
#define VM_FAULT_BADMAP         0x010000
#define VM_FAULT_BADACCESS      0x020000
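/*
 * VM_FAULT_BADMAP:    no VMA covered the faulting address.
 * VM_FAULT_BADACCESS: a VMA was found, but its permissions do not allow
 *                     the access.  Both sit in bits above the generic
 *                     VM_FAULT_* flags so the caller can test them with
 *                     the same bitmask.
 */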

/*
 * Check that the permissions on the VMA allow for the fault which occurred.
 * If we encountered a write fault, we must have write permission, otherwise
 * we allow any permission.  A fault marked as an instruction fetch
 * (FSR_LNX_PF) requires execute permission.
 */
static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
        unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;

        if (fsr & FSR_WRITE)
                mask = VM_WRITE;
        if (fsr & FSR_LNX_PF)
                mask = VM_EXEC;

        return vma->vm_flags & mask ? false : true;
}

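/*
 * Resolve the fault against 'mm'.  Called with mm->mmap_sem held;
 * returns the VM_FAULT_* result from handle_mm_fault(), or
 * VM_FAULT_BADMAP/VM_FAULT_BADACCESS if no usable VMA covers 'addr'.
 */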
static int __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
                struct task_struct *tsk)
{
        struct vm_area_struct *vma;
        int fault;

        vma = find_vma(mm, addr);
        fault = VM_FAULT_BADMAP;
        if (unlikely(!vma))
                goto out;
        if (unlikely(vma->vm_start > addr))
                goto check_stack;

        /*
         * Ok, we have a good vm_area for this
         * memory access, so we can handle it.
         */
good_area:
        if (access_error(fsr, vma)) {
                fault = VM_FAULT_BADACCESS;
                goto out;
        }

        /*
         * If for any reason at all we couldn't handle the fault, make
         * sure we exit gracefully rather than endlessly redo the fault.
         */
        fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR))
                return fault;
        if (fault & VM_FAULT_MAJOR)
                tsk->maj_flt++;
        else
                tsk->min_flt++;
        return fault;

check_stack:
        if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
                goto good_area;
out:
        return fault;
}

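/*
 * Top-level handler for faults the mm layer may be able to resolve.
 * Every path returns 0 once the fault has been dealt with (fixed up,
 * signalled to the task, or oopsed); a non-zero return would tell the
 * abort dispatcher below that the fault is still unhandled.
 */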
static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        int fault, sig, code;

        if (notify_page_fault(regs, fsr))
                return 0;

        tsk = current;
        mm  = tsk->mm;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;

        /*
         * As per x86, we may deadlock here.  However, since the kernel only
         * validly references user space from well defined areas of the code,
         * we can bug out early if this is from code which shouldn't.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
                        goto no_context;
                down_read(&mm->mmap_sem);
        } else {
                /*
                 * The above down_read_trylock() might have succeeded in
                 * which case, we'll have missed the might_sleep() from
                 * down_read()
                 */
                might_sleep();
#ifdef CONFIG_DEBUG_VM
                if (!user_mode(regs) &&
                    !search_exception_tables(regs->ARM_pc))
                        goto no_context;
#endif
        }

        fault = __do_page_fault(mm, addr, fsr, tsk);
        up_read(&mm->mmap_sem);

        /*
         * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
         */
        if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
                return 0;

        if (fault & VM_FAULT_OOM) {
                /*
                 * We ran out of memory, call the OOM killer, and return to
                 * userspace (which will retry the fault, or kill us if we
                 * got oom-killed)
                 */
                pagefault_out_of_memory();
                return 0;
        }

        /*
         * If we are in kernel mode at this point, we
         * have no context to handle this fault with.
         */
        if (!user_mode(regs))
                goto no_context;

        if (fault & VM_FAULT_SIGBUS) {
                /*
                 * We had some memory, but were unable to
                 * successfully fix up this page fault.
                 */
                sig = SIGBUS;
                code = BUS_ADRERR;
        } else {
                /*
                 * Something tried to access memory that
                 * isn't in our memory map..
                 */
                sig = SIGSEGV;
                code = fault == VM_FAULT_BADACCESS ?
                        SEGV_ACCERR : SEGV_MAPERR;
        }

        __do_user_fault(tsk, addr, fsr, sig, code, regs);
        return 0;

no_context:
        __do_kernel_fault(mm, addr, fsr, regs);
        return 0;
}
#else                                   /* CONFIG_MMU */
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        return 0;
}
#endif                                  /* CONFIG_MMU */

/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contain the relevant
 * entry, we copy it to this task.  If not, we send the process
 * a signal, fix up the exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
#ifdef CONFIG_MMU
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
                     struct pt_regs *regs)
{
        unsigned int index;
        pgd_t *pgd, *pgd_k;
        pmd_t *pmd, *pmd_k;

        if (addr < TASK_SIZE)
                return do_page_fault(addr, fsr, regs);

        index = pgd_index(addr);

        /*
         * FIXME: CP15 C1 is write only on ARMv3 architectures.
         */
        pgd = cpu_get_pgd() + index;
        pgd_k = init_mm.pgd + index;

        if (pgd_none(*pgd_k))
                goto bad_area;

        if (!pgd_present(*pgd))
                set_pgd(pgd, *pgd_k);

        pmd_k = pmd_offset(pgd_k, addr);
        pmd   = pmd_offset(pgd, addr);

        if (pmd_none(*pmd_k))
                goto bad_area;

        copy_pmd(pmd, pmd_k);
        return 0;

bad_area:
        do_bad_area(addr, fsr, regs);
        return 0;
}
#else                                   /* CONFIG_MMU */
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
                     struct pt_regs *regs)
{
        return 0;
}
#endif                                  /* CONFIG_MMU */

/*
 * Some section permission faults need to be handled gracefully.
 * They can happen due to a __{get,put}_user during an oops.
 */
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        do_bad_area(addr, fsr, regs);
        return 0;
}

/*
 * This abort handler always returns "fault".
 */
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        return 1;
}

static struct fsr_info {
        int     (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
        int     sig;
        int     code;
        const char *name;
} fsr_info[] = {
        /*
         * The following are the standard ARMv3 and ARMv4 aborts.  ARMv5
         * defines these to be "precise" aborts.
         */
        { do_bad,               SIGSEGV, 0,             "vector exception"                 },
        { do_bad,               SIGILL,  BUS_ADRALN,    "alignment exception"              },
        { do_bad,               SIGKILL, 0,             "terminal exception"               },
        { do_bad,               SIGILL,  BUS_ADRALN,    "alignment exception"              },
        { do_bad,               SIGBUS,  0,             "external abort on linefetch"      },
        { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "section translation fault"        },
        { do_bad,               SIGBUS,  0,             "external abort on linefetch"      },
        { do_page_fault,        SIGSEGV, SEGV_MAPERR,   "page translation fault"           },
        { do_bad,               SIGBUS,  0,             "external abort on non-linefetch"  },
        { do_bad,               SIGSEGV, SEGV_ACCERR,   "section domain fault"             },
        { do_bad,               SIGBUS,  0,             "external abort on non-linefetch"  },
        { do_bad,               SIGSEGV, SEGV_ACCERR,   "page domain fault"                },
        { do_bad,               SIGBUS,  0,             "external abort on translation"    },
        { do_sect_fault,        SIGSEGV, SEGV_ACCERR,   "section permission fault"         },
        { do_bad,               SIGBUS,  0,             "external abort on translation"    },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "page permission fault"            },
        /*
         * The following are "imprecise" aborts, which are signalled by bit
         * 10 of the FSR, and may not be recoverable.  These are only
         * supported if the CPU abort handler supports bit 10.
         */
        { do_bad,               SIGBUS,  0,             "unknown 16"                       },
        { do_bad,               SIGBUS,  0,             "unknown 17"                       },
        { do_bad,               SIGBUS,  0,             "unknown 18"                       },
        { do_bad,               SIGBUS,  0,             "unknown 19"                       },
        { do_bad,               SIGBUS,  0,             "lock abort"                       }, /* xscale */
        { do_bad,               SIGBUS,  0,             "unknown 21"                       },
        { do_bad,               SIGBUS,  BUS_OBJERR,    "imprecise external abort"         }, /* xscale */
        { do_bad,               SIGBUS,  0,             "unknown 23"                       },
        { do_bad,               SIGBUS,  0,             "dcache parity error"              }, /* xscale */
        { do_bad,               SIGBUS,  0,             "unknown 25"                       },
        { do_bad,               SIGBUS,  0,             "unknown 26"                       },
        { do_bad,               SIGBUS,  0,             "unknown 27"                       },
        { do_bad,               SIGBUS,  0,             "unknown 28"                       },
        { do_bad,               SIGBUS,  0,             "unknown 29"                       },
        { do_bad,               SIGBUS,  0,             "unknown 30"                       },
        { do_bad,               SIGBUS,  0,             "unknown 31"                       }
};

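/*
 * Allow platform code to replace one of the fsr_info[] handlers above,
 * e.g. to catch machine-specific external aborts.  Out-of-range fault
 * numbers are silently ignored.
 */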
void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
                int sig, const char *name)
{
        if (nr >= 0 && nr < ARRAY_SIZE(fsr_info)) {
                fsr_info[nr].fn   = fn;
                fsr_info[nr].sig  = sig;
                fsr_info[nr].name = name;
        }
}

/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void __exception
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
        struct siginfo info;

        if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
                return;

        printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
                inf->name, fsr, addr);

        info.si_signo = inf->sig;
        info.si_errno = 0;
        info.si_code  = inf->code;
        info.si_addr  = (void __user *)addr;
        arm_notify_die("", regs, &info, fsr, 0);
}

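/*
 * Decode table for the instruction fault status value reported with
 * prefetch aborts; the layout mirrors fsr_info[] above.
 */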
static struct fsr_info ifsr_info[] = {
        { do_bad,               SIGBUS,  0,             "unknown 0"                        },
        { do_bad,               SIGBUS,  0,             "unknown 1"                        },
        { do_bad,               SIGBUS,  0,             "debug event"                      },
        { do_bad,               SIGSEGV, SEGV_ACCERR,   "section access flag fault"        },
        { do_bad,               SIGBUS,  0,             "unknown 4"                        },
        { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "section translation fault"        },
        { do_bad,               SIGSEGV, SEGV_ACCERR,   "page access flag fault"           },
        { do_page_fault,        SIGSEGV, SEGV_MAPERR,   "page translation fault"           },
        { do_bad,               SIGBUS,  0,             "external abort on non-linefetch"  },
        { do_bad,               SIGSEGV, SEGV_ACCERR,   "section domain fault"             },
        { do_bad,               SIGBUS,  0,             "unknown 10"                       },
        { do_bad,               SIGSEGV, SEGV_ACCERR,   "page domain fault"                },
        { do_bad,               SIGBUS,  0,             "external abort on translation"    },
        { do_sect_fault,        SIGSEGV, SEGV_ACCERR,   "section permission fault"         },
        { do_bad,               SIGBUS,  0,             "external abort on translation"    },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "page permission fault"            },
        { do_bad,               SIGBUS,  0,             "unknown 16"                       },
        { do_bad,               SIGBUS,  0,             "unknown 17"                       },
        { do_bad,               SIGBUS,  0,             "unknown 18"                       },
        { do_bad,               SIGBUS,  0,             "unknown 19"                       },
        { do_bad,               SIGBUS,  0,             "unknown 20"                       },
        { do_bad,               SIGBUS,  0,             "unknown 21"                       },
        { do_bad,               SIGBUS,  0,             "unknown 22"                       },
        { do_bad,               SIGBUS,  0,             "unknown 23"                       },
        { do_bad,               SIGBUS,  0,             "unknown 24"                       },
        { do_bad,               SIGBUS,  0,             "unknown 25"                       },
        { do_bad,               SIGBUS,  0,             "unknown 26"                       },
        { do_bad,               SIGBUS,  0,             "unknown 27"                       },
        { do_bad,               SIGBUS,  0,             "unknown 28"                       },
        { do_bad,               SIGBUS,  0,             "unknown 29"                       },
        { do_bad,               SIGBUS,  0,             "unknown 30"                       },
        { do_bad,               SIGBUS,  0,             "unknown 31"                       },
};

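/*
 * Dispatch a prefetch abort to the relevant handler.  FSR_LNX_PF is
 * ORed into the status so that do_page_fault() checks for execute
 * permission rather than read/write.
 */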
asmlinkage void __exception
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
        const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
        struct siginfo info;

        if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
                return;

        printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
                inf->name, ifsr, addr);

        info.si_signo = inf->sig;
        info.si_errno = 0;
        info.si_code  = inf->code;
        info.si_addr  = (void __user *)addr;
        arm_notify_die("", regs, &info, ifsr, 0);
}