linux/arch/sh/mm/fault.c
/*
 * Page fault handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2012  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

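/*
 * Give any registered kprobes a chance to claim a kernel-mode fault
 * before it is handled here. Preemption is disabled around the check
 * because kprobe_running() inspects per-CPU state.
 */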
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

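/*
 * Fill in a siginfo for a memory fault and deliver the signal
 * (SIGSEGV or SIGBUS) to the given task.
 */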
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk)
{
	siginfo_t info;

	/*
	 * Clear the whole siginfo first so no uninitialized stack
	 * bytes from the unused union members leak to userspace.
	 */
	memset(&info, 0, sizeof(info));

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;

	force_sig_info(si_signo, &info, tsk);
}

/*
 * Dump out the page tables associated with 'addr' in mm 'mm'.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (mm) {
		pgd = mm->pgd;
	} else {
		pgd = get_TTB();

		if (unlikely(!pgd))
			pgd = swapper_pg_dir;
	}

	printk(KERN_ALERT "pgd = %p\n", pgd);
	pgd += pgd_index(addr);
	printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
	       (u32)(sizeof(*pgd) * 2), (u64)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			printk(", *pud=%0*Lx", (u32)(sizeof(*pud) * 2),
			       (u64)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%0*Lx", (u32)(sizeof(*pmd) * 2),
			       (u64)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/*
		 * With highmem the pte page may not be in the kernel
		 * direct map, so we must not dereference it here.
		 */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_kernel(pmd, addr);
		printk(", *pte=%0*Lx", (u32)(sizeof(*pte) * 2),
		       (u64)pte_val(*pte));
	} while (0);

	printk("\n");
}

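/*
 * Copy the page-table entries covering 'address' from the reference
 * page table (init_mm.pgd) into 'pgd'. Returns the kernel pmd for
 * 'address', or NULL if the reference entries are absent or already
 * in sync (i.e. the fault was not a stale vmalloc-mapping fault).
 */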
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else {
		/*
		 * The page tables are fully synchronised so there must
		 * be another reason for the fault. Return NULL here to
		 * signal that we have not taken care of the fault.
		 */
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return NULL;
	}

	return pmd_k;
}

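/*
 * The store queue mappings sit above the regular vmalloc window, so
 * when store queue support is configured the fault window has to be
 * widened past VMALLOC_END to take them in.
 */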
#ifdef CONFIG_SH_STORE_QUEUES
#define __FAULT_ADDR_LIMIT	P3_ADDR_MAX
#else
#define __FAULT_ADDR_LIMIT	VMALLOC_END
#endif

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc/module/P3 area: */
	if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_k = get_TTB();
	pmd_k = vmalloc_sync_one(pgd_k, address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

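/* Print the "unable to handle kernel ..." banner for a fatal fault. */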
static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
	if (!oops_may_print())
		return;

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %08lx\n", address);
	printk(KERN_ALERT "PC:");
	printk_address(regs->pc, 1);

	show_pte(NULL, address);
}

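/*
 * Handle a kernel-mode fault with no user context to return to:
 * try an exception table fixup or trapped I/O emulation first, and
 * oops if neither can resolve the fault.
 */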
static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	show_fault_oops(regs, address);

	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

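/*
 * Signal a bad access on a path where mmap_sem is not held (or has
 * already been dropped). User-mode accesses get a SIGSEGV; kernel
 * faults fall through to no_context().
 */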
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address, tsk);

		return;
	}

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

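/*
 * Drop mmap_sem and deliver a SIGBUS for a fault the VM could not
 * service (e.g. an access beyond the end of a mapped file).
 */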
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	up_read(&mm->mmap_sem);

	/*
	 * Kernel mode? Handle exceptions or die; don't fall through
	 * and signal the task if a fixup already resolved the fault:
	 */
	if (!user_mode(regs)) {
		no_context(regs, error_code, address);
		return;
	}

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

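/*
 * Decode an error return from handle_mm_fault(). Returns 1 if the
 * fault has been fully dealt with here (signal sent, OOM killer
 * invoked, or task killed) and 0 if the caller should carry on.
 */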
static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	/*
	 * The pagefault was interrupted by SIGKILL; there is no
	 * reason to carry on with it.
	 */
	if (fatal_signal_pending(current)) {
		if (!(fault & VM_FAULT_RETRY))
			up_read(&current->mm->mmap_sem);
		if (!user_mode(regs))
			no_context(regs, error_code, address);
		return 1;
	}

	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!user_mode(regs)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, error_code, address);
			return 1;
		}
		up_read(&current->mm->mmap_sem);

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, error_code, address);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area(regs, error_code, address);
		else
			BUG();
	}

	return 1;
}

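/*
 * Check the attempted access against the vma's permissions; returns
 * non-zero if it is a protection violation (SEGV_ACCERR).
 */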
static inline int access_error(int error_code, struct vm_area_struct *vma)
{
	if (error_code & FAULT_CODE_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* ITLB miss on NX page */
	if (unlikely((error_code & FAULT_CODE_ITLB) &&
		     !(vma->vm_flags & VM_EXEC)))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

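/* On sh, anything at or above TASK_SIZE lives in kernel space. */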
static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long error_code,
					unsigned long address)
{
	unsigned long vec;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm = tsk->mm;
	vec = lookup_exception_vector();

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (notify_page_fault(regs, vec))
			return;

		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	if (unlikely(notify_page_fault(regs, vec)))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * with pagefaults disabled then we must not take the fault:
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	set_thread_fault_code(error_code);

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (error_code & FAULT_CODE_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
		if (mm_fault_error(regs, error_code, address, fault))
			return;

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
}