linux/arch/sparc/mm/fault_64.c
/*
 * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
 *
 * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/percpu.h>
#include <linux/context_tracking.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>

int show_unhandled_signals = 1;

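/* Give any registered kprobe a first shot at an in-kernel fault.
 * Returns 1 if the kprobe fault handler consumed the fault.
 */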
static inline __kprobes int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 0))
			ret = 1;
		preempt_enable();
	}
	return ret;
}

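/* Report a fault the kernel cannot recover from: log the faulting
 * address and the current context/pgd, then oops via die_if_kernel().
 */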
static void __kprobes unhandled_fault(unsigned long address,
				      struct task_struct *tsk,
				      struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT "Unable to handle kernel NULL "
		       "pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request "
		       "at virtual address %016lx\n", (unsigned long)address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
	       (tsk->mm ?
		CTX_HWBITS(tsk->mm->context) :
		CTX_HWBITS(tsk->active_mm->context)));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
			  (unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

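/* The kernel faulted with a PC that is neither in the kernel image
 * nor in the module area; dump what we know and treat it as fatal.
 */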
static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
{
	printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
	       regs->tpc);
	printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
	printk(KERN_CRIT "OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
	printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
	dump_stack();
	unhandled_fault(regs->tpc, current, regs);
}

/*
 * We now make sure that mmap_sem is held in all paths that call
 * this.  Additionally, to prevent kswapd from ripping ptes out from
 * under us, we disable interrupts around the pte lookup: kswapd will
 * then have to wait for its SMP IPI response from us before it can
 * proceed, and vmtruncate likewise.  This saves us from having to
 * take the pte lock.
 */
static unsigned int get_user_insn(unsigned long tpc)
{
	pgd_t *pgdp = pgd_offset(current->mm, tpc);
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, pte;
	unsigned long pa;
	u32 insn = 0;

	if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
		goto out;
	pudp = pud_offset(pgdp, tpc);
	if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
		goto out;

	/* This disables preemption for us as well. */
	local_irq_disable();

	pmdp = pmd_offset(pudp, tpc);
	if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
		goto out_irq_enable;

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (is_hugetlb_pmd(*pmdp)) {
		pa  = pmd_pfn(*pmdp) << PAGE_SHIFT;
		pa += tpc & ~HPAGE_MASK;

		/* Use phys bypass so we don't pollute dtlb/dcache. */
		__asm__ __volatile__("lduwa [%1] %2, %0"
				     : "=r" (insn)
				     : "r" (pa), "i" (ASI_PHYS_USE_EC));
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, tpc);
		pte = *ptep;
		if (pte_present(pte)) {
			pa  = (pte_pfn(pte) << PAGE_SHIFT);
			pa += (tpc & ~PAGE_MASK);

			/* Use phys bypass so we don't pollute dtlb/dcache. */
			__asm__ __volatile__("lduwa [%1] %2, %0"
					     : "=r" (insn)
					     : "r" (pa), "i" (ASI_PHYS_USE_EC));
		}
		pte_unmap(ptep);
	}
out_irq_enable:
	local_irq_enable();
out:
	return insn;
}

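/* Print a rate-limited, one-line report for an unhandled user fault,
 * gated by show_unhandled_signals (mirroring what other architectures
 * print for segfaults).
 */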
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->tpc);

	printk(KERN_CONT "\n");
}

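/* Build and deliver the siginfo for a user fault.  For D-TLB faults
 * we try to decode the faulting instruction so that si_addr is
 * byte-accurate rather than merely page-accurate.
 */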
static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			     unsigned long fault_addr, unsigned int insn,
			     int fault_code)
{
	unsigned long addr;
	siginfo_t info;

	info.si_code = code;
	info.si_signo = sig;
	info.si_errno = 0;
	if (fault_code & FAULT_CODE_ITLB) {
		addr = regs->tpc;
	} else {
		/* If we were able to probe the faulting instruction, use it
		 * to compute a precise fault address.  Otherwise use the
		 * address provided at fault time, which may only have page
		 * granularity.
		 */
		if (insn)
			addr = compute_effective_address(regs, insn, 0);
		else
			addr = fault_addr;
	}
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;

	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, code, addr, current);

	force_sig_info(sig, &info, current);
}

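/* Fetch the faulting instruction word at regs->tpc: directly for a
 * kernel-mode fault, via get_user_insn()'s physical-address probe
 * for a user-mode one.  Returns 0 if the instruction cannot be read
 * (e.g. tpc is zero or misaligned).
 */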
static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
{
	if (!insn) {
		if (!regs->tpc || (regs->tpc & 0x3))
			return 0;
		if (regs->tstate & TSTATE_PRIV) {
			insn = *(unsigned int *) regs->tpc;
		} else {
			insn = get_user_insn(regs->tpc);
		}
	}
	return insn;
}

static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
				      int fault_code, unsigned int insn,
				      unsigned long address)
{
	unsigned char asi = ASI_P;

	if ((!insn) && (regs->tstate & TSTATE_PRIV))
		goto cannot_handle;

	/* If the user insn could not be read (thus insn is zero), that
	 * is fine.  We will just gun down the process with a signal
	 * in that case.
	 */

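	/* Decode the load/store: bits 31:30 == 11 selects the memory-op
	 * instruction format and bit 23 the alternate-space forms.  The
	 * i bit (bit 13) says whether the ASI comes from the %asi
	 * register (held in tstate bits 31:24) or from the immediate
	 * asi field in insn bits 12:5.  The (asi & 0xf2) == 0x82 test
	 * matches the non-faulting ASIs ASI_{P,S}NF{,L} (0x82, 0x83,
	 * 0x8a, 0x8b), and bit 24 distinguishes the FP forms that
	 * handle_ldf_stq() deals with.
	 */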
	if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
	    (insn & 0xc0800000) == 0xc0800000) {
		if (insn & 0x2000)
			asi = (regs->tstate >> 24);
		else
			asi = (insn >> 5);
		if ((asi & 0xf2) == 0x82) {
			if (insn & 0x1000000) {
				handle_ldf_stq(insn, regs);
			} else {
				/* This was a non-faulting load. Just clear the
				 * destination register(s) and continue with the next
				 * instruction. -jj
				 */
				handle_ld_nf(insn, regs);
			}
			return;
		}
	}

	/* Is this in ex_table? */
	if (regs->tstate & TSTATE_PRIV) {
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
	} else {
		/* The si_code was set to make clear whether
		 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
		 */
		do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
		return;
	}

cannot_handle:
	unhandled_fault(address, current, regs);
}

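/* A 32-bit task should never present a TPC with the upper 32 bits
 * set; log the first ten occurrences and dump the registers.
 */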
static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
{
	static int times;

	if (times++ < 10)
		printk(KERN_ERR "FAULT[%s:%d]: 32-bit process reports "
		       "64-bit TPC [%lx]\n",
		       current->comm, current->pid,
		       regs->tpc);
	show_regs(regs);
}

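/* Main page fault entry point, called from the trap table.  The
 * fault address and fault code have been stashed in thread_info by
 * the low-level MMU trap handlers before we get here.
 */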
asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned int insn = 0;
	int si_code, fault_code, fault;
	unsigned long address, mm_rss;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	fault_code = get_thread_fault_code();

	if (notify_page_fault(regs))
		goto exit_exception;

	si_code = SEGV_MAPERR;
	address = current_thread_info()->fault_address;

	if ((fault_code & FAULT_CODE_ITLB) &&
	    (fault_code & FAULT_CODE_DTLB))
		BUG();

	if (test_thread_flag(TIF_32BIT)) {
		if (!(regs->tstate & TSTATE_PRIV)) {
			if (unlikely((regs->tpc >> 32) != 0)) {
				bogus_32bit_fault_tpc(regs);
				goto intr_or_no_mm;
			}
		}
		if (unlikely((address >> 32) != 0))
			goto intr_or_no_mm;
	}

	if (regs->tstate & TSTATE_PRIV) {
		unsigned long tpc = regs->tpc;

		/* Sanity check the PC. */
		if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
		    (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
			/* Valid, no problems... */
		} else {
			bad_kernel_pc(regs, address);
			goto exit_exception;
		}
	} else
		flags |= FAULT_FLAG_USER;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto intr_or_no_mm;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

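	/* Only try-lock mmap_sem first: if it is contended and we
	 * faulted from kernel code that has no exception-table fixup,
	 * report the fault now rather than risk deadlocking on a
	 * semaphore the kernel may itself be holding.  Note the retry:
	 * label sits inside this block so that a VM_FAULT_RETRY loop
	 * re-acquires the semaphore here.
	 */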
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((regs->tstate & TSTATE_PRIV) &&
		    !search_exception_tables(regs->tpc)) {
			insn = get_fault_insn(regs, insn);
			goto handle_kernel_fault;
		}

retry:
		down_read(&mm->mmap_sem);
	}

	if (fault_code & FAULT_CODE_BAD_RA)
		goto do_sigbus;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;

	/* Pure DTLB misses do not tell us whether the fault causing
	 * load/store/atomic was a write or not, it only says that there
	 * was no match.  So in such a case we (carefully) read the
	 * instruction to try and figure this out.  It's an optimization
	 * so it's ok if we can't do this.
	 *
	 * Special hack, window spill/fill knows the exact fault type.
	 */
	if (((fault_code &
	      (FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
	    (vma->vm_flags & VM_WRITE) != 0) {
		insn = get_fault_insn(regs, 0);
		if (!insn)
			goto continue_fault;
		/* All loads, stores and atomics have bits 30 and 31 both set
		 * in the instruction.  Bit 21 is set in all stores, but we
		 * have to avoid prefetches, which also have bit 21 set; the
		 * second test below filters out the PREFETCH/PREFETCHA op3
		 * encodings (0x2d/0x3d).
		 */
		if ((insn & 0xc0200000) == 0xc0200000 &&
		    (insn & 0x01780000) != 0x01680000) {
			/* Don't bother updating thread struct value,
			 * because update_mmu_cache only cares which tlb
			 * the access came from.
			 */
			fault_code |= FAULT_CODE_WRITE;
		}
	}
continue_fault:

	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (!(fault_code & FAULT_CODE_WRITE)) {
		/* Non-faulting loads shouldn't expand the stack. */
		insn = get_fault_insn(regs, insn);
		if ((insn & 0xc0800000) == 0xc0800000) {
			unsigned char asi;

			if (insn & 0x2000)
				asi = (regs->tstate >> 24);
			else
				asi = (insn >> 5);
			if ((asi & 0xf2) == 0x82)
				goto bad_area;
		}
	}
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	si_code = SEGV_ACCERR;

	/* If we took an ITLB miss on a non-executable page, catch
	 * that here.
	 */
	if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
		WARN(address != regs->tpc,
		     "address (%lx) != regs->tpc (%lx)\n", address, regs->tpc);
		WARN_ON(regs->tstate & TSTATE_PRIV);
		goto bad_area;
	}

	if (fault_code & FAULT_CODE_WRITE) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;

		/* Spitfire has an icache which does not snoop
		 * processor stores.  Later processors do...
		 */
		if (tlb_type == spitfire &&
		    (vma->vm_flags & VM_EXEC) != 0 &&
		    vma->vm_file != NULL)
			set_thread_fault_code(fault_code |
					      FAULT_CODE_BLKCOMMIT);

		flags |= FAULT_FLAG_WRITE;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

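	/* Hand the fault to the generic VM.  With FAULT_FLAG_ALLOW_RETRY
	 * set, handle_mm_fault() may drop mmap_sem and return
	 * VM_FAULT_RETRY, in which case we loop back and retry exactly
	 * once with FAULT_FLAG_TRIED.
	 */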
	fault = handle_mm_fault(vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		goto exit_exception;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, address);
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}
	up_read(&mm->mmap_sem);

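	/* The fault may have made the mm's resident set outgrow its
	 * TSB (the per-mm software cache that the TLB miss handlers
	 * walk), so grow the TSB once RSS crosses the current rss
	 * limit.  THP mappings are counted out of the base TSB (they
	 * live in the huge TSB instead), hence the thp_pte_count
	 * adjustment.
	 */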
	mm_rss = get_mm_rss(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
	mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
#endif
	if (unlikely(mm_rss >
		     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
		tsb_grow(mm, MM_TSB_BASE, mm_rss);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
	mm_rss *= REAL_HPAGE_PER_HPAGE;
	if (unlikely(mm_rss >
		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
		if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
			tsb_grow(mm, MM_TSB_HUGE, mm_rss);
		else
			hugetlb_setup(regs);
	}
#endif
exit_exception:
	exception_exit(prev_state);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check whether it's kernel or user first.
	 */
bad_area:
	insn = get_fault_insn(regs, insn);
	up_read(&mm->mmap_sem);

handle_kernel_fault:
	do_kernel_fault(regs, si_code, fault_code, insn, address);
	goto exit_exception;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	insn = get_fault_insn(regs, insn);
	up_read(&mm->mmap_sem);
	if (!(regs->tstate & TSTATE_PRIV)) {
		pagefault_out_of_memory();
		goto exit_exception;
	}
	goto handle_kernel_fault;

intr_or_no_mm:
	insn = get_fault_insn(regs, 0);
	goto handle_kernel_fault;

do_sigbus:
	insn = get_fault_insn(regs, insn);
	up_read(&mm->mmap_sem);

	/*
	 * Send a SIGBUS, regardless of whether we were in kernel
	 * or user mode.
	 */
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);

	/* Kernel mode? Handle exceptions or die */
	if (regs->tstate & TSTATE_PRIV)
		goto handle_kernel_fault;
}