linux/arch/sparc/mm/fault_32.c
/*
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/memreg.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/traps.h>
#include <asm/uaccess.h>

extern int prom_node_root;

int show_unhandled_signals = 1;

/* At boot time we determine these two values necessary for setting
 * up the segment maps and page table entries (pte's).
 */

int num_segmaps, num_contexts;
int invalid_segment;

/* various Virtual Address Cache parameters we find at boot time... */

int vac_size, vac_linesize, vac_do_hw_vac_flushes;
int vac_entries_per_context, vac_entries_per_segment;
int vac_entries_per_page;

/* Return how much physical memory we have.  */
unsigned long probe_memory(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; sp_banks[i].num_bytes; i++)
		total += sp_banks[i].num_bytes;

	return total;
}
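/*
 * sp_banks[] is the table of physical memory ranges filled in by the
 * boot-time memory probe, terminated by an entry with num_bytes == 0,
 * so the sum computed above is the machine's total physical memory.
 */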

extern void sun4c_complete_all_stores(void);

/* Whee, a level 15 NMI interrupt memory error.  Let's have fun... */
asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
				unsigned long svaddr, unsigned long aerr,
				unsigned long avaddr)
{
	sun4c_complete_all_stores();
	printk("FAULT: NMI received\n");
	printk("SREGS: Synchronous Error %08lx\n", serr);
	printk("       Synchronous Vaddr %08lx\n", svaddr);
	printk("      Asynchronous Error %08lx\n", aerr);
	printk("      Asynchronous Vaddr %08lx\n", avaddr);
	if (sun4c_memerr_reg)
		printk("     Memory Parity Error %08lx\n", *sun4c_memerr_reg);
	printk("REGISTER DUMP:\n");
	show_regs(regs);
	prom_halt();
}
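/*
 * prom_halt() does not return; it drops the machine into the OpenBoot
 * PROM monitor, since a level-15 memory error is treated as fatal.
 */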

static void unhandled_fault(unsigned long, struct task_struct *,
		struct pt_regs *) __attribute__ ((noreturn));

static void unhandled_fault(unsigned long address, struct task_struct *tsk,
		     struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		    "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request "
		       "at virtual address %08lx\n", address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
		(tsk->mm ? (unsigned long) tsk->mm->pgd :
			(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
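	/*
	 * search_extables_range() tells us how the exception-table fixup
	 * covers this range: 3 means both loads and stores are fixed up;
	 * 1 means only stores are (the copy_to_user case); 2 means only
	 * loads are (the copy_from_user case).  In the SPARC format-3
	 * encoding tested below, instruction bit 21 is the "store" bit
	 * of the op3 field, and op3 == 0x0f is SWAP, which both reads
	 * and writes memory.
	 */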
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
			return 2;
		break;

	default:
		break;
	}

	memset(&regs, 0, sizeof(regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}

static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}

static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	siginfo_t info;

	info.si_signo = sig;
	info.si_code = code;
	info.si_errno = 0;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;

	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, info.si_code,
				addr, current);

	force_sig_info(sig, &info, current);
}

extern unsigned long safe_compute_effective_address(struct pt_regs *,
						    unsigned int);

static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

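	/*
	 * For a data fault we must decode the faulting load/store to
	 * recover the effective address.  PSR_PS set means the trap came
	 * from supervisor mode, so the instruction can be read directly;
	 * otherwise it lives in user space and is fetched with
	 * __get_user().
	 */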
	if (regs->psr & PSR_PS) {
		insn = *(unsigned int *) regs->pc;
	} else {
		__get_user(insn, (unsigned int *) regs->pc);
	}

	return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}

asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int fault, code;

	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (!ARCH_SUN4C && address >= TASK_SIZE)
		goto vmalloc_fault;

	code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

	down_read(&mm->mmap_sem);

	/*
	 * The kernel referencing a bad kernel pointer can lock up
	 * a sun4c machine completely, so we must attempt recovery.
	 */
	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR) {
		current->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
			      regs, address);
	} else {
		current->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
			      regs, address);
	}
	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		if (fixup > 10) { /* Values below are reserved for other things */
			extern const unsigned __memset_start[];
			extern const unsigned __memset_end[];
			extern const unsigned __csum_partial_copy_start[];
			extern const unsigned __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
				regs->pc, fixup, g2);
#endif
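			/*
			 * The fixup handlers for __memset and
			 * __csum_partial_copy take the faulting address
			 * in %i4 and the faulting PC in %i5, so load
			 * those registers when the fault lies in one of
			 * their ranges.
			 */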
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		*pmd = *pmd_k;
		return;
	}
}

asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	extern void sun4c_update_mmu_cache(struct vm_area_struct *,
					   unsigned long, pte_t *);
	extern pte_t *sun4c_pte_offset_kernel(pmd_t *, unsigned long);
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	pgd_t *pgdp;
	pte_t *ptep;

	if (text_fault) {
		address = regs->pc;
	} else if (!write &&
		   !(regs->psr & PSR_PS)) {
		unsigned int insn, __user *ip;

		ip = (unsigned int __user *)regs->pc;
		if (!get_user(insn, ip)) {
			if ((insn & 0xc1680000) == 0xc0680000)
				write = 1;
		}
	}
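	/*
	 * The mask test above matches the atomic read-modify-write
	 * instructions (the LDSTUB/SWAP family): they trap as reads but
	 * also store, so they must be treated as writes for permission
	 * checking.
	 */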

	if (!mm) {
		/* We are oopsing. */
		do_sparc_fault(regs, text_fault, write, address);
		BUG();	/* do_sparc_fault() should have oopsed and never returned */
	}

	pgdp = pgd_offset(mm, address);
	ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address);

	if (pgd_val(*pgdp)) {
		if (write) {
			if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT))
					== (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) {
				unsigned long flags;

				*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
					      _SUN4C_PAGE_MODIFIED |
					      _SUN4C_PAGE_VALID |
					      _SUN4C_PAGE_DIRTY);

				local_irq_save(flags);
				if (sun4c_get_segmap(address) != invalid_segment) {
					sun4c_put_pte(address, pte_val(*ptep));
					local_irq_restore(flags);
					return;
				}
				local_irq_restore(flags);
			}
		} else {
			if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
					== (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) {
				unsigned long flags;

				*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
					      _SUN4C_PAGE_VALID);

				local_irq_save(flags);
				if (sun4c_get_segmap(address) != invalid_segment) {
					sun4c_put_pte(address, pte_val(*ptep));
					local_irq_restore(flags);
					return;
				}
				local_irq_restore(flags);
			}
		}
	}

	/*
	 * If the mapping is present, valid and permits this access, the
	 * fault was only due to a missing sun4c MMU translation, so
	 * reloading it via update_mmu_cache() is enough.
	 */
	if (pgd_val(*pgdp) && !(write && !(pte_val(*ptep) & _SUN4C_PAGE_WRITE))
	    && (pte_val(*ptep) & _SUN4C_PAGE_VALID))
		/* Note: It is safe to not grab the MMAP semaphore here because
		 *       we know that update_mmu_cache() will not sleep for
		 *       any reason (at least not in the current implementation)
		 *       and therefore there is no danger of another thread getting
		 *       on the CPU and doing a shrink_mmap() on this vma.
		 */
		sun4c_update_mmu_cache(find_vma(current->mm, address), address,
				       ptep);
	else
		do_sparc_fault(regs, text_fault, write, address);
}

/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code;

	code = SEGV_MAPERR;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL, current);
}
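
/*
 * A register window is spilled to the stack as sixteen 32-bit words
 * (64 bytes) using doubleword stores, which is why %sp must be 8-byte
 * aligned and why sp + 0x38 is the final doubleword of the save area.
 * Each handler below touches both ends of the save area so that both
 * pages are faulted in when it straddles a page boundary.
 */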
void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}