linux/arch/sparc/mm/fault_32.c
/*
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/memreg.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/traps.h>
#include <asm/uaccess.h>

extern int prom_node_root;

/* At boot time we determine these two values necessary for setting
 * up the segment maps and page table entries (pte's).
 */

int num_segmaps, num_contexts;
int invalid_segment;

/* various Virtual Address Cache parameters we find at boot time... */

int vac_size, vac_linesize, vac_do_hw_vac_flushes;
int vac_entries_per_context, vac_entries_per_segment;
int vac_entries_per_page;

/* Return how much physical memory we have.  */
unsigned long probe_memory(void)
{
        unsigned long total = 0;
        int i;

        for (i = 0; sp_banks[i].num_bytes; i++)
                total += sp_banks[i].num_bytes;

        return total;
}

extern void sun4c_complete_all_stores(void);

/* Whee, a level 15 NMI interrupt memory error.  Let's have fun... */
asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
                                unsigned long svaddr, unsigned long aerr,
                                unsigned long avaddr)
{
        sun4c_complete_all_stores();
        printk("FAULT: NMI received\n");
        printk("SREGS: Synchronous Error %08lx\n", serr);
        printk("       Synchronous Vaddr %08lx\n", svaddr);
        printk("      Asynchronous Error %08lx\n", aerr);
        printk("      Asynchronous Vaddr %08lx\n", avaddr);
        if (sun4c_memerr_reg)
                printk("     Memory Parity Error %08lx\n", *sun4c_memerr_reg);
        printk("REGISTER DUMP:\n");
        show_regs(regs);
        prom_halt();
}

static void unhandled_fault(unsigned long, struct task_struct *,
                struct pt_regs *) __attribute__ ((noreturn));

static void unhandled_fault(unsigned long address, struct task_struct *tsk,
                     struct pt_regs *regs)
{
        if((unsigned long) address < PAGE_SIZE) {
                printk(KERN_ALERT
                    "Unable to handle kernel NULL pointer dereference\n");
        } else {
                printk(KERN_ALERT "Unable to handle kernel paging request "
                       "at virtual address %08lx\n", address);
        }
        printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
                (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
        printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
                (tsk->mm ? (unsigned long) tsk->mm->pgd :
                        (unsigned long) tsk->active_mm->pgd));
        die_if_kernel("Oops", regs);
}

asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
                            unsigned long address)
{
        struct pt_regs regs;
        unsigned long g2;
        unsigned int insn;
        int i;

        i = search_extables_range(ret_pc, &g2);
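        /*
         * Note (editorial, assumes the SPARC v8 load/store encoding): in
         * the decode below, bit 21 of the instruction is op3 bit 2, which
         * is set for stores and clear for loads, and op3 == 0x0f is SWAP.
         * The small return codes tell the low-level caller which kind of
         * access the exception-table fixup covers.
         */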
        switch (i) {
        case 3:
                /* load & store will be handled by fixup */
                return 3;

        case 1:
                /* store will be handled by fixup, load will bump out */
                /* for _to_ macros */
                insn = *((unsigned int *) pc);
                if ((insn >> 21) & 1)
                        return 1;
                break;

        case 2:
                /* load will be handled by fixup, store will bump out */
                /* for _from_ macros */
                insn = *((unsigned int *) pc);
                if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
                        return 2;
                break;

        default:
                break;
        }

        memset(&regs, 0, sizeof (regs));
        regs.pc = pc;
        regs.npc = pc + 4;
        __asm__ __volatile__(
                "rd %%psr, %0\n\t"
                "nop\n\t"
                "nop\n\t"
                "nop\n" : "=r" (regs.psr));
        unhandled_fault(address, current, &regs);

        /* Not reached */
        return 0;
}

extern unsigned long safe_compute_effective_address(struct pt_regs *,
                                                    unsigned int);

static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
        unsigned int insn;

        if (text_fault)
                return regs->pc;

        if (regs->psr & PSR_PS) {
                insn = *(unsigned int *) regs->pc;
        } else {
                __get_user(insn, (unsigned int *) regs->pc);
        }

        return safe_compute_effective_address(regs, insn);
}

asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
                               unsigned long address)
{
        struct vm_area_struct *vma;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        unsigned int fixup;
        unsigned long g2;
        siginfo_t info;
        int from_user = !(regs->psr & PSR_PS);
        int fault;

        if(text_fault)
                address = regs->pc;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         */
        if (!ARCH_SUN4C && address >= TASK_SIZE)
                goto vmalloc_fault;

        info.si_code = SEGV_MAPERR;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;

        down_read(&mm->mmap_sem);

        /*
         * The kernel referencing a bad kernel pointer can lock up
         * a sun4c machine completely, so we must attempt recovery.
         */
        if(!from_user && address >= PAGE_OFFSET)
                goto bad_area;

        vma = find_vma(mm, address);
        if(!vma)
                goto bad_area;
        if(vma->vm_start <= address)
                goto good_area;
        if(!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if(expand_stack(vma, address))
                goto bad_area;
        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        info.si_code = SEGV_ACCERR;
        if(write) {
                if(!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                /* Allow reads even for write-only mappings */
                if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
        if (fault & VM_FAULT_MAJOR)
                current->maj_flt++;
        else
                current->min_flt++;
        up_read(&mm->mmap_sem);
        return;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if(from_user) {
#if 0
                printk("Fault whee %s [%d]: segfaults at %08lx pc=%08lx\n",
                       tsk->comm, tsk->pid, address, regs->pc);
#endif
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                /* info.si_code set above to make clear whether
                   this was a SEGV_MAPERR or SEGV_ACCERR fault.  */
                info.si_addr = (void __user *)compute_si_addr(regs, text_fault);
                info.si_trapno = 0;
                force_sig_info (SIGSEGV, &info, tsk);
                return;
        }

        /* Is this in ex_table? */
no_context:
        g2 = regs->u_regs[UREG_G2];
        if (!from_user) {
                fixup = search_extables_range(regs->pc, &g2);
                if (fixup > 10) { /* Values below are reserved for other things */
                        extern const unsigned __memset_start[];
                        extern const unsigned __memset_end[];
                        extern const unsigned __csum_partial_copy_start[];
                        extern const unsigned __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
                        printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address);
                        printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
                                regs->pc, fixup, g2);
#endif
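                        /*
                         * For faults inside the __memset or
                         * __csum_partial_copy ranges, pass the faulting
                         * address in %i4 and the original PC in %i5
                         * before branching to the fixup (editorial
                         * assumption: that is what those fixup stubs
                         * expect to find there).
                         */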
                        if ((regs->pc >= (unsigned long)__memset_start &&
                             regs->pc < (unsigned long)__memset_end) ||
                            (regs->pc >= (unsigned long)__csum_partial_copy_start &&
                             regs->pc < (unsigned long)__csum_partial_copy_end)) {
                                regs->u_regs[UREG_I4] = address;
                                regs->u_regs[UREG_I5] = regs->pc;
                        }
                        regs->u_regs[UREG_G2] = g2;
                        regs->pc = fixup;
                        regs->npc = regs->pc + 4;
                        return;
                }
        }

        unhandled_fault (address, tsk, regs);
        do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (from_user) {
                pagefault_out_of_memory();
                return;
        }
        goto no_context;

do_sigbus:
        up_read(&mm->mmap_sem);
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void __user *) compute_si_addr(regs, text_fault);
        info.si_trapno = 0;
        force_sig_info (SIGBUS, &info, tsk);
        if (!from_user)
                goto no_context;

vmalloc_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 */
                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                pmd_t *pmd, *pmd_k;

                pgd = tsk->active_mm->pgd + offset;
                pgd_k = init_mm.pgd + offset;
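                /*
                 * active_mm rather than mm: a kernel thread has no mm of
                 * its own but always runs on some borrowed address space,
                 * and it is that pgd which must be brought in sync with
                 * init_mm here.
                 */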

                if (!pgd_present(*pgd)) {
                        if (!pgd_present(*pgd_k))
                                goto bad_area_nosemaphore;
                        pgd_val(*pgd) = pgd_val(*pgd_k);
                        return;
                }

                pmd = pmd_offset(pgd, address);
                pmd_k = pmd_offset(pgd_k, address);

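                /*
                 * If our pmd is already present the fault was not caused
                 * by a missing vmalloc mapping, and if the kernel's pmd
                 * is absent there is nothing to copy; either way this is
                 * a genuine bad access.
                 */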
                if (pmd_present(*pmd) || !pmd_present(*pmd_k))
                        goto bad_area_nosemaphore;
                *pmd = *pmd_k;
                return;
        }
}

asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
                               unsigned long address)
{
        extern void sun4c_update_mmu_cache(struct vm_area_struct *,
                                           unsigned long,pte_t);
        extern pte_t *sun4c_pte_offset_kernel(pmd_t *,unsigned long);
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        pgd_t *pgdp;
        pte_t *ptep;

        if (text_fault) {
                address = regs->pc;
        } else if (!write &&
                   !(regs->psr & PSR_PS)) {
                unsigned int insn, __user *ip;

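                /*
                 * A fault reported as a read may actually come from an
                 * atomic read-modify-write instruction, which needs write
                 * permission as well.  The masked compare below appears
                 * to match the ldstub/swap family (and their ASI
                 * variants), so treat those as writes.
                 */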
                ip = (unsigned int __user *)regs->pc;
                if (!get_user(insn, ip)) {
                        if ((insn & 0xc1680000) == 0xc0680000)
                                write = 1;
                }
        }

        if (!mm) {
                /* We are oopsing. */
                do_sparc_fault(regs, text_fault, write, address);
                BUG();  /* P3 Oops already, you bitch */
        }

        pgdp = pgd_offset(mm, address);
        ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address);

        if (pgd_val(*pgdp)) {
            if (write) {
                if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT))
                                   == (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) {
                        unsigned long flags;

                        *ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
                                      _SUN4C_PAGE_MODIFIED |
                                      _SUN4C_PAGE_VALID |
                                      _SUN4C_PAGE_DIRTY);

                        local_irq_save(flags);
                        if (sun4c_get_segmap(address) != invalid_segment) {
                                sun4c_put_pte(address, pte_val(*ptep));
                                local_irq_restore(flags);
                                return;
                        }
                        local_irq_restore(flags);
                }
            } else {
                if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
                                   == (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) {
                        unsigned long flags;

                        *ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
                                      _SUN4C_PAGE_VALID);

                        local_irq_save(flags);
                        if (sun4c_get_segmap(address) != invalid_segment) {
                                sun4c_put_pte(address, pte_val(*ptep));
                                local_irq_restore(flags);
                                return;
                        }
                        local_irq_restore(flags);
                }
            }
        }

        /*
         * This conditional is 'interesting': the pte is valid and, for a
         * write fault, writable, so the access itself is permitted and the
         * fault was only a missing sun4c MMU entry; reloading it via
         * update_mmu_cache() is enough.
         */
        if (pgd_val(*pgdp) && !(write && !(pte_val(*ptep) & _SUN4C_PAGE_WRITE))
            && (pte_val(*ptep) & _SUN4C_PAGE_VALID))
                /* Note: It is safe to not grab the MMAP semaphore here because
                 *       we know that update_mmu_cache() will not sleep for
                 *       any reason (at least not in the current implementation)
                 *       and therefore there is no danger of another thread getting
                 *       on the CPU and doing a shrink_mmap() on this vma.
                 */
                sun4c_update_mmu_cache (find_vma(current->mm, address), address,
                                        *ptep);
        else
                do_sparc_fault(regs, text_fault, write, address);
}

/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
        struct vm_area_struct *vma;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        siginfo_t info;

        info.si_code = SEGV_MAPERR;

#if 0
        printk("wf<pid=%d,wr=%d,addr=%08lx>\n",
               tsk->pid, write, address);
#endif
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if(!vma)
                goto bad_area;
        if(vma->vm_start <= address)
                goto good_area;
        if(!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if(expand_stack(vma, address))
                goto bad_area;
good_area:
        info.si_code = SEGV_ACCERR;
        if(write) {
                if(!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
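        /*
         * Unlike do_sparc_fault(), both a SIGBUS and an out-of-memory
         * result from handle_mm_fault() are reported to the task as
         * SIGBUS here.
         */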
        switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
        case VM_FAULT_SIGBUS:
        case VM_FAULT_OOM:
                goto do_sigbus;
        }
        up_read(&mm->mmap_sem);
        return;
bad_area:
        up_read(&mm->mmap_sem);
#if 0
        printk("Window whee %s [%d]: segfaults at %08lx\n",
               tsk->comm, tsk->pid, address);
#endif
        info.si_signo = SIGSEGV;
        info.si_errno = 0;
        /* info.si_code set above to make clear whether
           this was a SEGV_MAPERR or SEGV_ACCERR fault.  */
        info.si_addr = (void __user *) address;
        info.si_trapno = 0;
        force_sig_info (SIGSEGV, &info, tsk);
        return;

do_sigbus:
        up_read(&mm->mmap_sem);
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void __user *) address;
        info.si_trapno = 0;
        force_sig_info (SIGBUS, &info, tsk);
}

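/*
 * Editorial note on the 0x38 checks below: a register window save area
 * on the stack is 16 words (0x40 bytes), and %sp is kept doubleword
 * aligned, so if sp + 0x38 lies on a different page than sp the window
 * straddles a page boundary and both pages must be faulted in before
 * the window can be spilled or filled.
 */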
void window_overflow_fault(void)
{
        unsigned long sp;

        sp = current_thread_info()->rwbuf_stkptrs[0];
        if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
                force_user_fault(sp + 0x38, 1);
        force_user_fault(sp, 1);
}

void window_underflow_fault(unsigned long sp)
{
        if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
                force_user_fault(sp + 0x38, 0);
        force_user_fault(sp, 0);
}

void window_ret_fault(struct pt_regs *regs)
{
        unsigned long sp;

        sp = regs->u_regs[UREG_FP];
        if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
                force_user_fault(sp + 0x38, 0);
        force_user_fault(sp, 0);
}