/*
 *  linux/arch/m32r/mm/fault.c
 *
 *  Copyright (c) 2001, 2002  Hitoshi Yamamoto, and H. Kondo
 *  Copyright (c) 2004  Naoto Sugai, NIIBE Yutaka
 *
 *  Some code taken from i386 version.
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>              /* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/m32r.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

extern void die(const char *, struct pt_regs *, long);

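/*
 * Round-robin replacement indices for the ITLB and DTLB (one pair per CPU
 * on SMP), used by update_mmu_cache() below when it has to evict an entry.
 * They are declared asmlinkage presumably so low-level assembly code can
 * refer to them by name (an assumption; no asm in this file uses them).
 */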
#ifndef CONFIG_SMP
asmlinkage unsigned int tlb_entry_i_dat;
asmlinkage unsigned int tlb_entry_d_dat;
#define tlb_entry_i tlb_entry_i_dat
#define tlb_entry_d tlb_entry_d_dat
#else
unsigned int tlb_entry_i_dat[NR_CPUS];
unsigned int tlb_entry_d_dat[NR_CPUS];
#define tlb_entry_i tlb_entry_i_dat[smp_processor_id()]
#define tlb_entry_d tlb_entry_d_dat[smp_processor_id()]
#endif

extern void init_tlb(void);

/*======================================================================*
 * do_page_fault()
 *======================================================================*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * ARGUMENT:
 *  regs       : M32R SP reg.
 *  error_code : See below
 *  address    : M32R MMU MDEVA reg. (Operand ACE)
 *             : M32R BPC reg. (Instruction ACE)
 *
 * error_code :
 *  bit 0 == 0 means no page found, 1 means protection fault
 *  bit 1 == 0 means read, 1 means write
 *  bit 2 == 0 means kernel, 1 means user-mode
 *  bit 3 == 0 means data, 1 means instruction
 *======================================================================*/
#define ACE_PROTECTION          1
#define ACE_WRITE               2
#define ACE_USERMODE            4
#define ACE_INSTRUCTION         8
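
/*
 * Example decode (derived from the bit definitions above): error_code == 6
 * (ACE_WRITE | ACE_USERMODE) is a user-mode write to a page that is not
 * present, while error_code == 13 (ACE_INSTRUCTION | ACE_USERMODE |
 * ACE_PROTECTION) is a user-mode instruction fetch that hit a protection
 * violation.
 */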

asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
  unsigned long address)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long page, addr;
        unsigned long flags = 0;
        int fault;
        siginfo_t info;

        /*
         * If interrupts were enabled before the exception (IE backed up
         * into the BIE bit of the saved PSW), re-enable them now.
         */
        if (regs->psw & M32R_PSW_BIE)
                local_irq_enable();

        tsk = current;

        info.si_code = SEGV_MAPERR;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         *
         * This verifies that the fault happens in kernel space,
         * (error_code & ACE_USERMODE) == 0.  (The i386 original also
         * filtered out protection errors here; this check does not.)
         */
        if (address >= TASK_SIZE && !(error_code & ACE_USERMODE))
                goto vmalloc_fault;

        mm = tsk->mm;

        /*
         * If we're in an interrupt, have no user context, or are running
         * in an atomic region, we must not take the fault.
         */
        if (in_atomic() || !mm)
                goto bad_area_nosemaphore;

        if (error_code & ACE_USERMODE)
                flags |= FAULT_FLAG_USER;

        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
         * erroneous fault occurring in a code path which already holds mmap_sem
         * we will deadlock attempting to validate the fault against the
         * address space.  Luckily the kernel only validly references user
         * space from well defined areas of code, which are listed in the
         * exceptions table.
         *
         * As the vast majority of faults will be valid we will only perform
         * the source reference check when there is a possibility of a deadlock.
         * Attempt to lock the address space, if we cannot we then validate the
         * source.  If this is invalid we can skip the address space check,
         * thus avoiding the deadlock.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                if ((error_code & ACE_USERMODE) == 0 &&
                    !search_exception_tables(regs->psw))
                        goto bad_area_nosemaphore;
                down_read(&mm->mmap_sem);
        }

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;

        if (error_code & ACE_USERMODE) {
                /*
                 * Accessing the stack below "spu" is always a bug.
                 * The "+ 4" is there because the push instruction
                 * pre-decrements the stack pointer, and that decrement
                 * doesn't show up until later.
                 */
                if (address + 4 < regs->spu)
                        goto bad_area;
        }

        if (expand_stack(vma, address))
                goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it.
 */
good_area:
        info.si_code = SEGV_ACCERR;
        switch (error_code & (ACE_WRITE|ACE_PROTECTION)) {
                default:        /* 3: write, present */
                        /* fall through */
                case ACE_WRITE: /* write, not present */
                        if (!(vma->vm_flags & VM_WRITE))
                                goto bad_area;
                        flags |= FAULT_FLAG_WRITE;
                        break;
                case ACE_PROTECTION:    /* read, present */
                case 0:         /* read, not present */
                        if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                                goto bad_area;
        }

        /*
         * For an instruction access exception, check that the area is
         * executable.
         */
        if ((error_code & ACE_INSTRUCTION) && !(vma->vm_flags & VM_EXEC))
                goto bad_area;

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        addr = (address & PAGE_MASK);
        set_thread_fault_code(error_code);
        fault = handle_mm_fault(mm, vma, addr, flags);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
        if (fault & VM_FAULT_MAJOR)
                tsk->maj_flt++;
        else
                tsk->min_flt++;
        set_thread_fault_code(0);
        up_read(&mm->mmap_sem);
        return;

/*
 * Something tried to access memory that isn't in our memory map.
 * Fix it, but check whether it's a kernel or user fault first.
 */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (error_code & ACE_USERMODE) {
                tsk->thread.address = address;
                tsk->thread.error_code = error_code | (address >= TASK_SIZE);
                tsk->thread.trap_no = 14;
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                /* info.si_code has been set above */
                info.si_addr = (void __user *)address;
                force_sig_info(SIGSEGV, &info, tsk);
                return;
        }

no_context:
        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs))
                return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

        bust_spinlocks(1);

        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n", address);
        printk(KERN_ALERT " printing bpc:\n");
        printk("%08lx\n", regs->bpc);
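        /*
         * Dump the offending page-table entries by hand.  MPTB holds the
         * base of the live page table (init_mmu() below points it at
         * swapper_pg_dir).  The 0x003ff000 mask extracts the 10-bit pte
         * index of the two-level 4KB-page layout (an inference from the
         * shifts used here, not from chip documentation).
         */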
        page = *(unsigned long *)MPTB;
        page = ((unsigned long *) page)[address >> PGDIR_SHIFT];
        printk(KERN_ALERT "*pde = %08lx\n", page);
        if (page & _PAGE_PRESENT) {
                page &= PAGE_MASK;
                address &= 0x003ff000;
                page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
                printk(KERN_ALERT "*pte = %08lx\n", page);
        }
        die("Oops", regs, error_code);
        bust_spinlocks(0);
        do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (!(error_code & ACE_USERMODE))
                goto no_context;
        pagefault_out_of_memory();
        return;

do_sigbus:
        up_read(&mm->mmap_sem);

        /* Kernel mode? Handle exception or die */
        if (!(error_code & ACE_USERMODE))
                goto no_context;

        tsk->thread.address = address;
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 14;
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGBUS, &info, tsk);
        return;

vmalloc_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Do _not_ use "tsk" here. We might be inside
                 * an interrupt in the middle of a task switch.
                 */
                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;

                pgd = (pgd_t *)*(unsigned long *)MPTB;
                pgd = offset + (pgd_t *)pgd;
                pgd_k = init_mm.pgd + offset;

                if (!pgd_present(*pgd_k))
                        goto no_context;

                /*
                 * set_pgd(pgd, *pgd_k); here would be useless on PAE
                 * and redundant with the set_pmd() on non-PAE.
                 */

                pmd = pmd_offset(pgd, address);
                pmd_k = pmd_offset(pgd_k, address);
                if (!pmd_present(*pmd_k))
                        goto no_context;
                set_pmd(pmd, *pmd_k);

                pte_k = pte_offset_kernel(pmd_k, address);
                if (!pte_present(*pte_k))
                        goto no_context;

                addr = (address & PAGE_MASK);
                set_thread_fault_code(error_code);
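                /*
                 * vma is NULL here: update_mmu_cache() tolerates that and
                 * only uses vma for its ptrace check against
                 * current->active_mm (see below).
                 */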
                update_mmu_cache(NULL, addr, pte_k);
                set_thread_fault_code(0);
                return;
        }
}

/*======================================================================*
 * update_mmu_cache()
 *======================================================================*/
#define TLB_MASK        (NR_TLB_ENTRIES - 1)
#define ITLB_END        (unsigned long *)(ITLB_BASE + (NR_TLB_ENTRIES * 8))
#define DTLB_END        (unsigned long *)(DTLB_BASE + (NR_TLB_ENTRIES * 8))
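/*
 * Each ITLB/DTLB entry is a pair of 32-bit words: a tag (virtual page
 * address plus ASID) followed by a data word (the pte), hence the "* 8"
 * above and the stride of two longs in the scans below.
 */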
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
        pte_t *ptep)
{
        volatile unsigned long *entry1, *entry2;
        unsigned long pte_data, flags;
        unsigned int *entry_dat;
        int inst = get_thread_fault_code() & ACE_INSTRUCTION;
        int i;

        /* Ptrace may call this routine. */
        if (vma && current->active_mm != vma->vm_mm)
                return;

        local_irq_save(flags);

        vaddr = (vaddr & PAGE_MASK) | get_asid();

        pte_data = pte_val(*ptep);

#ifdef CONFIG_CHIP_OPSP
        entry1 = (unsigned long *)ITLB_BASE;
        for (i = 0; i < NR_TLB_ENTRIES; i++) {
                if (*entry1++ == vaddr) {
                        set_tlb_data(entry1, pte_data);
                        break;
                }
                entry1++;
        }
        entry2 = (unsigned long *)DTLB_BASE;
        for (i = 0; i < NR_TLB_ENTRIES; i++) {
                if (*entry2++ == vaddr) {
                        set_tlb_data(entry2, pte_data);
                        break;
                }
                entry2++;
        }
#else
        /*
         * Update TLB entries
         *  entry1: ITLB entry address
         *  entry2: DTLB entry address
         */
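        /*
         * Rough reading of the asm below (an interpretation of the code,
         * not of chip documentation): the faulting virtual address is
         * written to the MSVA register and a lookup is started by writing
         * 1 to MTOP; once MTOP reads back 0, the matched ITLB and DTLB
         * entry addresses are loaded from MIDXI and the register that
         * follows it, and the new pte is stored into both data words.
         */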
        __asm__ __volatile__ (
                "seth   %0, #high(%4)   \n\t"
                "st     %2, @(%5, %0)   \n\t"
                "ldi    %1, #1          \n\t"
                "st     %1, @(%6, %0)   \n\t"
                "add3   r4, %0, %7      \n\t"
                ".fillinsn              \n"
                "1:                     \n\t"
                "ld     %1, @(%6, %0)   \n\t"
                "bnez   %1, 1b          \n\t"
                "ld     %0, @r4+        \n\t"
                "ld     %1, @r4         \n\t"
                "st     %3, @+%0        \n\t"
                "st     %3, @+%1        \n\t"
                : "=&r" (entry1), "=&r" (entry2)
                : "r" (vaddr), "r" (pte_data), "i" (MMU_REG_BASE),
                "i" (MSVA_offset), "i" (MTOP_offset), "i" (MIDXI_offset)
                : "r4", "memory"
        );
#endif

        if ((!inst && entry2 >= DTLB_END) || (inst && entry1 >= ITLB_END))
                goto notfound;

found:
        local_irq_restore(flags);

        return;

        /* Valid entry not found */
notfound:
        /*
         * Update ITLB or DTLB entry
         *  entry1: TLB entry address
         *  entry2: TLB base address
         */
        if (!inst) {
                entry2 = (unsigned long *)DTLB_BASE;
                entry_dat = &tlb_entry_d;
        } else {
                entry2 = (unsigned long *)ITLB_BASE;
                entry_dat = &tlb_entry_i;
        }
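        /*
         * Scan backwards from the slot just before the round-robin
         * pointer, looking for an entry whose valid bit (bit 1 of the
         * data word) is clear; if none is free, evict the slot the
         * round-robin pointer names and advance the pointer.
         */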
        entry1 = entry2 + (((*entry_dat - 1) & TLB_MASK) << 1);

        for (i = 0 ; i < NR_TLB_ENTRIES ; i++) {
                if (!(entry1[1] & 2))   /* Valid bit check */
                        break;

                if (entry1 != entry2)
                        entry1 -= 2;
                else
                        entry1 += TLB_MASK << 1;
        }

        if (i >= NR_TLB_ENTRIES) {      /* Empty entry not found */
                entry1 = entry2 + (*entry_dat << 1);
                *entry_dat = (*entry_dat + 1) & TLB_MASK;
        }
        *entry1++ = vaddr;      /* Set TLB tag */
        set_tlb_data(entry1, pte_data);

        goto found;
}

/*======================================================================*
 * local_flush_tlb_page() : flushes one page
 *======================================================================*/
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        if (vma->vm_mm && mm_context(vma->vm_mm) != NO_CONTEXT) {
                unsigned long flags;

                local_irq_save(flags);
                page &= PAGE_MASK;
                page |= (mm_context(vma->vm_mm) & MMU_CONTEXT_ASID_MASK);
                __flush_tlb_page(page);
                local_irq_restore(flags);
        }
}

/*======================================================================*
 * local_flush_tlb_range() : flushes a range of pages
 *======================================================================*/
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        struct mm_struct *mm;

        mm = vma->vm_mm;
        if (mm_context(mm) != NO_CONTEXT) {
                unsigned long flags;
                int size;

                local_irq_save(flags);
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
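                /*
                 * Flushing more than a quarter of the TLB page by page
                 * would cost more than simply dropping this mm's context
                 * and letting it get a fresh ASID on the next activation.
                 */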
                if (size > (NR_TLB_ENTRIES / 4)) { /* Too many TLB entries to flush */
                        mm_context(mm) = NO_CONTEXT;
                        if (mm == current->mm)
                                activate_context(mm);
                } else {
                        unsigned long asid;

                        asid = mm_context(mm) & MMU_CONTEXT_ASID_MASK;
                        start &= PAGE_MASK;
                        end += (PAGE_SIZE - 1);
                        end &= PAGE_MASK;

                        start |= asid;
                        end   |= asid;
                        while (start < end) {
                                __flush_tlb_page(start);
                                start += PAGE_SIZE;
                        }
                }
                local_irq_restore(flags);
        }
}

/*======================================================================*
 * local_flush_tlb_mm() : flushes the specified mm context's TLB entries
 *======================================================================*/
void local_flush_tlb_mm(struct mm_struct *mm)
{
        /*
         * Instead of invalidating each of this process's TLB entries,
         * drop its MMU context; it will be given a new ASID when next
         * activated.
         */
        if (mm_context(mm) != NO_CONTEXT) {
                unsigned long flags;

                local_irq_save(flags);
                mm_context(mm) = NO_CONTEXT;
                if (mm == current->mm)
                        activate_context(mm);
                local_irq_restore(flags);
        }
}

/*======================================================================*
 * local_flush_tlb_all() : flushes all processes' TLBs
 *======================================================================*/
void local_flush_tlb_all(void)
{
        unsigned long flags;

        local_irq_save(flags);
        __flush_tlb_all();
        local_irq_restore(flags);
}

/*======================================================================*
 * init_mmu()
 *======================================================================*/
void __init init_mmu(void)
{
        tlb_entry_i = 0;
        tlb_entry_d = 0;
        mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
        set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK);
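        /* Point the MMU's page-table base register (MPTB) at the kernel's
           reference page table; do_page_fault() above reads it back in the
           vmalloc fault path and when dumping page-table entries. */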
        *(volatile unsigned long *)MPTB = (unsigned long)swapper_pg_dir;
}