linux/arch/xtensa/mm/fault.c
// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2010 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor   <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
#include <asm/pgalloc.h>

DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int);

#undef DEBUG_PAGE_FAULT

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * Note: does not handle Miss and MultiHit.
 */

void do_page_fault(struct pt_regs *regs)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned int exccause = regs->exccause;
        unsigned int address = regs->excvaddr;
        siginfo_t info;

        int is_write, is_exec;
        int fault;
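        /* Allow handle_mm_fault() to drop mmap_sem and ask for a retry, and
         * let a fatal signal interrupt the fault while it waits.
         */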
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

        info.si_code = SEGV_MAPERR;

        /* We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         */
        if (address >= TASK_SIZE && !user_mode(regs))
                goto vmalloc_fault;

        /* If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (faulthandler_disabled() || !mm) {
                bad_page_fault(regs, address, SIGSEGV);
                return;
        }

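        /* Classify the fault from EXCCAUSE: a store cache-attribute exception
         * is a write fault; ITLB miss/privilege and fetch cache-attribute
         * exceptions are instruction fetches; everything else is a read.
         */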
        is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
        is_exec =  (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
                    exccause == EXCCAUSE_ITLB_MISS ||
                    exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

#ifdef DEBUG_PAGE_FAULT
        printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
               address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
#endif

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);

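        /* find_vma() returns the first vma that ends above 'address'; the
         * address lies inside it only if vm_start <= address. Otherwise the
         * access is valid only just below a VM_GROWSDOWN (stack) vma, which
         * expand_stack() grows down to cover the address.
         */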
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;

        /* Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */

good_area:
        info.si_code = SEGV_ACCERR;

        if (is_write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                flags |= FAULT_FLAG_WRITE;
        } else if (is_exec) {
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
        } else  /* Allow read even from write-only pages. */
                if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
                        goto bad_area;

        /* If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address, flags);

        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
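        /* The fault was handled or needs a retry: update the per-task fault
         * counters, then retry at most once more with FAULT_FLAG_TRIED.
         */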
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR)
                        current->maj_flt++;
                else
                        current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;

                        /* No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */

                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
        if (fault & VM_FAULT_MAJOR)
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
        else
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);

        return;

        /* Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
bad_area:
        up_read(&mm->mmap_sem);
        if (user_mode(regs)) {
                current->thread.bad_vaddr = address;
                current->thread.error_code = is_write;
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                /* info.si_code has been set above */
                info.si_addr = (void *) address;
                force_sig_info(SIGSEGV, &info, current);
                return;
        }
        bad_page_fault(regs, address, SIGSEGV);
        return;

        /* We ran out of memory, or some other thing happened to us that made
         * us unable to handle the page fault gracefully.
         */
out_of_memory:
        up_read(&mm->mmap_sem);
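        /* A kernel-mode fault dies here; a user-mode fault defers to
         * pagefault_out_of_memory(), which lets the OOM killer pick a victim.
         */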
        if (!user_mode(regs))
                bad_page_fault(regs, address, SIGKILL);
        else
                pagefault_out_of_memory();
        return;

do_sigbus:
        up_read(&mm->mmap_sem);

        /* Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        current->thread.bad_vaddr = address;
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void *) address;
        force_sig_info(SIGBUS, &info, current);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                bad_page_fault(regs, address, SIGBUS);
        return;

vmalloc_fault:
        {
                /* Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 */
                struct mm_struct *act_mm = current->active_mm;
                int index = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;

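                /* xtensa uses two-level page tables, so the pmd level is
                 * folded into the pgd: the pgd and pmd copies below update
                 * the same top-level entry.
                 */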
                if (act_mm == NULL)
                        goto bad_page_fault;

                pgd = act_mm->pgd + index;
                pgd_k = init_mm.pgd + index;

                if (!pgd_present(*pgd_k))
                        goto bad_page_fault;

                pgd_val(*pgd) = pgd_val(*pgd_k);

                pmd = pmd_offset(pgd, address);
                pmd_k = pmd_offset(pgd_k, address);
                if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
                        goto bad_page_fault;

                pmd_val(*pmd) = pmd_val(*pmd_k);
                pte_k = pte_offset_kernel(pmd_k, address);

                if (!pte_present(*pte_k))
                        goto bad_page_fault;
                return;
        }
bad_page_fault:
        bad_page_fault(regs, address, SIGKILL);
        return;
}

void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
        extern void die(const char*, struct pt_regs*, long);
        const struct exception_table_entry *entry;

        /* Are we prepared to handle this kernel fault?  */
        if ((entry = search_exception_tables(regs->pc)) != NULL) {
#ifdef DEBUG_PAGE_FAULT
                printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
                                current->comm, regs->pc, entry->fixup);
#endif
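                /* A uaccess helper (e.g. get_user()/put_user()) registered a
                 * fixup for the faulting instruction; resume at the fixup
                 * instead of killing the kernel.
                 */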
                current->thread.bad_uaddr = address;
                regs->pc = entry->fixup;
                return;
        }

        /* Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        printk(KERN_ALERT "Unable to handle kernel paging request at virtual "
               "address %08lx\n pc = %08lx, ra = %08lx\n",
               address, regs->pc, regs->areg[0]);
        die("Oops", regs, sig);
        do_exit(sig);
}