linux/arch/ia64/mm/fault.c
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/processor.h>

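/*
 * die() lives in arch/ia64/kernel/traps.c; it prints the oops and
 * returns nonzero when a registered die-notifier (e.g. a debugger)
 * claims the fault, in which case the caller must not kill the task.
 */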
extern int die(char *, struct pt_regs *, long);

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
        int ret = 0;

        if (!user_mode(regs)) {
                /* kprobe_running() needs smp_processor_id() */
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, trap))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
        return 0;
}
#endif

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        pgd = pgd_offset_k(address);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return 0;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud) || pud_bad(*pud))
                return 0;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return 0;

        ptep = pte_offset_kernel(pmd, address);
        if (!ptep)
                return 0;

        pte = *ptep;
        return pte_present(pte);
}

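/*
 * Bit positions of VM_READ, VM_WRITE and VM_EXEC within vm_flags, so
 * that the ISR access bits can be shifted directly into a mask that is
 * comparable against vma->vm_flags.  The #if check in
 * ia64_do_page_fault() below verifies these stay in sync with
 * <linux/mm.h>.
 */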
#       define VM_READ_BIT      0
#       define VM_WRITE_BIT     1
#       define VM_EXEC_BIT      2

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
        int signal = SIGSEGV, code = SEGV_MAPERR;
        struct vm_area_struct *vma, *prev_vma;
        struct mm_struct *mm = current->mm;
        struct siginfo si;
        unsigned long mask;
        int fault;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

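        /*
         * Translate the access type recorded in the ISR into vm_flags
         * form: ISR.x (execute) lands on VM_EXEC and ISR.w (write) on
         * VM_WRITE, so "mask" can be tested against vma->vm_flags below.
         */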
        mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
                | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

        /* mmap_sem is performance critical... */
        prefetchw(&mm->mmap_sem);

        /*
         * If we're in an interrupt or have no user context, we must not
         * take the fault.
         */
        if (faulthandler_disabled() || !mm)
                goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
        /*
         * If the fault is in region 5 and we are in the kernel, we may
         * already hold the mmap_sem (the pfn_valid macro is called during
         * mmap).  There is no vma for region 5 addresses anyway, so skip
         * taking the semaphore and go directly to the exception-handling
         * code.
         */
        if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
                goto bad_area_no_up;
#endif

        /*
         * This is to handle kprobes on user-space access instructions.
         */
        if (notify_page_fault(regs, TRAP_BRKPT))
                return;

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        if (mask & VM_WRITE)
                flags |= FAULT_FLAG_WRITE;
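
        /*
         * First attempt: FAULT_FLAG_ALLOW_RETRY lets handle_mm_fault()
         * drop mmap_sem and sleep for I/O.  If it does, we come back to
         * the retry label below exactly once, with FAULT_FLAG_TRIED set
         * and FAULT_FLAG_ALLOW_RETRY cleared.
         */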
retry:
        down_read(&mm->mmap_sem);

        vma = find_vma_prev(mm, address, &prev_vma);
        if (!vma && !prev_vma)
                goto bad_area;

        /*
         * find_vma_prev() returns vma such that address < vma->vm_end,
         * or NULL.
         *
         * We may find no vma, but the last vm area could be the register
         * backing store, which needs to expand upwards; in that case vma
         * will be NULL, but prev_vma will be non-NULL.
         */
        if ((!vma && prev_vma) || (address < vma->vm_start))
                goto check_expansion;

  good_area:
        code = SEGV_ACCERR;

        /* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#       if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
            || (1 << VM_EXEC_BIT) != VM_EXEC)
#               error File is out of sync with <linux/mm.h>.  Please update.
#       endif

        if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
                goto bad_area;

        if ((vma->vm_flags & mask) != mask)
                goto bad_area;

        /*
         * If for any reason at all we couldn't handle the fault, make
         * sure we exit gracefully rather than endlessly redo the
         * fault.
         */
        fault = handle_mm_fault(vma, address, flags);

        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                /*
                 * We ran out of memory, or some other thing happened
                 * to us that made us unable to handle the page fault
                 * gracefully.
                 */
                if (fault & VM_FAULT_OOM) {
                        goto out_of_memory;
                } else if (fault & VM_FAULT_SIGSEGV) {
                        goto bad_area;
                } else if (fault & VM_FAULT_SIGBUS) {
                        signal = SIGBUS;
                        goto bad_area;
                }
                BUG();
        }

        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR)
                        current->maj_flt++;
                else
                        current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;

                        /*
                         * No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);
        return;

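        /*
         * The address fell in a hole between vmas.  Two legitimate cases
         * remain: it lies just above a VM_GROWSUP area (the ia64 register
         * backing store, which grows upward), or inside a VM_GROWSDOWN
         * area's expansion range (an ordinary memory stack, which grows
         * downward).  Anything else is a genuine bad area.
         */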
  check_expansion:
        if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
                if (!vma)
                        goto bad_area;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto bad_area;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                if (expand_stack(vma, address))
                        goto bad_area;
        } else {
                vma = prev_vma;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                /*
                 * Since the register backing store is accessed sequentially,
                 * we disallow growing it by more than a page at a time.
                 */
                if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
                        goto bad_area;
                if (expand_upwards(vma, address))
                        goto bad_area;
        }
        goto good_area;

  bad_area:
        up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault, set the "ed"
                 * bit in the psr to ensure forward progress.  (Target register will get a
                 * NaT for ld.s, lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }
        if (user_mode(regs)) {
                si.si_signo = signal;
                si.si_errno = 0;
                si.si_code = code;
                si.si_addr = (void __user *) address;
                si.si_isr = isr;
                si.si_flags = __ISR_VALID;
                force_sig_info(signal, &si, current);
                return;
        }

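        /*
         * no_context can also be reached directly (interrupt context, no
         * mm, or a region-5 kernel fault), without passing through
         * bad_area above; that is why the speculative-load check is
         * repeated here.
         */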
  no_context:
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault, set the "ed"
                 * bit in the psr to ensure forward progress.  (Target register will get a
                 * NaT for ld.s, lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }

        /*
         * Since we have no vma's for region 5, we might get here even if the address is
         * valid, due to the VHPT walker inserting a non present translation that becomes
         * stale. If that happens, the non present fault handler already purged the stale
         * translation, which fixed the problem. So, we check to see if the translation is
         * valid, and return if it is.
         */
        if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
                return;

        if (ia64_done_with_exception(regs))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to terminate things
         * with extreme prejudice.
         */
        bust_spinlocks(1);

        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
        else
                printk(KERN_ALERT "Unable to handle kernel paging request at "
                       "virtual address %016lx\n", address);
        if (die("Oops", regs, isr))
                regs = NULL;
        bust_spinlocks(0);
        if (regs)
                do_exit(SIGKILL);
        return;

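        /*
         * Out of memory on a user-mode fault: drop the semaphore and let
         * pagefault_out_of_memory() invoke the OOM killer (or wait for
         * it); kernel-mode faults fall back to the no_context path.
         */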
  out_of_memory:
        up_read(&mm->mmap_sem);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
}