linux/arch/ia64/mm/fault.c
// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
        int ret = 0;

        if (!user_mode(regs)) {
                /* kprobe_running() needs smp_processor_id() */
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, trap))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
        return 0;
}
#endif
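
/*
 * A note on the helper above: when CONFIG_KPROBES is off, the stub version
 * compiles away entirely.  When kprobes are enabled, it only claims faults
 * raised in kernel mode; a fault taken while a kprobe is running (e.g. while
 * single-stepping a probed user-access instruction) is handed back to the
 * kprobe's own fault handler instead of the normal VM paths.
 */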

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        pgd = pgd_offset_k(address);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return 0;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud) || pud_bad(*pud))
                return 0;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return 0;

        ptep = pte_offset_kernel(pmd, address);
        if (!ptep)
                return 0;

        pte = *ptep;
        return pte_present(pte);
}
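
/*
 * The walk above is a plain software page-table lookup for a kernel-mapped
 * (region 5) address: pgd -> pud -> pmd -> pte, bailing out at the first
 * level that is absent or bad.  Its only caller is the no_context path in
 * ia64_do_page_fault() below, which uses it to tell a genuinely bad kernel
 * access apart from a stale VHPT translation that has already been purged.
 */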

#       define VM_READ_BIT      0
#       define VM_WRITE_BIT     1
#       define VM_EXEC_BIT      2

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
        int signal = SIGSEGV, code = SEGV_MAPERR;
        struct vm_area_struct *vma, *prev_vma;
        struct mm_struct *mm = current->mm;
        struct siginfo si;
        unsigned long mask;
        int fault;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

        mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
                | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
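
        /*
         * Worked example of the decode above: for a faulting store, ISR.w is
         * set, so mask == (1 << VM_WRITE_BIT) == VM_WRITE; for an instruction
         * fetch, ISR.x yields mask == VM_EXEC.  For a plain load both bits
         * are clear and mask == 0; read permission is checked separately via
         * ISR.r in the good_area path.
         */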

        /* mmap_sem is performance critical.... */
        prefetchw(&mm->mmap_sem);

        /*
         * If we're in an interrupt or have no user context, we must not take the fault.
         */
        if (faulthandler_disabled() || !mm)
                goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
        /*
         * If the fault is in region 5 and we are in the kernel, we may already
         * hold the mmap_sem (the pfn_valid macro is called during mmap). There
         * is no vma for region 5 addresses anyway, so skip taking the semaphore
         * and go directly to the exception handling code.
         */

        if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
                goto bad_area_no_up;
#endif

        /*
         * This is to handle kprobes on user space access instructions.
         */
        if (notify_page_fault(regs, TRAP_BRKPT))
                return;

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        if (mask & VM_WRITE)
                flags |= FAULT_FLAG_WRITE;
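
        /*
         * FAULT_FLAG_USER tells the core VM that the fault came from user
         * mode (it affects, e.g., how OOM situations are handled), and
         * FAULT_FLAG_WRITE makes handle_mm_fault() treat the access as a
         * write.  Testing (mask & VM_WRITE) directly is legitimate only
         * because of the bit-position check asserted at compile time below.
         */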
retry:
        down_read(&mm->mmap_sem);

        vma = find_vma_prev(mm, address, &prev_vma);
        if (!vma && !prev_vma)
                goto bad_area;

        /*
         * find_vma_prev() returns a vma such that address < vma->vm_end, or NULL.
         *
         * We may find no vma covering the address, but the last vm area may be
         * the register backing store that needs to expand upwards; in that
         * case vma will be NULL but prev_vma will be non-NULL.
         */
        if ((!vma && prev_vma) || (address < vma->vm_start))
                goto check_expansion;

  good_area:
        code = SEGV_ACCERR;

        /* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#       if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
            || (1 << VM_EXEC_BIT) != VM_EXEC)
#               error File is out of sync with <linux/mm.h>.  Please update.
#       endif
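
        /*
         * The compile-time check above pins down the assumption this file
         * makes about <linux/mm.h>: VM_READ, VM_WRITE and VM_EXEC must equal
         * 1 << VM_READ_BIT, 1 << VM_WRITE_BIT and 1 << VM_EXEC_BIT (i.e.
         * 1, 2 and 4), so that the mask built from the ISR bits can be
         * compared against vma->vm_flags without any translation.
         */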

        if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
                goto bad_area;

        if ((vma->vm_flags & mask) != mask)
                goto bad_area;

        /*
         * If for any reason at all we couldn't handle the fault, make
         * sure we exit gracefully rather than endlessly redo the
         * fault.
         */
        fault = handle_mm_fault(vma, address, flags);

        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                /*
                 * We ran out of memory, or some other thing happened
                 * to us that made us unable to handle the page fault
                 * gracefully.
                 */
                if (fault & VM_FAULT_OOM) {
                        goto out_of_memory;
                } else if (fault & VM_FAULT_SIGSEGV) {
                        goto bad_area;
                } else if (fault & VM_FAULT_SIGBUS) {
                        signal = SIGBUS;
                        goto bad_area;
                }
                BUG();
        }

        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR)
                        current->maj_flt++;
                else
                        current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;

                        /* No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */

                        goto retry;
                }
        }
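
        /*
         * A note on the retry path above: by the time VM_FAULT_RETRY is
         * returned, mmap_sem has already been released for us (see the
         * comment in the block), so the goto simply re-takes it.  Clearing
         * FAULT_FLAG_ALLOW_RETRY and setting FAULT_FLAG_TRIED makes the
         * second attempt wait for the page instead of retrying again, which
         * guarantees forward progress.
         */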

        up_read(&mm->mmap_sem);
        return;

  check_expansion:
        if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
                if (!vma)
                        goto bad_area;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto bad_area;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                if (expand_stack(vma, address))
                        goto bad_area;
        } else {
                vma = prev_vma;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                /*
                 * Since the register backing store is accessed sequentially,
                 * we disallow growing it by more than a page at a time.
                 */
                if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
                        goto bad_area;
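                /*
                 * Worked example of the limit above, assuming 16KB pages
                 * (one of several page sizes ia64 can be configured with):
                 * the largest acceptable address is then
                 * vm_end + 0x4000 - 8 == vm_end + 0x3ff8, i.e. the last
                 * 8-byte RSE slot of the page immediately after the current
                 * end of the backing store.
                 */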
                if (expand_upwards(vma, address))
                        goto bad_area;
        }
        goto good_area;

  bad_area:
        up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault; set the "ed"
                 * bit in the psr to ensure forward progress.  (The target register will
                 * get a NaT for ld.s; lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }
        if (user_mode(regs)) {
                si.si_signo = signal;
                si.si_errno = 0;
                si.si_code = code;
                si.si_addr = (void __user *) address;
                si.si_isr = isr;
                si.si_flags = __ISR_VALID;
                force_sig_info(signal, &si, current);
                return;
        }

  no_context:
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault; set the "ed"
                 * bit in the psr to ensure forward progress.  (The target register will
                 * get a NaT for ld.s; lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }

        /*
         * Since we have no vmas for region 5, we might get here even if the address is
         * valid, due to the VHPT walker inserting a non-present translation that becomes
         * stale. If that happens, the non-present fault handler already purged the stale
         * translation, which fixed the problem. So, we check to see if the translation is
         * valid, and return if it is.
         */
        if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
                return;

        if (ia64_done_with_exception(regs))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to terminate things
         * with extreme prejudice.
         */
        bust_spinlocks(1);

        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
        else
                printk(KERN_ALERT "Unable to handle kernel paging request at "
                       "virtual address %016lx\n", address);
        if (die("Oops", regs, isr))
                regs = NULL;
        bust_spinlocks(0);
        if (regs)
                do_exit(SIGKILL);
        return;

  out_of_memory:
        up_read(&mm->mmap_sem);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
}