linux/arch/ia64/mm/fault.c
// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        pgd = pgd_offset_k(address);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return 0;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud) || pud_bad(*pud))
                return 0;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return 0;

        ptep = pte_offset_kernel(pmd, address);
        if (!ptep)
                return 0;

        pte = *ptep;
        return pte_present(pte);
}
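/*
 * For comparison, a minimal sketch of the same four-level walk for a *user*
 * address. Illustrative only, not part of the original file: the helper name
 * user_page_is_present() is hypothetical, the caller is assumed to hold
 * mm->mmap_sem, and pgd_offset()/pte_offset_map() replace the kernel-space
 * variants used above.
 */
#if 0   /* illustrative sketch */
static int
user_page_is_present (struct mm_struct *mm, unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        pgd = pgd_offset(mm, address);          /* per-process table, not init_mm */
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return 0;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud) || pud_bad(*pud))
                return 0;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return 0;

        ptep = pte_offset_map(pmd, address);    /* user PTE pages may be kmapped */
        if (!ptep)
                return 0;

        pte = *ptep;
        pte_unmap(ptep);                        /* balance pte_offset_map() */
        return pte_present(pte);
}
#endif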

#       define VM_READ_BIT      0
#       define VM_WRITE_BIT     1
#       define VM_EXEC_BIT      2
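/*
 * These bit numbers must stay in sync with the VM_READ/VM_WRITE/VM_EXEC flag
 * values in <linux/mm.h>; the preprocessor #error check in ia64_do_page_fault()
 * enforces exactly that. A sketch of an equivalent check with BUILD_BUG_ON
 * (an assumption about placement: BUILD_BUG_ON must live inside a function):
 */
#if 0   /* illustrative sketch */
        BUILD_BUG_ON((1 << VM_READ_BIT)  != VM_READ);   /* VM_READ  == 0x1 */
        BUILD_BUG_ON((1 << VM_WRITE_BIT) != VM_WRITE);  /* VM_WRITE == 0x2 */
        BUILD_BUG_ON((1 << VM_EXEC_BIT)  != VM_EXEC);   /* VM_EXEC  == 0x4 */
#endif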

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
        int signal = SIGSEGV, code = SEGV_MAPERR;
        struct vm_area_struct *vma, *prev_vma;
        struct mm_struct *mm = current->mm;
        unsigned long mask;
        vm_fault_t fault;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

        mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
                | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
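        /*
         * Worked example (bit positions hedged to the symbolic names from
         * asm/processor.h): a store that faults has ISR.w set and ISR.x
         * clear, so mask reduces to VM_WRITE; a faulting instruction fetch
         * sets ISR.x and yields VM_EXEC.
         */
#if 0   /* worked example, not part of the original file */
        mask = (0UL << VM_EXEC_BIT) | (1UL << VM_WRITE_BIT);    /* store:  mask == VM_WRITE (0x2) */
        mask = (1UL << VM_EXEC_BIT) | (0UL << VM_WRITE_BIT);    /* ifetch: mask == VM_EXEC  (0x4) */
#endif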

        /* mmap_sem is performance critical. */
        prefetchw(&mm->mmap_sem);

        /*
         * If we're in an interrupt or have no user context, we must not take the fault.
         */
        if (faulthandler_disabled() || !mm)
                goto no_context;
#ifdef CONFIG_VIRTUAL_MEM_MAP
        /*
         * If the fault is in region 5 and we are in the kernel, we may already
         * hold the mmap_sem (the pfn_valid macro is called during mmap). There
         * is no vma for region 5 addresses anyway, so skip taking the semaphore
         * and go directly to the exception handling code.
         */

        if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
                goto bad_area_no_up;
#endif
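        /*
         * A sketch of what the region check decodes, assuming REGION_NUMBER()
         * reduces to taking the top three bits of the 64-bit virtual address
         * (ia64 regions are 2^61 bytes each):
         */
#if 0   /* illustrative sketch, not part of the original file */
        unsigned long region = address >> 61;   /* e.g. 0xa000000000000000UL >> 61 == 5 */
        /* region 5 holds the kernel's mapped segment and has no vmas */
#endif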

        /*
         * Handle kprobes on user-space access instructions.
         */
        if (kprobe_page_fault(regs, TRAP_BRKPT))
                return;

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        if (mask & VM_WRITE)
                flags |= FAULT_FLAG_WRITE;
retry:
        down_read(&mm->mmap_sem);

        vma = find_vma_prev(mm, address, &prev_vma);
        if (!vma && !prev_vma)
                goto bad_area;

        /*
         * find_vma_prev() returns a vma such that address < vma->vm_end, or NULL.
         *
         * We may find no vma, but it could be that the last vm area is the
         * register backing store that needs to expand upwards; in that
         * case vma will be NULL, but prev_vma will be non-NULL.
         */
        if ((!vma && prev_vma) || (address < vma->vm_start))
                goto check_expansion;

  good_area:
        code = SEGV_ACCERR;

        /* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#       if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
            || (1 << VM_EXEC_BIT) != VM_EXEC)
#               error File is out of sync with <linux/mm.h>.  Please update.
#       endif

        if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
                goto bad_area;

        if ((vma->vm_flags & mask) != mask)
                goto bad_area;
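        /*
         * Worked example for the check above: a store into a mapping created
         * with PROT_READ only leaves VM_WRITE clear in vm_flags, so
         * (vm_flags & mask) != mask and the fault is refused with SEGV_ACCERR
         * (the mapping exists; the access type is not permitted). Values are
         * hypothetical, for illustration only:
         */
#if 0   /* worked example, not part of the original file */
        unsigned long ro_flags = VM_READ | VM_MAYREAD;  /* a PROT_READ mapping */
        unsigned long wr_mask  = 1UL << VM_WRITE_BIT;   /* == VM_WRITE; ISR.w was set */

        /* (ro_flags & wr_mask) == 0, which is != wr_mask, so a store into
         * this vma reaches bad_area with code == SEGV_ACCERR. */
#endif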

        /*
         * If for any reason at all we couldn't handle the fault, make
         * sure we exit gracefully rather than endlessly redo the
         * fault.
         */
        fault = handle_mm_fault(vma, address, flags);

        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                /*
                 * We ran out of memory, or some other thing happened
                 * to us that made us unable to handle the page fault
                 * gracefully.
                 */
                if (fault & VM_FAULT_OOM) {
                        goto out_of_memory;
                } else if (fault & VM_FAULT_SIGSEGV) {
                        goto bad_area;
                } else if (fault & VM_FAULT_SIGBUS) {
                        signal = SIGBUS;
                        goto bad_area;
                }
                BUG();
        }

        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR)
                        current->maj_flt++;
                else
                        current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;

                        /* No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */

                        goto retry;
                }
        }
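        /*
         * The retry protocol above, condensed: when handle_mm_fault() returns
         * VM_FAULT_RETRY it has already dropped mmap_sem, so the only
         * bookkeeping before jumping back to the retry label is flipping the
         * flags, which guarantees at most one retry per fault. A worked flag
         * trace (USER/WRITE bits omitted for brevity):
         */
#if 0   /* worked flag trace, not part of the original file */
        flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;          /* 1st attempt */
        /* handle_mm_fault() returns VM_FAULT_RETRY; mmap_sem already dropped */
        flags = (flags & ~FAULT_FLAG_ALLOW_RETRY) | FAULT_FLAG_TRIED;  /* 2nd attempt */
        /* ALLOW_RETRY is now clear, so VM_FAULT_RETRY cannot recur */
#endif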

        up_read(&mm->mmap_sem);
        return;

  check_expansion:
        if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
                if (!vma)
                        goto bad_area;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto bad_area;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                if (expand_stack(vma, address))
                        goto bad_area;
        } else {
                vma = prev_vma;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                /*
                 * Since the register backing store is accessed sequentially,
                 * we disallow growing it by more than a page at a time.
                 */
                if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
                        goto bad_area;
                if (expand_upwards(vma, address))
                        goto bad_area;
        }
        goto good_area;
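        /*
         * Worked example for the one-page growth limit above, assuming the
         * common ia64 configuration of 16KB pages and 8-byte longs (PAGE_SIZE
         * and sizeof(long) come from the build; these numbers are illustrative):
         */
#if 0   /* worked example, not part of the original file */
        unsigned long vm_end  = 0x4000;                         /* page-aligned RBS end, illustrative */
        unsigned long highest = vm_end + 16384 - sizeof(long);  /* == 0x7ff8 */

        /* A fault at `highest` is still accepted: it is the last 8-byte slot
         * of the very next page. Anything beyond it would mean the
         * sequentially-written backing store skipped a page, which cannot
         * happen, so such an address is rejected as a bad area. */
#endif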

  bad_area:
        up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault; set the "ed"
                 * bit in the psr to ensure forward progress.  (The target register will
                 * get a NaT for ld.s; lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }
        if (user_mode(regs)) {
                force_sig_fault(signal, code, (void __user *) address,
                                0, __ISR_VALID, isr);
                return;
        }

  no_context:
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault; set the "ed"
                 * bit in the psr to ensure forward progress.  (The target register will
                 * get a NaT for ld.s; lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }

        /*
         * Since we have no vmas for region 5, we might get here even if the address is
         * valid, due to the VHPT walker inserting a non-present translation that becomes
         * stale. If that happens, the non-present fault handler already purged the stale
         * translation, which fixed the problem. So, we check to see if the translation is
         * valid, and return if it is.
         */
        if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
                return;

        if (ia64_done_with_exception(regs))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to terminate things
         * with extreme prejudice.
         */
        bust_spinlocks(1);

        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
        else
                printk(KERN_ALERT "Unable to handle kernel paging request at "
                       "virtual address %016lx\n", address);
        if (die("Oops", regs, isr))
                regs = NULL;
        bust_spinlocks(0);
        if (regs)
                do_exit(SIGKILL);
        return;

  out_of_memory:
        up_read(&mm->mmap_sem);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
}
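
/*
 * For completeness, a self-contained user-space sketch (hypothetical demo
 * program, not part of the kernel) exercising both si_code values this
 * handler delivers via force_sig_fault(): SEGV_ACCERR for a write to a
 * PROT_READ mapping, and SEGV_MAPERR for an unmapped address.
 */
#if 0   /* build as an ordinary program: cc -o segv-demo segv-demo.c */
#include <signal.h>
#include <setjmp.h>
#include <stdio.h>
#include <sys/mman.h>

static sigjmp_buf env;

static void handler(int sig, siginfo_t *si, void *uc)
{
        /* si_code carries the `code` passed to force_sig_fault() above;
         * printf from a signal handler is not async-signal-safe, demo only. */
        printf("SIGSEGV at %p, si_code=%s\n", si->si_addr,
               si->si_code == SEGV_ACCERR ? "SEGV_ACCERR" : "SEGV_MAPERR");
        siglongjmp(env, 1);
}

int main(void)
{
        struct sigaction sa = { .sa_sigaction = handler, .sa_flags = SA_SIGINFO };
        char *p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        sigaction(SIGSEGV, &sa, NULL);
        if (!sigsetjmp(env, 1))
                *p = 1;                         /* mapping exists, write denied -> SEGV_ACCERR */
        if (!sigsetjmp(env, 1))
                *(volatile char *)0 = 1;        /* no mapping at all -> SEGV_MAPERR */
        return 0;
}
#endif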