linux/arch/alpha/mm/fault.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/alpha/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/io.h>

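/* __EXTERN_INLINE is defined so that the mmu_context/tlbflush helpers
   included below are emitted in this file as real functions rather than
   extern inlines.  */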
#define __EXTERN_INLINE inline
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#undef  __EXTERN_INLINE

#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

extern void die_if_kernel(char *, struct pt_regs *, long, unsigned long *);


/*
 * Force a new ASN for a task.
 */

#ifndef CONFIG_SMP
unsigned long last_asn = ASN_FIRST_VERSION;
#endif

void
__load_new_mm_context(struct mm_struct *next_mm)
{
        unsigned long mmc;
        struct pcb_struct *pcb;

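        /* Allocate a fresh mm context (ASN + version) for this mm on the
           current CPU and remember it.  */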
        mmc = __get_new_mm_context(next_mm, smp_processor_id());
        next_mm->context[smp_processor_id()] = mmc;

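        /* Point the PCB at the new ASN and at the page table base.  The pgd
           lives in the identity-mapped region, so subtracting IDENT_ADDR
           yields its physical address; shifting by PAGE_SHIFT gives the
           frame number the PTBR expects.  */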
        pcb = &current_thread_info()->pcb;
        pcb->asn = mmc & HARDWARE_ASN_MASK;
        pcb->ptbr = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;

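        /* Hand the updated PCB to PALcode so the new ASN and page table
           base take effect.  */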
        __reload_thread(pcb);
}


/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to handle_mm_fault().
 *
 * mmcsr:
 *      0 = translation not valid
 *      1 = access violation
 *      2 = fault-on-read
 *      3 = fault-on-execute
 *      4 = fault-on-write
 *
 * cause:
 *      -1 = instruction fetch
 *      0 = load
 *      1 = store
 *
 * Registers $9 through $15 are saved in a block just prior to `regs' and
 * are saved and restored around the call to allow exception code to
 * modify them.
 */

/* Macro for exception fixup code to access integer registers.  */
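/* The mapping relies on the pt_regs layout: $0-$8 sit at the start of
   pt_regs, $9-$15 live in the save block just below `regs' (hence the
   negative indices), $16-$18 are stored at the tail of pt_regs, and
   $19-$28 occupy the slots in between.  */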
#define dpf_reg(r)                                                      \
        (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 :  \
                                 (r) <= 18 ? (r)+10 : (r)-10])

asmlinkage void
do_page_fault(unsigned long address, unsigned long mmcsr,
              long cause, struct pt_regs *regs)
{
        struct vm_area_struct * vma;
        struct mm_struct *mm = current->mm;
        const struct exception_table_entry *fixup;
        int si_code = SEGV_MAPERR;
        vm_fault_t fault;
        unsigned int flags = FAULT_FLAG_DEFAULT;

        /* As of EV6, a load into $31/$f31 is a prefetch, and never faults
           (or is suppressed by the PALcode).  Support that for older CPUs
           by ignoring such an instruction.  */
        if (cause == 0) {
                unsigned int insn;
                __get_user(insn, (unsigned int __user *)regs->pc);
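                /* Memory-format instructions keep the opcode in bits 31..26
                   and the Ra field in bits 25..21; Ra == 0x1f is $31.  */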
                if ((insn >> 21 & 0x1f) == 0x1f &&
                    /* ldq ldl ldt lds ldg ldf ldwu ldbu */
                    (1ul << (insn >> 26) & 0x30f00001400ul)) {
                        regs->pc += 4;
                        return;
                }
        }

        /* If we're in an interrupt context, or have no user context,
           we must not take the fault.  */
        if (!mm || faulthandler_disabled())
                goto no_context;

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
        if (address >= TASK_SIZE)
                goto vmalloc_fault;
#endif
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
        mmap_read_lock(mm);
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;

        /* Ok, we have a good vm_area for this memory access, so
           we can handle it.  */
 good_area:
        si_code = SEGV_ACCERR;
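        /* cause encodes the access type: < 0 instruction fetch, 0 load,
           > 0 store (see the comment above do_page_fault).  */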
        if (cause < 0) {
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
        } else if (!cause) {
                /* Allow reads even for write-only mappings */
                if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
                        goto bad_area;
        } else {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                flags |= FAULT_FLAG_WRITE;
        }

        /* If for any reason at all we couldn't handle the fault,
           make sure we exit gracefully rather than endlessly redo
           the fault.  */
        fault = handle_mm_fault(vma, address, flags, regs);

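        /* A pending (fatal) signal interrupted the fault before it could
           complete; just return and let the signal be delivered.  */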
        if (fault_signal_pending(fault, regs))
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }

        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;

                        /* No need to mmap_read_unlock(mm) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */

                        goto retry;
                }
        }

        mmap_read_unlock(mm);

        return;

        /* Something tried to access memory that isn't in our memory map.
           Fix it, but check if it's kernel or user first.  */
 bad_area:
        mmap_read_unlock(mm);

        if (user_mode(regs))
                goto do_sigsegv;

 no_context:
        /* Are we prepared to handle this fault as an exception?  */
        if ((fixup = search_exception_tables(regs->pc)) != 0) {
                unsigned long newpc;
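                /* The fixup patches the faulting op's destination/error
                   registers (via dpf_reg) and returns the address of the
                   instruction to continue at.  */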
                newpc = fixup_exception(dpf_reg, fixup, regs->pc);
                regs->pc = newpc;
                return;
        }

        /* Oops. The kernel tried to access some bad page. We'll have to
           terminate things with extreme prejudice.  */
        printk(KERN_ALERT "Unable to handle kernel paging request at "
               "virtual address %016lx\n", address);
        die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16);
        do_exit(SIGKILL);

        /* We ran out of memory, or some other thing happened to us that
           made us unable to handle the page fault gracefully.  */
 out_of_memory:
        mmap_read_unlock(mm);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
        return;

 do_sigbus:
        mmap_read_unlock(mm);
        /* Send a sigbus, regardless of whether we were in kernel
           or user mode.  */
        force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address);
        if (!user_mode(regs))
                goto no_context;
        return;

 do_sigsegv:
        force_sig_fault(SIGSEGV, si_code, (void __user *) address);
        return;

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
 vmalloc_fault:
        if (user_mode(regs))
                goto do_sigsegv;
        else {
                /* Synchronize this task's top level page-table
                   with the "reference" page table from init.  */
                long index = pgd_index(address);
                pgd_t *pgd, *pgd_k;

                pgd = current->active_mm->pgd + index;
                pgd_k = swapper_pg_dir + index;
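                /* If init's reference page table has this top-level entry
                   but ours does not, copy it over; the lower-level tables
                   are shared, so nothing more is needed.  */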
                if (!pgd_present(*pgd) && pgd_present(*pgd_k)) {
                        pgd_val(*pgd) = pgd_val(*pgd_k);
                        return;
                }
                goto no_context;
        }
#endif
}