linux/arch/sparc/mm/fault_32.c
// SPDX-License-Identifier: GPL-2.0
/*
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/traps.h>

#include "mm_32.h"

int show_unhandled_signals = 1;

static void __noreturn unhandled_fault(unsigned long address,
                                       struct task_struct *tsk,
                                       struct pt_regs *regs)
{
        if ((unsigned long) address < PAGE_SIZE) {
                printk(KERN_ALERT
                    "Unable to handle kernel NULL pointer dereference\n");
        } else {
                printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
                       address);
        }
        printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
                (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
        printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
                (tsk->mm ? (unsigned long) tsk->mm->pgd :
                        (unsigned long) tsk->active_mm->pgd));
        die_if_kernel("Oops", regs);
}

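/*
 * lookup_fault() is entered via its asmlinkage interface from low-level
 * trap code when a fault hits one of the hand-coded user-copy routines.
 * It consults the exception tables and returns a small code (1, 2 or 3)
 * telling the caller which accesses can be fixed up; if no fixup
 * applies, the fault is fatal.
 */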
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
                            unsigned long address)
{
        struct pt_regs regs;
        unsigned long g2;
        unsigned int insn;
        int i;

        i = search_extables_range(ret_pc, &g2);
        switch (i) {
        case 3:
                /* load & store will be handled by fixup */
                return 3;

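        /*
         * For cases 1 and 2 we must look at the faulting instruction.
         * Format-3 memory ops keep the op3 opcode field in bits 24:19;
         * bit 21 (op3 bit 2) is set for the store opcodes and clear for
         * the loads, and op3 == 0x0f is SWAP, which both loads and
         * stores.
         */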
        case 1:
                /* store will be handled by fixup, load will bump out */
                /* for _to_ macros */
                insn = *((unsigned int *) pc);
                if ((insn >> 21) & 1)
                        return 1;
                break;

        case 2:
                /* load will be handled by fixup, store will bump out */
                /* for _from_ macros */
                insn = *((unsigned int *) pc);
                if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
                        return 2;
                break;

        default:
                break;
        }

        memset(&regs, 0, sizeof(regs));
        regs.pc = pc;
        regs.npc = pc + 4;
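        /*
         * The nops give the %psr read time to settle on implementations
         * that want delay slots after rd.
         */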
        __asm__ __volatile__(
                "rd %%psr, %0\n\t"
                "nop\n\t"
                "nop\n\t"
                "nop\n" : "=r" (regs.psr));
        unhandled_fault(address, current, &regs);

        /* Not reached */
        return 0;
}

static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
                unsigned long address, struct task_struct *tsk)
{
        if (!unhandled_signal(tsk, sig))
                return;

        if (!printk_ratelimit())
                return;

        printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
               task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
               tsk->comm, task_pid_nr(tsk), address,
               (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
               (void *)regs->u_regs[UREG_FP], code);

        print_vma_addr(KERN_CONT " in ", regs->pc);

        printk(KERN_CONT "\n");
}

static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
                               unsigned long addr)
{
        if (unlikely(show_unhandled_signals))
                show_signal_msg(regs, sig, code,
                                addr, current);

        force_sig_fault(sig, code, (void __user *) addr, 0);
}

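/*
 * Work out the fault address to report in siginfo.  For a text fault
 * it is simply the PC; for a data fault we re-read the faulting
 * instruction (from kernel or user space, depending on PSR_PS) and
 * decode its effective address.
 */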
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
        unsigned int insn;

        if (text_fault)
                return regs->pc;

        if (regs->psr & PSR_PS)
                insn = *(unsigned int *) regs->pc;
        else
                __get_user(insn, (unsigned int *) regs->pc);

        return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
                                      int text_fault)
{
        unsigned long addr = compute_si_addr(regs, text_fault);

        __do_fault_siginfo(code, sig, regs, addr);
}

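/*
 * Main fault entry point from the low-level trap handler.  @text_fault
 * is set for instruction-access faults (the faulting address is then
 * the PC), @write is set for faults on a store, and @address is the
 * fault address supplied by the MMU.
 */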
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
                               unsigned long address)
{
        struct vm_area_struct *vma;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        unsigned int fixup;
        unsigned long g2;
        int from_user = !(regs->psr & PSR_PS);
        int code;
        vm_fault_t fault;
        unsigned int flags = FAULT_FLAG_DEFAULT;

        if (text_fault)
                address = regs->pc;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         */
        code = SEGV_MAPERR;
        if (address >= TASK_SIZE)
                goto vmalloc_fault;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (pagefault_disabled() || !mm)
                goto no_context;

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
        mmap_read_lock(mm);

        if (!from_user && address >= PAGE_OFFSET)
                goto bad_area;

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        code = SEGV_ACCERR;
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                /* Allow reads even for write-only mappings */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }

        if (from_user)
                flags |= FAULT_FLAG_USER;
        if (write)
                flags |= FAULT_FLAG_WRITE;

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(vma, address, flags, regs);

        if (fault_signal_pending(fault, regs))
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }

        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;

                        /* No need to mmap_read_unlock(mm) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */

                        goto retry;
                }
        }

        mmap_read_unlock(mm);
        return;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
bad_area:
        mmap_read_unlock(mm);

bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (from_user) {
                do_fault_siginfo(code, SIGSEGV, regs, text_fault);
                return;
        }

        /* Is this in ex_table? */
no_context:
        g2 = regs->u_regs[UREG_G2];
        if (!from_user) {
                fixup = search_extables_range(regs->pc, &g2);
                /* Values below 10 are reserved for other things */
                if (fixup > 10) {
                        extern const unsigned int __memset_start[];
                        extern const unsigned int __memset_end[];

#ifdef DEBUG_EXCEPTIONS
                        printk("Exception: PC<%08lx> faddr<%08lx>\n",
                               regs->pc, address);
                        printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
                                regs->pc, fixup, g2);
#endif
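                        /* Faults inside the assembler memset helpers
                         * additionally want the fault address and the
                         * faulting PC in %i4/%i5 for their fixup code. */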
                        if ((regs->pc >= (unsigned long)__memset_start &&
                             regs->pc < (unsigned long)__memset_end)) {
                                regs->u_regs[UREG_I4] = address;
                                regs->u_regs[UREG_I5] = regs->pc;
                        }
                        regs->u_regs[UREG_G2] = g2;
                        regs->pc = fixup;
                        regs->npc = regs->pc + 4;
                        return;
                }
        }

        unhandled_fault(address, tsk, regs);
        do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        mmap_read_unlock(mm);
        if (from_user) {
                pagefault_out_of_memory();
                return;
        }
        goto no_context;

do_sigbus:
        mmap_read_unlock(mm);
        do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
        if (!from_user)
                goto no_context;

vmalloc_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 */
                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                p4d_t *p4d, *p4d_k;
                pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;

                pgd = tsk->active_mm->pgd + offset;
                pgd_k = init_mm.pgd + offset;

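                /*
                 * If our top-level entry is empty but the kernel's
                 * master copy has one, copying that single pgd entry
                 * is enough: the lower-level tables are shared with
                 * init_mm.
                 */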
                if (!pgd_present(*pgd)) {
                        if (!pgd_present(*pgd_k))
                                goto bad_area_nosemaphore;
                        pgd_val(*pgd) = pgd_val(*pgd_k);
                        return;
                }

                p4d = p4d_offset(pgd, address);
                pud = pud_offset(p4d, address);
                pmd = pmd_offset(pud, address);

                p4d_k = p4d_offset(pgd_k, address);
                pud_k = pud_offset(p4d_k, address);
                pmd_k = pmd_offset(pud_k, address);

                if (pmd_present(*pmd) || !pmd_present(*pmd_k))
                        goto bad_area_nosemaphore;

                *pmd = *pmd_k;
                return;
        }
}

/* This always deals with user addresses. */
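/*
 * Fault in a user page by hand on behalf of the register-window
 * handlers below.  On failure the signal is raised against the task's
 * saved user register state (tsk->thread.kregs), not the current
 * trap frame.
 */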
static void force_user_fault(unsigned long address, int write)
{
        struct vm_area_struct *vma;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        unsigned int flags = FAULT_FLAG_USER;
        int code;

        code = SEGV_MAPERR;

        mmap_read_lock(mm);
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
good_area:
        code = SEGV_ACCERR;
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                flags |= FAULT_FLAG_WRITE;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
        switch (handle_mm_fault(vma, address, flags, NULL)) {
        case VM_FAULT_SIGBUS:
        case VM_FAULT_OOM:
                goto do_sigbus;
        }
        mmap_read_unlock(mm);
        return;
bad_area:
        mmap_read_unlock(mm);
        __do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
        return;

do_sigbus:
        mmap_read_unlock(mm);
        __do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

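/*
 * Register windows are spilled and filled with doubleword accesses,
 * so a user stack pointer that is not 64-bit aligned cannot be
 * spilled or filled; flag it as an illegal instruction.
 */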
static void check_stack_aligned(unsigned long sp)
{
        if (sp & 0x7UL)
                force_sig(SIGILL);
}

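/*
 * A register window is saved with eight doubleword stores at
 * %sp + 0x00 through %sp + 0x38.  When that 64-byte save area
 * straddles a page boundary, both pages have to be faulted in
 * before the spill or fill can be replayed.
 */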
void window_overflow_fault(void)
{
        unsigned long sp;

        sp = current_thread_info()->rwbuf_stkptrs[0];
        if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
                force_user_fault(sp + 0x38, 1);
        force_user_fault(sp, 1);

        check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
        if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
                force_user_fault(sp + 0x38, 0);
        force_user_fault(sp, 0);

        check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
        unsigned long sp;

        sp = regs->u_regs[UREG_FP];
        if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
                force_user_fault(sp + 0x38, 0);
        force_user_fault(sp, 0);

        check_stack_aligned(sp);
}