linux/arch/sparc/mm/fault_64.c
// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
 *
 * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/percpu.h>
#include <linux/context_tracking.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>

int show_unhandled_signals = 1;

static inline __kprobes int notify_page_fault(struct pt_regs *regs)
{
        int ret = 0;

        /* kprobe_running() needs smp_processor_id() */
        if (kprobes_built_in() && !user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 0))
                        ret = 1;
                preempt_enable();
        }
        return ret;
}

static void __kprobes unhandled_fault(unsigned long address,
                                      struct task_struct *tsk,
                                      struct pt_regs *regs)
{
        if ((unsigned long) address < PAGE_SIZE) {
                printk(KERN_ALERT "Unable to handle kernel NULL "
                       "pointer dereference\n");
        } else {
                printk(KERN_ALERT "Unable to handle kernel paging request "
                       "at virtual address %016lx\n", (unsigned long)address);
        }
        printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
               (tsk->mm ?
                CTX_HWBITS(tsk->mm->context) :
                CTX_HWBITS(tsk->active_mm->context)));
        printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
               (tsk->mm ? (unsigned long) tsk->mm->pgd :
                          (unsigned long) tsk->active_mm->pgd));
        die_if_kernel("Oops", regs);
}

static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
{
        printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
               regs->tpc);
        printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
        printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
        printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
        dump_stack();
        unhandled_fault(regs->tpc, current, regs);
}

  87/*
  88 * We now make sure that mmap_sem is held in all paths that call 
  89 * this. Additionally, to prevent kswapd from ripping ptes from
  90 * under us, raise interrupts around the time that we look at the
  91 * pte, kswapd will have to wait to get his smp ipi response from
  92 * us. vmtruncate likewise. This saves us having to get pte lock.
  93 */
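/* Walk the user page tables by hand (pgd -> pud -> pmd -> pte) to
 * find the physical address backing the faulting user PC, then load
 * the instruction word through the physical-address ASI so the
 * D-TLB and D-cache stay unpolluted.  With interrupts off, a
 * concurrent TLB shootdown cannot get its cross-call answered, so
 * the page cannot be ripped out from under the load.
 */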
static unsigned int get_user_insn(unsigned long tpc)
{
        pgd_t *pgdp = pgd_offset(current->mm, tpc);
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep, pte;
        unsigned long pa;
        u32 insn = 0;

        if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
                goto out;
        pudp = pud_offset(pgdp, tpc);
        if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
                goto out;

        /* This disables preemption for us as well. */
        local_irq_disable();

        pmdp = pmd_offset(pudp, tpc);
        if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
                goto out_irq_enable;

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        if (is_hugetlb_pmd(*pmdp)) {
                pa  = pmd_pfn(*pmdp) << PAGE_SHIFT;
                pa += tpc & ~HPAGE_MASK;

                /* Use phys bypass so we don't pollute dtlb/dcache. */
                __asm__ __volatile__("lduwa [%1] %2, %0"
                                     : "=r" (insn)
                                     : "r" (pa), "i" (ASI_PHYS_USE_EC));
        } else
#endif
        {
                ptep = pte_offset_map(pmdp, tpc);
                pte = *ptep;
                if (pte_present(pte)) {
                        pa  = (pte_pfn(pte) << PAGE_SHIFT);
                        pa += (tpc & ~PAGE_MASK);

                        /* Use phys bypass so we don't pollute dtlb/dcache. */
                        __asm__ __volatile__("lduwa [%1] %2, %0"
                                             : "=r" (insn)
                                             : "r" (pa), "i" (ASI_PHYS_USE_EC));
                }
                pte_unmap(ptep);
        }
out_irq_enable:
        local_irq_enable();
out:
        return insn;
}

static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
                unsigned long address, struct task_struct *tsk)
{
        if (!unhandled_signal(tsk, sig))
                return;

        if (!printk_ratelimit())
                return;

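        /* The first %s picks the log level: a segfault in init
         * (pid 1) is reported at KERN_EMERG, anything else at
         * KERN_INFO.
         */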
        printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
               task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
               tsk->comm, task_pid_nr(tsk), address,
               (void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
               (void *)regs->u_regs[UREG_FP], code);

        print_vma_addr(KERN_CONT " in ", regs->tpc);

        printk(KERN_CONT "\n");
}

static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
                             unsigned long fault_addr, unsigned int insn,
                             int fault_code)
{
        unsigned long addr;
        siginfo_t info;

        info.si_code = code;
        info.si_signo = sig;
        info.si_errno = 0;
        if (fault_code & FAULT_CODE_ITLB) {
                addr = regs->tpc;
        } else {
                /* If we were able to probe the faulting instruction, use it
                 * to compute a precise fault address.  Otherwise use the fault
                 * time provided address which may only have page granularity.
                 */
                if (insn)
                        addr = compute_effective_address(regs, insn, 0);
                else
                        addr = fault_addr;
        }
        info.si_addr = (void __user *) addr;
        info.si_trapno = 0;

        if (unlikely(show_unhandled_signals))
                show_signal_msg(regs, sig, code, addr, current);

        force_sig_info(sig, &info, current);
}

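/* Fetch the instruction at the trap PC, if we do not have it
 * already.  Kernel text is always mapped, so a privileged PC can be
 * dereferenced directly; a user PC goes through the careful
 * get_user_insn() walk above.  A null or unaligned PC, or a failed
 * fetch, yields zero.
 */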
static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
{
        if (!insn) {
                if (!regs->tpc || (regs->tpc & 0x3))
                        return 0;
                if (regs->tstate & TSTATE_PRIV) {
                        insn = *(unsigned int *) regs->tpc;
                } else {
                        insn = get_user_insn(regs->tpc);
                }
        }
        return insn;
}

static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
                                      int fault_code, unsigned int insn,
                                      unsigned long address)
{
        unsigned char asi = ASI_P;

        if ((!insn) && (regs->tstate & TSTATE_PRIV))
                goto cannot_handle;

        /* If the user insn could not be read (thus insn is zero), that
         * is fine.  We will just gun down the process with a signal
         * in that case.
         */

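        /* Decode sketch (SPARC V9 format 3): bits 31:30 == 3 plus
         * bit 23 (op3 bit 4) select the alternate-space load/store
         * group, which is what the 0xc0800000 mask below tests.
         * Bit 13 is the 'i' field: when set, the ASI comes from the
         * %asi register (TSTATE bits 31:24), otherwise from the
         * immediate in insn bits 12:5.  (asi & 0xf2) == 0x82 matches
         * the no-fault ASIs ASI_PNF/ASI_SNF and their little-endian
         * variants (0x82, 0x83, 0x8a, 0x8b).
         */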
        if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
            (insn & 0xc0800000) == 0xc0800000) {
                if (insn & 0x2000)
                        asi = (regs->tstate >> 24);
                else
                        asi = (insn >> 5);
                if ((asi & 0xf2) == 0x82) {
                        if (insn & 0x1000000) {
                                handle_ldf_stq(insn, regs);
                        } else {
                                /* This was a non-faulting load. Just clear the
                                 * destination register(s) and continue with the next
                                 * instruction. -jj
                                 */
                                handle_ld_nf(insn, regs);
                        }
                        return;
                }
        }

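        /* A faulting privileged access may come from code with a
         * registered exception-table fixup (the uaccess helpers,
         * for example).  Redirecting tpc/tnpc to the fixup turns
         * the fault into an error return instead of an oops.
         */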
        /* Is this in ex_table? */
        if (regs->tstate & TSTATE_PRIV) {
                const struct exception_table_entry *entry;

                entry = search_exception_tables(regs->tpc);
                if (entry) {
                        regs->tpc = entry->fixup;
                        regs->tnpc = regs->tpc + 4;
                        return;
                }
        } else {
                /* The si_code was set to make clear whether
                 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
                 */
                do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
                return;
        }

cannot_handle:
        unhandled_fault(address, current, regs);
}

static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
{
        static int times;

        if (times++ < 10)
                printk(KERN_ERR "FAULT[%s:%d]: 32-bit process reports "
                       "64-bit TPC [%lx]\n",
                       current->comm, current->pid,
                       regs->tpc);
        show_regs(regs);
}

asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned int insn = 0;
        int si_code, fault_code, fault;
        unsigned long address, mm_rss;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

        fault_code = get_thread_fault_code();

        if (notify_page_fault(regs))
                goto exit_exception;

        si_code = SEGV_MAPERR;
        address = current_thread_info()->fault_address;

        if ((fault_code & FAULT_CODE_ITLB) &&
            (fault_code & FAULT_CODE_DTLB))
                BUG();

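        /* A 32-bit task's address space lives entirely in the low
         * 4GB, so a fault address (or user TPC) with any of the
         * upper 32 bits set cannot possibly be mapped; skip the VMA
         * search and treat it as an unmapped-address fault.
         */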
        if (test_thread_flag(TIF_32BIT)) {
                if (!(regs->tstate & TSTATE_PRIV)) {
                        if (unlikely((regs->tpc >> 32) != 0)) {
                                bogus_32bit_fault_tpc(regs);
                                goto intr_or_no_mm;
                        }
                }
                if (unlikely((address >> 32) != 0))
                        goto intr_or_no_mm;
        }

        if (regs->tstate & TSTATE_PRIV) {
                unsigned long tpc = regs->tpc;

                /* Sanity check the PC. */
                if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
                    (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
                        /* Valid, no problems... */
                } else {
                        bad_kernel_pc(regs, address);
                        goto exit_exception;
                }
        } else
                flags |= FAULT_FLAG_USER;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (faulthandler_disabled() || !mm)
                goto intr_or_no_mm;

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

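        /* Try-lock first to avoid deadlocking if the kernel faulted
         * while already holding mmap_sem: when there is no
         * exception-table fixup for the PC the fault is fatal
         * anyway, so handle it without taking the lock; only
         * otherwise do we block on down_read().
         */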
        if (!down_read_trylock(&mm->mmap_sem)) {
                if ((regs->tstate & TSTATE_PRIV) &&
                    !search_exception_tables(regs->tpc)) {
                        insn = get_fault_insn(regs, insn);
                        goto handle_kernel_fault;
                }

retry:
                down_read(&mm->mmap_sem);
        }

        if (fault_code & FAULT_CODE_BAD_RA)
                goto do_sigbus;

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;

        /* Pure DTLB misses do not tell us whether the fault-causing
         * load/store/atomic was a write or not; they only say that
         * there was no match.  So in such a case we (carefully) read
         * the instruction to try and figure this out.  It's an
         * optimization, so it's ok if we can't do this.
         *
         * Special hack: window spill/fill knows the exact fault type.
         */
        if (((fault_code &
              (FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
            (vma->vm_flags & VM_WRITE) != 0) {
                insn = get_fault_insn(regs, 0);
                if (!insn)
                        goto continue_fault;
                /* All loads, stores and atomics have bits 30 and 31 both set
                 * in the instruction.  Bit 21 is set in all stores, but we
                 * have to avoid prefetches which also have bit 21 set.
                 */
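                /* Worked example: stx encodes op = 3 and op3 = 0x0e,
                 * so bits 31:30 and bit 21 (op3 bit 2) are all set and
                 * the first test matches.  PREFETCH/PREFETCHA (op3
                 * 0x2d/0x3d) also carry bit 21, but both reduce to
                 * 0x01680000 under the 0x01780000 op3 mask, which is
                 * exactly what the second test rejects.
                 */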
                if ((insn & 0xc0200000) == 0xc0200000 &&
                    (insn & 0x01780000) != 0x01680000) {
                        /* Don't bother updating thread struct value,
                         * because update_mmu_cache only cares which tlb
                         * the access came from.
                         */
                        fault_code |= FAULT_CODE_WRITE;
                }
        }
continue_fault:

        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (!(fault_code & FAULT_CODE_WRITE)) {
                /* Non-faulting loads shouldn't expand stack. */
                insn = get_fault_insn(regs, insn);
                if ((insn & 0xc0800000) == 0xc0800000) {
                        unsigned char asi;

                        if (insn & 0x2000)
                                asi = (regs->tstate >> 24);
                        else
                                asi = (insn >> 5);
                        if ((asi & 0xf2) == 0x82)
                                goto bad_area;
                }
        }
        if (expand_stack(vma, address))
                goto bad_area;
        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        si_code = SEGV_ACCERR;

        /* If we took an ITLB miss on a non-executable page, catch
         * that here.
         */
        if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
                WARN(address != regs->tpc,
                     "address (%lx) != regs->tpc (%lx)\n", address, regs->tpc);
                WARN_ON(regs->tstate & TSTATE_PRIV);
                goto bad_area;
        }

        if (fault_code & FAULT_CODE_WRITE) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;

                /* Spitfire has an icache which does not snoop
                 * processor stores.  Later processors do...
                 */
                if (tlb_type == spitfire &&
                    (vma->vm_flags & VM_EXEC) != 0 &&
                    vma->vm_file != NULL)
                        set_thread_fault_code(fault_code |
                                              FAULT_CODE_BLKCOMMIT);

                flags |= FAULT_FLAG_WRITE;
        } else {
                /* A read fault is valid only if the mapping is
                 * readable or executable.
                 */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }

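        /* handle_mm_fault() does the real work: it either fills in
         * the page tables or returns VM_FAULT_* error/retry bits.
         * VM_FAULT_RETRY means mmap_sem has already been dropped by
         * the core VM, so we may only restart or bail out here.
         */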
        fault = handle_mm_fault(vma, address, flags);

        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                goto exit_exception;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }

        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        current->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
                                      1, regs, address);
                } else {
                        current->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
                                      1, regs, address);
                }
                if (fault & VM_FAULT_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;

                        /* No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */

                        goto retry;
                }
        }
        up_read(&mm->mmap_sem);

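        /* Fault serviced; now check whether the TSB (the software
         * cache that the TLB miss handlers search) has become too
         * small for the task's RSS, and grow it if so.  THP pages
         * are backed by the huge TSB, so they are subtracted from
         * the base count and tallied separately below.
         */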
        mm_rss = get_mm_rss(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
        mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
#endif
        if (unlikely(mm_rss >
                     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
                tsb_grow(mm, MM_TSB_BASE, mm_rss);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
        mm_rss *= REAL_HPAGE_PER_HPAGE;
        if (unlikely(mm_rss >
                     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
                if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
                        tsb_grow(mm, MM_TSB_HUGE, mm_rss);
                else
                        hugetlb_setup(regs);
        }
#endif
exit_exception:
        exception_exit(prev_state);
        return;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
bad_area:
        insn = get_fault_insn(regs, insn);
        up_read(&mm->mmap_sem);

handle_kernel_fault:
        do_kernel_fault(regs, si_code, fault_code, insn, address);
        goto exit_exception;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        insn = get_fault_insn(regs, insn);
        up_read(&mm->mmap_sem);
        if (!(regs->tstate & TSTATE_PRIV)) {
                pagefault_out_of_memory();
                goto exit_exception;
        }
        goto handle_kernel_fault;

intr_or_no_mm:
        insn = get_fault_insn(regs, 0);
        goto handle_kernel_fault;

do_sigbus:
        insn = get_fault_insn(regs, insn);
        up_read(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);

        /* Kernel mode? Handle exceptions or die */
        if (regs->tstate & TSTATE_PRIV)
                goto handle_kernel_fault;
}