linux/arch/sparc/mm/fault_64.c
// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
 *
 * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/percpu.h>
#include <linux/context_tracking.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>

int show_unhandled_signals = 1;

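/* Give kprobes first shot at a kernel-mode fault: if a kprobe is
 * active on this CPU and its fault handler claims the fault, there
 * is nothing more for us to do.
 */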
static inline __kprobes int notify_page_fault(struct pt_regs *regs)
{
        int ret = 0;

        /* kprobe_running() needs smp_processor_id() */
        if (kprobes_built_in() && !user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 0))
                        ret = 1;
                preempt_enable();
        }
        return ret;
}

static void __kprobes unhandled_fault(unsigned long address,
                                      struct task_struct *tsk,
                                      struct pt_regs *regs)
{
        if ((unsigned long) address < PAGE_SIZE) {
                printk(KERN_ALERT "Unable to handle kernel NULL "
                       "pointer dereference\n");
        } else {
                printk(KERN_ALERT "Unable to handle kernel paging request "
                       "at virtual address %016lx\n", (unsigned long)address);
        }
        printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
               (tsk->mm ?
                CTX_HWBITS(tsk->mm->context) :
                CTX_HWBITS(tsk->active_mm->context)));
        printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
               (tsk->mm ? (unsigned long) tsk->mm->pgd :
                          (unsigned long) tsk->active_mm->pgd));
        die_if_kernel("Oops", regs);
}

static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
{
        printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
               regs->tpc);
        printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
        printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
        printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
        dump_stack();
        unhandled_fault(regs->tpc, current, regs);
}

/*
 * We now make sure that mmap_sem is held in all paths that call
 * this.  Additionally, to prevent kswapd from ripping ptes out from
 * under us, we disable interrupts around the time that we look at
 * the pte: kswapd will have to wait for the SMP IPI response from
 * us before it can proceed, and vmtruncate likewise.  This saves us
 * from having to take the pte lock.
 */
static unsigned int get_user_insn(unsigned long tpc)
{
        pgd_t *pgdp = pgd_offset(current->mm, tpc);
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep, pte;
        unsigned long pa;
        u32 insn = 0;

        if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
                goto out;
        pudp = pud_offset(pgdp, tpc);
        if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
                goto out;

        /* This disables preemption for us as well. */
        local_irq_disable();

        pmdp = pmd_offset(pudp, tpc);
        if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
                goto out_irq_enable;

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        if (is_hugetlb_pmd(*pmdp)) {
                pa  = pmd_pfn(*pmdp) << PAGE_SHIFT;
                pa += tpc & ~HPAGE_MASK;

                /* Use phys bypass so we don't pollute dtlb/dcache. */
                __asm__ __volatile__("lduwa [%1] %2, %0"
                                     : "=r" (insn)
                                     : "r" (pa), "i" (ASI_PHYS_USE_EC));
        } else
#endif
        {
                ptep = pte_offset_map(pmdp, tpc);
                pte = *ptep;
                if (pte_present(pte)) {
                        pa  = (pte_pfn(pte) << PAGE_SHIFT);
                        pa += (tpc & ~PAGE_MASK);

                        /* Use phys bypass so we don't pollute dtlb/dcache. */
                        __asm__ __volatile__("lduwa [%1] %2, %0"
                                             : "=r" (insn)
                                             : "r" (pa), "i" (ASI_PHYS_USE_EC));
                }
                pte_unmap(ptep);
        }
out_irq_enable:
        local_irq_enable();
out:
        return insn;
}

static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
                unsigned long address, struct task_struct *tsk)
{
        if (!unhandled_signal(tsk, sig))
                return;

        if (!printk_ratelimit())
                return;

        printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
               task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
               tsk->comm, task_pid_nr(tsk), address,
               (void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
               (void *)regs->u_regs[UREG_FP], code);

        print_vma_addr(KERN_CONT " in ", regs->tpc);

        printk(KERN_CONT "\n");
}

static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
                             unsigned long fault_addr, unsigned int insn,
                             int fault_code)
{
        unsigned long addr;

        if (fault_code & FAULT_CODE_ITLB) {
                addr = regs->tpc;
        } else {
                /* If we were able to probe the faulting instruction, use it
                 * to compute a precise fault address.  Otherwise use the
                 * address provided at fault time, which may only have page
                 * granularity.
                 */
                if (insn)
                        addr = compute_effective_address(regs, insn, 0);
                else
                        addr = fault_addr;
        }

        if (unlikely(show_unhandled_signals))
                show_signal_msg(regs, sig, code, addr, current);

        force_sig_fault(sig, code, (void __user *) addr, 0, current);
}

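/* Return the instruction that caused the fault.  If @insn is already
 * non-zero it is returned unchanged.  Otherwise, probe at regs->tpc:
 * a privileged fault lets us read the kernel text directly, while a
 * user fault goes through get_user_insn()'s page-table walk.  Returns
 * 0 if the PC is bogus (zero or not 4-byte aligned) or the
 * instruction could not be read.
 */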
static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
{
        if (!insn) {
                if (!regs->tpc || (regs->tpc & 0x3))
                        return 0;
                if (regs->tstate & TSTATE_PRIV) {
                        insn = *(unsigned int *) regs->tpc;
                } else {
                        insn = get_user_insn(regs->tpc);
                }
        }
        return insn;
}

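/* Deal with a fault that the VMA paths could not service: emulate or
 * skip a no-fault ASI access, branch to an exception-table fixup for
 * a privileged access, deliver SIGSEGV to a user task, or give up
 * and oops.
 */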
static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
                                      int fault_code, unsigned int insn,
                                      unsigned long address)
{
        unsigned char asi = ASI_P;

        if ((!insn) && (regs->tstate & TSTATE_PRIV))
                goto cannot_handle;

        /* If the user insn could not be read (in which case insn is
         * zero), that is fine.  We will just gun down the process
         * with a signal in that case.
         */

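        /* Format-3 loads and stores have bits 31:30 == 0b11, and bit 23
         * selects the "alternate space" (explicit ASI) forms; hence the
         * 0xc0800000 test below.  Bit 13 (0x2000) is the immediate flag:
         * when set, the ASI comes from the ASI field of %tstate (bits
         * 31:24), otherwise from the imm_asi field of the instruction
         * (bits 12:5).  The (asi & 0xf2) == 0x82 test matches the
         * no-fault ASIs 0x82/0x83/0x8a/0x8b (ASI_PNF, ASI_SNF and their
         * little-endian variants), and bit 24 distinguishes the
         * floating-point forms handled by handle_ldf_stq().
         */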
        if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
            (insn & 0xc0800000) == 0xc0800000) {
                if (insn & 0x2000)
                        asi = (regs->tstate >> 24);
                else
                        asi = (insn >> 5);
                if ((asi & 0xf2) == 0x82) {
                        if (insn & 0x1000000) {
                                handle_ldf_stq(insn, regs);
                        } else {
                                /* This was a non-faulting load. Just clear the
                                 * destination register(s) and continue with the next
                                 * instruction. -jj
                                 */
                                handle_ld_nf(insn, regs);
                        }
                        return;
                }
        }

        /* Is this in ex_table? */
        if (regs->tstate & TSTATE_PRIV) {
                const struct exception_table_entry *entry;

                entry = search_exception_tables(regs->tpc);
                if (entry) {
                        regs->tpc = entry->fixup;
                        regs->tnpc = regs->tpc + 4;
                        return;
                }
        } else {
                /* The si_code was set to make clear whether
                 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
                 */
                do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
                return;
        }

cannot_handle:
        unhandled_fault(address, current, regs);
}

static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
{
        static int times;

        if (times++ < 10)
                printk(KERN_ERR "FAULT[%s:%d]: 32-bit process reports "
                       "64-bit TPC [%lx]\n",
                       current->comm, current->pid,
                       regs->tpc);
        show_regs(regs);
}

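/* Main page-fault entry point, reached from the trap table once the
 * TSB miss fast paths have failed to service the fault.  The
 * per-thread fault code records whether this was an ITLB or DTLB
 * fault and whether the access was a write.
 */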
asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned int insn = 0;
        int si_code, fault_code;
        vm_fault_t fault;
        unsigned long address, mm_rss;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

        fault_code = get_thread_fault_code();

        if (notify_page_fault(regs))
                goto exit_exception;

        si_code = SEGV_MAPERR;
        address = current_thread_info()->fault_address;

        if ((fault_code & FAULT_CODE_ITLB) &&
            (fault_code & FAULT_CODE_DTLB))
                BUG();

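        /* A 32-bit task must never present a 64-bit fault address or
         * TPC.  A 64-bit user TPC is reported via
         * bogus_32bit_fault_tpc(); both cases are then sent down the
         * intr_or_no_mm/do_kernel_fault path.
         */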
        if (test_thread_flag(TIF_32BIT)) {
                if (!(regs->tstate & TSTATE_PRIV)) {
                        if (unlikely((regs->tpc >> 32) != 0)) {
                                bogus_32bit_fault_tpc(regs);
                                goto intr_or_no_mm;
                        }
                }
                if (unlikely((address >> 32) != 0))
                        goto intr_or_no_mm;
        }

        if (regs->tstate & TSTATE_PRIV) {
                unsigned long tpc = regs->tpc;

                /* Sanity check the PC. */
                if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
                    (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
                        /* Valid, no problems... */
                } else {
                        bad_kernel_pc(regs, address);
                        goto exit_exception;
                }
        } else
                flags |= FAULT_FLAG_USER;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault.
         */
        if (faulthandler_disabled() || !mm)
                goto intr_or_no_mm;

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

        if (!down_read_trylock(&mm->mmap_sem)) {
                if ((regs->tstate & TSTATE_PRIV) &&
                    !search_exception_tables(regs->tpc)) {
                        insn = get_fault_insn(regs, insn);
                        goto handle_kernel_fault;
                }

retry:
                down_read(&mm->mmap_sem);
        }

        if (fault_code & FAULT_CODE_BAD_RA)
                goto do_sigbus;

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;

        /* Pure DTLB misses do not tell us whether the fault-causing
         * load/store/atomic was a write or not; they only say that there
         * was no match.  So in such a case we (carefully) read the
         * instruction to try and figure this out.  It's an optimization,
         * so it's OK if we can't do this.
         *
         * Special hack: window spill/fill knows the exact fault type.
         */
        if (((fault_code &
              (FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
            (vma->vm_flags & VM_WRITE) != 0) {
                insn = get_fault_insn(regs, 0);
                if (!insn)
                        goto continue_fault;
                /* All loads, stores and atomics have bits 30 and 31 both set
                 * in the instruction.  Bit 21 is set in all stores, but we
                 * have to avoid prefetches which also have bit 21 set.
                 */
                if ((insn & 0xc0200000) == 0xc0200000 &&
                    (insn & 0x01780000) != 0x01680000) {
                        /* Don't bother updating thread struct value,
                         * because update_mmu_cache only cares which tlb
                         * the access came from.
                         */
                        fault_code |= FAULT_CODE_WRITE;
                }
        }
continue_fault:

        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (!(fault_code & FAULT_CODE_WRITE)) {
                /* Non-faulting loads shouldn't expand stack. */
                insn = get_fault_insn(regs, insn);
                if ((insn & 0xc0800000) == 0xc0800000) {
                        unsigned char asi;

                        if (insn & 0x2000)
                                asi = (regs->tstate >> 24);
                        else
                                asi = (insn >> 5);
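                        /* No-fault ASI load: refuse to grow the stack for it. */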
                        if ((asi & 0xf2) == 0x82)
                                goto bad_area;
                }
        }
        if (expand_stack(vma, address))
                goto bad_area;
        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it.
         */
good_area:
        si_code = SEGV_ACCERR;

        /* If we took an ITLB miss on a non-executable page, catch
         * that here.
         */
        if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
                WARN(address != regs->tpc,
                     "address (%lx) != regs->tpc (%lx)\n", address, regs->tpc);
                WARN_ON(regs->tstate & TSTATE_PRIV);
                goto bad_area;
        }

        if (fault_code & FAULT_CODE_WRITE) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;

                /* Spitfire has an icache which does not snoop
                 * processor stores.  Later processors do...
                 */
                if (tlb_type == spitfire &&
                    (vma->vm_flags & VM_EXEC) != 0 &&
                    vma->vm_file != NULL)
                        set_thread_fault_code(fault_code |
                                              FAULT_CODE_BLKCOMMIT);

                flags |= FAULT_FLAG_WRITE;
        } else {
                /* Allow reads even for write-only mappings */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }

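        /* Hand the fault to the generic MM layer.  It may drop
         * mmap_sem and return VM_FAULT_RETRY, in which case we take
         * the semaphore again and retry the whole lookup.
         */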
        fault = handle_mm_fault(vma, address, flags);

        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                goto exit_exception;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }

        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        current->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
                                      1, regs, address);
                } else {
                        current->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
                                      1, regs, address);
                }
                if (fault & VM_FAULT_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;

                        /* No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */

                        goto retry;
                }
        }
        up_read(&mm->mmap_sem);

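        /* The fault may have grown the resident set beyond what the
         * current TSB (Translation Storage Buffer) was sized for, so
         * check both the base-page and huge-page TSBs and grow
         * whichever is now undersized.  The huge TSB is only set up
         * lazily, on the first huge mapping (hugetlb_setup()).
         */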
        mm_rss = get_mm_rss(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
        mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
#endif
        if (unlikely(mm_rss >
                     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
                tsb_grow(mm, MM_TSB_BASE, mm_rss);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
        mm_rss *= REAL_HPAGE_PER_HPAGE;
        if (unlikely(mm_rss >
                     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
                if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
                        tsb_grow(mm, MM_TSB_HUGE, mm_rss);
                else
                        hugetlb_setup(regs);
        }
#endif
exit_exception:
        exception_exit(prev_state);
        return;

        /*
         * Something tried to access memory that isn't in our memory map.
         * Fix it, but check if it's kernel or user first.
         */
bad_area:
        insn = get_fault_insn(regs, insn);
        up_read(&mm->mmap_sem);

handle_kernel_fault:
        do_kernel_fault(regs, si_code, fault_code, insn, address);
        goto exit_exception;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        insn = get_fault_insn(regs, insn);
        up_read(&mm->mmap_sem);
        if (!(regs->tstate & TSTATE_PRIV)) {
                pagefault_out_of_memory();
                goto exit_exception;
        }
        goto handle_kernel_fault;

intr_or_no_mm:
        insn = get_fault_insn(regs, 0);
        goto handle_kernel_fault;

do_sigbus:
        insn = get_fault_insn(regs, insn);
        up_read(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);

        /* Kernel mode? Handle exceptions or die */
        if (regs->tstate & TSTATE_PRIV)
                goto handle_kernel_fault;
}