linux/arch/riscv/mm/fault.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>

#include <asm/pgalloc.h>
#include <asm/ptrace.h>
#include <asm/tlbflush.h>

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
        struct task_struct *tsk;
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        unsigned long addr, cause;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
        int code = SEGV_MAPERR;
        vm_fault_t fault;

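        /*
         * scause identifies the type of fault (instruction fetch, load,
         * or store page fault); sbadaddr holds the faulting virtual
         * address.
         */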
        cause = regs->scause;
        addr = regs->sbadaddr;

        tsk = current;
        mm = tsk->mm;

        /*
         * Fault-in kernel-space virtual memory on-demand.
         * The 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         */
        if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END)))
                goto vmalloc_fault;

        /* Enable interrupts if they were enabled in the parent context. */
        if (likely(regs->sstatus & SR_SPIE))
                local_irq_enable();

        /*
         * If we're in an interrupt, have no user context, or are running
         * in an atomic region, then we must not take the fault.
         */
        if (unlikely(faulthandler_disabled() || !mm))
                goto no_context;

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;

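        /* Count this fault in perf's software page-fault statistics. */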
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

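        /*
         * Look up the VMA covering the faulting address under mmap_sem.
         * An address just below a VM_GROWSDOWN mapping may still be
         * valid if the stack can be expanded to cover it.
         */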
retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, addr);
        if (unlikely(!vma))
                goto bad_area;
        if (likely(vma->vm_start <= addr))
                goto good_area;
        if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
                goto bad_area;
        if (unlikely(expand_stack(vma, addr)))
                goto bad_area;

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it.
         */
good_area:
        code = SEGV_ACCERR;

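        /*
         * Check that the VMA permits this type of access: execute for
         * instruction page faults, read for loads, write for stores.
         */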
        switch (cause) {
        case EXC_INST_PAGE_FAULT:
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
                break;
        case EXC_LOAD_PAGE_FAULT:
                if (!(vma->vm_flags & VM_READ))
                        goto bad_area;
                break;
        case EXC_STORE_PAGE_FAULT:
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                flags |= FAULT_FLAG_WRITE;
                break;
        default:
                panic("%s: unhandled cause %lu", __func__, cause);
        }

        /*
         * If for any reason at all we could not handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(vma, addr, flags);

        /*
         * If we need to retry but a fatal signal is pending, handle the
         * signal first. We do not need to release the mmap_sem because it
         * would already be released in __lock_page_or_retry in mm/filemap.c.
         */
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(tsk))
                return;

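        /*
         * If handle_mm_fault() failed outright, either we ran out of
         * memory (VM_FAULT_OOM) or the backing store reported an error
         * (VM_FAULT_SIGBUS).
         */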
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }

        /*
         * Major/minor page fault accounting is only done on the
         * initial attempt. If we go through a retry, it is extremely
         * likely that the page will be found in page cache at that point.
         */
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        tsk->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
                                      1, regs, addr);
                } else {
                        tsk->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
                                      1, regs, addr);
                }
                if (fault & VM_FAULT_RETRY) {
                        /*
                         * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
                         * of starvation.
                         */
                        flags &= ~(FAULT_FLAG_ALLOW_RETRY);
                        flags |= FAULT_FLAG_TRIED;

                        /*
                         * No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
                        goto retry;
                }
        }

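        /* The fault was handled successfully; drop mmap_sem and return. */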
        up_read(&mm->mmap_sem);
        return;

        /*
         * Something tried to access memory that isn't in our memory map.
         * Fix it, but check if it's kernel or user first.
         */
bad_area:
        up_read(&mm->mmap_sem);
        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
                do_trap(regs, SIGSEGV, code, addr);
                return;
        }

no_context:
        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        bust_spinlocks(1);
        pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
                (addr < PAGE_SIZE) ? "NULL pointer dereference" :
                "paging request", addr);
        die(regs, "Oops");
        do_exit(SIGKILL);

        /*
         * We ran out of memory; call the OOM killer and return to
         * userspace (which will retry the fault, or kill us if we got
         * oom-killed).
         */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
        return;

do_sigbus:
        up_read(&mm->mmap_sem);
        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;
        do_trap(regs, SIGBUS, BUS_ADRERR, addr);
        return;

vmalloc_fault:
        {
                pgd_t *pgd, *pgd_k;
                pud_t *pud, *pud_k;
                p4d_t *p4d, *p4d_k;
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;
                int index;

                /* User mode accesses just cause a SIGSEGV */
                if (user_mode(regs))
                        return do_trap(regs, SIGSEGV, code, addr);

                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Do _not_ use "tsk->active_mm->pgd" here.
                 * We might be inside an interrupt in the middle
                 * of a task switch.
                 */
                index = pgd_index(addr);
                pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
                pgd_k = init_mm.pgd + index;

                if (!pgd_present(*pgd_k))
                        goto no_context;
                set_pgd(pgd, *pgd_k);

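                /*
                 * With the page-table layouts used here, the p4d and pud
                 * levels are folded into the pgd, so copying the top-level
                 * entry above already covers them; we only need to verify
                 * that they are present.
                 */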
                p4d = p4d_offset(pgd, addr);
                p4d_k = p4d_offset(pgd_k, addr);
                if (!p4d_present(*p4d_k))
                        goto no_context;

                pud = pud_offset(p4d, addr);
                pud_k = pud_offset(p4d_k, addr);
                if (!pud_present(*pud_k))
                        goto no_context;

                /*
                 * Since the vmalloc area is global, it is unnecessary
                 * to copy individual PTEs
                 */
                pmd = pmd_offset(pud, addr);
                pmd_k = pmd_offset(pud_k, addr);
                if (!pmd_present(*pmd_k))
                        goto no_context;
                set_pmd(pmd, *pmd_k);

                /*
                 * Make sure the actual PTE exists as well to
                 * catch kernel vmalloc-area accesses to non-mapped
                 * addresses. If we don't do this, this will just
                 * silently loop forever.
                 */
                pte_k = pte_offset_kernel(pmd_k, addr);
                if (!pte_present(*pte_k))
                        goto no_context;

                /*
                 * The kernel assumes that TLBs don't cache invalid
                 * entries, but in RISC-V, SFENCE.VMA specifies an
                 * ordering constraint, not a cache flush; it is
                 * necessary even after writing invalid entries.
                 */
                local_flush_tlb_page(addr);

                return;
        }
}