linux/arch/um/kernel/trap.c
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/sched/debug.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <arch.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>

/*
 * Note this is constrained to return 0, -EFAULT, -EACCES, -ENOMEM by
 * segv().
 */
int handle_page_fault(unsigned long address, unsigned long ip,
                      int is_write, int is_user, int *code_out)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int err = -EFAULT;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

        *code_out = SEGV_MAPERR;

        /*
         * If the fault happened with pagefaults disabled, don't take the
         * fault, just fail.
         */
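        /*
         * For instance, a probe like (an illustrative sketch, not code
         * from this file)
         *
         *      pagefault_disable();
         *      err = __get_user(v, ptr);
         *      pagefault_enable();
         *
         * must get -EFAULT back from here rather than sleep on mmap_sem.
         */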
        if (faulthandler_disabled())
                goto out_nosemaphore;

        if (is_user)
                flags |= FAULT_FLAG_USER;
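
        /*
         * Look the faulting address up in the mm.  An address just below
         * the bottom of a VM_GROWSDOWN vma may be a stack access, in
         * which case the vma is grown down to cover it.
         */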
retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (!vma)
                goto out;
        else if (vma->vm_start <= address)
                goto good_area;
        else if (!(vma->vm_flags & VM_GROWSDOWN))
                goto out;
        else if (is_user && !ARCH_IS_STACKGROW(address))
                goto out;
        else if (expand_stack(vma, address))
                goto out;

good_area:
        *code_out = SEGV_ACCERR;
        if (is_write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto out;
                flags |= FAULT_FLAG_WRITE;
        } else {
                /* Don't require VM_READ|VM_EXEC for write faults! */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto out;
        }

        do {
                vm_fault_t fault;

                fault = handle_mm_fault(vma, address, flags);

                if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                        goto out_nosemaphore;

                if (unlikely(fault & VM_FAULT_ERROR)) {
                        if (fault & VM_FAULT_OOM) {
                                goto out_of_memory;
                        } else if (fault & VM_FAULT_SIGSEGV) {
                                goto out;
                        } else if (fault & VM_FAULT_SIGBUS) {
                                err = -EACCES;
                                goto out;
                        }
                        BUG();
                }
                if (flags & FAULT_FLAG_ALLOW_RETRY) {
                        if (fault & VM_FAULT_MAJOR)
                                current->maj_flt++;
                        else
                                current->min_flt++;
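                        /*
                         * The mm dropped mmap_sem and asked for one
                         * retry: clear ALLOW_RETRY and set TRIED so a
                         * second VM_FAULT_RETRY cannot loop forever.
                         */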
                        if (fault & VM_FAULT_RETRY) {
                                flags &= ~FAULT_FLAG_ALLOW_RETRY;
                                flags |= FAULT_FLAG_TRIED;

                                goto retry;
                        }
                }

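                /*
                 * Walk to the PTE that was just faulted in.  UML has no
                 * hardware walker: flush_tlb_page() below is what pushes
                 * the mapping into the host process, so loop until the
                 * PTE is really present in case it was zapped again in
                 * the meantime.
                 */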
                pgd = pgd_offset(mm, address);
                pud = pud_offset(pgd, address);
                pmd = pmd_offset(pud, address);
                pte = pte_offset_kernel(pmd, address);
        } while (!pte_present(*pte));
        err = 0;
        /*
         * The below warning was added in place of
         *      pte_mkyoung(); if (is_write) pte_mkdirty();
         * If it's triggered, we'd normally see a hang here (a clean pte is
         * marked read-only to emulate the dirty bit).
         * However, the generic code can mark a PTE writable but clean on a
         * concurrent read fault, triggering this harmlessly. So comment it out.
         */
#if 0
        WARN_ON(!pte_young(*pte) || (is_write && !pte_dirty(*pte)));
#endif
        flush_tlb_page(vma, address);
out:
        up_read(&mm->mmap_sem);
out_nosemaphore:
        return err;

out_of_memory:
        /*
         * We ran out of memory, call the OOM killer, and return to
         * userspace (which will retry the fault, or kill us if we got
         * oom-killed).
         */
        up_read(&mm->mmap_sem);
        if (!is_user)
                goto out_nosemaphore;
        pagefault_out_of_memory();
        return 0;
}
EXPORT_SYMBOL(handle_page_fault);

static void show_segv_info(struct uml_pt_regs *regs)
{
        struct task_struct *tsk = current;
        struct faultinfo *fi = UPT_FAULTINFO(regs);

        if (!unhandled_signal(tsk, SIGSEGV))
                return;

        if (!printk_ratelimit())
                return;

        printk("%s%s[%d]: segfault at %lx ip %px sp %px error %x",
                task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
                tsk->comm, task_pid_nr(tsk), FAULT_ADDRESS(*fi),
                (void *)UPT_IP(regs), (void *)UPT_SP(regs),
                fi->error_code);

        print_vma_addr(KERN_CONT " in ", UPT_IP(regs));
        printk(KERN_CONT "\n");
}

static void bad_segv(struct faultinfo fi, unsigned long ip)
{
        current->thread.arch.faultinfo = fi;
        force_sig_fault(SIGSEGV, SEGV_ACCERR, (void __user *) FAULT_ADDRESS(fi));
}

void fatal_sigsegv(void)
{
        force_sigsegv(SIGSEGV);
        do_signal(&current->thread.regs);
        /*
         * This is to tell gcc that we're not returning - do_signal
         * can, in general, return, but in this case it won't, since
         * we just queued a fatal SIGSEGV.
         */
        os_dump_core();
}

/**
 * segv_handler() - the SIGSEGV handler
 * @sig:        the signal number
 * @unused_si:  the signal info struct; unused in this handler
 * @regs:       the ptrace register information
 *
 * The handler first extracts the faultinfo from the UML ptrace regs struct.
 * If the fault happened in a UML userspace process but is not fixable,
 * bad_segv() is called; otherwise segv() handles the fault.
 */
void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
        struct faultinfo *fi = UPT_FAULTINFO(regs);

        if (UPT_IS_USER(regs) && !SEGV_IS_FIXABLE(fi)) {
                show_segv_info(regs);
                bad_segv(*fi, UPT_IP(regs));
                return;
        }
        segv(*fi, UPT_IP(regs), UPT_IS_USER(regs), regs);
}

/*
 * We give a *copy* of the faultinfo in the regs to segv.
 * This must be done, since nesting SEGVs could overwrite
 * the info in the regs. A pointer to the info then would
 * give us bad data!
 */
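/*
 * For illustration (a sketch, not code from this file): with a pointer,
 *
 *      struct faultinfo *fi = UPT_FAULTINFO(regs);
 *      ...                  <- a nested SEGV rewrites regs' faultinfo
 *      address = FAULT_ADDRESS(*fi);
 *
 * would pick up the inner fault's address; the by-value copy snapshots
 * the info before any nesting can happen.
 */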
unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
                   struct uml_pt_regs *regs)
{
        jmp_buf *catcher;
        int si_code;
        int err;
        int is_write = FAULT_WRITE(fi);
        unsigned long address = FAULT_ADDRESS(fi);

        if (!is_user && regs)
                current->thread.segv_regs = container_of(regs, struct pt_regs, regs);

        if (!is_user && (address >= start_vm) && (address < end_vm)) {
                flush_tlb_kernel_vm();
                goto out;
        } else if (current->mm == NULL) {
                show_regs(container_of(regs, struct pt_regs, regs));
                panic("Segfault with no mm");
        } else if (!is_user && address > PAGE_SIZE && address < TASK_SIZE) {
                show_regs(container_of(regs, struct pt_regs, regs));
                panic("Kernel tried to access user memory at addr 0x%lx, ip 0x%lx",
                       address, ip);
        }

        if (SEGV_IS_FIXABLE(&fi))
                err = handle_page_fault(address, ip, is_write, is_user,
                                        &si_code);
        else {
                err = -EFAULT;
                /*
                 * A thread accessed NULL, we get a fault, but CR2 is invalid.
                 * This code is used in __do_copy_from_user() of TT mode.
                 * XXX tt mode is gone, so maybe this isn't needed any more
                 */
                address = 0;
        }

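        /*
         * thread.fault_catcher is a jmp_buf that in-kernel code can arm
         * before touching possibly-unmapped memory; if the access faults,
         * we longjmp back to the probe instead of killing anything.
         */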
        catcher = current->thread.fault_catcher;
        if (!err)
                goto out;
        else if (catcher != NULL) {
                current->thread.fault_addr = (void *) address;
                UML_LONGJMP(catcher, 1);
        } else if (current->thread.fault_addr != NULL)
                panic("fault_addr set but no fault catcher");
        else if (!is_user && arch_fixup(ip, regs))
                goto out;

        if (!is_user) {
                show_regs(container_of(regs, struct pt_regs, regs));
                panic("Kernel mode fault at addr 0x%lx, ip 0x%lx",
                      address, ip);
        }

        show_segv_info(regs);

        if (err == -EACCES) {
                current->thread.arch.faultinfo = fi;
                force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
        } else {
                BUG_ON(err != -EFAULT);
                current->thread.arch.faultinfo = fi;
                force_sig_fault(SIGSEGV, si_code, (void __user *) address);
        }

out:
        if (regs)
                current->thread.segv_regs = NULL;

        return 0;
}

void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs)
{
        int code, err;

        if (!UPT_IS_USER(regs)) {
                if (sig == SIGBUS)
                        printk(KERN_ERR "Bus error - the host /dev/shm or /tmp "
                               "mount likely just ran out of space\n");
                panic("Kernel mode signal %d", sig);
        }

        arch_examine_signal(sig, regs);

        /*
         * Is the signal layout for the signal known?
         * Signal data must be scrubbed to prevent information leaks.
         */
        code = si->si_code;
        err = si->si_errno;
        if ((err == 0) && (siginfo_layout(sig, code) == SIL_FAULT)) {
                struct faultinfo *fi = UPT_FAULTINFO(regs);

                current->thread.arch.faultinfo = *fi;
                force_sig_fault(sig, code, (void __user *)FAULT_ADDRESS(*fi));
        } else {
                printk(KERN_ERR "Attempted to relay unknown signal %d (si_code = %d) with errno %d\n",
                       sig, code, err);
                force_sig(sig);
        }
}

void bus_handler(int sig, struct siginfo *si, struct uml_pt_regs *regs)
{
        if (current->thread.fault_catcher != NULL)
                UML_LONGJMP(current->thread.fault_catcher, 1);
        else
                relay_signal(sig, si, regs);
}

void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
        do_IRQ(WINCH_IRQ, regs);
}

void trap_init(void)
{
}