linux/arch/um/kernel/trap.c
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/sched/debug.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <arch.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>

/*
 * Note this is constrained to return 0, -EFAULT, -EACCES, -ENOMEM by
 * segv().
 */
int handle_page_fault(unsigned long address, unsigned long ip,
                      int is_write, int is_user, int *code_out)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int err = -EFAULT;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

        *code_out = SEGV_MAPERR;

        /*
         * If the fault was with pagefaults disabled, don't take the fault, just
         * fail.
         */
        if (faulthandler_disabled())
                goto out_nosemaphore;

        if (is_user)
                flags |= FAULT_FLAG_USER;
retry:
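        /*
         * Take mmap_sem and look up the VMA covering the faulting address,
         * expanding the stack if this looks like a valid stack access.
         */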
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (!vma)
                goto out;
        else if (vma->vm_start <= address)
                goto good_area;
        else if (!(vma->vm_flags & VM_GROWSDOWN))
                goto out;
        else if (is_user && !ARCH_IS_STACKGROW(address))
                goto out;
        else if (expand_stack(vma, address))
                goto out;

good_area:
        *code_out = SEGV_ACCERR;
        if (is_write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto out;
                flags |= FAULT_FLAG_WRITE;
        } else {
                /* A read fault needs the VMA to be readable or executable. */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto out;
        }

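        /*
         * Fault the page in, then walk the page tables; loop until the PTE
         * for the faulting address is actually present.
         */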
        do {
                int fault;

                fault = handle_mm_fault(vma, address, flags);

                if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                        goto out_nosemaphore;

                if (unlikely(fault & VM_FAULT_ERROR)) {
                        if (fault & VM_FAULT_OOM) {
                                goto out_of_memory;
                        } else if (fault & VM_FAULT_SIGSEGV) {
                                goto out;
                        } else if (fault & VM_FAULT_SIGBUS) {
                                err = -EACCES;
                                goto out;
                        }
                        BUG();
                }
                if (flags & FAULT_FLAG_ALLOW_RETRY) {
                        if (fault & VM_FAULT_MAJOR)
                                current->maj_flt++;
                        else
                                current->min_flt++;
                        if (fault & VM_FAULT_RETRY) {
                                flags &= ~FAULT_FLAG_ALLOW_RETRY;
                                flags |= FAULT_FLAG_TRIED;

                                goto retry;
                        }
                }

                pgd = pgd_offset(mm, address);
                pud = pud_offset(pgd, address);
                pmd = pmd_offset(pud, address);
                pte = pte_offset_kernel(pmd, address);
        } while (!pte_present(*pte));
        err = 0;
        /*
         * The below warning was added in place of
         *      pte_mkyoung(); if (is_write) pte_mkdirty();
         * If it's triggered, we'd normally see a hang here (a clean pte is
         * marked read-only to emulate the dirty bit).
         * However, the generic code can mark a PTE writable but clean on a
         * concurrent read fault, triggering this harmlessly. So comment it out.
         */
#if 0
        WARN_ON(!pte_young(*pte) || (is_write && !pte_dirty(*pte)));
#endif
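        /* Flush any stale TLB entry for the page that just became present. */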
        flush_tlb_page(vma, address);
out:
        up_read(&mm->mmap_sem);
out_nosemaphore:
        return err;

out_of_memory:
        /*
         * We ran out of memory, call the OOM killer, and return to userspace
         * (which will retry the fault, or kill us if we got oom-killed).
         */
        up_read(&mm->mmap_sem);
        if (!is_user)
                goto out_nosemaphore;
        pagefault_out_of_memory();
        return 0;
}
EXPORT_SYMBOL(handle_page_fault);

static void show_segv_info(struct uml_pt_regs *regs)
{
        struct task_struct *tsk = current;
        struct faultinfo *fi = UPT_FAULTINFO(regs);

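        /*
         * Only report faults that the task does not handle itself, and
         * rate-limit the output.
         */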
        if (!unhandled_signal(tsk, SIGSEGV))
                return;

        if (!printk_ratelimit())
                return;

        printk("%s%s[%d]: segfault at %lx ip %p sp %p error %x",
                task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
                tsk->comm, task_pid_nr(tsk), FAULT_ADDRESS(*fi),
                (void *)UPT_IP(regs), (void *)UPT_SP(regs),
                fi->error_code);

        print_vma_addr(KERN_CONT " in ", UPT_IP(regs));
        printk(KERN_CONT "\n");
}

static void bad_segv(struct faultinfo fi, unsigned long ip)
{
        struct siginfo si;

        si.si_signo = SIGSEGV;
        si.si_code = SEGV_ACCERR;
        si.si_addr = (void __user *) FAULT_ADDRESS(fi);
        current->thread.arch.faultinfo = fi;
        force_sig_info(SIGSEGV, &si, current);
}

void fatal_sigsegv(void)
{
        force_sigsegv(SIGSEGV, current);
        do_signal(&current->thread.regs);
        /*
         * This is to tell gcc that we're not returning - do_signal
         * can, in general, return, but in this case it won't, since
         * we just queued a fatal SIGSEGV.
         */
        os_dump_core();
}

/**
 * segv_handler() - the SIGSEGV handler
 * @sig:        the signal number
 * @unused_si:  the signal info struct; unused in this handler
 * @regs:       the ptrace register information
 *
 * The handler first extracts the faultinfo from the UML ptrace regs struct.
 * If the fault came from userspace but is not one we can fix up, bad_segv()
 * is called. Otherwise the fault is passed on to segv() for handling.
 */
void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
        struct faultinfo *fi = UPT_FAULTINFO(regs);

        if (UPT_IS_USER(regs) && !SEGV_IS_FIXABLE(fi)) {
                show_segv_info(regs);
                bad_segv(*fi, UPT_IP(regs));
                return;
        }
        segv(*fi, UPT_IP(regs), UPT_IS_USER(regs), regs);
}

/*
 * We give a *copy* of the faultinfo in the regs to segv.
 * This must be done, since nesting SEGVs could overwrite
 * the info in the regs. A pointer to the info then would
 * give us bad data!
 */
unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
                   struct uml_pt_regs *regs)
{
        struct siginfo si;
        jmp_buf *catcher;
        int err;
        int is_write = FAULT_WRITE(fi);
        unsigned long address = FAULT_ADDRESS(fi);

        if (!is_user && regs)
                current->thread.segv_regs = container_of(regs, struct pt_regs, regs);

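        /*
         * Kernel-mode faults on the kernel's own VM area only need the
         * kernel TLB flushed; a fault with no mm, or a kernel-mode access
         * to a user address, is a fatal bug.
         */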
        if (!is_user && (address >= start_vm) && (address < end_vm)) {
                flush_tlb_kernel_vm();
                goto out;
        }
        else if (current->mm == NULL) {
                show_regs(container_of(regs, struct pt_regs, regs));
                panic("Segfault with no mm");
        }
        else if (!is_user && address > PAGE_SIZE && address < TASK_SIZE) {
                show_regs(container_of(regs, struct pt_regs, regs));
                panic("Kernel tried to access user memory at addr 0x%lx, ip 0x%lx",
                       address, ip);
        }

        if (SEGV_IS_FIXABLE(&fi))
                err = handle_page_fault(address, ip, is_write, is_user,
                                        &si.si_code);
        else {
                err = -EFAULT;
                /*
                 * A thread accessed NULL, we get a fault, but CR2 is invalid.
                 * This code is used in __do_copy_from_user() of TT mode.
                 * XXX tt mode is gone, so maybe this isn't needed any more
                 */
                address = 0;
        }

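        /*
         * If the fault wasn't handled, a registered fault catcher gets it
         * before any signal is raised.
         */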
        catcher = current->thread.fault_catcher;
        if (!err)
                goto out;
        else if (catcher != NULL) {
                current->thread.fault_addr = (void *) address;
                UML_LONGJMP(catcher, 1);
        }
        else if (current->thread.fault_addr != NULL)
                panic("fault_addr set but no fault catcher");
        else if (!is_user && arch_fixup(ip, regs))
                goto out;

        if (!is_user) {
                show_regs(container_of(regs, struct pt_regs, regs));
                panic("Kernel mode fault at addr 0x%lx, ip 0x%lx",
                      address, ip);
        }

        show_segv_info(regs);

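        /*
         * -EACCES becomes SIGBUS; anything else must be -EFAULT and
         * becomes SIGSEGV.
         */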
        if (err == -EACCES) {
                si.si_signo = SIGBUS;
                si.si_errno = 0;
                si.si_code = BUS_ADRERR;
                si.si_addr = (void __user *)address;
                current->thread.arch.faultinfo = fi;
                force_sig_info(SIGBUS, &si, current);
        } else {
                BUG_ON(err != -EFAULT);
                si.si_signo = SIGSEGV;
                si.si_addr = (void __user *) address;
                current->thread.arch.faultinfo = fi;
                force_sig_info(SIGSEGV, &si, current);
        }

out:
        if (regs)
                current->thread.segv_regs = NULL;

        return 0;
}

void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs)
{
        struct faultinfo *fi;
        struct siginfo clean_si;

        if (!UPT_IS_USER(regs)) {
                if (sig == SIGBUS)
                        printk(KERN_ERR "Bus error - the host /dev/shm or /tmp "
                               "mount likely just ran out of space\n");
                panic("Kernel mode signal %d", sig);
        }

        arch_examine_signal(sig, regs);

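        /* Build a clean siginfo, copying over only the fields we understand. */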
        memset(&clean_si, 0, sizeof(clean_si));
        clean_si.si_signo = si->si_signo;
        clean_si.si_errno = si->si_errno;
        clean_si.si_code = si->si_code;
        switch (sig) {
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
        case SIGTRAP:
                fi = UPT_FAULTINFO(regs);
                clean_si.si_addr = (void __user *) FAULT_ADDRESS(*fi);
                current->thread.arch.faultinfo = *fi;
#ifdef __ARCH_SI_TRAPNO
                clean_si.si_trapno = si->si_trapno;
#endif
                break;
        default:
                printk(KERN_ERR "Attempted to relay unknown signal %d (si_code = %d)\n",
                        sig, si->si_code);
        }

        force_sig_info(sig, &clean_si, current);
}

void bus_handler(int sig, struct siginfo *si, struct uml_pt_regs *regs)
{
        if (current->thread.fault_catcher != NULL)
                UML_LONGJMP(current->thread.fault_catcher, 1);
        else
                relay_signal(sig, si, regs);
}

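/* Window size changes from the host are relayed as the WINCH_IRQ interrupt. */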
void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
        do_IRQ(WINCH_IRQ, regs);
}

void trap_init(void)
{
}
 350