/*
 *  arch/s390/mm/fault.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/s390_ext.h>
#include <asm/mmu_context.h>
#include "../kernel/entry.h"

#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK 0x7ffff000
#define __FIXUP_MASK 0x7fffffff
#define __SUBCODE_MASK 0x0200
#define __PF_RES_FIELD 0ULL
#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK -4096L
#define __FIXUP_MASK ~0L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
#endif /* CONFIG_64BIT */
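
/*
 * A note on the masks above: __FAIL_ADDR_MASK isolates the page-aligned
 * failing address from the translation-exception code (the low bits
 * carry, among other things, the address-space descriptor decoded in
 * check_space() below).  __FIXUP_MASK strips the 31-bit addressing-mode
 * bit from the PSW address before the exception-table lookup in
 * do_no_context().  __SUBCODE_MASK and __PF_RES_FIELD belong to the
 * pfault interface at the end of this file.
 */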

#ifdef CONFIG_SYSCTL
extern int sysctl_userprocess_debug;
#endif

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, long err)
{
        int ret = 0;

        /* kprobe_running() needs smp_processor_id() */
        if (!user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 14))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, long err)
{
        return 0;
}
#endif


/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out.
 */
void bust_spinlocks(int yes)
{
        if (yes) {
                oops_in_progress = 1;
        } else {
                int loglevel_save = console_loglevel;
                console_unblank();
                oops_in_progress = 0;
                /*
                 * OK, the message is on the console.  Now we call printk()
                 * without oops_in_progress set so that printk will give klogd
                 * a poke.  Hold onto your hats...
                 */
                console_loglevel = 15;
                printk(" ");
                console_loglevel = loglevel_save;
        }
}

/*
 * Returns the address space associated with the fault.
 * Returns 0 for kernel space, 1 for user space and
 * 2 for code execution in user space with noexec=on.
 */
static inline int check_space(struct task_struct *tsk)
{
        /*
         * The lowest two bits of S390_lowcore.trans_exc_code
         * indicate which paging table was used.
         */
        int desc = S390_lowcore.trans_exc_code & 3;

        if (desc == 3)  /* Home Segment Table Descriptor */
                return switch_amode == 0;
        if (desc == 2)  /* Secondary Segment Table Descriptor */
                return tsk->thread.mm_segment.ar4;
#ifdef CONFIG_S390_SWITCH_AMODE
        if (unlikely(desc == 1)) { /* STD determined via access register */
                /* %a0 always indicates primary space. */
                if (S390_lowcore.exc_access_id != 0) {
                        save_access_regs(tsk->thread.acrs);
                        /*
                         * An alet of 0 indicates primary space.
                         * An alet of 1 indicates secondary space.
                         * Any other alet values generate an
                         * alen-translation exception.
                         */
                        if (tsk->thread.acrs[S390_lowcore.exc_access_id])
                                return tsk->thread.mm_segment.ar4;
                }
        }
#endif
        /* Primary Segment Table Descriptor */
        return switch_amode << s390_noexec;
}
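
/*
 * For reference: the two-bit descriptor tested above is architected as
 * 0 = primary space, 1 = determined by an access register, 2 =
 * secondary space, 3 = home space.  Which of those spaces means
 * "kernel" and which means "user" is a software decision depending on
 * switch_amode and s390_noexec, which is what the return expressions
 * above encode.
 */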

/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
                       int si_code, unsigned long address)
{
        struct siginfo si;

#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
#if defined(CONFIG_SYSCTL)
        if (sysctl_userprocess_debug)
#endif
        {
                printk("User process fault: interruption code 0x%lX\n",
                       error_code);
                printk("failing address: %lX\n", address);
                show_regs(regs);
        }
#endif
        si.si_signo = SIGSEGV;
        si.si_code = si_code;
        si.si_addr = (void __user *) address;
        force_sig_info(SIGSEGV, &si, current);
}

static void do_no_context(struct pt_regs *regs, unsigned long error_code,
                          unsigned long address)
{
        const struct exception_table_entry *fixup;

        /* Are we prepared to handle this kernel fault?  */
        fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
        if (fixup) {
                regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
                return;
        }

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        if (check_space(current) == 0)
                printk(KERN_ALERT "Unable to handle kernel pointer dereference"
                       " at virtual kernel address %p\n", (void *)address);
        else
                printk(KERN_ALERT "Unable to handle kernel paging request"
                       " at virtual user address %p\n", (void *)address);

        die("Oops", regs, error_code);
        do_exit(SIGKILL);
}
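
/*
 * The fixup path above is what lets uaccess primitives such as
 * __get_user() survive faults on bad user addresses: every potentially
 * faulting instruction is registered in the exception table together
 * with a recovery address (see the EX_TABLE() uses in pfault_init()
 * below for an example), and do_no_context() simply points the PSW at
 * that recovery code instead of oopsing.
 */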

static void do_low_address(struct pt_regs *regs, unsigned long error_code)
{
        /* Low-address protection hit in kernel mode means
           NULL pointer write access in kernel mode.  */
        if (regs->psw.mask & PSW_MASK_PSTATE) {
                /* Low-address protection hit in user mode 'cannot happen'. */
                die("Low-address protection", regs, error_code);
                do_exit(SIGKILL);
        }

        do_no_context(regs, error_code, 0);
}

static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
                      unsigned long address)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;

        up_read(&mm->mmap_sem);
        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        tsk->thread.prot_addr = address;
        tsk->thread.trap_no = error_code;
        force_sig(SIGBUS, tsk);

        /* Kernel mode? Handle exceptions or die */
        if (!(regs->psw.mask & PSW_MASK_PSTATE))
                do_no_context(regs, error_code, address);
}

#ifdef CONFIG_S390_EXEC_PROTECT
static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
                         unsigned long address, unsigned long error_code)
{
        u16 instruction;
        int rc;
#ifdef CONFIG_COMPAT
        int compat;
#endif

        pagefault_disable();
        rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
        pagefault_enable();
        if (rc)
                return -EFAULT;

        up_read(&mm->mmap_sem);
        clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
#ifdef CONFIG_COMPAT
        compat = is_compat_task();
        if (compat && instruction == 0x0a77)
                sys32_sigreturn();
        else if (compat && instruction == 0x0aad)
                sys32_rt_sigreturn();
        else
#endif
        if (instruction == 0x0a77)
                sys_sigreturn();
        else if (instruction == 0x0aad)
                sys_rt_sigreturn();
        else {
                current->thread.prot_addr = address;
                current->thread.trap_no = error_code;
                do_sigsegv(regs, error_code, SEGV_MAPERR, address);
        }
        return 0;
}
#endif /* CONFIG_S390_EXEC_PROTECT */
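
/*
 * The magic numbers above decode as follows: 0x0a is the SVC opcode,
 * so 0x0a77 is "svc 119" (sys_sigreturn) and 0x0aad is "svc 173"
 * (sys_rt_sigreturn).  With execute protection enabled the signal
 * trampoline on the user stack is not executable; the resulting fault
 * lands here and the two sigreturn variants are emulated instead of
 * killing the task with SIGSEGV.
 */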

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline void
do_exception(struct pt_regs *regs, unsigned long error_code, int write)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long address;
        int space;
        int si_code;
        int fault;

        if (notify_page_fault(regs, error_code))
                return;

        tsk = current;
        mm = tsk->mm;

        /* get the failing address and the affected space */
        address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
        space = check_space(tsk);

        /*
         * Verify that the fault happened in user space, that
         * we are not in an interrupt and that there is a
         * user context.
         */
        if (unlikely(space == 0 || in_atomic() || !mm))
                goto no_context;

        /*
         * When we get here, the fault happened in the current
         * task's user address space, so we can switch on the
         * interrupts again and then search the VMAs
         */
        local_irq_enable();
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
        down_read(&mm->mmap_sem);

        si_code = SEGV_MAPERR;
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;

#ifdef CONFIG_S390_EXEC_PROTECT
        if (unlikely((space == 2) && !(vma->vm_flags & VM_EXEC)))
                if (!signal_return(mm, regs, address, error_code))
                        /*
                         * signal_return() has done an up_read(&mm->mmap_sem)
                         * if it returns 0.
                         */
                        return;
#endif

        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        si_code = SEGV_ACCERR;
        if (!write) {
                /* page not present, check vm flags */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto bad_area;
        } else {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        }

        if (is_vm_hugetlb_page(vma))
                address &= HPAGE_MASK;
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM) {
                        up_read(&mm->mmap_sem);
                        pagefault_out_of_memory();
                        return;
                } else if (fault & VM_FAULT_SIGBUS) {
                        do_sigbus(regs, error_code, address);
                        return;
                }
                BUG();
        }
        if (fault & VM_FAULT_MAJOR) {
                tsk->maj_flt++;
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
                                     regs, address);
        } else {
                tsk->min_flt++;
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
                                     regs, address);
        }
        up_read(&mm->mmap_sem);
        /*
         * The instruction that caused the program check will
         * be repeated. Don't signal single step via SIGTRAP.
         */
        clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        up_read(&mm->mmap_sem);

        /* User mode accesses just cause a SIGSEGV */
        if (regs->psw.mask & PSW_MASK_PSTATE) {
                tsk->thread.prot_addr = address;
                tsk->thread.trap_no = error_code;
                do_sigsegv(regs, error_code, si_code, address);
                return;
        }

no_context:
        do_no_context(regs, error_code, address);
}

void __kprobes do_protection_exception(struct pt_regs *regs,
                                       long error_code)
{
        /* Protection exception is suppressing, decrement psw address. */
        regs->psw.addr -= (error_code >> 16);
        /*
         * Check for low-address protection.  This needs to be treated
         * as a special case because the translation exception code
         * field is not guaranteed to contain valid data in this case.
         */
        if (unlikely(!(S390_lowcore.trans_exc_code & 4))) {
                do_low_address(regs, error_code);
                return;
        }
        do_exception(regs, 4, 1);
}
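
/*
 * Suppression vs. nullification: a suppressing exception leaves the
 * PSW pointing past the failing instruction, so do_protection_exception()
 * rewinds it by the instruction length delivered in the upper half of
 * error_code.  The translation exceptions handled below are nullifying,
 * i.e. the PSW still addresses the failing instruction, which is simply
 * re-executed once the fault has been resolved.
 */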

void __kprobes do_dat_exception(struct pt_regs *regs, long error_code)
{
        do_exception(regs, error_code & 0xff, 0);
}

#ifdef CONFIG_64BIT
void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code)
{
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long address;
        int space;

        mm = current->mm;
        address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
        space = check_space(current);

        if (unlikely(space == 0 || in_atomic() || !mm))
                goto no_context;

        local_irq_enable();

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        up_read(&mm->mmap_sem);

        if (vma) {
                update_mm(mm, current);
                return;
        }

        /* User mode accesses just cause a SIGSEGV */
        if (regs->psw.mask & PSW_MASK_PSTATE) {
                current->thread.prot_addr = address;
                current->thread.trap_no = error_code;
                do_sigsegv(regs, error_code, SEGV_MAPERR, address);
                return;
        }

no_context:
        do_no_context(regs, error_code, address);
}
#endif
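
/*
 * Note on do_asce_exception(): this presumably covers the case where
 * another thread has upgraded the shared page table to more levels
 * (e.g. through a large mmap) while this CPU still runs with the old,
 * smaller ASCE.  If a vma exists for the address, reloading the
 * address-space control via update_mm() is all that is needed;
 * otherwise the access is treated as an ordinary bad-address fault.
 */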

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
static ext_int_info_t ext_int_pfault;
static int pfault_disable = 0;

static int __init nopfault(char *str)
{
        pfault_disable = 1;
        return 1;
}

__setup("nopfault", nopfault);

typedef struct {
        __u16 refdiagc;
        __u16 reffcode;
        __u16 refdwlen;
        __u16 refversn;
        __u64 refgaddr;
        __u64 refselmk;
        __u64 refcmpmk;
        __u64 reserved;
} __attribute__ ((packed, aligned(8))) pfault_refbk_t;
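
/*
 * Parameter block for DIAG 0x258, as used below (a best-effort summary;
 * the z/VM CP Programming Services documentation is authoritative):
 * refdiagc holds the diagnose code 0x258, reffcode selects the function
 * (0 establishes pfault handshaking, 1 cancels it), refdwlen/refversn
 * describe length and version of the block, and refgaddr names the
 * token address -- here __LC_CURRENT, so that pfault_interrupt() can
 * recover the affected task_struct from the interrupt parameter.
 */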

int pfault_init(void)
{
        pfault_refbk_t refbk =
                { 0x258, 0, 5, 2, __LC_CURRENT, 1ULL << 48, 1ULL << 48,
                  __PF_RES_FIELD };
        int rc;

        if (!MACHINE_IS_VM || pfault_disable)
                return -1;
        asm volatile(
                "       diag    %1,%0,0x258\n"
                "0:     j       2f\n"
                "1:     la      %0,8\n"
                "2:\n"
                EX_TABLE(0b,1b)
                : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
        __ctl_set_bit(0, 9);
        return rc;
}
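
/*
 * A zero rc from the diagnose means pfault handshaking is established.
 * The exception-table entry turns a diagnose rejected by the hypervisor
 * into rc = 8, which makes pfault_irq_init() fall back to running
 * without pfault.  __ctl_set_bit(0, 9) enables the external-interrupt
 * subclass used for the pfault interrupts.
 */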

void pfault_fini(void)
{
        pfault_refbk_t refbk =
        { 0x258, 1, 5, 2, 0ULL, 0ULL, 0ULL, 0ULL };

        if (!MACHINE_IS_VM || pfault_disable)
                return;
        __ctl_clear_bit(0,9);
        asm volatile(
                "       diag    %0,0,0x258\n"
                "0:\n"
                EX_TABLE(0b,0b)
                : : "a" (&refbk), "m" (refbk) : "cc");
}

static void pfault_interrupt(__u16 error_code)
{
        struct task_struct *tsk;
        __u16 subcode;

        /*
         * Get the external interruption subcode & pfault
         * initial/completion signal bit. VM stores this
         * in the 'cpu address' field associated with the
         * external interrupt.
         */
        subcode = S390_lowcore.cpu_addr;
        if ((subcode & 0xff00) != __SUBCODE_MASK)
                return;

        /*
         * Get the token (= address of the task structure of the affected task).
         */
        tsk = *(struct task_struct **) __LC_PFAULT_INTPARM;

        if (subcode & 0x0080) {
                /* signal bit is set -> a page has been swapped in by VM */
                if (xchg(&tsk->thread.pfault_wait, -1) != 0) {
                        /* Initial interrupt was faster than the completion
                         * interrupt. pfault_wait is valid. Set pfault_wait
                         * back to zero and wake up the process. This can
                         * safely be done because the task is still sleeping
                         * and can't produce new pfaults. */
                        tsk->thread.pfault_wait = 0;
                        wake_up_process(tsk);
                        put_task_struct(tsk);
                }
        } else {
                /* signal bit not set -> a real page is missing. */
                get_task_struct(tsk);
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                if (xchg(&tsk->thread.pfault_wait, 1) != 0) {
                        /* Completion interrupt was faster than the initial
                         * interrupt (swapped in a -1 for pfault_wait). Set
                         * pfault_wait back to zero and exit. This can be
                         * done safely because tsk is running in kernel
                         * mode and can't produce new pfaults. */
                        tsk->thread.pfault_wait = 0;
                        set_task_state(tsk, TASK_RUNNING);
                        put_task_struct(tsk);
                } else
                        set_tsk_need_resched(tsk);
        }
}
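
/*
 * pfault_wait thus acts as a small per-task state machine: 0 means no
 * pfault is outstanding, 1 means the initial interrupt has parked the
 * task, and -1 records a completion interrupt that overtook its own
 * initial interrupt.  Whichever interrupt arrives second resets the
 * field to 0 and undoes the other side's work, so a task can never be
 * left sleeping forever.
 */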

void __init pfault_irq_init(void)
{
        if (!MACHINE_IS_VM)
                return;

        /*
         * Try to get pfault pseudo page faults going.
         */
        if (register_early_external_interrupt(0x2603, pfault_interrupt,
                                              &ext_int_pfault) != 0)
                panic("Couldn't request external interrupt 0x2603");

        if (pfault_init() == 0)
                return;

        /* Tough luck, no pfault. */
        pfault_disable = 1;
        unregister_early_external_interrupt(0x2603, pfault_interrupt,
                                            &ext_int_pfault);
}
#endif