linux/arch/s390/mm/fault.c
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include "../kernel/entry.h"

#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL

#define VM_FAULT_BADCONTEXT     0x010000
#define VM_FAULT_BADMAP         0x020000
#define VM_FAULT_BADACCESS      0x040000
#define VM_FAULT_SIGNAL         0x080000
#define VM_FAULT_PFAULT         0x100000

static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
        if (test_facility(75))
                store_indication = 0xc00;
        return 0;
}
early_initcall(fault_init);
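
/*
 * Minimal sketch, not part of the original source: with the
 * store-indication facility (facility bit 75) installed, the TEID bits
 * masked by store_indication (0xc00) encode the access type, and the
 * value 0x400 marks a store; do_exception() below uses exactly this
 * test to set FAULT_FLAG_WRITE. The helper name is hypothetical.
 */
static inline int teid_indicates_store(unsigned long trans_exc_code)
{
        return (trans_exc_code & store_indication) == 0x400;
}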

static inline int notify_page_fault(struct pt_regs *regs)
{
        int ret = 0;

        /* kprobe_running() needs smp_processor_id() */
        if (kprobes_built_in() && !user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 14))
                        ret = 1;
                preempt_enable();
        }
        return ret;
}

/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out.
 */
void bust_spinlocks(int yes)
{
        if (yes) {
                oops_in_progress = 1;
        } else {
                int loglevel_save = console_loglevel;
                console_unblank();
                oops_in_progress = 0;
                /*
                 * OK, the message is on the console.  Now we call printk()
                 * without oops_in_progress set so that printk will give klogd
                 * a poke.  Hold onto your hats...
                 */
                console_loglevel = 15;
                printk(" ");
                console_loglevel = loglevel_save;
        }
}

/*
 * Returns the address space associated with the fault.
 * Returns 0 for kernel space and 1 for user space.
 */
static inline int user_space_fault(struct pt_regs *regs)
{
        unsigned long trans_exc_code;

        /*
         * The lowest two bits of the translation exception
         * identification indicate which paging table was used.
         */
        trans_exc_code = regs->int_parm_long & 3;
        if (trans_exc_code == 3) /* home space -> kernel */
                return 0;
        if (user_mode(regs))
                return 1;
        if (trans_exc_code == 2) /* secondary space -> set_fs */
                return current->thread.mm_segment.ar4;
        if (current->flags & PF_VCPU)
                return 1;
        return 0;
}
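
/*
 * Minimal sketch mirroring the TEID test above and the switch in
 * dump_fault_info() below; the helper name is hypothetical and the
 * function is illustrative only.
 */
static inline const char *teid_space_name(unsigned long trans_exc_code)
{
        switch (trans_exc_code & 3) {
        case 0:
                return "primary space";
        case 1:
                return "access register";
        case 2:
                return "secondary space";
        case 3:
                return "home space";
        }
        return "";      /* not reached: the mask limits the range to 0..3 */
}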

static int bad_address(void *p)
{
        unsigned long dummy;

        return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long asce, unsigned long address)
{
        unsigned long *table = __va(asce & PAGE_MASK);

        pr_alert("AS:%016lx ", asce);
        switch (asce & _ASCE_TYPE_MASK) {
        case _ASCE_TYPE_REGION1:
                table = table + ((address >> 53) & 0x7ff);
                if (bad_address(table))
                        goto bad;
                pr_cont("R1:%016lx ", *table);
                if (*table & _REGION_ENTRY_INVALID)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_REGION2:
                table = table + ((address >> 42) & 0x7ff);
                if (bad_address(table))
                        goto bad;
                pr_cont("R2:%016lx ", *table);
                if (*table & _REGION_ENTRY_INVALID)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_REGION3:
                table = table + ((address >> 31) & 0x7ff);
                if (bad_address(table))
                        goto bad;
                pr_cont("R3:%016lx ", *table);
                if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_SEGMENT:
                table = table + ((address >> 20) & 0x7ff);
                if (bad_address(table))
                        goto bad;
                pr_cont("S:%016lx ", *table);
                if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
                        goto out;
                table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
        }
        table = table + ((address >> 12) & 0xff);
        if (bad_address(table))
                goto bad;
        pr_cont("P:%016lx ", *table);
out:
        pr_cont("\n");
        return;
bad:
        pr_cont("BAD\n");
}
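
/*
 * Minimal sketch of the address slicing used by dump_pagetable()
 * above: three 11-bit region indexes, an 11-bit segment index and an
 * 8-bit page index, leaving the low 12 bits as the byte offset. The
 * helper is illustrative only and not part of the original source.
 */
static inline void split_table_indexes(unsigned long address,
                                       unsigned long idx[5])
{
        idx[0] = (address >> 53) & 0x7ff;       /* region first table */
        idx[1] = (address >> 42) & 0x7ff;       /* region second table */
        idx[2] = (address >> 31) & 0x7ff;       /* region third table */
        idx[3] = (address >> 20) & 0x7ff;       /* segment table */
        idx[4] = (address >> 12) & 0xff;        /* page table */
}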

static void dump_fault_info(struct pt_regs *regs)
{
        unsigned long asce;

        pr_alert("Failing address: %016lx TEID: %016lx\n",
                 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
        pr_alert("Fault in ");
        switch (regs->int_parm_long & 3) {
        case 3:
                pr_cont("home space ");
                break;
        case 2:
                pr_cont("secondary space ");
                break;
        case 1:
                pr_cont("access register ");
                break;
        case 0:
                pr_cont("primary space ");
                break;
        }
        pr_cont("mode while using ");
        if (!user_space_fault(regs)) {
                asce = S390_lowcore.kernel_asce;
                pr_cont("kernel ");
        }
#ifdef CONFIG_PGSTE
        else if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
                struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
                asce = gmap->asce;
                pr_cont("gmap ");
        }
#endif
        else {
                asce = S390_lowcore.user_asce;
                pr_cont("user ");
        }
        pr_cont("ASCE.\n");
        dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
        if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
                return;
        if (!unhandled_signal(current, signr))
                return;
        if (!printk_ratelimit())
                return;
        printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
               regs->int_code & 0xffff, regs->int_code >> 17);
        print_vma_addr(KERN_CONT "in ", regs->psw.addr);
        printk(KERN_CONT "\n");
        if (is_mm_fault)
                dump_fault_info(regs);
        show_regs(regs);
}

/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
        struct siginfo si;

        report_user_fault(regs, SIGSEGV, 1);
        si.si_signo = SIGSEGV;
        si.si_errno = 0;
        si.si_code = si_code;
        si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
        force_sig_info(SIGSEGV, &si, current);
}

static noinline void do_no_context(struct pt_regs *regs)
{
        const struct exception_table_entry *fixup;

        /* Are we prepared to handle this kernel fault?  */
        fixup = search_exception_tables(regs->psw.addr);
        if (fixup) {
                regs->psw.addr = extable_fixup(fixup);
                return;
        }

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        if (!user_space_fault(regs))
                printk(KERN_ALERT "Unable to handle kernel pointer dereference"
                       " in virtual kernel address space\n");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request"
                       " in virtual user address space\n");
        dump_fault_info(regs);
        die(regs, "Oops");
        do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
        /* Low-address protection hit in kernel mode means
           NULL pointer write access in kernel mode.  */
        if (regs->psw.mask & PSW_MASK_PSTATE) {
                /* Low-address protection hit in user mode 'cannot happen'. */
                die(regs, "Low-address protection");
                do_exit(SIGKILL);
        }

        do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
        struct task_struct *tsk = current;
        struct siginfo si;

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        si.si_signo = SIGBUS;
        si.si_errno = 0;
        si.si_code = BUS_ADRERR;
        si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
        force_sig_info(SIGBUS, &si, tsk);
}

static noinline void do_fault_error(struct pt_regs *regs, int fault)
{
        int si_code;

        switch (fault) {
        case VM_FAULT_BADACCESS:
        case VM_FAULT_BADMAP:
                /* Bad memory access. Check if it is kernel or user space. */
                if (user_mode(regs)) {
                        /* User mode accesses just cause a SIGSEGV */
                        si_code = (fault == VM_FAULT_BADMAP) ?
                                SEGV_MAPERR : SEGV_ACCERR;
                        do_sigsegv(regs, si_code);
                        return;
                }
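                /* fall through: kernel mode, treat like a bad context */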
        case VM_FAULT_BADCONTEXT:
        case VM_FAULT_PFAULT:
                do_no_context(regs);
                break;
        case VM_FAULT_SIGNAL:
                if (!user_mode(regs))
                        do_no_context(regs);
                break;
        default: /* fault & VM_FAULT_ERROR */
                if (fault & VM_FAULT_OOM) {
                        if (!user_mode(regs))
                                do_no_context(regs);
                        else
                                pagefault_out_of_memory();
                } else if (fault & VM_FAULT_SIGSEGV) {
                        /* Kernel mode? Handle exceptions or die */
                        if (!user_mode(regs))
                                do_no_context(regs);
                        else
                                do_sigsegv(regs, SEGV_MAPERR);
                } else if (fault & VM_FAULT_SIGBUS) {
                        /* Kernel mode? Handle exceptions or die */
                        if (!user_mode(regs))
                                do_no_context(regs);
                        else
                                do_sigbus(regs);
                } else
                        BUG();
                break;
        }
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline int do_exception(struct pt_regs *regs, int access)
{
#ifdef CONFIG_PGSTE
        struct gmap *gmap;
#endif
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long trans_exc_code;
        unsigned long address;
        unsigned int flags;
        int fault;

        tsk = current;
        /*
         * The instruction that caused the program check has
         * been nullified. Don't signal single step via SIGTRAP.
         */
        clear_pt_regs_flag(regs, PIF_PER_TRAP);

        if (notify_page_fault(regs))
                return 0;

        mm = tsk->mm;
        trans_exc_code = regs->int_parm_long;

        /*
         * Verify that the fault happened in user space, that
         * we are not in an interrupt and that there is a
         * user context.
         */
        fault = VM_FAULT_BADCONTEXT;
        if (unlikely(!user_space_fault(regs) || faulthandler_disabled() || !mm))
                goto out;

        address = trans_exc_code & __FAIL_ADDR_MASK;
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
        flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
                flags |= FAULT_FLAG_WRITE;
        down_read(&mm->mmap_sem);

#ifdef CONFIG_PGSTE
        gmap = (current->flags & PF_VCPU) ?
                (struct gmap *) S390_lowcore.gmap : NULL;
        if (gmap) {
                current->thread.gmap_addr = address;
                current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
                current->thread.gmap_int_code = regs->int_code & 0xffff;
                address = __gmap_translate(gmap, address);
                if (address == -EFAULT) {
                        fault = VM_FAULT_BADMAP;
                        goto out_up;
                }
                if (gmap->pfault_enabled)
                        flags |= FAULT_FLAG_RETRY_NOWAIT;
        }
#endif

retry:
        fault = VM_FAULT_BADMAP;
        vma = find_vma(mm, address);
        if (!vma)
                goto out_up;

        if (unlikely(vma->vm_start > address)) {
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out_up;
                if (expand_stack(vma, address))
                        goto out_up;
        }

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
        fault = VM_FAULT_BADACCESS;
        if (unlikely(!(vma->vm_flags & access)))
                goto out_up;

        if (is_vm_hugetlb_page(vma))
                address &= HPAGE_MASK;
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(vma, address, flags);
        /* No reason to continue if interrupted by SIGKILL. */
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
                fault = VM_FAULT_SIGNAL;
                goto out;
        }
        if (unlikely(fault & VM_FAULT_ERROR))
                goto out_up;

        /*
         * Major/minor page fault accounting is only done on the
         * initial attempt. If we go through a retry, it is extremely
         * likely that the page will be found in page cache at that point.
         */
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        tsk->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                      regs, address);
                } else {
                        tsk->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                      regs, address);
                }
                if (fault & VM_FAULT_RETRY) {
#ifdef CONFIG_PGSTE
                        if (gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
                                /* FAULT_FLAG_RETRY_NOWAIT has been set,
                                 * mmap_sem has not been released */
                                current->thread.gmap_pfault = 1;
                                fault = VM_FAULT_PFAULT;
                                goto out_up;
                        }
#endif
                        /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
                         * of starvation. */
                        flags &= ~(FAULT_FLAG_ALLOW_RETRY |
                                   FAULT_FLAG_RETRY_NOWAIT);
                        flags |= FAULT_FLAG_TRIED;
                        down_read(&mm->mmap_sem);
                        goto retry;
                }
        }
#ifdef CONFIG_PGSTE
        if (gmap) {
                address = __gmap_link(gmap, current->thread.gmap_addr,
                                      address);
                if (address == -EFAULT) {
                        fault = VM_FAULT_BADMAP;
                        goto out_up;
                }
                if (address == -ENOMEM) {
                        fault = VM_FAULT_OOM;
                        goto out_up;
                }
        }
#endif
        fault = 0;
out_up:
        up_read(&mm->mmap_sem);
out:
        return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
        unsigned long trans_exc_code;
        int fault;

        trans_exc_code = regs->int_parm_long;
        /*
         * Protection exceptions are suppressing, decrement psw address.
         * The exceptions to this rule are aborted transactions; for these
         * the PSW already points to the correct location.
         */
        if (!(regs->int_code & 0x200))
                regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
        /*
         * Check for low-address protection.  This needs to be treated
         * as a special case because the translation exception code
         * field is not guaranteed to contain valid data in this case.
         */
        if (unlikely(!(trans_exc_code & 4))) {
                do_low_address(regs);
                return;
        }
        fault = do_exception(regs, VM_WRITE);
        if (unlikely(fault))
                do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);
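
/*
 * Minimal sketch of the rewind above, assuming __rewind_psw()
 * essentially subtracts the instruction length (regs->int_code >> 16)
 * from the PSW address so the suppressed instruction gets re-executed;
 * the real helper additionally wraps the address according to the
 * addressing mode. Name and simplification are illustrative only.
 */
static inline unsigned long rewind_psw_sketch(unsigned long addr,
                                              unsigned long ilc)
{
        return addr - ilc;      /* simplified: no 24/31-bit wraparound */
}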

void do_dat_exception(struct pt_regs *regs)
{
        int access, fault;

        access = VM_READ | VM_EXEC | VM_WRITE;
        fault = do_exception(regs, access);
        if (unlikely(fault))
                do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page fault routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
        pfault_disable = 1;
        return 1;
}

__setup("nopfault", nopfault);

struct pfault_refbk {
        u16 refdiagc;
        u16 reffcode;
        u16 refdwlen;
        u16 refversn;
        u64 refgaddr;
        u64 refselmk;
        u64 refcmpmk;
        u64 reserved;
} __attribute__ ((packed, aligned(8)));

int pfault_init(void)
{
        struct pfault_refbk refbk = {
                .refdiagc = 0x258,
                .reffcode = 0,
                .refdwlen = 5,
                .refversn = 2,
                .refgaddr = __LC_LPP,
                .refselmk = 1ULL << 48,
                .refcmpmk = 1ULL << 48,
                .reserved = __PF_RES_FIELD };
        int rc;

        if (pfault_disable)
                return -1;
        diag_stat_inc(DIAG_STAT_X258);
        asm volatile(
                "       diag    %1,%0,0x258\n"
                "0:     j       2f\n"
                "1:     la      %0,8\n"
                "2:\n"
                EX_TABLE(0b,1b)
                : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
        return rc;
}

void pfault_fini(void)
{
        struct pfault_refbk refbk = {
                .refdiagc = 0x258,
                .reffcode = 1,
                .refdwlen = 5,
                .refversn = 2,
        };

        if (pfault_disable)
                return;
        diag_stat_inc(DIAG_STAT_X258);
        asm volatile(
                "       diag    %0,0,0x258\n"
                "0:     nopr    %%r7\n"
                EX_TABLE(0b,0b)
                : : "a" (&refbk), "m" (refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

#define PF_COMPLETE     0x0080

/*
 * The mechanism of our pfault code: if Linux is running as a guest,
 * runs a user space process, and that process accesses a page that
 * the host has paged out, we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process.
 * Without this mechanism the host would have to suspend the whole
 * virtual cpu until the page has been paged in.
 *
 * So when we get such an interrupt we set the state of the current
 * task to uninterruptible and also set the need_resched flag. Both
 * happen within interrupt context(!). If we later on want to return
 * to user space we recognize the need_resched flag and then call
 * schedule().  It's not very obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion
 * interrupt (-> host signals that a page of a process has been paged
 * in and the process can continue to run). This interrupt can arrive
 * on any cpu and, since we have virtual cpus, actually appear before
 * the interrupt that signals that a page is missing.
 */
static void pfault_interrupt(struct ext_code ext_code,
                             unsigned int param32, unsigned long param64)
{
        struct task_struct *tsk;
        __u16 subcode;
        pid_t pid;

        /*
         * Get the external interruption subcode & pfault initial/completion
         * signal bit. VM stores this in the 'cpu address' field associated
         * with the external interrupt.
         */
        subcode = ext_code.subcode;
        if ((subcode & 0xff00) != __SUBCODE_MASK)
                return;
        inc_irq_stat(IRQEXT_PFL);
        /* Get the token (= pid of the affected task). */
        pid = param64 & LPP_PFAULT_PID_MASK;
        rcu_read_lock();
        tsk = find_task_by_pid_ns(pid, &init_pid_ns);
        if (tsk)
                get_task_struct(tsk);
        rcu_read_unlock();
        if (!tsk)
                return;
        spin_lock(&pfault_lock);
        if (subcode & PF_COMPLETE) {
                /* signal bit is set -> a page has been swapped in by VM */
                if (tsk->thread.pfault_wait == 1) {
                        /* Initial interrupt was faster than the completion
                         * interrupt. pfault_wait is valid. Set pfault_wait
                         * back to zero and wake up the process. This can
                         * safely be done because the task is still sleeping
                         * and can't produce new pfaults. */
                        tsk->thread.pfault_wait = 0;
                        list_del(&tsk->thread.list);
                        wake_up_process(tsk);
                        put_task_struct(tsk);
                } else {
                        /* Completion interrupt was faster than initial
                         * interrupt. Set pfault_wait to -1 so the initial
                         * interrupt doesn't put the task to sleep.
                         * If the task is not running, ignore the completion
                         * interrupt since it must be a leftover of a PFAULT
                         * CANCEL operation which didn't remove all pending
                         * completion interrupts. */
                        if (tsk->state == TASK_RUNNING)
                                tsk->thread.pfault_wait = -1;
                }
        } else {
                /* signal bit not set -> a real page is missing. */
                if (WARN_ON_ONCE(tsk != current))
                        goto out;
                if (tsk->thread.pfault_wait == 1) {
                        /* Already on the list with a reference: put to sleep */
                        goto block;
                } else if (tsk->thread.pfault_wait == -1) {
                        /* Completion interrupt was faster than the initial
                         * interrupt (pfault_wait == -1). Set pfault_wait
                         * back to zero and exit. */
                        tsk->thread.pfault_wait = 0;
                } else {
                        /* Initial interrupt arrived before completion
                         * interrupt. Let the task sleep.
                         * An extra task reference is needed since a different
                         * cpu may set the task state to TASK_RUNNING again
                         * before the scheduler is reached. */
                        get_task_struct(tsk);
                        tsk->thread.pfault_wait = 1;
                        list_add(&tsk->thread.list, &pfault_list);
block:
                        /* Since this must be a userspace fault, there
                         * is no kernel task state to trample. Rely on the
                         * return to userspace schedule() to block. */
                        __set_current_state(TASK_UNINTERRUPTIBLE);
                        set_tsk_need_resched(tsk);
                }
        }
out:
        spin_unlock(&pfault_lock);
        put_task_struct(tsk);
}
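
/*
 * Minimal sketch of the pfault_wait state machine implemented by
 * pfault_interrupt() above: 0 = not waiting, 1 = waiting for the
 * completion interrupt, -1 = completion arrived before the initial
 * interrupt. The helper is illustrative only; it folds away the list
 * handling, reference counting and the TASK_RUNNING check.
 */
static inline int pfault_wait_next(int state, int completion)
{
        if (completion)                         /* PF_COMPLETE set */
                return state == 1 ? 0 : -1;     /* wake up, or remember */
        /* initial interrupt: consume an early completion or go to sleep */
        return state == -1 ? 0 : 1;
}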

static int pfault_cpu_dead(unsigned int cpu)
{
        struct thread_struct *thread, *next;
        struct task_struct *tsk;

        spin_lock_irq(&pfault_lock);
        list_for_each_entry_safe(thread, next, &pfault_list, list) {
                thread->pfault_wait = 0;
                list_del(&thread->list);
                tsk = container_of(thread, struct task_struct, thread);
                wake_up_process(tsk);
                put_task_struct(tsk);
        }
        spin_unlock_irq(&pfault_lock);
        return 0;
}

static int __init pfault_irq_init(void)
{
        int rc;

        rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
        if (rc)
                goto out_extint;
        rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
        if (rc)
                goto out_pfault;
        irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
        cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
                                  NULL, pfault_cpu_dead);
        return 0;

out_pfault:
        unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
        pfault_disable = 1;
        return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */