/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include "../kernel/entry.h"

#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK 0x7ffff000
#define __SUBCODE_MASK 0x0200
#define __PF_RES_FIELD 0ULL
#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
#endif /* CONFIG_64BIT */
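
/*
 * Illustrative note (not from the original source): on 64 bit,
 * __FAIL_ADDR_MASK is -4096L == 0xfffffffffffff000, so the failing
 * address taken from the translation exception identification (TEID)
 * is page aligned.  For example:
 *
 *      trans_exc_code     = 0x000003ffdeadb403
 *      & __FAIL_ADDR_MASK = 0x000003ffdeadb000   (failing page)
 *      & 3                = 3                    (home space)
 */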

#define VM_FAULT_BADCONTEXT     0x010000
#define VM_FAULT_BADMAP         0x020000
#define VM_FAULT_BADACCESS      0x040000
#define VM_FAULT_SIGNAL         0x080000
#define VM_FAULT_PFAULT         0x100000

static unsigned long store_indication __read_mostly;

#ifdef CONFIG_64BIT
static int __init fault_init(void)
{
        if (test_facility(75))
                store_indication = 0xc00;
        return 0;
}
early_initcall(fault_init);
#endif
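
/*
 * Sketch of what store_indication selects (an inference from the check
 * in do_exception, not a statement from the original source): with the
 * fetch/store-indication facility (facility bit 75) installed, the TEID
 * bits under the 0xc00 mask report whether the failing access was a
 * fetch or a store; do_exception treats the value 0x400 under this mask
 * as a store and sets FAULT_FLAG_WRITE.  Without the facility,
 * store_indication stays 0 and (trans_exc_code & 0) == 0x400 can never
 * be true, so only the access type decides.
 */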

static inline int notify_page_fault(struct pt_regs *regs)
{
        int ret = 0;

        /* kprobe_running() needs smp_processor_id() */
        if (kprobes_built_in() && !user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 14))
                        ret = 1;
                preempt_enable();
        }
        return ret;
}

/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out.
 */
void bust_spinlocks(int yes)
{
        if (yes) {
                oops_in_progress = 1;
        } else {
                int loglevel_save = console_loglevel;
                console_unblank();
                oops_in_progress = 0;
                /*
                 * OK, the message is on the console.  Now we call printk()
                 * without oops_in_progress set so that printk will give klogd
                 * a poke.  Hold onto your hats...
                 */
                console_loglevel = 15;
                printk(" ");
                console_loglevel = loglevel_save;
        }
}

/*
 * Returns the address space associated with the fault.
 * Returns 0 for kernel space and 1 for user space.
 */
static inline int user_space_fault(struct pt_regs *regs)
{
        unsigned long trans_exc_code;

        /*
         * The lowest two bits of the translation exception
         * identification indicate which paging table was used.
         */
        trans_exc_code = regs->int_parm_long & 3;
        if (trans_exc_code == 3) /* home space -> kernel */
                return 0;
        if (user_mode(regs))
                return 1;
        if (trans_exc_code == 2) /* secondary space -> set_fs */
                return current->thread.mm_segment.ar4;
        if (current->flags & PF_VCPU)
                return 1;
        return 0;
}
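
/*
 * Summary table for the TEID bits tested above (derived from the code
 * paths in user_space_fault, not part of the original source):
 *
 *      0 - primary space   -> user, if the fault came from user mode
 *      1 - access register -> user, if the fault came from user mode
 *      2 - secondary space -> decided by the set_fs() address space
 *      3 - home space      -> always kernel
 *
 * A KVM guest fault (PF_VCPU) in primary space is treated as user.
 */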

static int bad_address(void *p)
{
        unsigned long dummy;

        return probe_kernel_address((unsigned long *)p, dummy);
}

#ifdef CONFIG_64BIT
static void dump_pagetable(unsigned long asce, unsigned long address)
{
        unsigned long *table = __va(asce & PAGE_MASK);

        pr_alert("AS:%016lx ", asce);
        switch (asce & _ASCE_TYPE_MASK) {
        case _ASCE_TYPE_REGION1:
                table = table + ((address >> 53) & 0x7ff);
                if (bad_address(table))
                        goto bad;
                pr_cont("R1:%016lx ", *table);
                if (*table & _REGION_ENTRY_INVALID)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_REGION2:
                table = table + ((address >> 42) & 0x7ff);
                if (bad_address(table))
                        goto bad;
                pr_cont("R2:%016lx ", *table);
                if (*table & _REGION_ENTRY_INVALID)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_REGION3:
                table = table + ((address >> 31) & 0x7ff);
                if (bad_address(table))
                        goto bad;
                pr_cont("R3:%016lx ", *table);
                if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_SEGMENT:
                table = table + ((address >> 20) & 0x7ff);
                if (bad_address(table))
                        goto bad;
                pr_cont("S:%016lx ", *table);
                if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
                        goto out;
                table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
        }
        table = table + ((address >> 12) & 0xff);
        if (bad_address(table))
                goto bad;
        pr_cont("P:%016lx ", *table);
out:
        pr_cont("\n");
        return;
bad:
        pr_cont("BAD\n");
}
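
/*
 * Example output of the walk above, with made-up table entries for a
 * three-level (region third) ASCE:
 *
 *      AS:0000000012345007 R3:0000000023456007 S:0000000034567000 P:0000000045678025
 *
 * The walk stops early at an invalid or large entry and prints "BAD"
 * if a table entry cannot be read.
 */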

#else /* CONFIG_64BIT */

static void dump_pagetable(unsigned long asce, unsigned long address)
{
        unsigned long *table = __va(asce & PAGE_MASK);

        pr_alert("AS:%08lx ", asce);
        table = table + ((address >> 20) & 0x7ff);
        if (bad_address(table))
                goto bad;
        pr_cont("S:%08lx ", *table);
        if (*table & _SEGMENT_ENTRY_INVALID)
                goto out;
        table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
        table = table + ((address >> 12) & 0xff);
        if (bad_address(table))
                goto bad;
        pr_cont("P:%08lx ", *table);
out:
        pr_cont("\n");
        return;
bad:
        pr_cont("BAD\n");
}

#endif /* CONFIG_64BIT */

static void dump_fault_info(struct pt_regs *regs)
{
        unsigned long asce;

        pr_alert("Fault in ");
        switch (regs->int_parm_long & 3) {
        case 3:
                pr_cont("home space ");
                break;
        case 2:
                pr_cont("secondary space ");
                break;
        case 1:
                pr_cont("access register ");
                break;
        case 0:
                pr_cont("primary space ");
                break;
        }
        pr_cont("mode while using ");
        if (!user_space_fault(regs)) {
                asce = S390_lowcore.kernel_asce;
                pr_cont("kernel ");
        }
#ifdef CONFIG_PGSTE
        else if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
                struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
                asce = gmap->asce;
                pr_cont("gmap ");
        }
#endif
        else {
                asce = S390_lowcore.user_asce;
                pr_cont("user ");
        }
        pr_cont("ASCE.\n");
        dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

static inline void report_user_fault(struct pt_regs *regs, long signr)
{
        if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
                return;
        if (!unhandled_signal(current, signr))
                return;
        if (!printk_ratelimit())
                return;
        printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
               regs->int_code & 0xffff, regs->int_code >> 17);
        print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
        printk(KERN_CONT "\n");
        printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
               regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
        dump_fault_info(regs);
        show_regs(regs);
}

/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
        struct siginfo si;

        report_user_fault(regs, SIGSEGV);
        si.si_signo = SIGSEGV;
        si.si_code = si_code;
        si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
        force_sig_info(SIGSEGV, &si, current);
}

static noinline void do_no_context(struct pt_regs *regs)
{
        const struct exception_table_entry *fixup;
        unsigned long address;

        /* Are we prepared to handle this kernel fault?  */
        fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
        if (fixup) {
                regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
                return;
        }

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        address = regs->int_parm_long & __FAIL_ADDR_MASK;
        if (!user_space_fault(regs))
                printk(KERN_ALERT "Unable to handle kernel pointer dereference"
                       " in virtual kernel address space\n");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request"
                       " in virtual user address space\n");
        printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
               regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
        dump_fault_info(regs);
        die(regs, "Oops");
        do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
        /* Low-address protection hit in kernel mode means
           NULL pointer write access in kernel mode.  */
        if (regs->psw.mask & PSW_MASK_PSTATE) {
                /* Low-address protection hit in user mode 'cannot happen'. */
                die(regs, "Low-address protection");
                do_exit(SIGKILL);
        }

        do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
        struct task_struct *tsk = current;
        struct siginfo si;

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        si.si_signo = SIGBUS;
        si.si_errno = 0;
        si.si_code = BUS_ADRERR;
        si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
        force_sig_info(SIGBUS, &si, tsk);
}

static noinline void do_fault_error(struct pt_regs *regs, int fault)
{
        int si_code;

        switch (fault) {
        case VM_FAULT_BADACCESS:
        case VM_FAULT_BADMAP:
                /* Bad memory access. Check if it is kernel or user space. */
                if (user_mode(regs)) {
                        /* User mode accesses just cause a SIGSEGV */
                        si_code = (fault == VM_FAULT_BADMAP) ?
                                SEGV_MAPERR : SEGV_ACCERR;
                        do_sigsegv(regs, si_code);
                        return;
                }
                /* fallthrough: kernel mode accesses end up in do_no_context */
        case VM_FAULT_BADCONTEXT:
        case VM_FAULT_PFAULT:
                do_no_context(regs);
                break;
        case VM_FAULT_SIGNAL:
                if (!user_mode(regs))
                        do_no_context(regs);
                break;
        default: /* fault & VM_FAULT_ERROR */
                if (fault & VM_FAULT_OOM) {
                        if (!user_mode(regs))
                                do_no_context(regs);
                        else
                                pagefault_out_of_memory();
                } else if (fault & VM_FAULT_SIGSEGV) {
                        /* Kernel mode? Handle exceptions or die */
                        if (!user_mode(regs))
                                do_no_context(regs);
                        else
                                do_sigsegv(regs, SEGV_MAPERR);
                } else if (fault & VM_FAULT_SIGBUS) {
                        /* Kernel mode? Handle exceptions or die */
                        if (!user_mode(regs))
                                do_no_context(regs);
                        else
                                do_sigbus(regs);
                } else
                        BUG();
                break;
        }
}
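
/*
 * Routing summary for do_fault_error (a restatement of the switch
 * above, not part of the original source):
 *
 *      VM_FAULT_BADMAP/BADACCESS   user:   SIGSEGV (MAPERR/ACCERR)
 *                                  kernel: falls through to do_no_context
 *      VM_FAULT_BADCONTEXT/PFAULT          do_no_context
 *      VM_FAULT_SIGNAL             user:   return, the signal is pending
 *                                  kernel: do_no_context
 *      VM_FAULT_OOM                user:   pagefault_out_of_memory
 *      VM_FAULT_SIGSEGV/SIGBUS     user:   SIGSEGV/SIGBUS, kernel: do_no_context
 */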

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
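/*
 * Worked example for the write detection below (assuming facility 75,
 * see fault_init, and therefore not guaranteed on every machine): a
 * protection exception calls do_exception with access == VM_WRITE, so
 * FAULT_FLAG_WRITE is always set; a DAT exception only sets it when
 * (trans_exc_code & store_indication) == 0x400, i.e. when the hardware
 * marked the failing access as a store in the TEID.
 */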
static inline int do_exception(struct pt_regs *regs, int access)
{
#ifdef CONFIG_PGSTE
        struct gmap *gmap;
#endif
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long trans_exc_code;
        unsigned long address;
        unsigned int flags;
        int fault;

        tsk = current;
        /*
         * The instruction that caused the program check has
         * been nullified. Don't signal single step via SIGTRAP.
         */
        clear_pt_regs_flag(regs, PIF_PER_TRAP);

        if (notify_page_fault(regs))
                return 0;

        mm = tsk->mm;
        trans_exc_code = regs->int_parm_long;

        /*
         * Verify that the fault happened in user space, that
         * we are not in an interrupt and that there is a
         * user context.
         */
        fault = VM_FAULT_BADCONTEXT;
        if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))
                goto out;

        address = trans_exc_code & __FAIL_ADDR_MASK;
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
        flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
                flags |= FAULT_FLAG_WRITE;
        down_read(&mm->mmap_sem);

#ifdef CONFIG_PGSTE
        gmap = (current->flags & PF_VCPU) ?
                (struct gmap *) S390_lowcore.gmap : NULL;
        if (gmap) {
                current->thread.gmap_addr = address;
                address = __gmap_translate(gmap, address);
                if (address == -EFAULT) {
                        fault = VM_FAULT_BADMAP;
                        goto out_up;
                }
                if (gmap->pfault_enabled)
                        flags |= FAULT_FLAG_RETRY_NOWAIT;
        }
#endif

retry:
        fault = VM_FAULT_BADMAP;
        vma = find_vma(mm, address);
        if (!vma)
                goto out_up;

        if (unlikely(vma->vm_start > address)) {
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out_up;
                if (expand_stack(vma, address))
                        goto out_up;
        }

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
        fault = VM_FAULT_BADACCESS;
        if (unlikely(!(vma->vm_flags & access)))
                goto out_up;

        if (is_vm_hugetlb_page(vma))
                address &= HPAGE_MASK;
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address, flags);
        /* No reason to continue if interrupted by SIGKILL. */
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
                fault = VM_FAULT_SIGNAL;
                goto out;
        }
        if (unlikely(fault & VM_FAULT_ERROR))
                goto out_up;

        /*
         * Major/minor page fault accounting is only done on the
         * initial attempt. If we go through a retry, it is extremely
         * likely that the page will be found in page cache at that point.
         */
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        tsk->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                      regs, address);
                } else {
                        tsk->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                      regs, address);
                }
                if (fault & VM_FAULT_RETRY) {
#ifdef CONFIG_PGSTE
                        if (gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
                                /* FAULT_FLAG_RETRY_NOWAIT has been set,
                                 * mmap_sem has not been released */
                                current->thread.gmap_pfault = 1;
                                fault = VM_FAULT_PFAULT;
                                goto out_up;
                        }
#endif
                        /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
                         * of starvation. */
                        flags &= ~(FAULT_FLAG_ALLOW_RETRY |
                                   FAULT_FLAG_RETRY_NOWAIT);
                        flags |= FAULT_FLAG_TRIED;
                        down_read(&mm->mmap_sem);
                        goto retry;
                }
        }
#ifdef CONFIG_PGSTE
        if (gmap) {
                address = __gmap_link(gmap, current->thread.gmap_addr,
                                      address);
                if (address == -EFAULT) {
                        fault = VM_FAULT_BADMAP;
                        goto out_up;
                }
                if (address == -ENOMEM) {
                        fault = VM_FAULT_OOM;
                        goto out_up;
                }
        }
#endif
        fault = 0;
out_up:
        up_read(&mm->mmap_sem);
out:
        return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
        unsigned long trans_exc_code;
        int fault;

        trans_exc_code = regs->int_parm_long;
        /*
         * Protection exceptions are suppressing, decrement psw address.
         * The exception to this rule are aborted transactions, for these
         * the PSW already points to the correct location.
         */
        if (!(regs->int_code & 0x200))
                regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
        /*
         * Check for low-address protection.  This needs to be treated
         * as a special case because the translation exception code
         * field is not guaranteed to contain valid data in this case.
         */
        if (unlikely(!(trans_exc_code & 4))) {
                do_low_address(regs);
                return;
        }
        fault = do_exception(regs, VM_WRITE);
        if (unlikely(fault))
                do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

void do_dat_exception(struct pt_regs *regs)
{
        int access, fault;

        access = VM_READ | VM_EXEC | VM_WRITE;
        fault = do_exception(regs, access);
        if (unlikely(fault))
                do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
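
/*
 * Rough sequence of the pfault handshake (a summary for orientation;
 * pfault_interrupt below handles the corner cases):
 *
 *      1. pfault_init() enables the mechanism via DIAG 0x258.
 *      2. A guest page fault that z/VM cannot resolve immediately raises
 *         an "initial" external interrupt; pfault_interrupt() puts the
 *         task to sleep.
 *      3. Once the page is resident, z/VM raises the "completion"
 *         interrupt and the task is woken up.
 *
 * The two interrupts can arrive in either order, hence the three
 * pfault_wait states: 1 (sleeping), 0 (idle) and -1 (completion seen
 * before the initial interrupt).
 */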
static int pfault_disable;

static int __init nopfault(char *str)
{
        pfault_disable = 1;
        return 1;
}

__setup("nopfault", nopfault);
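
/* Usage: booting with "nopfault" on the kernel command line sets
 * pfault_disable and skips the DIAG 0x258 handshake entirely. */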

struct pfault_refbk {
        u16 refdiagc;
        u16 reffcode;
        u16 refdwlen;
        u16 refversn;
        u64 refgaddr;
        u64 refselmk;
        u64 refcmpmk;
        u64 reserved;
} __attribute__ ((packed, aligned(8)));
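
/*
 * Layout note (derived from the initializers below, not a statement
 * from the original source): the refbk block is the 40-byte parameter
 * list for DIAG 0x258 (refdwlen == 5 doublewords, 8-byte aligned).
 * refgaddr names the address the token is taken from; this code uses
 * __LC_CURRENT_PID, so the token delivered with the completion
 * interrupt is the pid that pfault_interrupt() looks up.
 */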

int pfault_init(void)
{
        struct pfault_refbk refbk = {
                .refdiagc = 0x258,
                .reffcode = 0,
                .refdwlen = 5,
                .refversn = 2,
                .refgaddr = __LC_CURRENT_PID,
                .refselmk = 1ULL << 48,
                .refcmpmk = 1ULL << 48,
                .reserved = __PF_RES_FIELD };
        int rc;

        if (pfault_disable)
                return -1;
        asm volatile(
                "       diag    %1,%0,0x258\n"
                "0:     j       2f\n"
                "1:     la      %0,8\n"
                "2:\n"
                EX_TABLE(0b,1b)
                : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
        return rc;
}
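
/*
 * Return value sketch: 0 if DIAG 0x258 succeeded, 8 if the diag itself
 * faulted (set by the exception fixup above, e.g. when not running
 * under z/VM), -1 if "nopfault" was given.  The caller in this file
 * only checks for != 0.
 */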

void pfault_fini(void)
{
        struct pfault_refbk refbk = {
                .refdiagc = 0x258,
                .reffcode = 1,
                .refdwlen = 5,
                .refversn = 2,
        };

        if (pfault_disable)
                return;
        asm volatile(
                "       diag    %0,0,0x258\n"
                "0:\n"
                EX_TABLE(0b,0b)
                : : "a" (&refbk), "m" (refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

static void pfault_interrupt(struct ext_code ext_code,
                             unsigned int param32, unsigned long param64)
{
        struct task_struct *tsk;
        __u16 subcode;
        pid_t pid;

        /*
         * Get the external interruption subcode & pfault
         * initial/completion signal bit. VM stores this
         * in the 'cpu address' field associated with the
         * external interrupt.
         */
        subcode = ext_code.subcode;
        if ((subcode & 0xff00) != __SUBCODE_MASK)
                return;
        inc_irq_stat(IRQEXT_PFL);
        /* Get the token (= pid of the affected task). */
        pid = sizeof(void *) == 4 ? param32 : param64;
        rcu_read_lock();
        tsk = find_task_by_pid_ns(pid, &init_pid_ns);
        if (tsk)
                get_task_struct(tsk);
        rcu_read_unlock();
        if (!tsk)
                return;
        spin_lock(&pfault_lock);
        if (subcode & 0x0080) {
                /* signal bit is set -> a page has been swapped in by VM */
                if (tsk->thread.pfault_wait == 1) {
                        /* Initial interrupt was faster than the completion
                         * interrupt. pfault_wait is valid. Set pfault_wait
                         * back to zero and wake up the process. This can
                         * safely be done because the task is still sleeping
                         * and can't produce new pfaults. */
                        tsk->thread.pfault_wait = 0;
                        list_del(&tsk->thread.list);
                        wake_up_process(tsk);
                        put_task_struct(tsk);
                } else {
                        /* Completion interrupt was faster than initial
                         * interrupt. Set pfault_wait to -1 so the initial
                         * interrupt doesn't put the task to sleep.
                         * If the task is not running, ignore the completion
                         * interrupt since it must be a leftover of a PFAULT
                         * CANCEL operation which didn't remove all pending
                         * completion interrupts. */
                        if (tsk->state == TASK_RUNNING)
                                tsk->thread.pfault_wait = -1;
                }
        } else {
                /* signal bit not set -> a real page is missing. */
                if (WARN_ON_ONCE(tsk != current))
                        goto out;
                if (tsk->thread.pfault_wait == 1) {
                        /* Already on the list with a reference: put to sleep */
                        __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                        set_tsk_need_resched(tsk);
                } else if (tsk->thread.pfault_wait == -1) {
                        /* Completion interrupt was faster than the initial
                         * interrupt (pfault_wait == -1). Set pfault_wait
                         * back to zero and exit. */
                        tsk->thread.pfault_wait = 0;
                } else {
                        /* Initial interrupt arrived before completion
                         * interrupt. Let the task sleep.
                         * An extra task reference is needed since a different
                         * cpu may set the task state to TASK_RUNNING again
                         * before the scheduler is reached. */
                        get_task_struct(tsk);
                        tsk->thread.pfault_wait = 1;
                        list_add(&tsk->thread.list, &pfault_list);
                        __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                        set_tsk_need_resched(tsk);
                }
        }
out:
        spin_unlock(&pfault_lock);
        put_task_struct(tsk);
}

static int pfault_cpu_notify(struct notifier_block *self, unsigned long action,
                             void *hcpu)
{
        struct thread_struct *thread, *next;
        struct task_struct *tsk;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DEAD:
                spin_lock_irq(&pfault_lock);
                list_for_each_entry_safe(thread, next, &pfault_list, list) {
                        thread->pfault_wait = 0;
                        list_del(&thread->list);
                        tsk = container_of(thread, struct task_struct, thread);
                        wake_up_process(tsk);
                        put_task_struct(tsk);
                }
                spin_unlock_irq(&pfault_lock);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static int __init pfault_irq_init(void)
{
        int rc;

        rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
        if (rc)
                goto out_extint;
        rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
        if (rc)
                goto out_pfault;
        irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
        hotcpu_notifier(pfault_cpu_notify, 0);
        return 0;

out_pfault:
        unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
        pfault_disable = 1;
        return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */