linux/arch/s390/mm/fault.c
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include "../kernel/entry.h"

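/*
 * The translation exception identification (TEID) is delivered in
 * int_parm_long: the upper bits hold the failing page address (selected
 * by __FAIL_ADDR_MASK), the low bits encode the address space used.
 * __SUBCODE_MASK matches the subcode class of pfault external
 * interrupts, and __PF_RES_FIELD is the value stored in the reserved
 * doubleword of the diag 0x258 parameter block below.
 */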
#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL

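/*
 * Architecture-private fault codes, chosen above the generic
 * VM_FAULT_* bits so that do_fault_error() can tell the s390 specific
 * outcomes apart from the results of handle_mm_fault().
 */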
#define VM_FAULT_BADCONTEXT     0x010000
#define VM_FAULT_BADMAP         0x020000
#define VM_FAULT_BADACCESS      0x040000
#define VM_FAULT_SIGNAL         0x080000
#define VM_FAULT_PFAULT         0x100000

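/*
 * With the fetch/store-indication facility (facility bit 75) the TEID
 * indicates whether the faulting access was a fetch or a store;
 * store_indication masks those bits so that do_exception() can set
 * FAULT_FLAG_WRITE for store faults.
 */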
static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
        if (test_facility(75))
                store_indication = 0xc00;
        return 0;
}
early_initcall(fault_init);

static inline int notify_page_fault(struct pt_regs *regs)
{
        int ret = 0;

        /* kprobe_running() needs smp_processor_id() */
        if (kprobes_built_in() && !user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 14))
                        ret = 1;
                preempt_enable();
        }
        return ret;
}

/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out.
 */
void bust_spinlocks(int yes)
{
        if (yes) {
                oops_in_progress = 1;
        } else {
                int loglevel_save = console_loglevel;
                console_unblank();
                oops_in_progress = 0;
                /*
                 * OK, the message is on the console.  Now we call printk()
                 * without oops_in_progress set so that printk will give klogd
                 * a poke.  Hold onto your hats...
                 */
                console_loglevel = 15;
                printk(" ");
                console_loglevel = loglevel_save;
        }
}

/*
 * Returns the address space associated with the fault.
 * Returns 0 for kernel space and 1 for user space.
 */
static inline int user_space_fault(struct pt_regs *regs)
{
        unsigned long trans_exc_code;

        /*
         * The lowest two bits of the translation exception
         * identification indicate which paging table was used.
         */
        trans_exc_code = regs->int_parm_long & 3;
        if (trans_exc_code == 3) /* home space -> kernel */
                return 0;
        if (user_mode(regs))
                return 1;
        if (trans_exc_code == 2) /* secondary space -> set_fs */
                return current->thread.mm_segment.ar4;
        if (current->flags & PF_VCPU)
                return 1;
        return 0;
}

static int bad_address(void *p)
{
        unsigned long dummy;

        return probe_kernel_address((unsigned long *)p, dummy);
}

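/*
 * Walk the page table for @address, starting from the top-level table
 * designated by @asce, and print one entry per level. The walk stops
 * at the first invalid or large entry. Used by dump_fault_info() to
 * decorate fault messages.
 */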
static void dump_pagetable(unsigned long asce, unsigned long address)
{
        unsigned long *table = __va(asce & PAGE_MASK);

        pr_alert("AS:%016lx ", asce);
        switch (asce & _ASCE_TYPE_MASK) {
        case _ASCE_TYPE_REGION1:
                table = table + ((address >> 53) & 0x7ff);
                if (bad_address(table))
                        goto bad;
                pr_cont("R1:%016lx ", *table);
                if (*table & _REGION_ENTRY_INVALID)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_REGION2:
                table = table + ((address >> 42) & 0x7ff);
                if (bad_address(table))
                        goto bad;
                pr_cont("R2:%016lx ", *table);
                if (*table & _REGION_ENTRY_INVALID)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_REGION3:
                table = table + ((address >> 31) & 0x7ff);
                if (bad_address(table))
                        goto bad;
                pr_cont("R3:%016lx ", *table);
                if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_SEGMENT:
                table = table + ((address >> 20) & 0x7ff);
                if (bad_address(table))
                        goto bad;
                pr_cont("S:%016lx ", *table);
                if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
                        goto out;
                table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
        }
        table = table + ((address >> 12) & 0xff);
        if (bad_address(table))
                goto bad;
        pr_cont("P:%016lx ", *table);
out:
        pr_cont("\n");
        return;
bad:
        pr_cont("BAD\n");
}

static void dump_fault_info(struct pt_regs *regs)
{
        unsigned long asce;

        pr_alert("Failing address: %016lx TEID: %016lx\n",
                 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
        pr_alert("Fault in ");
        switch (regs->int_parm_long & 3) {
        case 3:
                pr_cont("home space ");
                break;
        case 2:
                pr_cont("secondary space ");
                break;
        case 1:
                pr_cont("access register ");
                break;
        case 0:
                pr_cont("primary space ");
                break;
        }
        pr_cont("mode while using ");
        if (!user_space_fault(regs)) {
                asce = S390_lowcore.kernel_asce;
                pr_cont("kernel ");
        }
#ifdef CONFIG_PGSTE
        else if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
                struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
                asce = gmap->asce;
                pr_cont("gmap ");
        }
#endif
        else {
                asce = S390_lowcore.user_asce;
                pr_cont("user ");
        }
        pr_cont("ASCE.\n");
        dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
        if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
                return;
        if (!unhandled_signal(current, signr))
                return;
        if (!printk_ratelimit())
                return;
        printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
               regs->int_code & 0xffff, regs->int_code >> 17);
        print_vma_addr(KERN_CONT "in ", regs->psw.addr);
        printk(KERN_CONT "\n");
        if (is_mm_fault)
                dump_fault_info(regs);
        show_regs(regs);
}

/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
        struct siginfo si;

        report_user_fault(regs, SIGSEGV, 1);
        si.si_signo = SIGSEGV;
        si.si_code = si_code;
        si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
        force_sig_info(SIGSEGV, &si, current);
}

static noinline void do_no_context(struct pt_regs *regs)
{
        const struct exception_table_entry *fixup;

        /* Are we prepared to handle this kernel fault?  */
        fixup = search_exception_tables(regs->psw.addr);
        if (fixup) {
                regs->psw.addr = extable_fixup(fixup);
                return;
        }

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        if (!user_space_fault(regs))
                printk(KERN_ALERT "Unable to handle kernel pointer dereference"
                       " in virtual kernel address space\n");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request"
                       " in virtual user address space\n");
        dump_fault_info(regs);
        die(regs, "Oops");
        do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
        /* Low-address protection hit in kernel mode means
           NULL pointer write access in kernel mode.  */
        if (regs->psw.mask & PSW_MASK_PSTATE) {
                /* Low-address protection hit in user mode 'cannot happen'. */
                die (regs, "Low-address protection");
                do_exit(SIGKILL);
        }

        do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
        struct task_struct *tsk = current;
        struct siginfo si;

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        si.si_signo = SIGBUS;
        si.si_errno = 0;
        si.si_code = BUS_ADRERR;
        si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
        force_sig_info(SIGBUS, &si, tsk);
}

static noinline void do_fault_error(struct pt_regs *regs, int fault)
{
        int si_code;

        switch (fault) {
        case VM_FAULT_BADACCESS:
        case VM_FAULT_BADMAP:
                /* Bad memory access. Check if it is kernel or user space. */
                if (user_mode(regs)) {
                        /* User mode accesses just cause a SIGSEGV */
                        si_code = (fault == VM_FAULT_BADMAP) ?
                                SEGV_MAPERR : SEGV_ACCERR;
                        do_sigsegv(regs, si_code);
                        return;
                }
                /* fallthrough */
        case VM_FAULT_BADCONTEXT:
        case VM_FAULT_PFAULT:
                do_no_context(regs);
                break;
        case VM_FAULT_SIGNAL:
                if (!user_mode(regs))
                        do_no_context(regs);
                break;
        default: /* fault & VM_FAULT_ERROR */
                if (fault & VM_FAULT_OOM) {
                        if (!user_mode(regs))
                                do_no_context(regs);
                        else
                                pagefault_out_of_memory();
                } else if (fault & VM_FAULT_SIGSEGV) {
                        /* Kernel mode? Handle exceptions or die */
                        if (!user_mode(regs))
                                do_no_context(regs);
                        else
                                do_sigsegv(regs, SEGV_MAPERR);
                } else if (fault & VM_FAULT_SIGBUS) {
                        /* Kernel mode? Handle exceptions or die */
                        if (!user_mode(regs))
                                do_no_context(regs);
                        else
                                do_sigbus(regs);
                } else
                        BUG();
                break;
        }
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
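/*
 * Protection exceptions are suppressing, i.e. the PSW already points
 * past the faulting instruction; do_protection_exception() therefore
 * rewinds the PSW before calling this function. The translation
 * exceptions are nullifying: the PSW still points at the faulting
 * instruction, which is simply re-executed once the fault is resolved.
 */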
static inline int do_exception(struct pt_regs *regs, int access)
{
#ifdef CONFIG_PGSTE
        struct gmap *gmap;
#endif
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long trans_exc_code;
        unsigned long address;
        unsigned int flags;
        int fault;

        tsk = current;
        /*
         * The instruction that caused the program check has
         * been nullified. Don't signal single step via SIGTRAP.
         */
        clear_pt_regs_flag(regs, PIF_PER_TRAP);

        if (notify_page_fault(regs))
                return 0;

        mm = tsk->mm;
        trans_exc_code = regs->int_parm_long;

        /*
         * Verify that the fault happened in user space, that
         * we are not in an interrupt and that there is a
         * user context.
         */
        fault = VM_FAULT_BADCONTEXT;
        if (unlikely(!user_space_fault(regs) || faulthandler_disabled() || !mm))
                goto out;

        address = trans_exc_code & __FAIL_ADDR_MASK;
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
        flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
                flags |= FAULT_FLAG_WRITE;
        down_read(&mm->mmap_sem);

#ifdef CONFIG_PGSTE
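        /*
         * If the fault happened while running a KVM guest (PF_VCPU),
         * the faulting address is a guest address; translate it via
         * the guest address space mapping (gmap) into the host user
         * address before looking up the vma.
         */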
        gmap = (current->flags & PF_VCPU) ?
                (struct gmap *) S390_lowcore.gmap : NULL;
        if (gmap) {
                current->thread.gmap_addr = address;
                address = __gmap_translate(gmap, address);
                if (address == -EFAULT) {
                        fault = VM_FAULT_BADMAP;
                        goto out_up;
                }
                if (gmap->pfault_enabled)
                        flags |= FAULT_FLAG_RETRY_NOWAIT;
        }
#endif

retry:
        fault = VM_FAULT_BADMAP;
        vma = find_vma(mm, address);
        if (!vma)
                goto out_up;

        if (unlikely(vma->vm_start > address)) {
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out_up;
                if (expand_stack(vma, address))
                        goto out_up;
        }

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
        fault = VM_FAULT_BADACCESS;
        if (unlikely(!(vma->vm_flags & access)))
                goto out_up;

        if (is_vm_hugetlb_page(vma))
                address &= HPAGE_MASK;
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address, flags);
        /* No reason to continue if interrupted by SIGKILL. */
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
                fault = VM_FAULT_SIGNAL;
                goto out;
        }
        if (unlikely(fault & VM_FAULT_ERROR))
                goto out_up;

        /*
         * Major/minor page fault accounting is only done on the
         * initial attempt. If we go through a retry, it is extremely
         * likely that the page will be found in page cache at that point.
         */
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        tsk->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                      regs, address);
                } else {
                        tsk->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                      regs, address);
                }
                if (fault & VM_FAULT_RETRY) {
#ifdef CONFIG_PGSTE
                        if (gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
                                /* FAULT_FLAG_RETRY_NOWAIT has been set,
                                 * mmap_sem has not been released */
                                current->thread.gmap_pfault = 1;
                                fault = VM_FAULT_PFAULT;
                                goto out_up;
                        }
#endif
                        /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
                         * of starvation. */
                        flags &= ~(FAULT_FLAG_ALLOW_RETRY |
                                   FAULT_FLAG_RETRY_NOWAIT);
                        flags |= FAULT_FLAG_TRIED;
                        down_read(&mm->mmap_sem);
                        goto retry;
                }
        }
#ifdef CONFIG_PGSTE
        if (gmap) {
                address = __gmap_link(gmap, current->thread.gmap_addr,
                                      address);
                if (address == -EFAULT) {
                        fault = VM_FAULT_BADMAP;
                        goto out_up;
                }
                if (address == -ENOMEM) {
                        fault = VM_FAULT_OOM;
                        goto out_up;
                }
        }
#endif
        fault = 0;
out_up:
        up_read(&mm->mmap_sem);
out:
        return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
        unsigned long trans_exc_code;
        int fault;

        trans_exc_code = regs->int_parm_long;
        /*
         * Protection exceptions are suppressing, decrement psw address.
         * The exception to this rule are aborted transactions, for these
         * the PSW already points to the correct location.
         */
        if (!(regs->int_code & 0x200))
                regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
        /*
         * Check for low-address protection.  This needs to be treated
         * as a special case because the translation exception code
         * field is not guaranteed to contain valid data in this case.
         */
        if (unlikely(!(trans_exc_code & 4))) {
                do_low_address(regs);
                return;
        }
        fault = do_exception(regs, VM_WRITE);
        if (unlikely(fault))
                do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

void do_dat_exception(struct pt_regs *regs)
{
        int access, fault;

        access = VM_READ | VM_EXEC | VM_WRITE;
        fault = do_exception(regs, access);
        if (unlikely(fault))
                do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page fault routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
        pfault_disable = 1;
        return 1;
}

__setup("nopfault", nopfault);

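/*
 * Parameter block for the diag 0x258 pseudo page fault handshake
 * with the hypervisor: reffcode selects the function (0 = establish
 * the token, 1 = cancel), refdwlen is the block length in doublewords,
 * and refgaddr names the location where the fault token is stored.
 */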
struct pfault_refbk {
        u16 refdiagc;
        u16 reffcode;
        u16 refdwlen;
        u16 refversn;
        u64 refgaddr;
        u64 refselmk;
        u64 refcmpmk;
        u64 reserved;
} __attribute__ ((packed, aligned(8)));

int pfault_init(void)
{
        struct pfault_refbk refbk = {
                .refdiagc = 0x258,
                .reffcode = 0,
                .refdwlen = 5,
                .refversn = 2,
                .refgaddr = __LC_LPP,
                .refselmk = 1ULL << 48,
                .refcmpmk = 1ULL << 48,
                .reserved = __PF_RES_FIELD };
        int rc;

        if (pfault_disable)
                return -1;
        diag_stat_inc(DIAG_STAT_X258);
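        /*
         * Issue diag 0x258 to enable pfault handshaking. If the
         * diagnose is not available the instruction takes a program
         * check; the EX_TABLE fixup branches to label 1, which
         * reports rc = 8 instead.
         */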
        asm volatile(
                "       diag    %1,%0,0x258\n"
                "0:     j       2f\n"
                "1:     la      %0,8\n"
                "2:\n"
                EX_TABLE(0b,1b)
                : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
        return rc;
}

void pfault_fini(void)
{
        struct pfault_refbk refbk = {
                .refdiagc = 0x258,
                .reffcode = 1,
                .refdwlen = 5,
                .refversn = 2,
        };

        if (pfault_disable)
                return;
        diag_stat_inc(DIAG_STAT_X258);
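        /*
         * Issue diag 0x258 to cancel pfault handshaking; a program
         * check on an unsupported hypervisor is silently ignored via
         * the EX_TABLE fixup.
         */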
        asm volatile(
                "       diag    %0,0,0x258\n"
                "0:\n"
                EX_TABLE(0b,0b)
                : : "a" (&refbk), "m" (refbk) : "cc");
}

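/*
 * Tasks that went to sleep on an initial pfault interrupt and are
 * still waiting for the matching completion interrupt are kept on
 * pfault_list, protected by pfault_lock.
 */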
static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

static void pfault_interrupt(struct ext_code ext_code,
                             unsigned int param32, unsigned long param64)
{
        struct task_struct *tsk;
        __u16 subcode;
        pid_t pid;

        /*
         * Get the external interruption subcode & pfault
         * initial/completion signal bit. VM stores this
         * in the 'cpu address' field associated with the
         * external interrupt.
         */
        subcode = ext_code.subcode;
        if ((subcode & 0xff00) != __SUBCODE_MASK)
                return;
        inc_irq_stat(IRQEXT_PFL);
        /* Get the token (= pid of the affected task). */
        pid = param64 & LPP_PFAULT_PID_MASK;
        rcu_read_lock();
        tsk = find_task_by_pid_ns(pid, &init_pid_ns);
        if (tsk)
                get_task_struct(tsk);
        rcu_read_unlock();
        if (!tsk)
                return;
        spin_lock(&pfault_lock);
        if (subcode & 0x0080) {
                /* signal bit is set -> a page has been swapped in by VM */
                if (tsk->thread.pfault_wait == 1) {
                        /* Initial interrupt was faster than the completion
                         * interrupt. pfault_wait is valid. Set pfault_wait
                         * back to zero and wake up the process. This can
                         * safely be done because the task is still sleeping
                         * and can't produce new pfaults. */
                        tsk->thread.pfault_wait = 0;
                        list_del(&tsk->thread.list);
                        wake_up_process(tsk);
                        put_task_struct(tsk);
                } else {
                        /* Completion interrupt was faster than initial
                         * interrupt. Set pfault_wait to -1 so the initial
                         * interrupt doesn't put the task to sleep.
                         * If the task is not running, ignore the completion
                         * interrupt since it must be a leftover of a PFAULT
                         * CANCEL operation which didn't remove all pending
                         * completion interrupts. */
                        if (tsk->state == TASK_RUNNING)
                                tsk->thread.pfault_wait = -1;
                }
        } else {
                /* signal bit not set -> a real page is missing. */
                if (WARN_ON_ONCE(tsk != current))
                        goto out;
                if (tsk->thread.pfault_wait == 1) {
                        /* Already on the list with a reference: put to sleep */
                        __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                        set_tsk_need_resched(tsk);
                } else if (tsk->thread.pfault_wait == -1) {
                        /* Completion interrupt was faster than the initial
                         * interrupt (pfault_wait == -1). Set pfault_wait
                         * back to zero and exit. */
                        tsk->thread.pfault_wait = 0;
                } else {
                        /* Initial interrupt arrived before completion
                         * interrupt. Let the task sleep.
                         * An extra task reference is needed since a different
                         * cpu may set the task state to TASK_RUNNING again
                         * before the scheduler is reached. */
                        get_task_struct(tsk);
                        tsk->thread.pfault_wait = 1;
                        list_add(&tsk->thread.list, &pfault_list);
                        __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                        set_tsk_need_resched(tsk);
                }
        }
out:
        spin_unlock(&pfault_lock);
        put_task_struct(tsk);
}

static int pfault_cpu_notify(struct notifier_block *self, unsigned long action,
                             void *hcpu)
{
        struct thread_struct *thread, *next;
        struct task_struct *tsk;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DEAD:
                spin_lock_irq(&pfault_lock);
                list_for_each_entry_safe(thread, next, &pfault_list, list) {
                        thread->pfault_wait = 0;
                        list_del(&thread->list);
                        tsk = container_of(thread, struct task_struct, thread);
                        wake_up_process(tsk);
                        put_task_struct(tsk);
                }
                spin_unlock_irq(&pfault_lock);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static int __init pfault_irq_init(void)
{
        int rc;

        rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
        if (rc)
                goto out_extint;
        rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
        if (rc)
                goto out_pfault;
        irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
        hotcpu_notifier(pfault_cpu_notify, 0);
        return 0;

out_pfault:
        unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
        pfault_disable = 1;
        return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */