linux/arch/s390/mm/fault.c
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include "../kernel/entry.h"

#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK 0x7ffff000
#define __SUBCODE_MASK 0x0200
#define __PF_RES_FIELD 0ULL
#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
#endif /* CONFIG_64BIT */

#define VM_FAULT_BADCONTEXT	0x010000
#define VM_FAULT_BADMAP		0x020000
#define VM_FAULT_BADACCESS	0x040000
#define VM_FAULT_SIGNAL		0x080000
#define VM_FAULT_PFAULT		0x100000

static unsigned long store_indication __read_mostly;

#ifdef CONFIG_64BIT
static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);
#endif
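
/*
 * A note on store_indication: facility 75 is (to the best of our reading)
 * the access-exception fetch/store-indication facility. When it is
 * installed, TEID bits 52-53 (the 0xc00 mask above) report the access
 * type, and do_exception() below checks for the value 0x400 ("store") to
 * set FAULT_FLAG_WRITE on the first pass instead of taking a second fault
 * for the write access.
 */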

static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}
	return ret;
}

/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out.
 */
void bust_spinlocks(int yes)
{
	if (yes) {
		oops_in_progress = 1;
	} else {
		int loglevel_save = console_loglevel;
		console_unblank();
		oops_in_progress = 0;
		/*
		 * OK, the message is on the console.  Now we call printk()
		 * without oops_in_progress set so that printk will give klogd
		 * a poke.  Hold onto your hats...
		 */
		console_loglevel = 15;
		printk(" ");
		console_loglevel = loglevel_save;
	}
}

/*
 * Returns the address space associated with the fault.
 * Returns 0 for kernel space and 1 for user space.
 */
static inline int user_space_fault(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	/*
	 * The lowest two bits of the translation exception
	 * identification indicate which paging table was used.
	 */
	trans_exc_code = regs->int_parm_long & 3;
	if (trans_exc_code == 3) /* home space -> kernel */
		return 0;
	if (user_mode(regs))
		return 1;
	if (trans_exc_code == 2) /* secondary space -> set_fs */
		return current->thread.mm_segment.ar4;
	if (current->flags & PF_VCPU)
		return 1;
	return 0;
}
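
/*
 * For reference, the address-space encoding in the low two bits of the
 * TEID checked above is: 0 = primary, 1 = access register, 2 = secondary,
 * 3 = home; dump_fault_info() below prints the same mapping.
 */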

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

#ifdef CONFIG_64BIT
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & PAGE_MASK);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table = table + ((address >> 53) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION2:
		table = table + ((address >> 42) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION3:
		table = table + ((address >> 31) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table = table + ((address >> 20) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table = table + ((address >> 12) & 0xff);
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

#else /* CONFIG_64BIT */

static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & PAGE_MASK);

	pr_alert("AS:%08lx ", asce);
	table = table + ((address >> 20) & 0x7ff);
	if (bad_address(table))
		goto bad;
	pr_cont("S:%08lx ", *table);
	if (*table & _SEGMENT_ENTRY_INVALID)
		goto out;
	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	table = table + ((address >> 12) & 0xff);
	if (bad_address(table))
		goto bad;
	pr_cont("P:%08lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

#endif /* CONFIG_64BIT */

static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	if (!user_space_fault(regs)) {
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
	}
#ifdef CONFIG_PGSTE
	else if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
		struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
		asce = gmap->asce;
		pr_cont("gmap ");
	}
#endif
	else {
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}
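
/*
 * Taken together, dump_fault_info() and dump_pagetable() produce output
 * of roughly the following shape (addresses invented for illustration):
 *
 *   Fault in home space mode while using kernel ASCE.
 *   AS:0000000000d5c007 R3:0000000000d5d007 S:0000000000d60000 P:...
 */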

static inline void report_user_fault(struct pt_regs *regs, long signr)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code 0x%X ",
	       regs->int_code);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
	printk(KERN_CONT "\n");
	printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
	       regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	struct siginfo si;

	report_user_fault(regs, SIGSEGV);
	si.si_signo = SIGSEGV;
	si.si_code = si_code;
	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
	force_sig_info(SIGSEGV, &si, current);
}

static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;
	unsigned long address;

	/* Are we prepared to handle this kernel fault?  */
	fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
	if (fixup) {
		regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	address = regs->int_parm_long & __FAIL_ADDR_MASK;
	if (!user_space_fault(regs))
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
	       regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode.  */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die(regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct siginfo si;

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_code = BUS_ADRERR;
	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
	force_sig_info(SIGBUS, &si, tsk);
}

static noinline void do_fault_error(struct pt_regs *regs, int fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			return;
		}
		/* fallthrough: kernel mode faults end up in do_no_context */
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline int do_exception(struct pt_regs *regs, int access)
{
#ifdef CONFIG_PGSTE
	struct gmap *gmap;
#endif
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	int fault;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_pt_regs_flag(regs, PIF_PER_TRAP);

	if (notify_page_fault(regs))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))
		goto out;

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	down_read(&mm->mmap_sem);

#ifdef CONFIG_PGSTE
	gmap = (struct gmap *)
		((current->flags & PF_VCPU) ? S390_lowcore.gmap : 0);
	if (gmap) {
		address = __gmap_fault(address, gmap);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}
#endif

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
	/* No reason to continue if interrupted by SIGKILL. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
		fault = VM_FAULT_SIGNAL;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
#ifdef CONFIG_PGSTE
			if (gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_sem has not been released */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
#endif
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~(FAULT_FLAG_ALLOW_RETRY |
				   FAULT_FLAG_RETRY_NOWAIT);
			flags |= FAULT_FLAG_TRIED;
			down_read(&mm->mmap_sem);
			goto retry;
		}
	}
	fault = 0;
out_up:
	up_read(&mm->mmap_sem);
out:
	return fault;
}
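
/*
 * Note on the contract: do_exception() returns 0 once the fault has been
 * handled, a VM_FAULT_* code from handle_mm_fault(), or one of the
 * private codes defined at the top of this file; any nonzero value is
 * turned into a signal or an oops by do_fault_error().
 */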

void __kprobes do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	fault = do_exception(regs, VM_WRITE);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}

void __kprobes do_dat_exception(struct pt_regs *regs)
{
	int access, fault;

	access = VM_READ | VM_EXEC | VM_WRITE;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page fault routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}

__setup("nopfault", nopfault);

struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));
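
/*
 * pfault_refbk is the parameter block for DIAGNOSE 0x258 as issued below:
 * reffcode selects the function (0 establishes the pfault environment,
 * 1 cancels it), refgaddr names the token (here the lowcore offset of the
 * current pid, so the interrupt handler can map a token back to a task),
 * and refselmk/refcmpmk are the selection and compare masks applied by
 * the hypervisor. refdwlen is the block length in doublewords.
 */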

int pfault_init(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 0,
		.refdwlen = 5,
		.refversn = 2,
		.refgaddr = __LC_CURRENT_PID,
		.refselmk = 1ULL << 48,
		.refcmpmk = 1ULL << 48,
		.reserved = __PF_RES_FIELD };
	int rc;

	if (pfault_disable)
		return -1;
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
	return rc;
}

void pfault_fini(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 1,
		.refdwlen = 5,
		.refversn = 2,
	};

	if (pfault_disable)
		return;
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:\n"
		EX_TABLE(0b,0b)
		: : "a" (&refbk), "m" (refbk) : "cc");
}
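
/*
 * If DIAGNOSE 0x258 is unavailable (for instance when not running under
 * z/VM), the instruction itself raises an exception; the EX_TABLE fixups
 * above turn that into rc = 8 in pfault_init() and into a silent no-op
 * in pfault_fini().
 */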

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault
	 * initial/completion signal bit. VM stores this
	 * in the 'cpu address' field associated with the
	 * external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = sizeof(void *) == 4 ? param32 : param64;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & 0x0080) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (tsk->state == TASK_RUNNING)
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
			__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}
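
/*
 * In short, thread.pfault_wait implements a small state machine: 0 means
 * no pseudo page fault is outstanding, 1 means the task is (or is about
 * to go) asleep on pfault_list waiting for the completion interrupt, and
 * -1 records that the completion interrupt overtook the initial one so
 * the task must not be put to sleep.
 */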

static int pfault_cpu_notify(struct notifier_block *self, unsigned long action,
			     void *hcpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DEAD:
		spin_lock_irq(&pfault_lock);
		list_for_each_entry_safe(thread, next, &pfault_list, list) {
			thread->pfault_wait = 0;
			list_del(&thread->list);
			tsk = container_of(thread, struct task_struct, thread);
			wake_up_process(tsk);
			put_task_struct(tsk);
		}
		spin_unlock_irq(&pfault_lock);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	hotcpu_notifier(pfault_cpu_notify, 0);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */