linux/arch/s390/mm/fault.c
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include "../kernel/entry.h"

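/*
 * 31-bit and 64-bit builds use different layouts for the translation
 * exception identification and for the pfault external interrupt subcode,
 * hence the two sets of constants below. Summarizing how they are used in
 * this file: __FAIL_ADDR_MASK extracts the page-aligned failing address
 * from int_parm_long, __SUBCODE_MASK is the expected high byte of the
 * pfault interrupt subcode, and __PF_RES_FIELD is the value stored in the
 * reserved field of the pfault request block.
 */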
#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK 0x7ffff000
#define __SUBCODE_MASK 0x0200
#define __PF_RES_FIELD 0ULL
#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
#endif /* CONFIG_64BIT */

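/*
 * Architecture-private fault codes, chosen above the generic VM_FAULT_*
 * bits: do_exception() returns them and do_fault_error() maps them to the
 * appropriate signal or kernel fixup.
 */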
#define VM_FAULT_BADCONTEXT     0x010000
#define VM_FAULT_BADMAP         0x020000
#define VM_FAULT_BADACCESS      0x040000
#define VM_FAULT_SIGNAL         0x080000

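/*
 * If facility bit 75 is installed (checked in fault_init() below), the
 * translation exception identification carries fetch/store indication bits
 * (masked with 0xc00 here); do_exception() treats the value 0x400 as a
 * store and sets FAULT_FLAG_WRITE accordingly. Without the facility
 * store_indication stays 0 and the comparison never matches.
 */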
static unsigned long store_indication __read_mostly;

#ifdef CONFIG_64BIT
static int __init fault_init(void)
{
        if (test_facility(75))
                store_indication = 0xc00;
        return 0;
}
early_initcall(fault_init);
#endif

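/*
 * Give a registered kprobe fault handler the first chance to resolve a
 * kernel-mode fault (for example one raised while single-stepping a probed
 * instruction); a non-zero return means the fault has been handled.
 */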
static inline int notify_page_fault(struct pt_regs *regs)
{
        int ret = 0;

        /* kprobe_running() needs smp_processor_id() */
        if (kprobes_built_in() && !user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 14))
                        ret = 1;
                preempt_enable();
        }
        return ret;
}

/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out.
 */
void bust_spinlocks(int yes)
{
        if (yes) {
                oops_in_progress = 1;
        } else {
                int loglevel_save = console_loglevel;
                console_unblank();
                oops_in_progress = 0;
                /*
                 * OK, the message is on the console.  Now we call printk()
                 * without oops_in_progress set so that printk will give klogd
                 * a poke.  Hold onto your hats...
                 */
                console_loglevel = 15;
                printk(" ");
                console_loglevel = loglevel_save;
        }
}

/*
 * Returns the address space associated with the fault.
 * Returns 0 for kernel space and 1 for user space.
 */
static inline int user_space_fault(unsigned long trans_exc_code)
{
        /*
         * The lowest two bits of the translation exception
         * identification indicate which paging table was used.
         */
        trans_exc_code &= 3;
        if (trans_exc_code == 2)
                /* Access via secondary space, set_fs setting decides */
                return current->thread.mm_segment.ar4;
        if (s390_user_mode == HOME_SPACE_MODE)
                /* User space if the access has been done via home space. */
                return trans_exc_code == 3;
        /*
         * If the user space is not the home space the kernel runs in home
         * space. Access via secondary space has already been covered,
         * access via primary space or access register is from user space
         * and access via home space is from the kernel.
         */
        return trans_exc_code != 3;
}

static inline void report_user_fault(struct pt_regs *regs, long signr)
{
        if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
                return;
        if (!unhandled_signal(current, signr))
                return;
        if (!printk_ratelimit())
                return;
        printk(KERN_ALERT "User process fault: interruption code 0x%X ",
               regs->int_code);
        print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
        printk(KERN_CONT "\n");
        printk(KERN_ALERT "failing address: %lX\n",
               regs->int_parm_long & __FAIL_ADDR_MASK);
        show_regs(regs);
}

/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
        struct siginfo si;

        report_user_fault(regs, SIGSEGV);
        si.si_signo = SIGSEGV;
        si.si_code = si_code;
        si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
        force_sig_info(SIGSEGV, &si, current);
}

static noinline void do_no_context(struct pt_regs *regs)
{
        const struct exception_table_entry *fixup;
        unsigned long address;

        /* Are we prepared to handle this kernel fault?  */
        fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
        if (fixup) {
                regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
                return;
        }

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        address = regs->int_parm_long & __FAIL_ADDR_MASK;
        if (!user_space_fault(regs->int_parm_long))
                printk(KERN_ALERT "Unable to handle kernel pointer dereference"
                       " at virtual kernel address %p\n", (void *)address);
        else
                printk(KERN_ALERT "Unable to handle kernel paging request"
                       " at virtual user address %p\n", (void *)address);

        die(regs, "Oops");
        do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
        /* Low-address protection hit in kernel mode means
           NULL pointer write access in kernel mode.  */
        if (regs->psw.mask & PSW_MASK_PSTATE) {
                /* Low-address protection hit in user mode 'cannot happen'. */
                die(regs, "Low-address protection");
                do_exit(SIGKILL);
        }

        do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
        struct task_struct *tsk = current;
        struct siginfo si;

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        si.si_signo = SIGBUS;
        si.si_errno = 0;
        si.si_code = BUS_ADRERR;
        si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
        force_sig_info(SIGBUS, &si, tsk);
}

static noinline void do_fault_error(struct pt_regs *regs, int fault)
{
        int si_code;

        switch (fault) {
        case VM_FAULT_BADACCESS:
        case VM_FAULT_BADMAP:
                /* Bad memory access. Check if it is kernel or user space. */
                if (user_mode(regs)) {
                        /* User mode accesses just cause a SIGSEGV */
                        si_code = (fault == VM_FAULT_BADMAP) ?
                                SEGV_MAPERR : SEGV_ACCERR;
                        do_sigsegv(regs, si_code);
                        return;
                }
                /* fallthrough: kernel mode faults are handled like bad context */
        case VM_FAULT_BADCONTEXT:
                do_no_context(regs);
                break;
        case VM_FAULT_SIGNAL:
                if (!user_mode(regs))
                        do_no_context(regs);
                break;
        default: /* fault & VM_FAULT_ERROR */
                if (fault & VM_FAULT_OOM) {
                        if (!user_mode(regs))
                                do_no_context(regs);
                        else
                                pagefault_out_of_memory();
                } else if (fault & VM_FAULT_SIGBUS) {
                        /* Kernel mode? Handle exceptions or die */
                        if (!user_mode(regs))
                                do_no_context(regs);
                        else
                                do_sigbus(regs);
                } else
                        BUG();
                break;
        }
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines. Returns 0 if the fault was resolved, otherwise one of the
 * VM_FAULT_* codes that do_fault_error() knows how to handle.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline int do_exception(struct pt_regs *regs, int access)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long trans_exc_code;
        unsigned long address;
        unsigned int flags;
        int fault;

        tsk = current;
        /*
         * The instruction that caused the program check has
         * been nullified. Don't signal single step via SIGTRAP.
         */
        clear_tsk_thread_flag(tsk, TIF_PER_TRAP);

        if (notify_page_fault(regs))
                return 0;

        mm = tsk->mm;
        trans_exc_code = regs->int_parm_long;

        /*
         * Verify that the fault happened in user space, that
         * we are not in an interrupt and that there is a
         * user context.
         */
        fault = VM_FAULT_BADCONTEXT;
        if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
                goto out;

        address = trans_exc_code & __FAIL_ADDR_MASK;
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
        flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
        if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
                flags |= FAULT_FLAG_WRITE;
        down_read(&mm->mmap_sem);

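        /*
         * If the fault happened while running a KVM guest (PF_VCPU set and a
         * guest address space map installed in the lowcore), the faulting
         * address is a guest address; translate it to the corresponding host
         * user space address first, or fail the fault if it is not mapped in
         * the guest address space.
         */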
#ifdef CONFIG_PGSTE
        if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
                address = __gmap_fault(address,
                                     (struct gmap *) S390_lowcore.gmap);
                if (address == -EFAULT) {
                        fault = VM_FAULT_BADMAP;
                        goto out_up;
                }
                if (address == -ENOMEM) {
                        fault = VM_FAULT_OOM;
                        goto out_up;
                }
        }
#endif

retry:
        fault = VM_FAULT_BADMAP;
        vma = find_vma(mm, address);
        if (!vma)
                goto out_up;

        if (unlikely(vma->vm_start > address)) {
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out_up;
                if (expand_stack(vma, address))
                        goto out_up;
        }

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
        fault = VM_FAULT_BADACCESS;
        if (unlikely(!(vma->vm_flags & access)))
                goto out_up;

        if (is_vm_hugetlb_page(vma))
                address &= HPAGE_MASK;
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address, flags);
        /* No reason to continue if interrupted by SIGKILL. */
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
                fault = VM_FAULT_SIGNAL;
                goto out;
        }
        if (unlikely(fault & VM_FAULT_ERROR))
                goto out_up;

        /*
         * Major/minor page fault accounting is only done on the
         * initial attempt. If we go through a retry, it is extremely
         * likely that the page will be found in page cache at that point.
         */
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        tsk->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                      regs, address);
                } else {
                        tsk->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                      regs, address);
                }
                if (fault & VM_FAULT_RETRY) {
                        /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
                         * of starvation. */
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;
                        down_read(&mm->mmap_sem);
                        goto retry;
                }
        }
        fault = 0;
out_up:
        up_read(&mm->mmap_sem);
out:
        return fault;
}

void __kprobes do_protection_exception(struct pt_regs *regs)
{
        unsigned long trans_exc_code;
        int fault;

        trans_exc_code = regs->int_parm_long;
        /*
         * Protection exceptions are suppressing, decrement psw address.
         * The exception to this rule is aborted transactions; for these
         * the PSW already points to the correct location.
         */
        if (!(regs->int_code & 0x200))
                regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
        /*
         * Check for low-address protection.  This needs to be treated
         * as a special case because the translation exception code
         * field is not guaranteed to contain valid data in this case.
         */
        if (unlikely(!(trans_exc_code & 4))) {
                do_low_address(regs);
                return;
        }
        fault = do_exception(regs, VM_WRITE);
        if (unlikely(fault))
                do_fault_error(regs, fault);
}

void __kprobes do_dat_exception(struct pt_regs *regs)
{
        int access, fault;

        access = VM_READ | VM_EXEC | VM_WRITE;
        fault = do_exception(regs, access);
        if (unlikely(fault))
                do_fault_error(regs, fault);
}

#ifdef CONFIG_64BIT
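/*
 * An ASCE-type exception presumably means that the faulting address lies
 * beyond the reach of the address-space-control element currently loaded
 * in the control registers, e.g. because the page tables were upgraded to
 * a larger region-table type by another thread. If the address is in fact
 * mapped, reloading the updated ASCE via update_mm() is all that is
 * needed; otherwise the access is treated like an ordinary bad mapping.
 */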
void __kprobes do_asce_exception(struct pt_regs *regs)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long trans_exc_code;

        /*
         * The instruction that caused the program check has
         * been nullified. Don't signal single step via SIGTRAP.
         */
        clear_tsk_thread_flag(current, TIF_PER_TRAP);

        trans_exc_code = regs->int_parm_long;
        if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
                goto no_context;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
        up_read(&mm->mmap_sem);

        if (vma) {
                update_mm(mm, current);
                return;
        }

        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
                do_sigsegv(regs, SEGV_MAPERR);
                return;
        }

no_context:
        do_no_context(regs);
}
#endif

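/*
 * Slow path for the uaccess functions: called when a fault happened while
 * copying to or from user space in kernel mode. A minimal pt_regs is built
 * so that the fault can be pushed through do_exception(); the "| 2" on
 * int_parm_long marks the access as done via the secondary space, which
 * makes user_space_fault() honour the current set_fs() setting.
 */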
int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
{
        struct pt_regs regs;
        int access, fault;

        /* Emulate a uaccess fault from kernel mode. */
        regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
        if (!irqs_disabled())
                regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
        regs.psw.addr = (unsigned long) __builtin_return_address(0);
        regs.psw.addr |= PSW_ADDR_AMODE;
        regs.int_code = pgm_int_code;
        regs.int_parm_long = (uaddr & PAGE_MASK) | 2;
        access = write ? VM_WRITE : VM_READ;
        fault = do_exception(&regs, access);
        /*
         * Since the fault happened in kernel mode while performing a uaccess
         * operation, all we need to do now is emulate a fixup in case "fault"
         * is non-zero. For the calling uaccess functions this always results
         * in -EFAULT.
         */
        return fault ? -EFAULT : 0;
}

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page fault routines.
 */
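/*
 * Rough idea of the mechanism: when running as a guest under z/VM,
 * DIAGNOSE 0x258 asks the hypervisor to deliver a pair of external
 * interrupts (code 0x2603) instead of stopping the whole virtual CPU
 * while a host page is missing. The initial interrupt puts the faulting
 * task to sleep while the hypervisor brings the page in; the matching
 * completion interrupt wakes it up again, so other tasks can run in the
 * meantime.
 */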
static int pfault_disable;

static int __init nopfault(char *str)
{
        pfault_disable = 1;
        return 1;
}

__setup("nopfault", nopfault);

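/*
 * Request block for DIAGNOSE 0x258. The function code selects the
 * operation (0 in pfault_init() to establish pfault handshaking, 1 in
 * pfault_fini() to cancel it); the token handed back with each pfault
 * interrupt is the pid of the current task, taken from the lowcore.
 */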
struct pfault_refbk {
        u16 refdiagc;
        u16 reffcode;
        u16 refdwlen;
        u16 refversn;
        u64 refgaddr;
        u64 refselmk;
        u64 refcmpmk;
        u64 reserved;
} __attribute__ ((packed, aligned(8)));

int pfault_init(void)
{
        struct pfault_refbk refbk = {
                .refdiagc = 0x258,
                .reffcode = 0,
                .refdwlen = 5,
                .refversn = 2,
                .refgaddr = __LC_CURRENT_PID,
                .refselmk = 1ULL << 48,
                .refcmpmk = 1ULL << 48,
                .reserved = __PF_RES_FIELD };
        int rc;

        if (pfault_disable)
                return -1;
        asm volatile(
                "       diag    %1,%0,0x258\n"
                "0:     j       2f\n"
                "1:     la      %0,8\n"
                "2:\n"
                EX_TABLE(0b,1b)
                : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
        return rc;
}

void pfault_fini(void)
{
        struct pfault_refbk refbk = {
                .refdiagc = 0x258,
                .reffcode = 1,
                .refdwlen = 5,
                .refversn = 2,
        };

        if (pfault_disable)
                return;
        asm volatile(
                "       diag    %0,0,0x258\n"
                "0:\n"
                EX_TABLE(0b,0b)
                : : "a" (&refbk), "m" (refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

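/*
 * External interrupt handler for the pfault interrupts. The signal bit
 * (0x0080) of the subcode distinguishes the two halves of the handshake:
 * clear means "initial" (a page is missing, put the task to sleep), set
 * means "completion" (the page has arrived, wake the task up). Since the
 * two interrupts may arrive on different CPUs in either order,
 * thread.pfault_wait acts as a small state machine: 0 = idle, 1 = task is
 * sleeping on pfault_list, -1 = completion seen before the initial
 * interrupt.
 */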
static void pfault_interrupt(struct ext_code ext_code,
                             unsigned int param32, unsigned long param64)
{
        struct task_struct *tsk;
        __u16 subcode;
        pid_t pid;

        /*
         * Get the external interruption subcode & pfault
         * initial/completion signal bit. VM stores this
         * in the 'cpu address' field associated with the
         * external interrupt.
         */
        subcode = ext_code.subcode;
        if ((subcode & 0xff00) != __SUBCODE_MASK)
                return;
        inc_irq_stat(IRQEXT_PFL);
        /* Get the token (= pid of the affected task). */
        pid = sizeof(void *) == 4 ? param32 : param64;
        rcu_read_lock();
        tsk = find_task_by_pid_ns(pid, &init_pid_ns);
        if (tsk)
                get_task_struct(tsk);
        rcu_read_unlock();
        if (!tsk)
                return;
        spin_lock(&pfault_lock);
        if (subcode & 0x0080) {
                /* signal bit is set -> a page has been swapped in by VM */
                if (tsk->thread.pfault_wait == 1) {
                        /* Initial interrupt was faster than the completion
                         * interrupt. pfault_wait is valid. Set pfault_wait
                         * back to zero and wake up the process. This can
                         * safely be done because the task is still sleeping
                         * and can't produce new pfaults. */
                        tsk->thread.pfault_wait = 0;
                        list_del(&tsk->thread.list);
                        wake_up_process(tsk);
                        put_task_struct(tsk);
                } else {
                        /* Completion interrupt was faster than initial
                         * interrupt. Set pfault_wait to -1 so the initial
                         * interrupt doesn't put the task to sleep.
                         * If the task is not running, ignore the completion
                         * interrupt since it must be a leftover of a PFAULT
                         * CANCEL operation which didn't remove all pending
                         * completion interrupts. */
                        if (tsk->state == TASK_RUNNING)
                                tsk->thread.pfault_wait = -1;
                }
        } else {
                /* signal bit not set -> a real page is missing. */
                if (WARN_ON_ONCE(tsk != current))
                        goto out;
                if (tsk->thread.pfault_wait == 1) {
                        /* Already on the list with a reference: put to sleep */
                        __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                        set_tsk_need_resched(tsk);
                } else if (tsk->thread.pfault_wait == -1) {
                        /* Completion interrupt was faster than the initial
                         * interrupt (pfault_wait == -1). Set pfault_wait
                         * back to zero and exit. */
                        tsk->thread.pfault_wait = 0;
                } else {
                        /* Initial interrupt arrived before completion
                         * interrupt. Let the task sleep.
                         * An extra task reference is needed since a different
                         * cpu may set the task state to TASK_RUNNING again
                         * before the scheduler is reached. */
                        get_task_struct(tsk);
                        tsk->thread.pfault_wait = 1;
                        list_add(&tsk->thread.list, &pfault_list);
                        __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                        set_tsk_need_resched(tsk);
                }
        }
out:
        spin_unlock(&pfault_lock);
        put_task_struct(tsk);
}

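/*
 * CPU hotplug notifier: when a CPU is taken down, presumably any pfault
 * completion interrupts still destined for it can be lost, so wake up
 * every task still parked on pfault_list rather than leave it sleeping
 * forever.
 */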
static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
                                       unsigned long action, void *hcpu)
{
        struct thread_struct *thread, *next;
        struct task_struct *tsk;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DEAD:
                spin_lock_irq(&pfault_lock);
                list_for_each_entry_safe(thread, next, &pfault_list, list) {
                        thread->pfault_wait = 0;
                        list_del(&thread->list);
                        tsk = container_of(thread, struct task_struct, thread);
                        wake_up_process(tsk);
                        put_task_struct(tsk);
                }
                spin_unlock_irq(&pfault_lock);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static int __init pfault_irq_init(void)
{
        int rc;

        rc = register_external_interrupt(0x2603, pfault_interrupt);
        if (rc)
                goto out_extint;
        rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
        if (rc)
                goto out_pfault;
        service_subclass_irq_register();
        hotcpu_notifier(pfault_cpu_notify, 0);
        return 0;

out_pfault:
        unregister_external_interrupt(0x2603, pfault_interrupt);
out_extint:
        pfault_disable = 1;
        return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */