/*
 *  linux/arch/arm/vfp/vfpmodule.c
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/user.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/system_info.h>
#include <asm/thread_notify.h>
#include <asm/vfp.h>

#include "vfpinstr.h"
#include "vfp.h"

/*
 * Our undef handlers (in entry.S)
 */
void vfp_testing_entry(void);
void vfp_support_entry(void);
void vfp_null_entry(void);

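/*
 * Active undefined-instruction hook.  vfp_init() points this at
 * vfp_testing_entry while probing FPSID, and at vfp_support_entry once a
 * usable VFP unit has been found; otherwise it stays on the null handler.
 */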
void (*vfp_vector)(void) = vfp_null_entry;

/*
 * Dual-use variable.
 * Used during startup: set to non-zero if the VFP checks fail.
 * After startup, holds the VFP architecture version.
 */
unsigned int VFP_arch;

/*
 * The pointer to the vfpstate structure of the thread which currently
 * owns the context held in the VFP hardware, or NULL if the hardware
 * context is invalid.
 *
 * For UP, this is sufficient to tell which thread owns the VFP context.
 * However, for SMP, we also need to check the CPU number stored in the
 * saved state too to catch migrations.
 */
union vfp_state *vfp_current_hw_state[NR_CPUS];

/*
 * Is the most up-to-date state of 'thread' stored in this CPU's hardware?
 * Must be called from non-preemptible context.
 */
static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
{
#ifdef CONFIG_SMP
        if (thread->vfpstate.hard.cpu != cpu)
                return false;
#endif
        return vfp_current_hw_state[cpu] == &thread->vfpstate;
}

/*
 * Force a reload of the VFP context from the thread structure.  We do
 * this by ensuring that access to the VFP hardware is disabled, and
 * clear vfp_current_hw_state.  Must be called from non-preemptible context.
 */
static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
{
        if (vfp_state_in_hw(cpu, thread)) {
                fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
                vfp_current_hw_state[cpu] = NULL;
        }
#ifdef CONFIG_SMP
        thread->vfpstate.hard.cpu = NR_CPUS;
#endif
}

/*
 * Per-thread VFP initialization.
 */
static void vfp_thread_flush(struct thread_info *thread)
{
        union vfp_state *vfp = &thread->vfpstate;
        unsigned int cpu;

        /*
         * Disable VFP to ensure we initialize it first.  We must ensure
         * that the modification of vfp_current_hw_state[] and hardware
         * disable are done for the same CPU and without preemption.
         *
         * Do this first to ensure that preemption won't overwrite our
         * state saving should access to the VFP be enabled at this point.
         */
        cpu = get_cpu();
        if (vfp_current_hw_state[cpu] == vfp)
                vfp_current_hw_state[cpu] = NULL;
        fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
        put_cpu();

        memset(vfp, 0, sizeof(union vfp_state));

        vfp->hard.fpexc = FPEXC_EN;
        vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
#ifdef CONFIG_SMP
        vfp->hard.cpu = NR_CPUS;
#endif
}

static void vfp_thread_exit(struct thread_info *thread)
{
        /* release case: Per-thread VFP cleanup. */
        union vfp_state *vfp = &thread->vfpstate;
        unsigned int cpu = get_cpu();

        if (vfp_current_hw_state[cpu] == vfp)
                vfp_current_hw_state[cpu] = NULL;
        put_cpu();
}

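/*
 * Per-thread VFP state copy, used for the THREAD_NOTIFY_COPY case below.
 * Sync the parent's live hardware state into its thread structure first,
 * then duplicate it for the new thread; on SMP, mark the copy as not
 * resident on any CPU so it is reloaded on first use.
 */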
static void vfp_thread_copy(struct thread_info *thread)
{
        struct thread_info *parent = current_thread_info();

        vfp_sync_hwstate(parent);
        thread->vfpstate = parent->vfpstate;
#ifdef CONFIG_SMP
        thread->vfpstate.hard.cpu = NR_CPUS;
#endif
}

/*
 * When this function is called with the following 'cmd's, the following
 * is true while this function is being run:
 *  THREAD_NOTIFY_SWITCH:
 *   - the previously running thread will not be scheduled onto another CPU.
 *   - the next thread to be run (v) will not be running on another CPU.
 *   - thread->cpu is the local CPU number
 *   - not preemptible as we're called in the middle of a thread switch
 *  THREAD_NOTIFY_FLUSH:
 *   - the thread (v) will be running on the local CPU, so
 *      v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *      but may change at any time.
 *   - we could be preempted if tree preempt RCU is enabled, so
 *      it is unsafe to use thread->cpu.
 *  THREAD_NOTIFY_EXIT:
 *   - the thread (v) will be running on the local CPU, so
 *      v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *      but may change at any time.
 *   - we could be preempted if tree preempt RCU is enabled, so
 *      it is unsafe to use thread->cpu.
 */
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
        struct thread_info *thread = v;
        u32 fpexc;
#ifdef CONFIG_SMP
        unsigned int cpu;
#endif

        switch (cmd) {
        case THREAD_NOTIFY_SWITCH:
                fpexc = fmrx(FPEXC);

#ifdef CONFIG_SMP
                cpu = thread->cpu;

                /*
                 * On SMP, if VFP is enabled, save the old state in
                 * case the thread migrates to a different CPU. The
                 * restoring is done lazily.
                 */
                if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
                        vfp_save_state(vfp_current_hw_state[cpu], fpexc);
#endif

                /*
                 * Always disable VFP so we can lazily save/restore the
                 * old state.
                 */
                fmxr(FPEXC, fpexc & ~FPEXC_EN);
                break;

        case THREAD_NOTIFY_FLUSH:
                vfp_thread_flush(thread);
                break;

        case THREAD_NOTIFY_EXIT:
                vfp_thread_exit(thread);
                break;

        case THREAD_NOTIFY_COPY:
                vfp_thread_copy(thread);
                break;
        }

        return NOTIFY_DONE;
}

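/* Registered with thread_register_notifier() in vfp_init() below. */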
static struct notifier_block vfp_notifier_block = {
        .notifier_call  = vfp_notifier,
};

/*
 * Raise a SIGFPE for the current process.
 * sicode describes the signal being raised.
 */
static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
        siginfo_t info;

        memset(&info, 0, sizeof(info));

        info.si_signo = SIGFPE;
        info.si_code = sicode;
        info.si_addr = (void __user *)(instruction_pointer(regs) - 4);

        /*
         * This is the same as NWFPE, because it's not clear what
         * this is used for
         */
        current->thread.error_code = 0;
        current->thread.trap_no = 6;

        send_sig_info(SIGFPE, &info, current);
}

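/*
 * Dump FPEXC, FPSCR, the offending instruction and all 32 single-precision
 * registers when a bounce cannot be handled; purely diagnostic.
 */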
static void vfp_panic(char *reason, u32 inst)
{
        int i;

        pr_err("VFP: Error: %s\n", reason);
        pr_err("VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
                fmrx(FPEXC), fmrx(FPSCR), inst);
        for (i = 0; i < 32; i += 2)
                pr_err("VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
                       i, vfp_get_float(i), i+1, vfp_get_float(i+1));
}

/*
 * Process bitmask of exception conditions.
 */
static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
{
        int si_code = 0;

        pr_debug("VFP: raising exceptions %08x\n", exceptions);

        if (exceptions == VFP_EXCEPTION_ERROR) {
                vfp_panic("unhandled bounce", inst);
                vfp_raise_sigfpe(0, regs);
                return;
        }

        /*
         * If any of the status flags are set, update the FPSCR.
         * Comparison instructions always return at least one of
         * these flags set.
         */
        if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
                fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);

        fpscr |= exceptions;

        fmxr(FPSCR, fpscr);

#define RAISE(stat,en,sig)                              \
        if (exceptions & stat && fpscr & en)            \
                si_code = sig;

        /*
         * These are arranged in priority order, lowest to highest.
         */
        RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV);
        RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES);
        RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND);
        RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
        RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);

        if (si_code)
                vfp_raise_sigfpe(si_code, regs);
}

/*
 * Emulate a VFP instruction.
 */
static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
{
        u32 exceptions = VFP_EXCEPTION_ERROR;

        pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);

        if (INST_CPRTDO(inst)) {
                if (!INST_CPRT(inst)) {
                        /*
                         * CPDO
                         */
                        if (vfp_single(inst)) {
                                exceptions = vfp_single_cpdo(inst, fpscr);
                        } else {
                                exceptions = vfp_double_cpdo(inst, fpscr);
                        }
                } else {
                        /*
                         * A CPRT instruction cannot appear in FPINST2, nor
                         * can it cause an exception.  Therefore, we do not
                         * have to emulate it.
                         */
                }
        } else {
                /*
                 * A CPDT instruction cannot appear in FPINST2, nor can
                 * it cause an exception.  Therefore, we do not have to
                 * emulate it.
                 */
        }
        return exceptions & ~VFP_NAN_FLAG;
}

/*
 * Package up a bounce condition.
 */
void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
        u32 fpscr, orig_fpscr, fpsid, exceptions;

        pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);

        /*
         * At this point, FPEXC can have the following configuration:
         *
         *  EX DEX IXE
         *  0   1   x   - synchronous exception
         *  1   x   0   - asynchronous exception
         *  1   x   1   - synchronous on VFP subarch 1 and asynchronous on later
         *  0   0   1   - synchronous on VFP9 (non-standard subarch 1
         *                implementation), undefined otherwise
         *
         * Clear various bits and enable access to the VFP so we can
         * handle the bounce.
         */
        fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));

        fpsid = fmrx(FPSID);
        orig_fpscr = fpscr = fmrx(FPSCR);

        /*
         * Check for the special VFP subarch 1 and FPSCR.IXE bit case
         */
        if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT)
            && (fpscr & FPSCR_IXE)) {
                /*
                 * Synchronous exception, emulate the trigger instruction
                 */
                goto emulate;
        }

        if (fpexc & FPEXC_EX) {
#ifndef CONFIG_CPU_FEROCEON
                /*
                 * Asynchronous exception. The instruction is read from FPINST
                 * and the interrupted instruction has to be restarted.
                 */
                trigger = fmrx(FPINST);
                regs->ARM_pc -= 4;
#endif
        } else if (!(fpexc & FPEXC_DEX)) {
                /*
                 * Illegal combination of bits. It can be caused by an
                 * unallocated VFP instruction but with FPSCR.IXE set and not
                 * on VFP subarch 1.
                 */
                vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
                goto exit;
        }

        /*
         * Modify fpscr to indicate the number of iterations remaining.
         * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates
         * whether FPEXC.VECITR or FPSCR.LEN is used.
         */
        if (fpexc & (FPEXC_EX | FPEXC_VV)) {
                u32 len;

                len = fpexc + (1 << FPEXC_LENGTH_BIT);

                fpscr &= ~FPSCR_LENGTH_MASK;
                fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
        }

        /*
         * Handle the first FP instruction.  We used to take note of the
         * FPEXC bounce reason, but this appears to be unreliable.
         * Emulate the bounced instruction instead.
         */
        exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
        if (exceptions)
                vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);

        /*
         * If there isn't a second FP instruction, exit now. Note that
         * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
         */
        if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
                goto exit;

        /*
         * The barrier() here prevents fpinst2 being read
         * before the condition above.
         */
        barrier();
        trigger = fmrx(FPINST2);

 emulate:
        exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
        if (exceptions)
                vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
 exit:
        preempt_enable();
}

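/*
 * Grant this CPU full access to the VFP/Advanced SIMD coprocessors
 * (cp10 and cp11).  Called per-CPU (see on_each_cpu() in vfp_init() and
 * the hotplug/PM paths), hence the BUG_ON(preemptible()).
 */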
static void vfp_enable(void *unused)
{
        u32 access;

        BUG_ON(preemptible());
        access = get_copro_access();

        /*
         * Enable full access to VFP (cp10 and cp11)
         */
        set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}

#ifdef CONFIG_CPU_PM
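/*
 * CPU PM notifier helpers: save any live VFP context before the core may
 * lose state on CPU_PM_ENTER, and on exit (or failed entry) re-enable
 * coprocessor access but leave VFP disabled so the next use reloads the
 * saved state.
 */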
static int vfp_pm_suspend(void)
{
        struct thread_info *ti = current_thread_info();
        u32 fpexc = fmrx(FPEXC);

        /* if vfp is on, then save state for resumption */
        if (fpexc & FPEXC_EN) {
                pr_debug("%s: saving vfp state\n", __func__);
                vfp_save_state(&ti->vfpstate, fpexc);

                /* disable, just in case */
                fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
        } else if (vfp_current_hw_state[ti->cpu]) {
#ifndef CONFIG_SMP
                fmxr(FPEXC, fpexc | FPEXC_EN);
                vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
                fmxr(FPEXC, fpexc);
#endif
        }

        /* clear any information we had about last context state */
        vfp_current_hw_state[ti->cpu] = NULL;

        return 0;
}

static void vfp_pm_resume(void)
{
        /* ensure we have access to the vfp */
        vfp_enable(NULL);

        /* and disable it to ensure the next usage restores the state */
        fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
}

static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
        void *v)
{
        switch (cmd) {
        case CPU_PM_ENTER:
                vfp_pm_suspend();
                break;
        case CPU_PM_ENTER_FAILED:
        case CPU_PM_EXIT:
                vfp_pm_resume();
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block vfp_cpu_pm_notifier_block = {
        .notifier_call = vfp_cpu_pm_notifier,
};

static void vfp_pm_init(void)
{
        cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
}

#else
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

/*
 * Ensure that the VFP state stored in 'thread->vfpstate' is up to date
 * with the hardware state.
 */
void vfp_sync_hwstate(struct thread_info *thread)
{
        unsigned int cpu = get_cpu();

        if (vfp_state_in_hw(cpu, thread)) {
                u32 fpexc = fmrx(FPEXC);

                /*
                 * Save the last VFP state on this CPU.
                 */
                fmxr(FPEXC, fpexc | FPEXC_EN);
                vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
                fmxr(FPEXC, fpexc);
        }

        put_cpu();
}

/* Ensure that the thread reloads the hardware VFP state on the next use. */
void vfp_flush_hwstate(struct thread_info *thread)
{
        unsigned int cpu = get_cpu();

        vfp_force_reload(cpu, thread);

        put_cpu();
}

/*
 * Save the current VFP state into the provided structures and prepare
 * for entry into a new function (signal handler).
 */
int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
                                    struct user_vfp_exc __user *ufp_exc)
{
        struct thread_info *thread = current_thread_info();
        struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
        int err = 0;

        /* Ensure that the saved hwstate is up-to-date. */
        vfp_sync_hwstate(thread);

        /*
         * Copy the floating point registers. There can be unused
         * registers; see asm/hwcap.h for details.
         */
        err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
                              sizeof(hwstate->fpregs));
        /*
         * Copy the status and control register.
         */
        __put_user_error(hwstate->fpscr, &ufp->fpscr, err);

        /*
         * Copy the exception registers.
         */
        __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
        __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
        __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);

        if (err)
                return -EFAULT;

        /* Ensure that VFP is disabled. */
        vfp_flush_hwstate(thread);

        /*
         * As per the PCS, clear the length and stride bits for function
         * entry.
         */
        hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
        return 0;
}

/* Sanitise and restore the current VFP state from the provided structures. */
int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
                             struct user_vfp_exc __user *ufp_exc)
{
        struct thread_info *thread = current_thread_info();
        struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
        unsigned long fpexc;
        int err = 0;

        /* Disable VFP to avoid corrupting the new thread state. */
        vfp_flush_hwstate(thread);

        /*
         * Copy the floating point registers. There can be unused
         * registers; see asm/hwcap.h for details.
         */
        err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
                                sizeof(hwstate->fpregs));
        /*
         * Copy the status and control register.
         */
        __get_user_error(hwstate->fpscr, &ufp->fpscr, err);

        /*
         * Sanitise and restore the exception registers.
         */
        __get_user_error(fpexc, &ufp_exc->fpexc, err);

        /* Ensure the VFP is enabled. */
        fpexc |= FPEXC_EN;

        /* Ensure FPINST2 is invalid and the exception flag is cleared. */
        fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
        hwstate->fpexc = fpexc;

        __get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
        __get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);

        return err ? -EFAULT : 0;
}

/*
 * VFP hardware can lose all context when a CPU goes offline.
 * As we will be running in SMP mode with CPU hotplug, we will save the
 * hardware state at every thread switch.  We clear our held state when
 * a CPU has been killed, indicating that the VFP hardware doesn't contain
 * a thread's VFP state.  When a CPU starts up, we re-enable access to the
 * VFP hardware.
 *
 * Both CPU_DYING and CPU_STARTING are called on the CPU which
 * is being offlined/onlined.
 */
static int vfp_hotplug(struct notifier_block *b, unsigned long action,
        void *hcpu)
{
        if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
                vfp_force_reload((long)hcpu, current_thread_info());
        } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
                vfp_enable(NULL);
        return NOTIFY_OK;
}

/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
        unsigned int vfpsid;
        unsigned int cpu_arch = cpu_architecture();

        if (cpu_arch >= CPU_ARCH_ARMv6)
                on_each_cpu(vfp_enable, NULL, 1);

        /*
         * First check that there is a VFP that we can use.
         * The handler is already set up to just log calls, so
         * we just need to read the VFPSID register.
         */
        vfp_vector = vfp_testing_entry;
        barrier();
        vfpsid = fmrx(FPSID);
        barrier();
        vfp_vector = vfp_null_entry;

        pr_info("VFP support v0.3: ");
        if (VFP_arch)
                pr_cont("not present\n");
        else if (vfpsid & FPSID_NODOUBLE) {
                pr_cont("no double precision support\n");
        } else {
                hotcpu_notifier(vfp_hotplug, 0);

                VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
                pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n",
                        (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
                        (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
                        (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
                        (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
                        (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

                vfp_vector = vfp_support_entry;

                thread_register_notifier(&vfp_notifier_block);
                vfp_pm_init();

                /*
                 * We detected VFP, and the support code is
                 * in place; report VFP support to userspace.
                 */
                elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
                if (VFP_arch >= 2) {
                        elf_hwcap |= HWCAP_VFPv3;

                        /*
                         * Check for VFPv3 D16 and VFPv4 D16.  CPUs in
                         * this configuration only have 16 x 64bit
                         * registers.
                         */
                        if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
                                elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
                        else
                                elf_hwcap |= HWCAP_VFPD32;
                }
#endif
                /*
                 * Check for the presence of the Advanced SIMD
                 * load/store instructions, integer and single
                 * precision floating point operations. Only check
                 * for NEON if the hardware has the MVFR registers.
                 */
                if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
#ifdef CONFIG_NEON
                        if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
                                elf_hwcap |= HWCAP_NEON;
#endif
#ifdef CONFIG_VFPv3
                        if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
                                elf_hwcap |= HWCAP_VFPv4;
#endif
                }
        }
        return 0;
}

late_initcall(vfp_init);