linux/arch/x86/oprofile/nmi_int.c
/**
 * @file nmi_int.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"

static struct op_x86_model_spec *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

/* must be protected with get_online_cpus()/put_online_cpus(): */
static int nmi_enabled;
static int ctr_running;

struct op_counter_config counter_config[OP_MAX_COUNTER];

/* common functions */

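/*
 * Build the event-select (control) MSR value for one counter from its
 * configuration: event select code in bits 7:0, unit mask in bits 15:8,
 * USR/OS in bits 16/17, interrupt-on-overflow (INT) in bit 20, and the
 * upper event-select bits (11:8 of the event code) shifted into bits
 * 35:32 as used by AMD's extended event encoding.
 */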
u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
                    struct op_counter_config *counter_config)
{
        u64 val = 0;
        u16 event = (u16)counter_config->event;

        val |= ARCH_PERFMON_EVENTSEL_INT;
        val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
        val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
        val |= (counter_config->unit_mask & 0xFF) << 8;
        event &= model->event_mask ? model->event_mask : 0xFF;
        val |= event & 0xFF;
        val |= (event & 0x0F00) << 24;

        return val;
}

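/*
 * Die-notifier callback for perf-counter NMIs.  While counters are running
 * the sample is handed to the model's check_ctrs(); if profiling is set up
 * but currently stopped, a stray counter overflow is silenced by stopping
 * the counters again.  In both cases the NMI is consumed (NOTIFY_STOP); if
 * profiling is not enabled at all it is left to other handlers (NOTIFY_DONE).
 */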
static int profile_exceptions_notify(struct notifier_block *self,
                                     unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *)data;
        int ret = NOTIFY_DONE;

        switch (val) {
        case DIE_NMI:
                if (ctr_running)
                        model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
                else if (!nmi_enabled)
                        break;
                else
                        model->stop(&__get_cpu_var(cpu_msrs));
                ret = NOTIFY_STOP;
                break;
        default:
                break;
        }
        return ret;
}

static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
        struct op_msr *counters = msrs->counters;
        struct op_msr *controls = msrs->controls;
        unsigned int i;

        for (i = 0; i < model->num_counters; ++i) {
                if (counters[i].addr)
                        rdmsrl(counters[i].addr, counters[i].saved);
        }

        for (i = 0; i < model->num_controls; ++i) {
                if (controls[i].addr)
                        rdmsrl(controls[i].addr, controls[i].saved);
        }
}

static void nmi_cpu_start(void *dummy)
{
        struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
        if (!msrs->controls)
                WARN_ON_ONCE(1);
        else
                model->start(msrs);
}

static int nmi_start(void)
{
        get_online_cpus();
        on_each_cpu(nmi_cpu_start, NULL, 1);
        ctr_running = 1;
        put_online_cpus();
        return 0;
}

static void nmi_cpu_stop(void *dummy)
{
        struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
        if (!msrs->controls)
                WARN_ON_ONCE(1);
        else
                model->stop(msrs);
}

static void nmi_stop(void)
{
        get_online_cpus();
        on_each_cpu(nmi_cpu_stop, NULL, 1);
        ctr_running = 0;
        put_online_cpus();
}

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

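/*
 * Event multiplexing: models that provide a switch_ctrl() hook can expose
 * more virtual counters than there are physical ones.  The physical
 * counters are rotated round-robin over the configured virtual counters;
 * switch_index tracks each CPU's current window into that set.
 */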
static DEFINE_PER_CPU(int, switch_index);

static inline int has_mux(void)
{
        return !!model->switch_ctrl;
}

inline int op_x86_phys_to_virt(int phys)
{
        return __this_cpu_read(switch_index) + phys;
}

inline int op_x86_virt_to_phys(int virt)
{
        return virt % model->num_counters;
}

static void nmi_shutdown_mux(void)
{
        int i;

        if (!has_mux())
                return;

        for_each_possible_cpu(i) {
                kfree(per_cpu(cpu_msrs, i).multiplex);
                per_cpu(cpu_msrs, i).multiplex = NULL;
                per_cpu(switch_index, i) = 0;
        }
}

static int nmi_setup_mux(void)
{
        size_t multiplex_size =
                sizeof(struct op_msr) * model->num_virt_counters;
        int i;

        if (!has_mux())
                return 1;

        for_each_possible_cpu(i) {
                per_cpu(cpu_msrs, i).multiplex =
                        kzalloc(multiplex_size, GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).multiplex)
                        return 0;
        }

        return 1;
}

static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
{
        int i;
        struct op_msr *multiplex = msrs->multiplex;

        if (!has_mux())
                return;

        for (i = 0; i < model->num_virt_counters; ++i) {
                if (counter_config[i].enabled) {
                        multiplex[i].saved = -(u64)counter_config[i].count;
                } else {
                        multiplex[i].saved = 0;
                }
        }

        per_cpu(switch_index, cpu) = 0;
}

static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{
        struct op_msr *counters = msrs->counters;
        struct op_msr *multiplex = msrs->multiplex;
        int i;

        for (i = 0; i < model->num_counters; ++i) {
                int virt = op_x86_phys_to_virt(i);
                if (counters[i].addr)
                        rdmsrl(counters[i].addr, multiplex[virt].saved);
        }
}

static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{
        struct op_msr *counters = msrs->counters;
        struct op_msr *multiplex = msrs->multiplex;
        int i;

        for (i = 0; i < model->num_counters; ++i) {
                int virt = op_x86_phys_to_virt(i);
                if (counters[i].addr)
                        wrmsrl(counters[i].addr, multiplex[virt].saved);
        }
}

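/*
 * Rotate this CPU's counters to the next set of virtual counters: stop the
 * hardware, save the current virtual counter values, advance switch_index
 * (wrapping to 0 past num_virt_counters or at the first unused slot), let
 * the model reprogram the control registers, then restore the new set and
 * restart.  Runs on each CPU via on_each_cpu() from nmi_switch_event().
 */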
static void nmi_cpu_switch(void *dummy)
{
        int cpu = smp_processor_id();
        int si = per_cpu(switch_index, cpu);
        struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

        nmi_cpu_stop(NULL);
        nmi_cpu_save_mpx_registers(msrs);

        /* move to next set */
        si += model->num_counters;
        if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
                per_cpu(switch_index, cpu) = 0;
        else
                per_cpu(switch_index, cpu) = si;

        model->switch_ctrl(model, msrs);
        nmi_cpu_restore_mpx_registers(msrs);

        nmi_cpu_start(NULL);
}

/*
 * Quick check to see if multiplexing is necessary.
 * The check should be sufficient since counters are used
 * in order.
 */
static int nmi_multiplex_on(void)
{
        return counter_config[model->num_counters].count ? 0 : -EINVAL;
}

static int nmi_switch_event(void)
{
        if (!has_mux())
                return -ENOSYS;         /* not implemented */
        if (nmi_multiplex_on() < 0)
                return -EINVAL;         /* not necessary */

        get_online_cpus();
        if (ctr_running)
                on_each_cpu(nmi_cpu_switch, NULL, 1);
        put_online_cpus();

        return 0;
}

static inline void mux_init(struct oprofile_operations *ops)
{
        if (has_mux())
                ops->switch_events = nmi_switch_event;
}

static void mux_clone(int cpu)
{
        if (!has_mux())
                return;

        memcpy(per_cpu(cpu_msrs, cpu).multiplex,
               per_cpu(cpu_msrs, 0).multiplex,
               sizeof(struct op_msr) * model->num_virt_counters);
}

#else

inline int op_x86_phys_to_virt(int phys) { return phys; }
inline int op_x86_virt_to_phys(int virt) { return virt; }
static inline void nmi_shutdown_mux(void) { }
static inline int nmi_setup_mux(void) { return 1; }
static inline void
nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
static inline void mux_init(struct oprofile_operations *ops) { }
static void mux_clone(int cpu) { }

#endif

static void free_msrs(void)
{
        int i;
        for_each_possible_cpu(i) {
                kfree(per_cpu(cpu_msrs, i).counters);
                per_cpu(cpu_msrs, i).counters = NULL;
                kfree(per_cpu(cpu_msrs, i).controls);
                per_cpu(cpu_msrs, i).controls = NULL;
        }
        nmi_shutdown_mux();
}

static int allocate_msrs(void)
{
        size_t controls_size = sizeof(struct op_msr) * model->num_controls;
        size_t counters_size = sizeof(struct op_msr) * model->num_counters;

        int i;
        for_each_possible_cpu(i) {
                per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
                                                        GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).counters)
                        goto fail;
                per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
                                                        GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).controls)
                        goto fail;
        }

        if (!nmi_setup_mux())
                goto fail;

        return 1;

fail:
        free_msrs();
        return 0;
}

static void nmi_cpu_setup(void *dummy)
{
        int cpu = smp_processor_id();
        struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
        nmi_cpu_save_registers(msrs);
        spin_lock(&oprofilefs_lock);
        model->setup_ctrs(model, msrs);
        nmi_cpu_setup_mux(cpu, msrs);
        spin_unlock(&oprofilefs_lock);
        per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static struct notifier_block profile_exceptions_nb = {
        .notifier_call = profile_exceptions_notify,
        .next = NULL,
        .priority = NMI_LOCAL_LOW_PRIOR,
};

static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
        struct op_msr *counters = msrs->counters;
        struct op_msr *controls = msrs->controls;
        unsigned int i;

        for (i = 0; i < model->num_controls; ++i) {
                if (controls[i].addr)
                        wrmsrl(controls[i].addr, controls[i].saved);
        }

        for (i = 0; i < model->num_counters; ++i) {
                if (counters[i].addr)
                        wrmsrl(counters[i].addr, counters[i].saved);
        }
}

static void nmi_cpu_shutdown(void *dummy)
{
        unsigned int v;
        int cpu = smp_processor_id();
        struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

        /*
         * Restoring APIC_LVTPC can trigger an APIC error because the
         * delivery mode and vector number combination can be illegal.
         * That's by design: at power-on the APIC LVT contains a zero
         * vector number, which is legal only for NMI delivery mode.
         * So mask APIC errors before restoring the LVTPC.
         */
        v = apic_read(APIC_LVTERR);
        apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
        apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
        apic_write(APIC_LVTERR, v);
        nmi_cpu_restore_registers(msrs);
        if (model->cpu_down)
                model->cpu_down();
}

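/*
 * CPU hotplug helpers: bring a (re)appearing CPU's counters in line with
 * the current global state (set up and/or started), and tear them down
 * again before a CPU goes offline.
 */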
static void nmi_cpu_up(void *dummy)
{
        if (nmi_enabled)
                nmi_cpu_setup(dummy);
        if (ctr_running)
                nmi_cpu_start(dummy);
}

static void nmi_cpu_down(void *dummy)
{
        if (ctr_running)
                nmi_cpu_stop(dummy);
        if (nmi_enabled)
                nmi_cpu_shutdown(dummy);
}

static int nmi_create_files(struct super_block *sb, struct dentry *root)
{
        unsigned int i;

        for (i = 0; i < model->num_virt_counters; ++i) {
                struct dentry *dir;
                char buf[4];

                /* Quick little hack to _not_ expose a counter if it is not
                 * available for use.  This should protect the userspace app.
                 * NOTE: assumes a 1:1 mapping here (that counters are
                 *       organized sequentially in their struct assignment).
                 */
                if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i)))
                        continue;

                snprintf(buf, sizeof(buf), "%d", i);
                dir = oprofilefs_mkdir(sb, root, buf);
                oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
                oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
                oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
                oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
                oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
                oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
        }

        return 0;
}

static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
                                 void *data)
{
        int cpu = (unsigned long)data;
        switch (action) {
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
                smp_call_function_single(cpu, nmi_cpu_up, NULL, 0);
                break;
        case CPU_DOWN_PREPARE:
                smp_call_function_single(cpu, nmi_cpu_down, NULL, 1);
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block oprofile_cpu_nb = {
        .notifier_call = oprofile_cpu_notifier
};

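/*
 * Set up NMI profiling: allocate the per-CPU MSR arrays, let the model fill
 * in the counter/control addresses once (on CPU 0) and copy them to every
 * other CPU, register the die notifier and the CPU hotplug notifier, then
 * program the counters on each online CPU.
 */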
static int nmi_setup(void)
{
        int err = 0;
        int cpu;

        if (!allocate_msrs())
                return -ENOMEM;

        /* We need to serialize save and setup for HT because the subsets
         * of MSRs are distinct for the save and setup operations.
         */

        /* Assume saved/restored counters are the same on all CPUs */
        err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
        if (err)
                goto fail;

        for_each_possible_cpu(cpu) {
                if (!cpu)
                        continue;

                memcpy(per_cpu(cpu_msrs, cpu).counters,
                       per_cpu(cpu_msrs, 0).counters,
                       sizeof(struct op_msr) * model->num_counters);

                memcpy(per_cpu(cpu_msrs, cpu).controls,
                       per_cpu(cpu_msrs, 0).controls,
                       sizeof(struct op_msr) * model->num_controls);

                mux_clone(cpu);
        }

        nmi_enabled = 0;
        ctr_running = 0;
        barrier();
        err = register_die_notifier(&profile_exceptions_nb);
        if (err)
                goto fail;

        get_online_cpus();
        register_cpu_notifier(&oprofile_cpu_nb);
        on_each_cpu(nmi_cpu_setup, NULL, 1);
        nmi_enabled = 1;
        put_online_cpus();

        return 0;
fail:
        free_msrs();
        return err;
}

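/*
 * Tear down NMI profiling: undo nmi_setup() in reverse order and release
 * the per-CPU MSR arrays.
 */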
static void nmi_shutdown(void)
{
        struct op_msrs *msrs;

        get_online_cpus();
        unregister_cpu_notifier(&oprofile_cpu_nb);
        on_each_cpu(nmi_cpu_shutdown, NULL, 1);
        nmi_enabled = 0;
        ctr_running = 0;
        put_online_cpus();
        barrier();
        unregister_die_notifier(&profile_exceptions_nb);
        msrs = &get_cpu_var(cpu_msrs);
        model->shutdown(msrs);
        free_msrs();
        put_cpu_var(cpu_msrs);
}

#ifdef CONFIG_PM

static int nmi_suspend(struct sys_device *dev, pm_message_t state)
{
        /* Only one CPU left, just stop that one */
        if (nmi_enabled == 1)
                nmi_cpu_stop(NULL);
        return 0;
}

static int nmi_resume(struct sys_device *dev)
{
        if (nmi_enabled == 1)
                nmi_cpu_start(NULL);
        return 0;
}

static struct sysdev_class oprofile_sysclass = {
        .name           = "oprofile",
        .resume         = nmi_resume,
        .suspend        = nmi_suspend,
};

static struct sys_device device_oprofile = {
        .id     = 0,
        .cls    = &oprofile_sysclass,
};

static int __init init_sysfs(void)
{
        int error;

        error = sysdev_class_register(&oprofile_sysclass);
        if (error)
                return error;

        error = sysdev_register(&device_oprofile);
        if (error)
                sysdev_class_unregister(&oprofile_sysclass);

        return error;
}

static void exit_sysfs(void)
{
        sysdev_unregister(&device_oprofile);
        sysdev_class_unregister(&oprofile_sysclass);
}

#else

static inline int  init_sysfs(void) { return 0; }
static inline void exit_sysfs(void) { }

#endif /* CONFIG_PM */

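/*
 * Pick the Pentium 4 model variant.  With HyperThreading the choice depends
 * on the number of sibling threads per core; more than two siblings is not
 * supported and oprofile reverts to timer mode.
 */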
static int __init p4_init(char **cpu_type)
{
        __u8 cpu_model = boot_cpu_data.x86_model;

        if (cpu_model > 6 || cpu_model == 5)
                return 0;

#ifndef CONFIG_SMP
        *cpu_type = "i386/p4";
        model = &op_p4_spec;
        return 1;
#else
        switch (smp_num_siblings) {
        case 1:
                *cpu_type = "i386/p4";
                model = &op_p4_spec;
                return 1;

        case 2:
                *cpu_type = "i386/p4-ht";
                model = &op_p4_ht2_spec;
                return 1;
        }
#endif

        printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
        printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
        return 0;
}

static int force_arch_perfmon;
static int force_cpu_type(const char *str, struct kernel_param *kp)
{
        if (!strcmp(str, "arch_perfmon")) {
                force_arch_perfmon = 1;
                printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
        }

        return 0;
}
module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);

static int __init ppro_init(char **cpu_type)
{
        __u8 cpu_model = boot_cpu_data.x86_model;
        struct op_x86_model_spec *spec = &op_ppro_spec; /* default */

        if (force_arch_perfmon && cpu_has_arch_perfmon)
                return 0;

        /*
         * Documentation on identifying Intel processors by CPU family
         * and model can be found in the Intel Software Developer's
         * Manuals (SDM):
         *
         *  http://www.intel.com/products/processor/manuals/
         *
         * As of May 2010 the documentation for this was in the:
         * "Intel 64 and IA-32 Architectures Software Developer's
         * Manual Volume 3B: System Programming Guide", "Table B-1
         * CPUID Signature Values of DisplayFamily_DisplayModel".
         */
        switch (cpu_model) {
        case 0 ... 2:
                *cpu_type = "i386/ppro";
                break;
        case 3 ... 5:
                *cpu_type = "i386/pii";
                break;
        case 6 ... 8:
        case 10 ... 11:
                *cpu_type = "i386/piii";
                break;
        case 9:
        case 13:
                *cpu_type = "i386/p6_mobile";
                break;
        case 14:
                *cpu_type = "i386/core";
                break;
        case 0x0f:
        case 0x16:
        case 0x17:
        case 0x1d:
                *cpu_type = "i386/core_2";
                break;
        case 0x1a:
        case 0x1e:
        case 0x2e:
                spec = &op_arch_perfmon_spec;
                *cpu_type = "i386/core_i7";
                break;
        case 0x1c:
                *cpu_type = "i386/atom";
                break;
        default:
                /* Unknown */
                return 0;
        }

        model = spec;
        return 1;
}

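/*
 * Entry point from the oprofile core: select an op_x86_model_spec based on
 * CPU vendor and family, install the default NMI-based operations, and let
 * the model's init() hook override them where needed.
 */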
int __init op_nmi_init(struct oprofile_operations *ops)
{
        __u8 vendor = boot_cpu_data.x86_vendor;
        __u8 family = boot_cpu_data.x86;
        char *cpu_type = NULL;
        int ret = 0;

        if (!cpu_has_apic)
                return -ENODEV;

        switch (vendor) {
        case X86_VENDOR_AMD:
                /* Needs to be at least an Athlon (or hammer in 32bit mode) */

                switch (family) {
                case 6:
                        cpu_type = "i386/athlon";
                        break;
                case 0xf:
                        /*
                         * Actually it could be i386/hammer too, but
                         * give user space a consistent name.
                         */
                        cpu_type = "x86-64/hammer";
                        break;
                case 0x10:
                        cpu_type = "x86-64/family10";
                        break;
                case 0x11:
                        cpu_type = "x86-64/family11h";
                        break;
                case 0x12:
                        cpu_type = "x86-64/family12h";
                        break;
                case 0x14:
                        cpu_type = "x86-64/family14h";
                        break;
                case 0x15:
                        cpu_type = "x86-64/family15h";
                        break;
                default:
                        return -ENODEV;
                }
                model = &op_amd_spec;
                break;

        case X86_VENDOR_INTEL:
                switch (family) {
                        /* Pentium IV */
                case 0xf:
                        p4_init(&cpu_type);
                        break;

                        /* A P6-class processor */
                case 6:
                        ppro_init(&cpu_type);
                        break;

                default:
                        break;
                }

                if (cpu_type)
                        break;

                if (!cpu_has_arch_perfmon)
                        return -ENODEV;

                /* use arch perfmon as fallback */
                cpu_type = "i386/arch_perfmon";
                model = &op_arch_perfmon_spec;
                break;

        default:
                return -ENODEV;
        }

        /* default values, can be overwritten by model */
        ops->create_files       = nmi_create_files;
        ops->setup              = nmi_setup;
        ops->shutdown           = nmi_shutdown;
        ops->start              = nmi_start;
        ops->stop               = nmi_stop;
        ops->cpu_type           = cpu_type;

        if (model->init)
                ret = model->init(ops);
        if (ret)
                return ret;

        if (!model->num_virt_counters)
                model->num_virt_counters = model->num_counters;

        mux_init(ops);

        ret = init_sysfs();
        if (ret)
                return ret;

        printk(KERN_INFO "oprofile: using NMI interrupt.\n");
        return 0;
}

void op_nmi_exit(void)
{
        exit_sysfs();
}