linux/arch/x86/kvm/pmu.c
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"

static struct kvm_arch_event_perf_mapping {
        u8 eventsel;
        u8 unit_mask;
        unsigned event_type;
        bool inexact;
} arch_events[] = {
        /* Index must match CPUID 0x0A.EBX bit vector */
        [0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
        [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
        [2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
        [3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
        [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
        [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
        [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
        [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* Mapping between fixed pmc index and the arch_events array */
int fixed_pmc_events[] = {1, 0, 7};

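/* true if the counter is a general-purpose PMC, false if it is a fixed PMC */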
static bool pmc_is_gp(struct kvm_pmc *pmc)
{
        return pmc->type == KVM_PMC_GP;
}

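/*
 * Mask of valid bits in the counter, derived from the counter width the
 * guest's CPUID advertises.
 */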
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;

        return pmu->counter_bitmask[pmc->type];
}

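/* A counter counts only while its enable bit in IA32_PERF_GLOBAL_CTRL is set. */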
static inline bool pmc_enabled(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;

        return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

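/*
 * Translate an MSR address into a general-purpose PMC, given the base of
 * the counter or eventsel MSR range.  Returns NULL if the MSR does not
 * address a valid counter.
 */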
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
                                         u32 base)
{
        if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
                return &pmu->gp_counters[msr - base];
        return NULL;
}

static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
        int base = MSR_CORE_PERF_FIXED_CTR0;

        if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
                return &pmu->fixed_counters[msr - base];
        return NULL;
}

static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
{
        return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx);
}

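/*
 * Translate a bit index from the global control/status MSRs into a PMC:
 * indices below INTEL_PMC_IDX_FIXED address general-purpose counters,
 * the rest address fixed counters.
 */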
static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
        if (idx < INTEL_PMC_IDX_FIXED)
                return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
        else
                return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
}

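/* Deliver a PMI to the guest via the local APIC's performance-counter LVT. */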
void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.apic)
                kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}

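/* irq_work callback: deliver a PMI that could not be injected from NMI context. */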
static void trigger_pmi(struct irq_work *irq_work)
{
        struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu,
                        irq_work);
        struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu,
                        arch.pmu);

        kvm_deliver_pmi(vcpu);
}

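/*
 * Overflow handler for counters programmed without overflow interrupts:
 * record the overflow in the global status MSR and request KVM_REQ_PMU
 * so the counter is reprogrammed on the next vcpu entry.
 */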
static void kvm_perf_overflow(struct perf_event *perf_event,
                              struct perf_sample_data *data,
                              struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;

        if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
                __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
                kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
        }
}

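/*
 * As kvm_perf_overflow(), but for counters programmed to raise an
 * interrupt on overflow: additionally inject a PMI into the guest.
 */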
static void kvm_perf_overflow_intr(struct perf_event *perf_event,
                struct perf_sample_data *data, struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;

        if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
                __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
                kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
                /*
                 * Inject the PMI.  If the vcpu was in guest mode during the
                 * NMI, the PMI can be injected on the next guest-mode entry.
                 * Otherwise we cannot be sure that the vcpu was not executing
                 * a hlt instruction at the time of the vmexit, in which case
                 * it will not re-enter guest mode until it is woken up.  We
                 * should wake it, but that is impossible from NMI context,
                 * so do it from irq work instead.
                 */
                if (!kvm_is_in_guest())
                        irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
                else
                        kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
        }
}

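/*
 * Current value of a virtual counter: the value accumulated when it was
 * last programmed plus whatever the backing perf event has counted
 * since, truncated to the counter's width.
 */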
static u64 read_pmc(struct kvm_pmc *pmc)
{
        u64 counter, enabled, running;

        counter = pmc->counter;

        if (pmc->perf_event)
                counter += perf_event_read_value(pmc->perf_event,
                                                 &enabled, &running);

        /* FIXME: Scaling needed? */

        return counter & pmc_bitmask(pmc);
}

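/* Fold the perf event's count into pmc->counter and release the event. */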
static void stop_counter(struct kvm_pmc *pmc)
{
        if (pmc->perf_event) {
                pmc->counter = read_pmc(pmc);
                perf_event_release_kernel(pmc->perf_event);
                pmc->perf_event = NULL;
        }
}

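/*
 * (Re)start a counter by creating a host perf event that mirrors the
 * guest's programming.  The sample period is chosen so that the host
 * event overflows exactly when the guest counter would.
 */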
static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
                unsigned config, bool exclude_user, bool exclude_kernel,
                bool intr, bool in_tx, bool in_tx_cp)
{
        struct perf_event *event;
        struct perf_event_attr attr = {
                .type = type,
                .size = sizeof(attr),
                .pinned = true,
                .exclude_idle = true,
                .exclude_host = true,
                .exclude_user = exclude_user,
                .exclude_kernel = exclude_kernel,
                .config = config,
        };

        if (in_tx)
                attr.config |= HSW_IN_TX;
        if (in_tx_cp)
                attr.config |= HSW_IN_TX_CHECKPOINTED;

        attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);

        event = perf_event_create_kernel_counter(&attr, -1, current,
                                                 intr ? kvm_perf_overflow_intr :
                                                 kvm_perf_overflow, pmc);
        if (IS_ERR(event)) {
                printk_once("kvm: pmu event creation failed %ld\n",
                                PTR_ERR(event));
                return;
        }

        pmc->perf_event = event;
        clear_bit(pmc->idx, (unsigned long *)&pmc->vcpu->arch.pmu.reprogram_pmi);
}

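/*
 * Map a guest eventsel/unit-mask pair onto a generic perf hardware
 * event, provided the matching architectural event is advertised as
 * available in the guest's CPUID.  Returns PERF_COUNT_HW_MAX if there
 * is no match.
 */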
static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
                u8 unit_mask)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(arch_events); i++)
                if (arch_events[i].eventsel == event_select
                                && arch_events[i].unit_mask == unit_mask
                                && (pmu->available_event_types & (1 << i)))
                        break;

        if (i == ARRAY_SIZE(arch_events))
                return PERF_COUNT_HW_MAX;

        return arch_events[i].event_type;
}

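/* Reprogram a general-purpose counter after a write to its eventsel MSR. */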
static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
        unsigned config, type = PERF_TYPE_RAW;
        u8 event_select, unit_mask;

        if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
                printk_once("kvm pmu: pin control bit is ignored\n");

        pmc->eventsel = eventsel;

        stop_counter(pmc);

        if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc))
                return;

        event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
        unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

        if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
                          ARCH_PERFMON_EVENTSEL_INV |
                          ARCH_PERFMON_EVENTSEL_CMASK |
                          HSW_IN_TX |
                          HSW_IN_TX_CHECKPOINTED))) {
                config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
                                unit_mask);
                if (config != PERF_COUNT_HW_MAX)
                        type = PERF_TYPE_HARDWARE;
        }

        if (type == PERF_TYPE_RAW)
                config = eventsel & X86_RAW_EVENT_MASK;

        reprogram_counter(pmc, type, config,
                        !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
                        !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
                        eventsel & ARCH_PERFMON_EVENTSEL_INT,
                        (eventsel & HSW_IN_TX),
                        (eventsel & HSW_IN_TX_CHECKPOINTED));
}

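/*
 * Reprogram a fixed counter from its 4-bit control field: bits 0-1
 * select the privilege levels to count, bit 3 enables the PMI on
 * overflow.
 */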
static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
{
        unsigned en = en_pmi & 0x3;
        bool pmi = en_pmi & 0x8;

        stop_counter(pmc);

        if (!en || !pmc_enabled(pmc))
                return;

        reprogram_counter(pmc, PERF_TYPE_HARDWARE,
                        arch_events[fixed_pmc_events[idx]].event_type,
                        !(en & 0x2), /* exclude user */
                        !(en & 0x1), /* exclude kernel */
                        pmi, false, false);
}

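/* Extract fixed counter idx's 4-bit control field from IA32_FIXED_CTR_CTRL. */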
static inline u8 fixed_en_pmi(u64 ctrl, int idx)
{
        return (ctrl >> (idx * 4)) & 0xf;
}

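/* On a write to IA32_FIXED_CTR_CTRL, reprogram every fixed counter whose control field changed. */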
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
        int i;

        for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
                u8 en_pmi = fixed_en_pmi(data, i);
                struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);

                if (fixed_en_pmi(pmu->fixed_ctr_ctrl, i) == en_pmi)
                        continue;

                reprogram_fixed_counter(pmc, en_pmi, i);
        }

        pmu->fixed_ctr_ctrl = data;
}

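/* Reprogram the counter at the given global index from its current guest state. */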
static void reprogram_idx(struct kvm_pmu *pmu, int idx)
{
        struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);

        if (!pmc)
                return;

        if (pmc_is_gp(pmc))
                reprogram_gp_counter(pmc, pmc->eventsel);
        else {
                int fidx = idx - INTEL_PMC_IDX_FIXED;

                reprogram_fixed_counter(pmc,
                                fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
        }
}

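/* On a write to IA32_PERF_GLOBAL_CTRL, reprogram every counter whose enable bit flipped. */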
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
        int bit;
        u64 diff = pmu->global_ctrl ^ data;

        pmu->global_ctrl = data;

        for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
                reprogram_idx(pmu, bit);
}

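/* Returns true if the MSR is emulated by the PMU. */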
bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        bool ret;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
        case MSR_CORE_PERF_GLOBAL_STATUS:
        case MSR_CORE_PERF_GLOBAL_CTRL:
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                ret = pmu->version > 1;
                break;
        default:
                ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
                        || get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
                        || get_fixed_pmc(pmu, msr);
                break;
        }
        return ret;
}

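/* Read a PMU MSR.  Returns 0 on success, 1 if the MSR is not handled. */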
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;

        switch (index) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
                *data = pmu->fixed_ctr_ctrl;
                return 0;
        case MSR_CORE_PERF_GLOBAL_STATUS:
                *data = pmu->global_status;
                return 0;
        case MSR_CORE_PERF_GLOBAL_CTRL:
                *data = pmu->global_ctrl;
                return 0;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                *data = pmu->global_ovf_ctrl;
                return 0;
        default:
                if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
                                (pmc = get_fixed_pmc(pmu, index))) {
                        *data = read_pmc(pmc);
                        return 0;
                } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
                        *data = pmc->eventsel;
                        return 0;
                }
        }
        return 1;
}

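/*
 * Write a PMU MSR.  Returns 0 on success and 1 on failure, e.g. for a
 * write that sets reserved bits; writes marked host_initiated may touch
 * state the guest cannot, such as IA32_PERF_GLOBAL_STATUS.
 */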
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;
        u32 index = msr_info->index;
        u64 data = msr_info->data;

        switch (index) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
                if (pmu->fixed_ctr_ctrl == data)
                        return 0;
                if (!(data & 0xfffffffffffff444ull)) { /* no reserved bits */
                        reprogram_fixed_counters(pmu, data);
                        return 0;
                }
                break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
                if (msr_info->host_initiated) {
                        pmu->global_status = data;
                        return 0;
                }
                break; /* RO MSR */
        case MSR_CORE_PERF_GLOBAL_CTRL:
                if (pmu->global_ctrl == data)
                        return 0;
                if (!(data & pmu->global_ctrl_mask)) {
                        global_ctrl_changed(pmu, data);
                        return 0;
                }
                break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
                        if (!msr_info->host_initiated)
                                pmu->global_status &= ~data;
                        pmu->global_ovf_ctrl = data;
                        return 0;
                }
                break;
        default:
                if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
                                (pmc = get_fixed_pmc(pmu, index))) {
                        /* Guest writes to a counter are sign-extended from 32 bits. */
                        if (!msr_info->host_initiated)
                                data = (s64)(s32)data;
                        pmc->counter += data - read_pmc(pmc);
                        return 0;
                } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
                        if (data == pmc->eventsel)
                                return 0;
                        if (!(data & pmu->reserved_bits)) {
                                reprogram_gp_counter(pmc, data);
                                return 0;
                        }
                }
        }
        return 1;
}

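/* Validate an RDPMC counter index; returns non-zero if the index is invalid. */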
int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        bool fixed = pmc & (1u << 30);

        pmc &= ~(3u << 30);
        return (!fixed && pmc >= pmu->nr_arch_gp_counters) ||
                (fixed && pmc >= pmu->nr_arch_fixed_counters);
}

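/*
 * Handle RDPMC: bit 30 of the index selects the fixed counter space,
 * bit 31 requests "fast" mode, which truncates the result to 32 bits.
 */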
int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        bool fast_mode = pmc & (1u << 31);
        bool fixed = pmc & (1u << 30);
        struct kvm_pmc *counters;
        u64 ctr;

        pmc &= ~(3u << 30);
        if (!fixed && pmc >= pmu->nr_arch_gp_counters)
                return 1;
        if (fixed && pmc >= pmu->nr_arch_fixed_counters)
                return 1;
        counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
        ctr = read_pmc(&counters[pmc]);
        if (fast_mode)
                ctr = (u32)ctr;
        *data = ctr;

        return 0;
}

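/*
 * Refresh the PMU configuration from the guest's CPUID leaf 0xA: the
 * number and width of the counters, the available architectural events,
 * and the resulting layout of the global control MSRs.
 */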
void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_cpuid_entry2 *entry;
        unsigned bitmap_len;

        pmu->nr_arch_gp_counters = 0;
        pmu->nr_arch_fixed_counters = 0;
        pmu->counter_bitmask[KVM_PMC_GP] = 0;
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->version = 0;
        pmu->reserved_bits = 0xffffffff00200000ull;

        entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
        if (!entry)
                return;

        pmu->version = entry->eax & 0xff;
        if (!pmu->version)
                return;

        pmu->nr_arch_gp_counters = min((int)(entry->eax >> 8) & 0xff,
                        INTEL_PMC_MAX_GENERIC);
        pmu->counter_bitmask[KVM_PMC_GP] =
                ((u64)1 << ((entry->eax >> 16) & 0xff)) - 1;
        bitmap_len = (entry->eax >> 24) & 0xff;
        pmu->available_event_types = ~entry->ebx & ((1ull << bitmap_len) - 1);

        if (pmu->version == 1) {
                pmu->nr_arch_fixed_counters = 0;
        } else {
                pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
                                INTEL_PMC_MAX_FIXED);
                pmu->counter_bitmask[KVM_PMC_FIXED] =
                        ((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
        }

        pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
                (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
        pmu->global_ctrl_mask = ~pmu->global_ctrl;

        /*
         * Allow the HSW_IN_TX/HSW_IN_TX_CHECKPOINTED eventsel bits if both
         * the host and the guest have TSX.  Note that X86_FEATURE_HLE and
         * X86_FEATURE_RTM are feature-word indices, not CPUID bit masks, so
         * the guest's CPUID.07H:EBX must be tested with explicit masks:
         * HLE is bit 4, RTM is bit 11.
         */
        entry = kvm_find_cpuid_entry(vcpu, 7, 0);
        if (entry &&
            (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
            (entry->ebx & ((1u << 4) | (1u << 11))))
                pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
}

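/* One-time initialization of the vcpu's PMU state. */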
void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;

        memset(pmu, 0, sizeof(*pmu));
        for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
                pmu->gp_counters[i].type = KVM_PMC_GP;
                pmu->gp_counters[i].vcpu = vcpu;
                pmu->gp_counters[i].idx = i;
        }
        for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
                pmu->fixed_counters[i].type = KVM_PMC_FIXED;
                pmu->fixed_counters[i].vcpu = vcpu;
                pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
        }
        init_irq_work(&pmu->irq_work, trigger_pmi);
        kvm_pmu_cpuid_update(vcpu);
}

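/* Stop all counters and clear the PMU MSRs, as on vcpu reset. */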
void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        int i;

        irq_work_sync(&pmu->irq_work);
        for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
                struct kvm_pmc *pmc = &pmu->gp_counters[i];

                stop_counter(pmc);
                pmc->counter = pmc->eventsel = 0;
        }

        for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
                stop_counter(&pmu->fixed_counters[i]);

        pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
                pmu->global_ovf_ctrl = 0;
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_pmu_reset(vcpu);
}

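/*
 * Handle KVM_REQ_PMU: reprogram every counter that an overflow handler
 * flagged in reprogram_pmi.
 */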
void kvm_handle_pmu_event(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        u64 bitmask;
        int bit;

        bitmask = pmu->reprogram_pmi;

        for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
                struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);

                if (unlikely(!pmc || !pmc->perf_event)) {
                        clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
                        continue;
                }

                reprogram_idx(pmu, bit);
        }
}