/* qemu/target/ppc/power8-pmu.c */
   1/*
   2 * PMU emulation helpers for TCG IBM POWER chips
   3 *
   4 *  Copyright IBM Corp. 2021
   5 *
   6 * Authors:
   7 *  Daniel Henrique Barboza      <danielhb413@gmail.com>
   8 *
   9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
  10 * See the COPYING file in the top-level directory.
  11 */
  12
  13#include "qemu/osdep.h"
  14#include "cpu.h"
  15#include "helper_regs.h"
  16#include "exec/exec-all.h"
  17#include "exec/helper-proto.h"
  18#include "qemu/error-report.h"
  19#include "qemu/main-loop.h"
  20#include "hw/ppc/ppc.h"
  21#include "power8-pmu.h"
  22
  23#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
  24
  25#define PMC_COUNTER_NEGATIVE_VAL 0x80000000UL
  26
  27static bool pmc_has_overflow_enabled(CPUPPCState *env, int sprn)
  28{
  29    if (sprn == SPR_POWER_PMC1) {
  30        return env->spr[SPR_POWER_MMCR0] & MMCR0_PMC1CE;
  31    }
  32
  33    return env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE;
  34}
  35
/*
 * Recompute the cached counting summaries after an MMCR0/MMCR1 change.
 *
 * env->pmc_ins_cnt and env->pmc_cyc_cnt are bitmasks with bit N set
 * when PMC<N> is currently counting instructions (resp. cycles).
 * HFLAGS_INSN_CNT mirrors "at least one PMC counts instructions" so
 * translated code knows whether the insns-inc helper must be emitted.
 */
void pmu_update_summaries(CPUPPCState *env)
{
    target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
    target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
    int ins_cnt = 0;
    int cyc_cnt = 0;

    /* MMCR0_FC freezes all counters: leave both summaries empty */
    if (mmcr0 & MMCR0_FC) {
        goto hflags_calc;
    }

    /* PMC1-4 count only when not frozen (FC14) and an event is selected */
    if (!(mmcr0 & MMCR0_FC14) && mmcr1 != 0) {
        target_ulong sel;

        /*
         * PMC1 event selector: 0x02/0xfe count instructions,
         * 0x1e/0xf0 count cycles.
         */
        sel = extract64(mmcr1, MMCR1_PMC1EVT_EXTR, MMCR1_EVT_SIZE);
        switch (sel) {
        case 0x02:
        case 0xfe:
            ins_cnt |= 1 << 1;
            break;
        case 0x1e:
        case 0xf0:
            cyc_cnt |= 1 << 1;
            break;
        }

        /* PMC2/PMC3 recognize only 0x02 (instructions) and 0x1e (cycles) */
        sel = extract64(mmcr1, MMCR1_PMC2EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= (sel == 0x02) << 2;
        cyc_cnt |= (sel == 0x1e) << 2;

        sel = extract64(mmcr1, MMCR1_PMC3EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= (sel == 0x02) << 3;
        cyc_cnt |= (sel == 0x1e) << 3;

        /*
         * PMC4 additionally accepts 0xfa as an instruction event; that
         * one is further gated on CTRL_RUN at increment time (see
         * pmu_increment_insns).
         */
        sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= ((sel == 0xfa) || (sel == 0x2)) << 4;
        cyc_cnt |= (sel == 0x1e) << 4;
    }

    /* PMC5 (instructions) and PMC6 (cycles) are fixed-event, gated by FC56 */
    ins_cnt |= !(mmcr0 & MMCR0_FC56) << 5;
    cyc_cnt |= !(mmcr0 & MMCR0_FC56) << 6;

 hflags_calc:
    env->pmc_ins_cnt = ins_cnt;
    env->pmc_cyc_cnt = cyc_cnt;
    env->hflags = deposit32(env->hflags, HFLAGS_INSN_CNT, 1, ins_cnt != 0);
}
  83
  84static bool pmu_increment_insns(CPUPPCState *env, uint32_t num_insns)
  85{
  86    target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
  87    unsigned ins_cnt = env->pmc_ins_cnt;
  88    bool overflow_triggered = false;
  89    target_ulong tmp;
  90
  91    if (unlikely(ins_cnt & 0x1e)) {
  92        if (ins_cnt & (1 << 1)) {
  93            tmp = env->spr[SPR_POWER_PMC1];
  94            tmp += num_insns;
  95            if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMC1CE)) {
  96                tmp = PMC_COUNTER_NEGATIVE_VAL;
  97                overflow_triggered = true;
  98            }
  99            env->spr[SPR_POWER_PMC1] = tmp;
 100        }
 101
 102        if (ins_cnt & (1 << 2)) {
 103            tmp = env->spr[SPR_POWER_PMC2];
 104            tmp += num_insns;
 105            if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
 106                tmp = PMC_COUNTER_NEGATIVE_VAL;
 107                overflow_triggered = true;
 108            }
 109            env->spr[SPR_POWER_PMC2] = tmp;
 110        }
 111
 112        if (ins_cnt & (1 << 3)) {
 113            tmp = env->spr[SPR_POWER_PMC3];
 114            tmp += num_insns;
 115            if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
 116                tmp = PMC_COUNTER_NEGATIVE_VAL;
 117                overflow_triggered = true;
 118            }
 119            env->spr[SPR_POWER_PMC3] = tmp;
 120        }
 121
 122        if (ins_cnt & (1 << 4)) {
 123            target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
 124            int sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
 125            if (sel == 0x02 || (env->spr[SPR_CTRL] & CTRL_RUN)) {
 126                tmp = env->spr[SPR_POWER_PMC4];
 127                tmp += num_insns;
 128                if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
 129                    tmp = PMC_COUNTER_NEGATIVE_VAL;
 130                    overflow_triggered = true;
 131                }
 132                env->spr[SPR_POWER_PMC4] = tmp;
 133            }
 134        }
 135    }
 136
 137    if (ins_cnt & (1 << 5)) {
 138        tmp = env->spr[SPR_POWER_PMC5];
 139        tmp += num_insns;
 140        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
 141            tmp = PMC_COUNTER_NEGATIVE_VAL;
 142            overflow_triggered = true;
 143        }
 144        env->spr[SPR_POWER_PMC5] = tmp;
 145    }
 146
 147    return overflow_triggered;
 148}
 149
 150static void pmu_update_cycles(CPUPPCState *env)
 151{
 152    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
 153    uint64_t time_delta = now - env->pmu_base_time;
 154    int sprn, cyc_cnt = env->pmc_cyc_cnt;
 155
 156    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
 157        if (cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) {
 158            /*
 159             * The pseries and powernv clock runs at 1Ghz, meaning
 160             * that 1 nanosec equals 1 cycle.
 161             */
 162            env->spr[sprn] += time_delta;
 163        }
 164    }
 165
 166    /* Update base_time for future calculations */
 167    env->pmu_base_time = now;
 168}
 169
 170/*
 171 * Helper function to retrieve the cycle overflow timer of the
 172 * 'sprn' counter.
 173 */
 174static QEMUTimer *get_cyc_overflow_timer(CPUPPCState *env, int sprn)
 175{
 176    return env->pmu_cyc_overflow_timers[sprn - SPR_POWER_PMC1];
 177}
 178
/*
 * (Re)program the cycle overflow timer of counter 'sprn' so it fires
 * when the counter would reach PMC_COUNTER_NEGATIVE_VAL, or cancel it
 * when the counter is not cycle-counting or has no alert enabled.
 */
static void pmc_update_overflow_timer(CPUPPCState *env, int sprn)
{
    QEMUTimer *pmc_overflow_timer = get_cyc_overflow_timer(env, sprn);
    int64_t timeout;

    /*
     * PMC5 does not have an overflow timer and this pointer
     * will be NULL.
     */
    if (!pmc_overflow_timer) {
        return;
    }

    if (!(env->pmc_cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) ||
        !pmc_has_overflow_enabled(env, sprn)) {
        /* Overflow timer is not needed for this counter */
        timer_del(pmc_overflow_timer);
        return;
    }

    if (env->spr[sprn] >= PMC_COUNTER_NEGATIVE_VAL) {
        /* Already at/past the threshold: fire as soon as possible */
        timeout = 0;
    } else {
        /* 1 cycle == 1 ns, so the cycle deficit doubles as a ns timeout */
        timeout = PMC_COUNTER_NEGATIVE_VAL - env->spr[sprn];
    }

    /*
     * Use timer_mod_anticipate() because an overflow timer might
     * be already running for this PMC.
     */
    timer_mod_anticipate(pmc_overflow_timer, env->pmu_base_time + timeout);
}
 211
 212static void pmu_update_overflow_timers(CPUPPCState *env)
 213{
 214    int sprn;
 215
 216    /*
 217     * Scroll through all PMCs and start counter overflow timers for
 218     * PM_CYC events, if needed.
 219     */
 220    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
 221        pmc_update_overflow_timer(env, sprn);
 222    }
 223}
 224
 225static void pmu_delete_timers(CPUPPCState *env)
 226{
 227    QEMUTimer *pmc_overflow_timer;
 228    int sprn;
 229
 230    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
 231        pmc_overflow_timer = get_cyc_overflow_timer(env, sprn);
 232
 233        if (pmc_overflow_timer) {
 234            timer_del(pmc_overflow_timer);
 235        }
 236    }
 237}
 238
 239void helper_store_mmcr0(CPUPPCState *env, target_ulong value)
 240{
 241    bool hflags_pmcc0 = (value & MMCR0_PMCC0) != 0;
 242    bool hflags_pmcc1 = (value & MMCR0_PMCC1) != 0;
 243
 244    pmu_update_cycles(env);
 245
 246    env->spr[SPR_POWER_MMCR0] = value;
 247
 248    /* MMCR0 writes can change HFLAGS_PMCC[01] and HFLAGS_INSN_CNT */
 249    env->hflags = deposit32(env->hflags, HFLAGS_PMCC0, 1, hflags_pmcc0);
 250    env->hflags = deposit32(env->hflags, HFLAGS_PMCC1, 1, hflags_pmcc1);
 251
 252    pmu_update_summaries(env);
 253
 254    /* Update cycle overflow timers with the current MMCR0 state */
 255    pmu_update_overflow_timers(env);
 256}
 257
 258void helper_store_mmcr1(CPUPPCState *env, uint64_t value)
 259{
 260    pmu_update_cycles(env);
 261
 262    env->spr[SPR_POWER_MMCR1] = value;
 263
 264    /* MMCR1 writes can change HFLAGS_INSN_CNT */
 265    pmu_update_summaries(env);
 266}
 267
 268target_ulong helper_read_pmc(CPUPPCState *env, uint32_t sprn)
 269{
 270    pmu_update_cycles(env);
 271
 272    return env->spr[sprn];
 273}
 274
 275void helper_store_pmc(CPUPPCState *env, uint32_t sprn, uint64_t value)
 276{
 277    pmu_update_cycles(env);
 278
 279    env->spr[sprn] = value;
 280
 281    pmc_update_overflow_timer(env, sprn);
 282}
 283
/*
 * Deliver a performance monitor alert: honor MMCR0_FCECE by freezing
 * the counters, convert PMAE (alert enabled) into PMAO (alert
 * occurred), and raise the EBB/perfm exception.
 */
static void fire_PMC_interrupt(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    /* Settle cycle counters before mutating MMCR0 */
    pmu_update_cycles(env);

    if (env->spr[SPR_POWER_MMCR0] & MMCR0_FCECE) {
        /* Freeze Counters on Enabled Condition or Event: set FC */
        env->spr[SPR_POWER_MMCR0] &= ~MMCR0_FCECE;
        env->spr[SPR_POWER_MMCR0] |= MMCR0_FC;

        /* Changing MMCR0_FC requires a new HFLAGS_INSN_CNT calc */
        pmu_update_summaries(env);

        /*
         * Delete all pending timers if we need to freeze
         * the PMC. We'll restart them when the PMC starts
         * running again.
         */
        pmu_delete_timers(env);
    }

    if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAE) {
        /* Alert taken: clear PMAE, record it in PMAO */
        env->spr[SPR_POWER_MMCR0] &= ~MMCR0_PMAE;
        env->spr[SPR_POWER_MMCR0] |= MMCR0_PMAO;
    }

    raise_ebb_perfm_exception(env);
}
 312
 313/* This helper assumes that the PMC is running. */
 314void helper_insns_inc(CPUPPCState *env, uint32_t num_insns)
 315{
 316    bool overflow_triggered;
 317    PowerPCCPU *cpu;
 318
 319    overflow_triggered = pmu_increment_insns(env, num_insns);
 320
 321    if (overflow_triggered) {
 322        cpu = env_archcpu(env);
 323        fire_PMC_interrupt(cpu);
 324    }
 325}
 326
 327static void cpu_ppc_pmu_timer_cb(void *opaque)
 328{
 329    PowerPCCPU *cpu = opaque;
 330
 331    fire_PMC_interrupt(cpu);
 332}
 333
 334void cpu_ppc_pmu_init(CPUPPCState *env)
 335{
 336    PowerPCCPU *cpu = env_archcpu(env);
 337    int i, sprn;
 338
 339    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
 340        if (sprn == SPR_POWER_PMC5) {
 341            continue;
 342        }
 343
 344        i = sprn - SPR_POWER_PMC1;
 345
 346        env->pmu_cyc_overflow_timers[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL,
 347                                                       &cpu_ppc_pmu_timer_cb,
 348                                                       cpu);
 349    }
 350}
 351#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
 352