qemu/target/riscv/pmu.c
/*
 * RISC-V PMU file.
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "pmu.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/device_tree.h"

#define RISCV_TIMEBASE_FREQ 1000000000 /* 1GHz */
#define MAKE_32BIT_MASK(shift, length) \
        (((uint32_t)(~0UL) >> (32 - (length))) << (shift))
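
/*
 * For example, MAKE_32BIT_MASK(3, 4) expands to
 * ((0xFFFFFFFF >> 28) << 3) == (0xF << 3) == 0x78, a mask with bits 3..6
 * set, i.e. the four programmable counters mhpmcounter3..mhpmcounter6.
 */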

/*
 * To keep it simple, any event can be mapped to any programmable counter in
 * QEMU. The generic cycle & instruction count events can also be monitored
 * using programmable counters. In that case, mcycle & minstret must continue
 * to provide the correct value as well. Heterogeneous PMU per hart is not
 * supported yet, so the number of counters is the same across all harts.
 */
void riscv_pmu_generate_fdt_node(void *fdt, int num_ctrs, char *pmu_name)
{
    uint32_t fdt_event_ctr_map[15] = {};
    uint32_t cmask;

    /* All the programmable counters can map to any event */
    cmask = MAKE_32BIT_MASK(3, num_ctrs);

    /*
     * The event encoding is specified in the SBI specification.
     * Event idx is a 20-bit number encoded as follows:
     * event_idx[19:16] = type
     * event_idx[15:0] = code
     * The code field of cache events is encoded as follows:
     * event_idx.code[15:3] = cache_id
     * event_idx.code[2:1] = op_id
     * event_idx.code[0:0] = result_id
     */
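    /*
     * Worked example, derived from the encoding above: the DTLB read-miss
     * event programmed below has type = 0x01 (hw cache event), cache_id =
     * 0x03 (DTLB), op_id = 0x00 (read) and result_id = 0x01 (miss), so
     * event_idx = (0x01 << 16) | (0x03 << 3) | (0x00 << 1) | 0x01
     *           = 0x00010019.
     */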

    /* SBI_PMU_HW_CPU_CYCLES: 0x01 : type(0x00) */
    fdt_event_ctr_map[0] = cpu_to_be32(0x00000001);
    fdt_event_ctr_map[1] = cpu_to_be32(0x00000001);
    fdt_event_ctr_map[2] = cpu_to_be32(cmask | 1 << 0);

    /* SBI_PMU_HW_INSTRUCTIONS: 0x02 : type(0x00) */
    fdt_event_ctr_map[3] = cpu_to_be32(0x00000002);
    fdt_event_ctr_map[4] = cpu_to_be32(0x00000002);
    fdt_event_ctr_map[5] = cpu_to_be32(cmask | 1 << 2);

    /* SBI_PMU_HW_CACHE_DTLB : 0x03 READ : 0x00 MISS : 0x01 type(0x01) */
    fdt_event_ctr_map[6] = cpu_to_be32(0x00010019);
    fdt_event_ctr_map[7] = cpu_to_be32(0x00010019);
    fdt_event_ctr_map[8] = cpu_to_be32(cmask);

    /* SBI_PMU_HW_CACHE_DTLB : 0x03 WRITE : 0x01 MISS : 0x01 type(0x01) */
    fdt_event_ctr_map[9] = cpu_to_be32(0x0001001B);
    fdt_event_ctr_map[10] = cpu_to_be32(0x0001001B);
    fdt_event_ctr_map[11] = cpu_to_be32(cmask);

    /* SBI_PMU_HW_CACHE_ITLB : 0x04 READ : 0x00 MISS : 0x01 type(0x01) */
    fdt_event_ctr_map[12] = cpu_to_be32(0x00010021);
    fdt_event_ctr_map[13] = cpu_to_be32(0x00010021);
    fdt_event_ctr_map[14] = cpu_to_be32(cmask);

    /* This is an OpenSBI-specific DT property documented in the OpenSBI docs */
    qemu_fdt_setprop(fdt, pmu_name, "riscv,event-to-mhpmcounters",
                     fdt_event_ctr_map, sizeof(fdt_event_ctr_map));
}
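
/*
 * As an illustration (not generated verbatim by the code above), assuming
 * num_ctrs == 4 (so cmask == 0x78), the property written here corresponds
 * to a DT fragment roughly like:
 *
 *     pmu {
 *         riscv,event-to-mhpmcounters = <0x00000001 0x00000001 0x00000079>,
 *                                       <0x00000002 0x00000002 0x0000007c>,
 *                                       <0x00010019 0x00010019 0x00000078>,
 *                                       <0x0001001b 0x0001001b 0x00000078>,
 *                                       <0x00010021 0x00010021 0x00000078>;
 *     };
 *
 * where each triplet is <event_idx_start event_idx_end counter_bitmask>.
 */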

static bool riscv_pmu_counter_valid(RISCVCPU *cpu, uint32_t ctr_idx)
{
    if (ctr_idx < 3 || ctr_idx >= RV_MAX_MHPMCOUNTERS ||
        !(cpu->pmu_avail_ctrs & BIT(ctr_idx))) {
        return false;
    } else {
        return true;
    }
}

static bool riscv_pmu_counter_enabled(RISCVCPU *cpu, uint32_t ctr_idx)
{
    CPURISCVState *env = &cpu->env;

    if (riscv_pmu_counter_valid(cpu, ctr_idx) &&
        !get_field(env->mcountinhibit, BIT(ctr_idx))) {
        return true;
    } else {
        return false;
    }
}


static int riscv_pmu_incr_ctr_rv32(RISCVCPU *cpu, uint32_t ctr_idx)
{
    CPURISCVState *env = &cpu->env;
    target_ulong max_val = UINT32_MAX;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    bool virt_on = env->virt_enabled;

    /* Privilege mode filtering */
    if ((env->priv == PRV_M &&
        (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_MINH)) ||
        (env->priv == PRV_S && virt_on &&
        (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_VSINH)) ||
        (env->priv == PRV_U && virt_on &&
        (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_VUINH)) ||
        (env->priv == PRV_S && !virt_on &&
        (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_SINH)) ||
        (env->priv == PRV_U && !virt_on &&
        (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_UINH))) {
        return 0;
    }

    /* Handle the overflow scenario */
    if (counter->mhpmcounter_val == max_val) {
        if (counter->mhpmcounterh_val == max_val) {
            counter->mhpmcounter_val = 0;
            counter->mhpmcounterh_val = 0;
            /* Generate interrupt only if OF bit is clear */
            if (!(env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_OF)) {
                env->mhpmeventh_val[ctr_idx] |= MHPMEVENTH_BIT_OF;
                riscv_cpu_update_mip(env, MIP_LCOFIP, BOOL_TO_MASK(1));
            }
        } else {
            counter->mhpmcounterh_val++;
        }
    } else {
        counter->mhpmcounter_val++;
    }

    return 0;
}

static int riscv_pmu_incr_ctr_rv64(RISCVCPU *cpu, uint32_t ctr_idx)
{
    CPURISCVState *env = &cpu->env;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t max_val = UINT64_MAX;
    bool virt_on = env->virt_enabled;

    /* Privilege mode filtering */
    if ((env->priv == PRV_M &&
        (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_MINH)) ||
        (env->priv == PRV_S && virt_on &&
        (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_VSINH)) ||
        (env->priv == PRV_U && virt_on &&
        (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_VUINH)) ||
        (env->priv == PRV_S && !virt_on &&
        (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_SINH)) ||
        (env->priv == PRV_U && !virt_on &&
        (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_UINH))) {
        return 0;
    }

    /* Handle the overflow scenario */
    if (counter->mhpmcounter_val == max_val) {
        counter->mhpmcounter_val = 0;
        /* Generate interrupt only if OF bit is clear */
        if (!(env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_OF)) {
            env->mhpmevent_val[ctr_idx] |= MHPMEVENT_BIT_OF;
            riscv_cpu_update_mip(env, MIP_LCOFIP, BOOL_TO_MASK(1));
        }
    } else {
        counter->mhpmcounter_val++;
    }
    return 0;
}

int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx)
{
    uint32_t ctr_idx;
    int ret;
    CPURISCVState *env = &cpu->env;
    gpointer value;

    if (!cpu->cfg.pmu_num) {
        return 0;
    }
    value = g_hash_table_lookup(cpu->pmu_event_ctr_map,
                                GUINT_TO_POINTER(event_idx));
    if (!value) {
        return -1;
    }

    ctr_idx = GPOINTER_TO_UINT(value);
    if (!riscv_pmu_counter_enabled(cpu, ctr_idx) ||
        get_field(env->mcountinhibit, BIT(ctr_idx))) {
        return -1;
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        ret = riscv_pmu_incr_ctr_rv32(cpu, ctr_idx);
    } else {
        ret = riscv_pmu_incr_ctr_rv64(cpu, ctr_idx);
    }

    return ret;
}

bool riscv_pmu_ctr_monitor_instructions(CPURISCVState *env,
                                        uint32_t target_ctr)
{
    RISCVCPU *cpu;
    uint32_t event_idx;
    uint32_t ctr_idx;

    /* Fixed instret counter */
    if (target_ctr == 2) {
        return true;
    }

    cpu = env_archcpu(env);
    if (!cpu->pmu_event_ctr_map) {
        return false;
    }

    event_idx = RISCV_PMU_EVENT_HW_INSTRUCTIONS;
    ctr_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_event_ctr_map,
                               GUINT_TO_POINTER(event_idx)));
    if (!ctr_idx) {
        return false;
    }

    return target_ctr == ctr_idx;
}

bool riscv_pmu_ctr_monitor_cycles(CPURISCVState *env, uint32_t target_ctr)
{
    RISCVCPU *cpu;
    uint32_t event_idx;
    uint32_t ctr_idx;

    /* Fixed mcycle counter */
    if (target_ctr == 0) {
        return true;
    }

    cpu = env_archcpu(env);
    if (!cpu->pmu_event_ctr_map) {
        return false;
    }

    event_idx = RISCV_PMU_EVENT_HW_CPU_CYCLES;
    ctr_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_event_ctr_map,
                               GUINT_TO_POINTER(event_idx)));

    /* Counter zero is not used for event_ctr_map */
    if (!ctr_idx) {
        return false;
    }

    return target_ctr == ctr_idx;
}

static gboolean pmu_remove_event_map(gpointer key, gpointer value,
                                     gpointer udata)
{
    return GPOINTER_TO_UINT(value) == GPOINTER_TO_UINT(udata);
}

static int64_t pmu_icount_ticks_to_ns(int64_t value)
{
    int64_t ret = 0;

    if (icount_enabled()) {
        ret = icount_to_ns(value);
    } else {
        ret = (NANOSECONDS_PER_SECOND / RISCV_TIMEBASE_FREQ) * value;
    }

    return ret;
}
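
/*
 * Note: with the fixed 1GHz timebase above, NANOSECONDS_PER_SECOND /
 * RISCV_TIMEBASE_FREQ == 1, so in the non-icount case one counter tick maps
 * to exactly one nanosecond, e.g. pmu_icount_ticks_to_ns(1000) == 1000.
 */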

int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
                               uint32_t ctr_idx)
{
    uint32_t event_idx;
    RISCVCPU *cpu = env_archcpu(env);

    if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->pmu_event_ctr_map) {
        return -1;
    }

    /*
     * The expected mhpmevent value is zero for the reset case. Remove the
     * current mapping.
     */
    if (!value) {
        g_hash_table_foreach_remove(cpu->pmu_event_ctr_map,
                                    pmu_remove_event_map,
                                    GUINT_TO_POINTER(ctr_idx));
        return 0;
    }

    event_idx = value & MHPMEVENT_IDX_MASK;
    if (g_hash_table_lookup(cpu->pmu_event_ctr_map,
                            GUINT_TO_POINTER(event_idx))) {
        return 0;
    }

    switch (event_idx) {
    case RISCV_PMU_EVENT_HW_CPU_CYCLES:
    case RISCV_PMU_EVENT_HW_INSTRUCTIONS:
    case RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS:
    case RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS:
    case RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS:
        break;
    default:
        /* We don't support any raw events right now */
        return -1;
    }
    g_hash_table_insert(cpu->pmu_event_ctr_map, GUINT_TO_POINTER(event_idx),
                        GUINT_TO_POINTER(ctr_idx));

    return 0;
}
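
/*
 * Illustrative call sequence (a sketch, not the actual CSR-write code):
 * programming mhpmcounter3 to count DTLB read misses boils down to
 *
 *     riscv_pmu_update_event_map(env, RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS, 3);
 *
 * and writing zero to the same mhpmevent CSR later removes the mapping via
 * pmu_remove_event_map().
 */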

static void pmu_timer_trigger_irq(RISCVCPU *cpu,
                                  enum riscv_pmu_event_idx evt_idx)
{
    uint32_t ctr_idx;
    CPURISCVState *env = &cpu->env;
    PMUCTRState *counter;
    target_ulong *mhpmevent_val;
    uint64_t of_bit_mask;
    int64_t irq_trigger_at;

    if (evt_idx != RISCV_PMU_EVENT_HW_CPU_CYCLES &&
        evt_idx != RISCV_PMU_EVENT_HW_INSTRUCTIONS) {
        return;
    }

    ctr_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_event_ctr_map,
                               GUINT_TO_POINTER(evt_idx)));
    if (!riscv_pmu_counter_enabled(cpu, ctr_idx)) {
        return;
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        mhpmevent_val = &env->mhpmeventh_val[ctr_idx];
        of_bit_mask = MHPMEVENTH_BIT_OF;
    } else {
        mhpmevent_val = &env->mhpmevent_val[ctr_idx];
        of_bit_mask = MHPMEVENT_BIT_OF;
    }

    counter = &env->pmu_ctrs[ctr_idx];
    if (counter->irq_overflow_left > 0) {
        irq_trigger_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                        counter->irq_overflow_left;
        timer_mod_anticipate_ns(cpu->pmu_timer, irq_trigger_at);
        counter->irq_overflow_left = 0;
        return;
    }

    if (cpu->pmu_avail_ctrs & BIT(ctr_idx)) {
        /* Generate interrupt only if OF bit is clear */
        if (!(*mhpmevent_val & of_bit_mask)) {
            *mhpmevent_val |= of_bit_mask;
            riscv_cpu_update_mip(env, MIP_LCOFIP, BOOL_TO_MASK(1));
        }
    }
}

/* Timer callback for instret and cycle counter overflow */
void riscv_pmu_timer_cb(void *priv)
{
    RISCVCPU *cpu = priv;

    /* Timer event was triggered only for these events */
    pmu_timer_trigger_irq(cpu, RISCV_PMU_EVENT_HW_CPU_CYCLES);
    pmu_timer_trigger_irq(cpu, RISCV_PMU_EVENT_HW_INSTRUCTIONS);
}

int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx)
{
    uint64_t overflow_delta, overflow_at;
    int64_t overflow_ns, overflow_left = 0;
    RISCVCPU *cpu = env_archcpu(env);
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];

    if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->cfg.ext_sscofpmf) {
        return -1;
    }

    if (value) {
        overflow_delta = UINT64_MAX - value + 1;
    } else {
        overflow_delta = UINT64_MAX;
    }

    /*
     * QEMU supports only int64_t timers while RISC-V counters are uint64_t.
     * Compute the leftover and save it so that it can be reprogrammed again
     * when the timer expires.
     */
    if (overflow_delta > INT64_MAX) {
        overflow_left = overflow_delta - INT64_MAX;
    }

    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        overflow_ns = pmu_icount_ticks_to_ns((int64_t)overflow_delta);
        overflow_left = pmu_icount_ticks_to_ns(overflow_left);
    } else {
        return -1;
    }
    overflow_at = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                  overflow_ns;

    if (overflow_at > INT64_MAX) {
        overflow_left += overflow_at - INT64_MAX;
        counter->irq_overflow_left = overflow_left;
        overflow_at = INT64_MAX;
    }
    timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);

    return 0;
}
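
/*
 * Worked example for the delta computation above: if software writes
 * mhpmcounter with UINT64_MAX - 999, overflow_delta is 1000 ticks, which
 * pmu_icount_ticks_to_ns() turns into 1000ns at the 1GHz timebase, so the
 * overflow interrupt fires roughly 1us of virtual time later.
 */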

int riscv_pmu_init(RISCVCPU *cpu, int num_counters)
{
    if (num_counters > (RV_MAX_MHPMCOUNTERS - 3)) {
        return -1;
    }

    cpu->pmu_event_ctr_map = g_hash_table_new(g_direct_hash, g_direct_equal);
    if (!cpu->pmu_event_ctr_map) {
        /* PMU support cannot be enabled */
        qemu_log_mask(LOG_UNIMP, "PMU events can't be supported\n");
        cpu->cfg.pmu_num = 0;
        return -1;
    }

    /* Create a bitmask of available programmable counters */
    cpu->pmu_avail_ctrs = MAKE_32BIT_MASK(3, num_counters);

    return 0;
}
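
/*
 * A minimal usage sketch (assuming the usual CPU realize flow): the CPU init
 * code is expected to call something like
 *
 *     riscv_pmu_init(cpu, cpu->cfg.pmu_num);
 *
 * before the hpmcounter CSRs are exercised, so that pmu_avail_ctrs and the
 * event map are in place.
 */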