linux/arch/x86/kernel/cpu/aperfmperf.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * x86 APERF/MPERF KHz calculation for
 * /sys/.../cpufreq/scaling_cur_freq
 *
 * Copyright (C) 2017 Intel Corp.
 * Author: Len Brown <len.brown@intel.com>
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/math64.h>
#include <linux/percpu.h>
#include <linux/cpufreq.h>
#include <linux/smp.h>
#include <linux/sched/isolation.h>
#include <linux/rcupdate.h>

#include "cpu.h"

struct aperfmperf_sample {
	unsigned int	khz;		/* most recently computed frequency */
	atomic_t	scfpending;	/* nonzero while a snapshot IPI is in flight */
	ktime_t		time;		/* timestamp of the last snapshot */
	u64		aperf;		/* last MSR_IA32_APERF reading */
	u64		mperf;		/* last MSR_IA32_MPERF reading */
};

static DEFINE_PER_CPU(struct aperfmperf_sample, samples);

#define APERFMPERF_CACHE_THRESHOLD_MS	10
#define APERFMPERF_REFRESH_DELAY_MS	10
#define APERFMPERF_STALE_THRESHOLD_MS	1000

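/*
 * Each CPU caches its most recent sample in @samples.  Readers reuse a
 * sample newer than APERFMPERF_CACHE_THRESHOLD_MS, treat one older than
 * APERFMPERF_STALE_THRESHOLD_MS as stale, and otherwise kick the target
 * CPU to take a fresh snapshot via smp_call_function_single().
 */
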
/*
 * aperfmperf_snapshot_khz()
 * On the current CPU, snapshot APERF, MPERF, and the current ktime;
 * compute kHz from the deltas and save the snapshot.  Callers skip the
 * call entirely if a snapshot was already taken within the last
 * APERFMPERF_CACHE_THRESHOLD_MS (10 ms).
 */
static void aperfmperf_snapshot_khz(void *dummy)
{
	u64 aperf, aperf_delta;
	u64 mperf, mperf_delta;
	struct aperfmperf_sample *s = this_cpu_ptr(&samples);
	unsigned long flags;

	/* Read both MSRs with interrupts off so the pair is coherent. */
	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	local_irq_restore(flags);

	aperf_delta = aperf - s->aperf;
	mperf_delta = mperf - s->mperf;

	/*
	 * There is no architectural guarantee that MPERF
	 * increments faster than we can read it.
	 */
	if (mperf_delta == 0)
		return;

	s->time = ktime_get();
	s->aperf = aperf;
	s->mperf = mperf;
	s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);

	/* Order the sample update before clearing the pending flag. */
	atomic_set_release(&s->scfpending, 0);
}
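
/*
 * Worked example with illustrative numbers (not taken from this file):
 * if cpu_khz = 2000000 (a 2 GHz base clock) and the last interval saw
 * aperf_delta = 2e9 against mperf_delta = 1e9, the CPU averaged twice
 * its base clock, so khz = 2000000 * 2e9 / 1e9 = 4000000, i.e. 4 GHz.
 */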

static bool aperfmperf_snapshot_cpu(int cpu, ktime_t now, bool wait)
{
	s64 time_delta = ktime_ms_delta(now, per_cpu(samples.time, cpu));
	struct aperfmperf_sample *s = per_cpu_ptr(&samples, cpu);

	/* Don't bother re-computing within the cache threshold time. */
	if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
		return true;

	/* Kick off a snapshot unless one is already pending, or when waiting. */
	if (!atomic_xchg(&s->scfpending, 1) || wait)
		smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, wait);

	/* Return false if the previous iteration was too long ago. */
	return time_delta <= APERFMPERF_STALE_THRESHOLD_MS;
}
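
/*
 * Note: atomic_xchg() is a value-returning atomic and therefore fully
 * ordered, so no explicit barrier is needed between setting ->scfpending
 * and sending the IPI; it also pairs with the atomic_set_release() in
 * aperfmperf_snapshot_khz() that clears the flag.
 */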

unsigned int aperfmperf_get_khz(int cpu)
{
	if (!cpu_khz)
		return 0;

	if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
		return 0;

	/* Don't send IPIs to CPUs isolated from housekeeping work. */
	if (!housekeeping_cpu(cpu, HK_FLAG_MISC))
		return 0;

	if (rcu_is_idle_cpu(cpu))
		return 0; /* Idle CPUs are completely uninteresting. */

	aperfmperf_snapshot_cpu(cpu, ktime_get(), true);
	return per_cpu(samples.khz, cpu);
}
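
/*
 * In this kernel version, aperfmperf_get_khz() backs the "cpu MHz"
 * field of /proc/cpuinfo, and arch_freq_prepare_all() below lets that
 * path refresh every CPU's sample up front instead of waiting on each
 * CPU in turn.
 */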

/*
 * Fire off asynchronous snapshot requests for all CPUs of interest,
 * then sleep once if any CPU's sample was stale, so a subsequent
 * per-CPU read pass finds fresh values without blocking per CPU.
 */
void arch_freq_prepare_all(void)
{
	ktime_t now = ktime_get();
	bool wait = false;
	int cpu;

	if (!cpu_khz)
		return;

	if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
		return;

	for_each_online_cpu(cpu) {
		if (!housekeeping_cpu(cpu, HK_FLAG_MISC))
			continue;
		if (rcu_is_idle_cpu(cpu))
			continue; /* Idle CPUs are completely uninteresting. */
		if (!aperfmperf_snapshot_cpu(cpu, now, false))
			wait = true;
	}

	if (wait)
		msleep(APERFMPERF_REFRESH_DELAY_MS);
}
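
/*
 * Typical usage pattern (illustrative sketch, not code from this file):
 * a reader that wants every CPU's frequency calls arch_freq_prepare_all()
 * once, then queries each CPU:
 *
 *	arch_freq_prepare_all();
 *	for_each_online_cpu(cpu)
 *		pr_info("cpu%d: %u kHz\n", cpu, aperfmperf_get_khz(cpu));
 */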

unsigned int arch_freq_get_on_cpu(int cpu)
{
	struct aperfmperf_sample *s = per_cpu_ptr(&samples, cpu);

	if (!cpu_khz)
		return 0;

	if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
		return 0;

	if (!housekeeping_cpu(cpu, HK_FLAG_MISC))
		return 0;

	/* Report the sample unless it spanned a stale (too-long) interval. */
	if (aperfmperf_snapshot_cpu(cpu, ktime_get(), true))
		return per_cpu(samples.khz, cpu);

	/* Re-measure over just the refresh delay to avoid a stale average. */
	msleep(APERFMPERF_REFRESH_DELAY_MS);
	atomic_set(&s->scfpending, 1);
	smp_mb(); /* ->scfpending before smp_call_function_single(). */
	smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);

	return per_cpu(samples.khz, cpu);
}
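
/*
 * arch_freq_get_on_cpu() overrides cpufreq's weak hook of the same name;
 * it is what show_scaling_cur_freq() reports when userspace reads
 * /sys/devices/system/cpu/cpuN/cpufreq/scaling_cur_freq, per the header
 * comment at the top of this file.
 */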