linux/arch/x86/lib/msr-smp.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <asm/msr.h>

/*
 * Cross-call callbacks, run on the target CPU: read or write rv->msr_no
 * through either the caller's per-CPU slot (when rv->msrs is set, as used
 * by the *_on_cpus() batch interface below) or the inline rv->reg.
 */
static void __rdmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;
	int this_cpu = raw_smp_processor_id();

	if (rv->msrs)
		reg = per_cpu_ptr(rv->msrs, this_cpu);
	else
		reg = &rv->reg;

	rdmsr(rv->msr_no, reg->l, reg->h);
}

static void __wrmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;
	int this_cpu = raw_smp_processor_id();

	if (rv->msrs)
		reg = per_cpu_ptr(rv->msrs, this_cpu);
	else
		reg = &rv->reg;

	wrmsr(rv->msr_no, reg->l, reg->h);
}

/*
 * Read or write an MSR on a given CPU.  Each helper issues a synchronous
 * cross-call via smp_call_function_single() and returns its error code;
 * the l/h helpers handle the value as two 32-bit halves, the 'q' helpers
 * as one 64-bit quantity.
 */
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
	*l = rv.reg.l;
	*h = rv.reg.h;

	return err;
}
EXPORT_SYMBOL(rdmsr_on_cpu);

int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
	*q = rv.reg.q;

	return err;
}
EXPORT_SYMBOL(rdmsrl_on_cpu);

int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);

	return err;
}
EXPORT_SYMBOL(wrmsr_on_cpu);

int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.q = q;

	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);

	return err;
}
EXPORT_SYMBOL(wrmsrl_on_cpu);
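
/*
 * Usage sketch (illustrative only, not part of this file; SOME_MSR and
 * SOME_BIT are placeholder names): a read-modify-write of a 64-bit MSR on
 * one remote CPU, checking only the cross-call result.  A non-zero return
 * means the call itself failed (e.g. the CPU is offline); the MSR access
 * itself is not checked here, the *_safe_* variants below do that.
 *
 *	u64 val;
 *	int err;
 *
 *	err = rdmsrl_on_cpu(cpu, SOME_MSR, &val);
 *	if (err)
 *		return err;
 *
 *	val |= SOME_BIT;
 *	return wrmsrl_on_cpu(cpu, SOME_MSR, val);
 */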

static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
			    struct msr *msrs,
			    void (*msr_func) (void *info))
{
	struct msr_info rv;
	int this_cpu;

	memset(&rv, 0, sizeof(rv));

	rv.msrs   = msrs;
	rv.msr_no = msr_no;

	this_cpu = get_cpu();

	/*
	 * smp_call_function_many() does not run the callback on the calling
	 * CPU, so do that here while preemption is disabled.
	 */
	if (cpumask_test_cpu(this_cpu, mask))
		msr_func(&rv);

	smp_call_function_many(mask, msr_func, &rv, 1);
	put_cpu();
}

/*
 * rdmsr on a bunch of CPUs
 *
 * @mask:       which CPUs
 * @msr_no:     which MSR
 * @msrs:       array of MSR values
 */
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
}
EXPORT_SYMBOL(rdmsr_on_cpus);
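
/*
 * Usage sketch (illustrative only, not part of this file; SOME_MSR is a
 * placeholder): read one MSR on all online CPUs in a single pass.  The
 * per-CPU buffer is assumed to come from msrs_alloc() (arch/x86/lib/msr.c);
 * each CPU's value is then picked up with per_cpu_ptr(), matching what
 * __rdmsr_on_cpu() stores above.
 *
 *	struct msr *msrs = msrs_alloc();
 *	int cpu;
 *
 *	if (!msrs)
 *		return -ENOMEM;
 *
 *	rdmsr_on_cpus(cpu_online_mask, SOME_MSR, msrs);
 *
 *	for_each_cpu(cpu, cpu_online_mask)
 *		pr_debug("CPU%d: %#llx\n", cpu, per_cpu_ptr(msrs, cpu)->q);
 *
 *	msrs_free(msrs);
 */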

/*
 * wrmsr on a bunch of CPUs
 *
 * @mask:       which CPUs
 * @msr_no:     which MSR
 * @msrs:       array of MSR values
 */
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
}
EXPORT_SYMBOL(wrmsr_on_cpus);
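
/*
 * Continuing the sketch above (illustrative only; SOME_BIT is a
 * placeholder): the write counterpart is typically the second half of a
 * read-modify-write over the same mask, reusing the buffer filled by
 * rdmsr_on_cpus():
 *
 *	for_each_cpu(cpu, cpu_online_mask)
 *		per_cpu_ptr(msrs, cpu)->l |= SOME_BIT;
 *
 *	wrmsr_on_cpus(cpu_online_mask, SOME_MSR, msrs);
 */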

struct msr_info_completion {
	struct msr_info		msr;
	struct completion	done;
};

/*
 * These "safe" variants are slower and should be used when the target MSR
 * may not actually exist.
 */
static void __rdmsr_safe_on_cpu(void *info)
{
	struct msr_info_completion *rv = info;

	rv->msr.err = rdmsr_safe(rv->msr.msr_no, &rv->msr.reg.l, &rv->msr.reg.h);
	complete(&rv->done);
}

static void __wrmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
}

int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	struct msr_info_completion rv;
	call_single_data_t csd = {
		.func	= __rdmsr_safe_on_cpu,
		.info	= &rv,
	};
	int err;

	memset(&rv, 0, sizeof(rv));
	init_completion(&rv.done);
	rv.msr.msr_no = msr_no;

	/*
	 * Use an asynchronous cross-call plus a completion: the caller can
	 * sleep while the remote CPU does the read instead of busy-waiting
	 * as a synchronous smp_call_function_single() would.
	 */
	err = smp_call_function_single_async(cpu, &csd);
	if (!err) {
		wait_for_completion(&rv.done);
		err = rv.msr.err;
	}
	*l = rv.msr.reg.l;
	*h = rv.msr.reg.h;

	return err;
}
EXPORT_SYMBOL(rdmsr_safe_on_cpu);

int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_on_cpu);

int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.q = q;

	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsrl_safe_on_cpu);

int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	u32 low, high;
	int err;

	err = rdmsr_safe_on_cpu(cpu, msr_no, &low, &high);
	*q = (u64)high << 32 | low;

	return err;
}
EXPORT_SYMBOL(rdmsrl_safe_on_cpu);
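
/*
 * Usage sketch (illustrative only; SOME_MSR is a placeholder): probe an MSR
 * that may not exist on the target CPU.  Two failures are distinguishable:
 * the cross-call itself failing (e.g. CPU offline) and the MSR access
 * faulting on the remote CPU (the error rdmsr_safe() hands back).
 *
 *	u64 val;
 *	int err;
 *
 *	err = rdmsrl_safe_on_cpu(cpu, SOME_MSR, &val);
 *	if (err) {
 *		pr_debug("CPU%u: MSR %#x not readable (%d)\n",
 *			 cpu, SOME_MSR, err);
 *		return err;
 *	}
 */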

/*
 * These variants are significantly slower, but allow control over the
 * entire 32-bit GPR set.
 */
static void __rdmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	rv->err = rdmsr_safe_regs(rv->regs);
}

static void __wrmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	rv->err = wrmsr_safe_regs(rv->regs);
}

int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
{
	int err;
	struct msr_regs_info rv;

	rv.regs   = regs;
	rv.err    = -EIO;
	err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);

int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
{
	int err;
	struct msr_regs_info rv;

	rv.regs = regs;
	rv.err  = -EIO;
	err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
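
/*
 * Usage sketch (illustrative only; SOME_MSR is a placeholder, and the
 * index-to-register mapping is an assumption based on rdmsr_safe_regs() in
 * arch/x86/lib/msr-reg.S, where regs[0], regs[1] and regs[2] carry EAX,
 * ECX and EDX): the MSR number goes into the ECX slot and the result comes
 * back as EDX:EAX.
 *
 *	u32 regs[8] = { 0 };
 *	int err;
 *
 *	regs[1] = SOME_MSR;
 *	err = rdmsr_safe_regs_on_cpu(cpu, regs);
 *	if (!err)
 *		pr_debug("MSR %#x = %#llx\n", SOME_MSR,
 *			 (u64)regs[2] << 32 | regs[0]);
 */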