linux/arch/x86/hyperv/mmu.c
#define pr_fmt(fmt)  "Hyper-V: " fmt

#include <linux/hyperv.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/fpu/api.h>
#include <asm/mshyperv.h>
#include <asm/msr.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/hyperv.h>

/* Each gva in gva_list encodes up to 4096 pages to flush */
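/* With 4 KiB pages, one entry thus covers up to 16 MiB of address space. */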
#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)

static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
                                      const struct flush_tlb_info *info);

/*
 * Fills in gva_list starting from offset. Returns the number of items added.
 */
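/*
 * Example: flushing [0x10000, 0x13000) yields the single entry
 * 0x10000 | 2, i.e. the page at 0x10000 plus two additional pages.
 */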
static inline int fill_gva_list(u64 gva_list[], int offset,
                                unsigned long start, unsigned long end)
{
        int gva_n = offset;
        unsigned long cur = start, diff;

        do {
                diff = end > cur ? end - cur : 0;

                gva_list[gva_n] = cur & PAGE_MASK;
                /*
                 * Lower 12 bits encode the number of additional
                 * pages to flush (in addition to the 'cur' page).
                 */
                if (diff >= HV_TLB_FLUSH_UNIT) {
                        gva_list[gva_n] |= ~PAGE_MASK;
                        cur += HV_TLB_FLUSH_UNIT;
                } else if (diff) {
                        gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
                        cur = end;
                }

                gva_n++;

        } while (cur < end);

        return gva_n - offset;
}

static void hyperv_flush_tlb_others(const struct cpumask *cpus,
                                    const struct flush_tlb_info *info)
{
        int cpu, vcpu, gva_n, max_gvas;
        struct hv_tlb_flush **flush_pcpu;
        struct hv_tlb_flush *flush;
        u64 status;
        unsigned long flags;

        trace_hyperv_mmu_flush_tlb_others(cpus, info);

        if (!hv_hypercall_pg)
                goto do_native;

        local_irq_save(flags);
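
        /*
         * Grab this CPU's pre-allocated hypercall input page; interrupts
         * were disabled above, so the page cannot be reused underneath us.
         */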
        flush_pcpu = (struct hv_tlb_flush **)
                     this_cpu_ptr(hyperv_pcpu_input_arg);

        flush = *flush_pcpu;

        if (unlikely(!flush)) {
                local_irq_restore(flags);
                goto do_native;
        }

        if (info->mm) {
                /*
                 * AddressSpace argument must match the CR3 with PCID bits
                 * stripped out.
                 */
                flush->address_space = virt_to_phys(info->mm->pgd);
                flush->address_space &= CR3_ADDR_MASK;
                flush->flags = 0;
        } else {
                flush->address_space = 0;
                flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
        }

        flush->processor_mask = 0;
        if (cpumask_equal(cpus, cpu_present_mask)) {
                flush->flags |= HV_FLUSH_ALL_PROCESSORS;
        } else {
                /*
                 * From the supplied CPU set we need to figure out if we can
                 * get away with the cheaper HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE}
                 * hypercalls. This is possible when the highest VP number in
                 * the set is < 64. As VP numbers are usually assigned in
                 * ascending order and match Linux CPU ids, check the VP
                 * number of the highest set bit first: this quickly tells us
                 * whether the *_EX hypercalls are unavoidable. All VP numbers
                 * are still checked while walking the supplied CPU set, so
                 * the result stays correct in all cases.
                 */
                cpu = cpumask_last(cpus);

                if (cpu < nr_cpumask_bits && hv_cpu_number_to_vp_number(cpu) >= 64)
                        goto do_ex_hypercall;

                for_each_cpu(cpu, cpus) {
                        vcpu = hv_cpu_number_to_vp_number(cpu);
                        if (vcpu == VP_INVAL) {
                                local_irq_restore(flags);
                                goto do_native;
                        }

                        if (vcpu >= 64)
                                goto do_ex_hypercall;

                        __set_bit(vcpu, (unsigned long *)
                                  &flush->processor_mask);
                }

                /* nothing to flush if 'processor_mask' ends up being empty */
                if (!flush->processor_mask) {
                        local_irq_restore(flags);
                        return;
                }
        }

        /*
         * A single hypercall can take no more than max_gvas gva_list
         * entries. Flush the whole address space if we were asked to
         * do more.
         */
        max_gvas = (PAGE_SIZE - sizeof(*flush)) / sizeof(flush->gva_list[0]);
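        /*
         * With 4 KiB pages and the 24-byte fixed header of
         * struct hv_tlb_flush, this works out to 509 entries.
         */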

        if (info->end == TLB_FLUSH_ALL) {
                flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
                status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
                                         flush, NULL);
        } else if (info->end &&
                   ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
                status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
                                         flush, NULL);
        } else {
                gva_n = fill_gva_list(flush->gva_list, 0,
                                      info->start, info->end);
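                /*
                 * gva_n is the repetition count; each rep consumes one
                 * gva_list entry.
                 */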
                status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
                                             gva_n, 0, flush, NULL);
        }
        goto check_status;

do_ex_hypercall:
        status = hyperv_flush_tlb_others_ex(cpus, info);

check_status:
        local_irq_restore(flags);

        if (hv_result_success(status))
                return;
do_native:
        native_flush_tlb_others(cpus, info);
}

static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
                                      const struct flush_tlb_info *info)
{
        int nr_bank = 0, max_gvas, gva_n;
        struct hv_tlb_flush_ex **flush_pcpu;
        struct hv_tlb_flush_ex *flush;
        u64 status;

        if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
                return HV_STATUS_INVALID_PARAMETER;

        flush_pcpu = (struct hv_tlb_flush_ex **)
                     this_cpu_ptr(hyperv_pcpu_input_arg);
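
        /*
         * The caller has already verified that the per-CPU input page is
         * allocated; interrupts are still disabled at this point.
         */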
        flush = *flush_pcpu;

        if (info->mm) {
                /*
                 * AddressSpace argument must match the CR3 with PCID bits
                 * stripped out.
                 */
                flush->address_space = virt_to_phys(info->mm->pgd);
                flush->address_space &= CR3_ADDR_MASK;
                flush->flags = 0;
        } else {
                flush->address_space = 0;
                flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
        }

        flush->hv_vp_set.valid_bank_mask = 0;

        flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
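        /*
         * cpumask_to_vpset() fills in the sparse VP set and returns the
         * number of 64-bit banks used, or a negative value when a CPU in
         * the mask has no valid VP number.
         */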
        nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus);
        if (nr_bank < 0)
                return HV_STATUS_INVALID_PARAMETER;

        /*
         * A single hypercall can take no more than max_gvas gva_list
         * entries. Flush the whole address space if we were asked to
         * do more.
         */
        max_gvas =
                (PAGE_SIZE - sizeof(*flush) - nr_bank *
                 sizeof(flush->hv_vp_set.bank_contents[0])) /
                sizeof(flush->gva_list[0]);
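        /*
         * Unlike the non-EX case, the sparse VP set is variable-length,
         * so the room left for gva_list shrinks by 8 bytes per bank.
         */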
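
        /*
         * For the *_EX hypercalls, nr_bank is passed as the variable
         * header size: the number of 64-bit banks in the sparse VP set.
         */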
        if (info->end == TLB_FLUSH_ALL) {
                flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
                status = hv_do_rep_hypercall(
                        HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
                        0, nr_bank, flush, NULL);
        } else if (info->end &&
                   ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
                status = hv_do_rep_hypercall(
                        HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
                        0, nr_bank, flush, NULL);
        } else {
                gva_n = fill_gva_list(flush->gva_list, nr_bank,
                                      info->start, info->end);
                status = hv_do_rep_hypercall(
                        HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX,
                        gva_n, nr_bank, flush, NULL);
        }

        return status;
}
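
/*
 * Called during platform setup (ms_hyperv_init_platform()) to route remote
 * TLB flushes through the hypervisor when it recommends doing so.
 */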
void hyperv_setup_mmu_ops(void)
{
        if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
                return;

        pr_info("Using hypercall for remote TLB flush\n");
        pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others;
        pv_mmu_ops.tlb_remove_table = tlb_remove_table;
}