linux/arch/mips/kvm/tlb.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling; this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

#define PRIx64 "llx"

atomic_t kvm_mips_instance;
EXPORT_SYMBOL(kvm_mips_instance);

/* These function pointers are initialized once the KVM module is loaded */
pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_is_error_pfn);

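/*
 * Helpers returning the host ASID currently assigned to this VCPU's guest
 * kernel or guest user address space on the executing CPU.
 */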
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}

uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}

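/*
 * Despite the name, this returns the host TLB index reserved for the guest
 * commpage mapping; it is used for an indexed TLB write further below.
 */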
inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->arch.commpage_tlb;
}

/* Structure defining a TLB entry data set. */

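/*
 * Dump every entry of the host (hardware) TLB to the kernel log, preserving
 * the current EntryHi and PageMask across the reads.
 */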
void kvm_mips_dump_host_tlbs(void)
{
        unsigned long old_entryhi;
        unsigned long old_pagemask;
        struct kvm_mips_tlb tlb;
        unsigned long flags;
        int i;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();

        kvm_info("HOST TLBs:\n");
        kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

        for (i = 0; i < current_cpu_data.tlbsize; i++) {
                write_c0_index(i);
                mtc0_tlbw_hazard();

                tlb_read();
                tlbw_use_hazard();

                tlb.tlb_hi = read_c0_entryhi();
                tlb.tlb_lo0 = read_c0_entrylo0();
                tlb.tlb_lo1 = read_c0_entrylo1();
                tlb.tlb_mask = read_c0_pagemask();

                kvm_info("TLB%c%3d Hi 0x%08lx ",
                         (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
                         i, tlb.tlb_hi);
                kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
                         (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
                         (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
                         (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
                         (tlb.tlb_lo0 >> 3) & 7);
                kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
                         (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
                         (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
                         (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
                         (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
        }
        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        mtc0_tlbw_hazard();
        local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);

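/* Dump the software-managed guest TLB entries of @vcpu to the kernel log. */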
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb tlb;
        int i;

        kvm_info("Guest TLBs:\n");
        kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                tlb = vcpu->arch.guest_tlb[i];
                kvm_info("TLB%c%3d Hi 0x%08lx ",
                         (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
                         i, tlb.tlb_hi);
                kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
                         (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
                         (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
                         (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
                         (tlb.tlb_lo0 >> 3) & 7);
                kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
                         (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
                         (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
                         (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
                         (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
        }
}
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);

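/*
 * Ensure the host page backing guest frame @gfn is present and record its
 * pfn in the guest physical map. Returns 0 on success, or -EFAULT if no
 * backing page could be obtained.
 */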
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
        int srcu_idx, err = 0;
        pfn_t pfn;

        if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
                return 0;

        srcu_idx = srcu_read_lock(&kvm->srcu);
        pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

        if (kvm_mips_is_error_pfn(pfn)) {
                kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
                err = -EFAULT;
                goto out;
        }

        kvm->arch.guest_pmap[gfn] = pfn;
out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return err;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
                                                    unsigned long gva)
{
        gfn_t gfn;
        uint32_t offset = gva & ~PAGE_MASK;
        struct kvm *kvm = vcpu->kvm;

        if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
                kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
                        __builtin_return_address(0), gva);
                return KVM_INVALID_PAGE;
        }

        gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

        if (gfn >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
                        gva);
                return KVM_INVALID_PAGE;
        }

        if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
                return KVM_INVALID_ADDR;

        return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);

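/*
 * Write an (EntryHi, EntryLo0, EntryLo1) triple into the host TLB: a probe
 * hit replaces the matching entry, a miss uses a random slot. When
 * flush_dcache_mask is non-zero, the data cache pages covered by the valid
 * EntryLo halves are flushed.
 */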
/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
                            unsigned long entrylo0, unsigned long entrylo1,
                            int flush_dcache_mask)
{
        unsigned long flags;
        unsigned long old_entryhi;
        int idx;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        write_c0_entryhi(entryhi);
        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        if (idx > current_cpu_data.tlbsize) {
                kvm_err("%s: Invalid Index: %d\n", __func__, idx);
                kvm_mips_dump_host_tlbs();
                local_irq_restore(flags);
                return -1;
        }

        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();

        if (idx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();
        tlbw_use_hazard();

        kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
                  vcpu->arch.pc, idx, read_c0_entryhi(),
                  read_c0_entrylo0(), read_c0_entrylo1());

        /* Flush D-cache */
        if (flush_dcache_mask) {
                if (entrylo0 & MIPS3_PG_V) {
                        ++vcpu->stat.flush_dcache_exits;
                        flush_data_cache_page((entryhi & VPN2_MASK) &
                                              ~flush_dcache_mask);
                }
                if (entrylo1 & MIPS3_PG_V) {
                        ++vcpu->stat.flush_dcache_exits;
                        flush_data_cache_page(((entryhi & VPN2_MASK) &
                                               ~flush_dcache_mask) |
                                              (0x1 << PAGE_SHIFT));
                }
        }

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();
        local_irq_restore(flags);
        return 0;
}

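/*
 * Handle a guest KSEG0 TLB fault: map the even/odd guest page pair containing
 * @badvaddr into a single host TLB entry under the guest kernel ASID.
 */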
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
                                    struct kvm_vcpu *vcpu)
{
        gfn_t gfn;
        pfn_t pfn0, pfn1;
        unsigned long vaddr = 0;
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        int even;
        struct kvm *kvm = vcpu->kvm;
        const int flush_dcache_mask = 0;

        if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
                kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }

        gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
        if (gfn >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
                        gfn, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }
        even = !(gfn & 0x1);
        vaddr = badvaddr & (PAGE_MASK << 1);

        if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
                return -1;

        if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
                return -1;

        if (even) {
                pfn0 = kvm->arch.guest_pmap[gfn];
                pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
        } else {
                pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
                pfn1 = kvm->arch.guest_pmap[gfn];
        }

        entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                   (1 << 2) | (0x1 << 1);
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
                   (1 << 2) | (0x1 << 1);

        return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                       flush_dcache_mask);
}
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);

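/*
 * Map the VCPU's commpage at @badvaddr into the host TLB slot reserved for it
 * (see kvm_mips_get_commpage_asid()), using the guest kernel ASID.
 */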
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
        struct kvm_vcpu *vcpu)
{
        pfn_t pfn0, pfn1;
        unsigned long flags, old_entryhi = 0, vaddr = 0;
        unsigned long entrylo0 = 0, entrylo1 = 0;

        pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
        pfn1 = 0;
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                   (1 << 2) | (0x1 << 1);
        entrylo1 = 0;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        vaddr = badvaddr & (PAGE_MASK << 1);
        write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
        mtc0_tlbw_hazard();
        write_c0_entrylo0(entrylo0);
        mtc0_tlbw_hazard();
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        write_c0_index(kvm_mips_get_commpage_asid(vcpu));
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
                  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
                  read_c0_entrylo0(), read_c0_entrylo1());

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();
        local_irq_restore(flags);

        return 0;
}
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);

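/*
 * Build a host TLB entry from guest TLB entry @tlb: fault in the backing host
 * pages, optionally return their physical addresses via @hpa0/@hpa1, and
 * write the entry under the ASID matching the guest's current mode.
 */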
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
                                         struct kvm_mips_tlb *tlb,
                                         unsigned long *hpa0,
                                         unsigned long *hpa1)
{
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        struct kvm *kvm = vcpu->kvm;
        pfn_t pfn0, pfn1;

        if ((tlb->tlb_hi & VPN2_MASK) == 0) {
                pfn0 = 0;
                pfn1 = 0;
        } else {
                if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
                                           >> PAGE_SHIFT) < 0)
                        return -1;

                if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
                                           >> PAGE_SHIFT) < 0)
                        return -1;

                pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
                                            >> PAGE_SHIFT];
                pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
                                            >> PAGE_SHIFT];
        }

        if (hpa0)
                *hpa0 = pfn0 << PAGE_SHIFT;

        if (hpa1)
                *hpa1 = pfn1 << PAGE_SHIFT;

        /* Get attributes from the Guest TLB */
        entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
                                               kvm_mips_get_kernel_asid(vcpu) :
                                               kvm_mips_get_user_asid(vcpu));
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
                   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

        kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
                  tlb->tlb_lo0, tlb->tlb_lo1);

        return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                       tlb->tlb_mask);
}
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);

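/*
 * Search the guest TLB array for an entry whose VPN2 and ASID match @entryhi.
 * Returns the matching index, or -1 if no entry matches.
 */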
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
        int i;
        int index = -1;
        struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
                    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
                        index = i;
                        break;
                }
        }

        /* Avoid reading past the array when no entry matched (index < 0) */
        kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
                  __func__, entryhi, index,
                  index >= 0 ? tlb[index].tlb_lo0 : 0UL,
                  index >= 0 ? tlb[index].tlb_lo1 : 0UL);

        return index;
}
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);

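/*
 * Probe the host TLB for @vaddr under the guest's current (kernel or user)
 * ASID. Returns the probed index, which is negative if the address is not
 * present.
 */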
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
        unsigned long old_entryhi, flags;
        int idx;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        if (KVM_GUEST_KERNEL_MODE(vcpu))
                write_c0_entryhi((vaddr & VPN2_MASK) |
                                 kvm_mips_get_kernel_asid(vcpu));
        else {
                write_c0_entryhi((vaddr & VPN2_MASK) |
                                 kvm_mips_get_user_asid(vcpu));
        }

        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

        kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

        return idx;
}
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);

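/*
 * Probe the host TLB for @va under the guest user ASID and, if an entry is
 * found, overwrite it with a unique EntryHi and zeroed EntryLo values.
 */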
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
        int idx;
        unsigned long flags, old_entryhi;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        if (idx >= current_cpu_data.tlbsize)
                BUG();

        /* A probe hit returns an index >= 0; index 0 is a valid slot too */
        if (idx >= 0) {
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();

                write_c0_entrylo0(0);
                mtc0_tlbw_hazard();

                write_c0_entrylo1(0);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                mtc0_tlbw_hazard();
        }

        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

        if (idx >= 0)
                kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
                          (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);

        return 0;
}
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);

/* XXXKYMA: Fix this -- Guest USER and KERNEL no longer share the same ASID */
int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
{
        unsigned long flags, old_entryhi;

        if (index >= current_cpu_data.tlbsize)
                BUG();

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        write_c0_entryhi(UNIQUE_ENTRYHI(index));
        mtc0_tlbw_hazard();

        write_c0_index(index);
        mtc0_tlbw_hazard();

        write_c0_entrylo0(0);
        mtc0_tlbw_hazard();

        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();

        tlb_write_indexed();
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

        return 0;
}

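/*
 * Invalidate every host TLB entry. If @skip_kseg0 is set, entries whose
 * EntryHi lies in the guest KSEG0 range are left intact.
 */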
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
        unsigned long flags;
        unsigned long old_entryhi, entryhi;
        unsigned long old_pagemask;
        int entry = 0;
        int maxentry = current_cpu_data.tlbsize;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();

        /* Blast 'em all away. */
        for (entry = 0; entry < maxentry; entry++) {
                write_c0_index(entry);
                mtc0_tlbw_hazard();

                if (skip_kseg0) {
                        tlb_read();
                        tlbw_use_hazard();

                        entryhi = read_c0_entryhi();

                        /* Don't blow away guest kernel entries */
                        if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
                                continue;
                }

                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                mtc0_tlbw_hazard();
                write_c0_entrylo0(0);
                mtc0_tlbw_hazard();
                write_c0_entrylo1(0);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                mtc0_tlbw_hazard();
        }

        tlbw_use_hazard();

        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_mips_flush_host_tlb);

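/*
 * Allocate the next ASID from the per-CPU ASID cache for @mm, flushing the
 * local TLB (and the I-cache on VTAG I-cache CPUs) and bumping the version
 * number when the ASID space wraps around.
 */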
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
                             struct kvm_vcpu *vcpu)
{
        unsigned long asid = asid_cache(cpu);

        asid += ASID_INC;
        if (!(asid & ASID_MASK)) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();

                kvm_local_flush_tlb_all();      /* start new asid cycle */

                if (!asid)      /* fix version if needed */
                        asid = ASID_FIRST_VERSION;
        }

        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

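/*
 * Invalidate the entire TLB of the local CPU by overwriting each slot with a
 * unique EntryHi and zeroed EntryLo values.
 */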
void kvm_local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry = 0;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        /* Blast 'em all away. */
        while (entry < current_cpu_data.tlbsize) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                entry++;
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        mtc0_tlbw_hazard();

        local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_local_flush_tlb_all);

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:       Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
        if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
                hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        unsigned long flags;
        int newasid = 0;

        kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

        /* Allocate new kernel and user ASIDs if needed */

        local_irq_save(flags);

        if (((vcpu->arch.
              guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) {
                kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
                vcpu->arch.guest_kernel_asid[cpu] =
                    vcpu->arch.guest_kernel_mm.context.asid[cpu];
                kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
                vcpu->arch.guest_user_asid[cpu] =
                    vcpu->arch.guest_user_mm.context.asid[cpu];
                newasid++;

                kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
                          cpu_context(cpu, current->mm));
                kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
                          cpu, vcpu->arch.guest_kernel_asid[cpu]);
                kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
                          vcpu->arch.guest_user_asid[cpu]);
        }

        if (vcpu->arch.last_sched_cpu != cpu) {
                kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
                          vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
                /*
                 * Migrate the timer interrupt to the current CPU so that it
                 * always interrupts the guest and synchronously triggers a
                 * guest timer interrupt.
                 */
                kvm_mips_migrate_count(vcpu);
        }

        if (!newasid) {
                /*
                 * If we were preempted while the guest was executing, reload
                 * the pre-empted ASID.
                 */
                if (current->flags & PF_VCPU) {
                        write_c0_entryhi(vcpu->arch.
                                         preempt_entryhi & ASID_MASK);
                        ehb();
                }
        } else {
                /* New ASIDs were allocated for the VM */

                /*
                 * Were we in guest context? If so, the pre-empted ASID is no
                 * longer valid; set it according to the current mode of the
                 * guest (kernel or user).
                 */
                if (current->flags & PF_VCPU) {
                        if (KVM_GUEST_KERNEL_MODE(vcpu))
                                write_c0_entryhi(vcpu->arch.
                                                 guest_kernel_asid[cpu] &
                                                 ASID_MASK);
                        else
                                write_c0_entryhi(vcpu->arch.
                                                 guest_user_asid[cpu] &
                                                 ASID_MASK);
                        ehb();
                }
        }

        local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_arch_vcpu_load);

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        uint32_t cpu;

        local_irq_save(flags);

        cpu = smp_processor_id();

        vcpu->arch.preempt_entryhi = read_c0_entryhi();
        vcpu->arch.last_sched_cpu = cpu;

        if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
             ASID_VERSION_MASK)) {
                kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
                          cpu_context(cpu, current->mm));
                drop_mmu_context(current->mm, cpu);
        }
        write_c0_entryhi(cpu_asid(cpu, current->mm));
        ehb();

        local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_arch_vcpu_put);

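/*
 * Fetch the guest instruction at @opc. For guest mapped segments the host TLB
 * is primed from the guest TLB if necessary before reading; for guest KSEG0
 * the address is translated to a host physical address and read via CKSEG0.
 * Returns KVM_INVALID_INST on failure.
 */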
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned long paddr, flags, vpn2, asid;
        uint32_t inst;
        int index;

        if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
            KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
                local_irq_save(flags);
                index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
                if (index >= 0) {
                        inst = *(opc);
                } else {
                        vpn2 = (unsigned long) opc & VPN2_MASK;
                        asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
                        index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
                        if (index < 0) {
                                kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
                                        __func__, opc, vcpu, read_c0_entryhi());
                                kvm_mips_dump_host_tlbs();
                                local_irq_restore(flags);
                                return KVM_INVALID_INST;
                        }
                        kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
                                                             &vcpu->arch.
                                                             guest_tlb[index],
                                                             NULL, NULL);
                        inst = *(opc);
                }
                local_irq_restore(flags);
        } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
                paddr =
                    kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
                                                          (unsigned long) opc);
                inst = *(uint32_t *) CKSEG0ADDR(paddr);
        } else {
                kvm_err("%s: illegal address: %p\n", __func__, opc);
                return KVM_INVALID_INST;
        }

        return inst;
}
EXPORT_SYMBOL(kvm_get_inst);