linux/arch/powerpc/kvm/book3s_hv_rm_mmu.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/* For now use fixed-size 16MB page table */
#define HPT_ORDER       24
#define HPT_NPTEG       (1ul << (HPT_ORDER - 7))        /* 128B per pteg */
#define HPT_HASH_MASK   (HPT_NPTEG - 1)
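
/*
 * With HPT_ORDER = 24 the table is 2^24 bytes.  Each PTEG is 128
 * bytes and holds 8 HPTEs of 16 bytes each, so there are 2^17 PTEGs
 * and 2^20 HPTEs in total; hence the (HPT_NPTEG << 3) bound on
 * pte_index below, and (pte_index << 4) as the byte offset of an HPTE.
 */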

#define HPTE_V_HVLOCK   0x40UL

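/*
 * Try once to lock an HPTE by setting the software-use HPTE_V_HVLOCK
 * bit in its first doubleword, using a ldarx/stdcx. sequence.  Returns
 * nonzero on success; fails (returns 0) if any of @bits are already
 * set or if the store loses its reservation.  Callers that must take
 * the lock spin on this.
 */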
static inline long lock_hpte(unsigned long *hpte, unsigned long bits)
{
        unsigned long tmp, old;

        asm volatile("  ldarx   %0,0,%2\n"
                     "  and.    %1,%0,%3\n"
                     "  bne     2f\n"
                     "  ori     %0,%0,%4\n"
                     "  stdcx.  %0,0,%2\n"
                     "  beq+    2f\n"
                     "  li      %1,%3\n"
                     "2:        isync"
                     : "=&r" (tmp), "=&r" (old)
                     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
                     : "cc", "memory");
        return old == 0;
}

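/*
 * Real-mode handler for the H_ENTER hypercall: validate the
 * guest-supplied HPTE (page size, WIMG bits, logical page number),
 * substitute the real page address, and install the entry in a free
 * slot of the indicated PTEG (or in the exact slot if H_EXACT is set).
 * On success the index of the slot used is returned in GPR4.
 */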
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                    long pte_index, unsigned long pteh, unsigned long ptel)
{
        unsigned long porder;
        struct kvm *kvm = vcpu->kvm;
        unsigned long i, lpn, pa;
        unsigned long *hpte;

        /* only handle 4k, 64k and 16M pages for now */
        porder = 12;
        if (pteh & HPTE_V_LARGE) {
                if (cpu_has_feature(CPU_FTR_ARCH_206) &&
                    (ptel & 0xf000) == 0x1000) {
                        /* 64k page */
                        porder = 16;
                } else if ((ptel & 0xff000) == 0) {
                        /* 16M page */
                        porder = 24;
                        /* lowest AVA bit must be 0 for 16M pages */
                        if (pteh & 0x80)
                                return H_PARAMETER;
                } else
                        return H_PARAMETER;
        }
        lpn = (ptel & HPTE_R_RPN) >> kvm->arch.ram_porder;
        if (lpn >= kvm->arch.ram_npages || porder > kvm->arch.ram_porder)
                return H_PARAMETER;
        pa = kvm->arch.ram_pginfo[lpn].pfn << PAGE_SHIFT;
        if (!pa)
                return H_PARAMETER;
        /* Check WIMG */
        if ((ptel & HPTE_R_WIMG) != HPTE_R_M &&
            (ptel & HPTE_R_WIMG) != (HPTE_R_W | HPTE_R_I | HPTE_R_M))
                return H_PARAMETER;
        /* don't let the guest set HPTE_V_HVLOCK or the adjacent SW bit */
        pteh &= ~0x60UL;
        /* replace the guest's logical page number with the real address */
        ptel &= ~(HPTE_R_PP0 - kvm->arch.ram_psize);
        ptel |= pa;
        if (pte_index >= (HPT_NPTEG << 3))
                return H_PARAMETER;
        if (likely((flags & H_EXACT) == 0)) {
                pte_index &= ~7UL;
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                for (i = 0; ; ++i) {
                        if (i == 8)
                                return H_PTEG_FULL;
                        if ((*hpte & HPTE_V_VALID) == 0 &&
                            lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
                                break;
                        hpte += 2;
                }
        } else {
                i = 0;
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
                        return H_PTEG_FULL;
        }
        /* write dword 1 first, then dword 0 containing the valid bit */
        hpte[1] = ptel;
        eieio();
        hpte[0] = pteh;
        asm volatile("ptesync" : : : "memory");
        atomic_inc(&kvm->arch.ram_pginfo[lpn].refcnt);
        vcpu->arch.gpr[4] = pte_index + i;
        return H_SUCCESS;
}

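/*
 * Construct the RB operand for a tlbie/tlbiel instruction from an
 * HPTE: the abbreviated virtual address, the segment size (B field),
 * and the page size encoding (L and LP fields).  The low-order VA
 * bits not present in the AVA are recovered from the hash-table
 * index, xor'ed with the VSID taken from the AVA.
 */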
static unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
                                      unsigned long pte_index)
{
        unsigned long rb, va_low;

        rb = (v & ~0x7fUL) << 16;               /* AVA field */
        va_low = pte_index >> 3;
        if (v & HPTE_V_SECONDARY)
                va_low = ~va_low;
        /* xor vsid from AVA */
        if (!(v & HPTE_V_1TB_SEG))
                va_low ^= v >> 12;
        else
                va_low ^= v >> 24;
        va_low &= 0x7ff;
        if (v & HPTE_V_LARGE) {
                rb |= 1;                        /* L field */
                if (cpu_has_feature(CPU_FTR_ARCH_206) &&
                    (r & 0xff000)) {
                        /* non-16MB large page, must be 64k */
                        /* (masks depend on page size) */
                        rb |= 0x1000;           /* page encoding in LP field */
                        rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
                        rb |= (va_low & 0xfe);  /* AVAL field (P7 doesn't seem to care) */
                }
        } else {
                /* 4kB page */
                rb |= (va_low & 0x7ff) << 12;   /* remaining 11b of VA */
        }
        rb |= (v >> 54) & 0x300;                /* B field */
        return rb;
}

#define LOCK_TOKEN      (*(u32 *)(&get_paca()->lock_token))

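/*
 * Try once to take the global tlbie lock with a lwarx/stwcx. sequence,
 * storing the (nonzero) lock token from the paca on success.  The lock
 * serializes global TLB invalidations: only one tlbie sequence may be
 * in progress at a time.
 */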
static inline int try_lock_tlbie(unsigned int *lock)
{
        unsigned int tmp, old;
        unsigned int token = LOCK_TOKEN;

        asm volatile("1:lwarx   %1,0,%2\n"
                     "  cmpwi   cr0,%1,0\n"
                     "  bne     2f\n"
                     "  stwcx.  %3,0,%2\n"
                     "  bne-    1b\n"
                     "  isync\n"
                     "2:"
                     : "=&r" (tmp), "=&r" (old)
                     : "r" (lock), "r" (token)
                     : "cc", "memory");
        return old == 0;
}

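/*
 * Real-mode handler for H_REMOVE: if the HPTE at pte_index is valid
 * and satisfies the H_AVPN/H_ANDCOND conditions, return its contents
 * in GPR4/GPR5, clear it, and invalidate the corresponding TLB entry.
 * A global tlbie is used unless only one vcpu is online, in which
 * case a local tlbiel suffices.
 */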
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
                     unsigned long pte_index, unsigned long avpn,
                     unsigned long va)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte;
        unsigned long v, r, rb;

        if (pte_index >= (HPT_NPTEG << 3))
                return H_PARAMETER;
        hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        if ((hpte[0] & HPTE_V_VALID) == 0 ||
            ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
            ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
                hpte[0] &= ~HPTE_V_HVLOCK;
                return H_NOT_FOUND;
        }
        if (atomic_read(&kvm->online_vcpus) == 1)
                flags |= H_LOCAL;
        vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
        vcpu->arch.gpr[5] = r = hpte[1];
        rb = compute_tlbie_rb(v, r, pte_index);
        hpte[0] = 0;
        if (!(flags & H_LOCAL)) {
                while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                        cpu_relax();
                asm volatile("ptesync" : : : "memory");
                asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
                             : : "r" (rb), "r" (kvm->arch.lpid));
                asm volatile("ptesync" : : : "memory");
                kvm->arch.tlbie_lock = 0;
        } else {
                asm volatile("ptesync" : : : "memory");
                asm volatile("tlbiel %0" : : "r" (rb));
                asm volatile("ptesync" : : : "memory");
        }
        return H_SUCCESS;
}

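/*
 * Real-mode handler for H_BULK_REMOVE: process up to four remove
 * requests packed into GPR4-GPR11 as (control, match-value) pairs.
 * In each control word, bits 63:62 give the request type (1 = valid
 * request, 3 = end of list), bits 57:56 select the match condition
 * (absolute, andcond or AVPN), and the low 56 bits are the pte_index.
 * The result code, together with the R and C bits of the removed
 * HPTE, is written back into the top byte of each control word, and
 * all invalidations are batched into one tlbie/tlbiel sequence.
 */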
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *args = &vcpu->arch.gpr[4];
        unsigned long *hp, tlbrb[4];
        long int i, found;
        long int n_inval = 0;
        unsigned long flags, req, pte_index;
        long int local = 0;
        long int ret = H_SUCCESS;

        if (atomic_read(&kvm->online_vcpus) == 1)
                local = 1;
        for (i = 0; i < 4; ++i) {
                pte_index = args[i * 2];
                flags = pte_index >> 56;
                pte_index &= ((1ul << 56) - 1);
                req = flags >> 6;
                flags &= 3;
                if (req == 3)
                        break;
                if (req != 1 || flags == 3 ||
                    pte_index >= (HPT_NPTEG << 3)) {
                        /* parameter error */
                        args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
                        ret = H_PARAMETER;
                        break;
                }
                hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                while (!lock_hpte(hp, HPTE_V_HVLOCK))
                        cpu_relax();
                found = 0;
                if (hp[0] & HPTE_V_VALID) {
                        switch (flags & 3) {
                        case 0:         /* absolute */
                                found = 1;
                                break;
                        case 1:         /* andcond */
                                if (!(hp[0] & args[i * 2 + 1]))
                                        found = 1;
                                break;
                        case 2:         /* AVPN */
                                if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
                                        found = 1;
                                break;
                        }
                }
                if (!found) {
                        hp[0] &= ~HPTE_V_HVLOCK;
                        args[i * 2] = ((0x90 | flags) << 56) + pte_index;
                        continue;
                }
                /* insert R and C bits from PTE */
                flags |= (hp[1] >> 5) & 0x0c;
                args[i * 2] = ((0x80 | flags) << 56) + pte_index;
                tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
                hp[0] = 0;
        }
        if (n_inval == 0)
                return ret;

        if (!local) {
                while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                        cpu_relax();
                asm volatile("ptesync" : : : "memory");
                for (i = 0; i < n_inval; ++i)
                        asm volatile(PPC_TLBIE(%1,%0)
                                     : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
                asm volatile("eieio; tlbsync; ptesync" : : : "memory");
                kvm->arch.tlbie_lock = 0;
        } else {
                asm volatile("ptesync" : : : "memory");
                for (i = 0; i < n_inval; ++i)
                        asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
                asm volatile("ptesync" : : : "memory");
        }
        return ret;
}

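/*
 * Real-mode handler for H_PROTECT: update the protection bits (pp0,
 * pp, n and the storage keys) of an existing HPTE.  The entry is made
 * invalid and the old translation flushed from the TLB before the
 * updated HPTE is written back and revalidated.
 */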
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                      unsigned long pte_index, unsigned long avpn,
                      unsigned long va)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte;
        unsigned long v, r, rb;

        if (pte_index >= (HPT_NPTEG << 3))
                return H_PARAMETER;
        hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        if ((hpte[0] & HPTE_V_VALID) == 0 ||
            ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
                hpte[0] &= ~HPTE_V_HVLOCK;
                return H_NOT_FOUND;
        }
        if (atomic_read(&kvm->online_vcpus) == 1)
                flags |= H_LOCAL;
        v = hpte[0];
        r = hpte[1] & ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
                        HPTE_R_KEY_HI | HPTE_R_KEY_LO);
        r |= (flags << 55) & HPTE_R_PP0;
        r |= (flags << 48) & HPTE_R_KEY_HI;
        r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
        rb = compute_tlbie_rb(v, r, pte_index);
        hpte[0] = v & ~HPTE_V_VALID;
        if (!(flags & H_LOCAL)) {
                while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                        cpu_relax();
                asm volatile("ptesync" : : : "memory");
                asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
                             : : "r" (rb), "r" (kvm->arch.lpid));
                asm volatile("ptesync" : : : "memory");
                kvm->arch.tlbie_lock = 0;
        } else {
                asm volatile("ptesync" : : : "memory");
                asm volatile("tlbiel %0" : : "r" (rb));
                asm volatile("ptesync" : : : "memory");
        }
        hpte[1] = r;
        eieio();
        hpte[0] = v & ~HPTE_V_HVLOCK;
        asm volatile("ptesync" : : : "memory");
        return H_SUCCESS;
}

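/*
 * Translate a real address back to the guest logical address it was
 * mapped from, by a linear search of the per-page pfn array.  Returns
 * an all-ones RPN field if the page is not part of guest RAM.
 */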
static unsigned long reverse_xlate(struct kvm *kvm, unsigned long realaddr)
{
        long int i;
        unsigned long offset, rpn;

        offset = realaddr & (kvm->arch.ram_psize - 1);
        rpn = (realaddr - offset) >> PAGE_SHIFT;
        for (i = 0; i < kvm->arch.ram_npages; ++i)
                if (rpn == kvm->arch.ram_pginfo[i].pfn)
                        return (i << PAGE_SHIFT) + offset;
        return HPTE_R_RPN;      /* all 1s in the RPN field */
}

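/*
 * Real-mode handler for H_READ: return the contents of one HPTE (or
 * four consecutive HPTEs if H_READ_4 is set) in GPR4 onwards.  With
 * H_R_XLATE, the real address in each valid entry is converted back
 * to a guest logical address.
 */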
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                   unsigned long pte_index)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte, r;
        int i, n = 1;

        if (pte_index >= (HPT_NPTEG << 3))
                return H_PARAMETER;
        if (flags & H_READ_4) {
                pte_index &= ~3;
                n = 4;
        }
        for (i = 0; i < n; ++i, ++pte_index) {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                r = hpte[1];
                if ((flags & H_R_XLATE) && (hpte[0] & HPTE_V_VALID))
                        r = reverse_xlate(kvm, r & HPTE_R_RPN) |
                                (r & ~HPTE_R_RPN);
                vcpu->arch.gpr[4 + i * 2] = hpte[0];
                vcpu->arch.gpr[5 + i * 2] = r;
        }
        return H_SUCCESS;
}