linux/arch/powerpc/kvm/book3s_64_mmu_host.c
/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "trace_pr.h"
#include "book3s.h"

#define PTE_SIZE 12

void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	mmu_hash_ops.hpte_invalidate(pte->slot, pte->host_vpn,
				     pte->pagesize, pte->pagesize,
				     MMU_SEGSIZE_256M, false);
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}
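
/*
 * Editor's sketch (not part of the original file): assuming SID_MAP_BITS
 * is 9, so the 512-entry table above has SID_MAP_MASK = 0x1ff, the hash
 * XOR-folds eight 9-bit slices of the 64-bit gvsid into one table index.
 * For example:
 *
 *	gvsid = 0x000 -> slices 0,0,0,0,0,0,0,0         -> index 0x000
 *	gvsid = 0x201 -> slices 0,0,0,0,0,0,0x1,0x1     -> index 0x000 (collision)
 *	gvsid = 0x1ff -> slices 0,0,0,0,0,0,0,0x1ff     -> index 0x1ff
 *
 * Colliding guest VSIDs fall back to the mirror slot probed by
 * find_sid_vsid() below.
 */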


static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	trace_kvm_book3s_slb_fail(sid_map_mask, gvsid);
	return NULL;
}
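
/*
 * Editor's note (inferred from the code above and create_sid_map() below):
 * every gvsid has exactly two candidate slots, sid_map[h] and the mirror
 * slot sid_map[SID_MAP_MASK - h], where h = kvmppc_sid_hash(vcpu, gvsid).
 * create_sid_map() alternates between the two when installing new entries,
 * so a lookup needs at most these two probes instead of a table scan.
 */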

int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
			bool iswrite)
{
	unsigned long vpn;
	kvm_pfn_t hpaddr;
	ulong hash, hpteg;
	u64 vsid;
	int ret;
	int rflags = 0x192;
	int vflags = 0;
	int attempt = 0;
	struct kvmppc_sid_map *map;
	int r = 0;
	int hpsize = MMU_PAGE_4K;
	bool writable;
	unsigned long mmu_seq;
	struct kvm *kvm = vcpu->kvm;
	struct hpte_cache *cpte;
	unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
	unsigned long pfn;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();
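	/*
	 * Editor's note (the standard KVM MMU-notifier pattern): the
	 * sequence snapshot above pairs with the mmu_notifier_retry()
	 * check taken under kvm->mmu_lock further down.  If a host-side
	 * invalidation runs between the snapshot and that check, the
	 * sequence number changes and the fault is retried with -EAGAIN
	 * instead of installing a stale translation.
	 */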

	/* Get host physical address for gpa */
	pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
	if (is_error_noslot_pfn(pfn)) {
		printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
		       orig_pte->raddr);
		r = -EINVAL;
		goto out;
	}
	hpaddr = pfn << PAGE_SHIFT;

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
		WARN_ON(ret < 0);
		map = find_sid_vsid(vcpu, vsid);
	}
	if (!map) {
		printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
				vsid, orig_pte->eaddr);
		WARN_ON(true);
		r = -EINVAL;
		goto out;
	}

	vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);

	kvm_set_pfn_accessed(pfn);
	if (!orig_pte->may_write || !writable)
		rflags |= PP_RXRX;
	else {
		mark_page_dirty(vcpu->kvm, gfn);
		kvm_set_pfn_dirty(pfn);
	}

	if (!orig_pte->may_execute)
		rflags |= HPTE_R_N;
	else
		kvmppc_mmu_flush_icache(pfn);

	/*
	 * Use 64K pages if possible; otherwise, on 64K page kernels,
	 * we need to transfer 4 more bits from guest real to host real addr.
	 */
	if (vsid & VSID_64K)
		hpsize = MMU_PAGE_64K;
	else
		hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
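	/*
	 * Editor's worked example (assuming a 64K-page host, PAGE_SHIFT
	 * = 16): pfn << PAGE_SHIFT above yields a 64K-aligned hpaddr, but
	 * a 4K guest mapping must select one of the sixteen 4K chunks
	 * inside that 64K page.  (~0xfffULL & ~PAGE_MASK) == 0xf000, so
	 * e.g. raddr = 0x12345678 contributes 0x5000 and hpaddr then
	 * points at the correct 4K sub-page.  On a 4K-page host the mask
	 * is 0 and the line is a no-op.
	 */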

	hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);

	cpte = kvmppc_mmu_hpte_cache_next(vcpu);

	spin_lock(&kvm->mmu_lock);
	if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
		r = -EAGAIN;
		goto out_unlock;
	}

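	/*
	 * Editor's sketch of the retry scheme below (values are
	 * illustrative): each hash bucket has two candidate HPTE groups.
	 * With htab_hash_mask = 0xffff and hash = 0x1234, the primary
	 * group starts at HPTE index 0x1234 * 8; if every slot there is
	 * busy, hash = ~hash selects the secondary group at
	 * (~0x1234 & 0xffff) * 8 = 0xedcb * 8, with HPTE_V_SECONDARY set
	 * in the entry.  From the third attempt onwards, hpte_remove()
	 * evicts an existing entry to make room.
	 */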
map_again:
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	/* In case we tried normal mapping already, let's nuke old entries */
	if (attempt > 1)
		if (mmu_hash_ops.hpte_remove(hpteg) < 0) {
			r = -1;
			goto out_unlock;
		}

	ret = mmu_hash_ops.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
				       hpsize, hpsize, MMU_SEGSIZE_256M);

	if (ret < 0) {
		/* If we couldn't map a primary PTE, try a secondary */
		hash = ~hash;
		vflags ^= HPTE_V_SECONDARY;
		attempt++;
		goto map_again;
	} else {
		trace_kvm_book3s_64_mmu_map(rflags, hpteg,
					    vpn, hpaddr, orig_pte);

		/*
		 * The mmu_hash_ops code may give us a secondary entry even
		 * though we asked for a primary. Fix up.
		 */
		if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
			hash = ~hash;
			hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
		}

		cpte->slot = hpteg + (ret & 7);
		cpte->host_vpn = vpn;
		cpte->pte = *orig_pte;
		cpte->pfn = pfn;
		cpte->pagesize = hpsize;

		kvmppc_mmu_hpte_cache_map(vcpu, cpte);
		cpte = NULL;
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	if (cpte)
		kvmppc_mmu_hpte_cache_free(cpte);

out:
	return r;
}

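/*
 * Editor's note (hedged, based on the PR-mode HPTE cache semantics): the
 * mask passed to kvmppc_mmu_pte_vflush() selects which vpage bits must
 * match for a cached entry to be flushed.  Clearing the low 4 bits for
 * 64K segments (0xffffffff0ULL instead of 0xfffffffffULL) makes all
 * sixteen 4K-granule vpages of a 64K page match, so the whole 64K page
 * is flushed.
 */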
void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	u64 mask = 0xfffffffffULL;
	u64 vsid;

	vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
	if (vsid & VSID_64K)
		mask = 0xffffffff0ULL;
	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
}

static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	/* Colliding gvsids would otherwise always evict the same slot, so
	   alternate between the two candidate slots for new mappings */

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
		vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M);

	map->guest_vsid = gvsid;
	map->valid = true;

	trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid);

	return map;
}

static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int i;
	int max_slb_size = 64;
	int found_inval = -1;
	int r;

	/* Are we overwriting? */
	for (i = 0; i < svcpu->slb_max; i++) {
		if (!(svcpu->slb[i].esid & SLB_ESID_V))
			found_inval = i;
		else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
			r = i;
			goto out;
		}
	}

	/* Found a spare entry that was invalidated before */
	if (found_inval >= 0) {
		r = found_inval;
		goto out;
	}

	/* No spare invalid entry, so create one */

	if (mmu_slb_size < 64)
		max_slb_size = mmu_slb_size;

	/* Overflowing -> purge */
	if ((svcpu->slb_max) == max_slb_size)
		kvmppc_mmu_flush_segments(vcpu);

	r = svcpu->slb_max;
	svcpu->slb_max++;

out:
	svcpu_put(svcpu);
	return r;
}

int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u64 esid = eaddr >> SID_SHIFT;
	u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
	u64 slb_vsid = SLB_VSID_USER;
	u64 gvsid;
	int slb_index;
	struct kvmppc_sid_map *map;
	int r = 0;

	slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		svcpu->slb[slb_index].esid = 0;
		r = -ENOENT;
		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;

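	/*
	 * Editor's note on the encoding below (hedged): slbmte takes the
	 * VSID in a field starting at bit 12 of the VSID word, hence
	 * host_vsid << 12, and the SLB entry index in the low bits of the
	 * ESID word, hence slb_esid |= slb_index.
	 */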
	slb_vsid |= (map->host_vsid << 12);
	slb_vsid &= ~SLB_VSID_KP;
	slb_esid |= slb_index;

#ifdef CONFIG_PPC_64K_PAGES
	/* Set host segment base page size to 64K if possible */
	if (gvsid & VSID_64K)
		slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
#endif

	svcpu->slb[slb_index].esid = slb_esid;
	svcpu->slb[slb_index].vsid = slb_vsid;

	trace_kvm_book3s_slbmte(slb_vsid, slb_esid);

out:
	svcpu_put(svcpu);
	return r;
}

void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong seg_mask = -seg_size;
	int i;

	for (i = 0; i < svcpu->slb_max; i++) {
		if ((svcpu->slb[i].esid & SLB_ESID_V) &&
		    (svcpu->slb[i].esid & seg_mask) == ea) {
			/* Invalidate this entry */
			svcpu->slb[i].esid = 0;
		}
	}

	svcpu_put(svcpu);
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->slb_max = 0;
	svcpu->slb[0].esid = 0;
	svcpu_put(svcpu);
}

void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_hpte_destroy(vcpu);
	__destroy_context(to_book3s(vcpu)->context_id[0]);
}

int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;

	err = __init_new_context();
	if (err < 0)
		return -1;
	vcpu3s->context_id[0] = err;

	vcpu3s->proto_vsid_max = ((u64)(vcpu3s->context_id[0] + 1)
				  << ESID_BITS) - 1;
	vcpu3s->proto_vsid_first = (u64)vcpu3s->context_id[0] << ESID_BITS;
	vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
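	/*
	 * Editor's worked example (assuming ESID_BITS = 18, as on hosts
	 * with the 68-bit VA layout): context_id = 5 gives this vcpu the
	 * private proto-VSID window [5 << 18, (6 << 18) - 1], i.e. 2^18
	 * values that create_sid_map() hands out before it must wrap
	 * around and flush all shadow state.
	 */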

	kvmppc_mmu_hpte_init(vcpu);

	return 0;
}