// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 */

#include <linux/kvm_host.h>
#include <linux/pkeys.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "trace_pr.h"
#include "book3s.h"

#define PTE_SIZE 12

void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        mmu_hash_ops.hpte_invalidate(pte->slot, pte->host_vpn,
                                     pte->pagesize, pte->pagesize,
                                     MMU_SEGSIZE_256M, false);
}

/*
 * We keep 512 gvsid->hvsid entries, mapping the guest ones to the array
 * using a hash, so we don't waste cycles on looping.
 */
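/*
 * Note (editorial, not from the original source): assuming SID_MAP_BITS is 9
 * and SID_MAP_MASK is 0x1ff (consistent with the 512-entry table mentioned
 * above), the hash XOR-folds every 9-bit slice of the 64-bit guest VSID into
 * one 9-bit index, so each VSID bit influences the chosen slot. For example,
 * gvsid 0x1234 folds to (0x1234 >> 9) ^ (0x1234 & 0x1ff) = 0x9 ^ 0x34 = 0x3d,
 * all higher slices being zero.
 */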
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
        return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}

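/*
 * Note (editorial): the lookup probes two slots -- the hashed index and its
 * mirror at SID_MAP_MASK - hash -- matching the alternating placement done
 * by create_sid_map() below. If neither slot holds the guest VSID, the
 * caller has to create a fresh mapping.
 */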
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        u16 sid_map_mask;

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        map = &to_book3s(vcpu)->sid_map[sid_map_mask];
        if (map->valid && (map->guest_vsid == gvsid)) {
                trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
                return map;
        }

        map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
        if (map->valid && (map->guest_vsid == gvsid)) {
                trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
                return map;
        }

        trace_kvm_book3s_slb_fail(sid_map_mask, gvsid);
        return NULL;
}

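/*
 * Note (editorial): kvmppc_mmu_map_page() shadows one guest page in the host
 * hash page table: translate the guest real address to a host pfn, resolve
 * the guest segment to a shadow VSID, build the virtual page number, then
 * insert an HPTE -- retrying on the secondary hash and evicting an old entry
 * if both hash groups are full.
 */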
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
                        bool iswrite)
{
        unsigned long vpn;
        kvm_pfn_t hpaddr;
        ulong hash, hpteg;
        u64 vsid;
        int ret;
        int rflags = 0x192;     /* editorial note: HPTE_R_R | HPTE_R_C | HPTE_R_M | pp=0b10 */
        int vflags = 0;
        int attempt = 0;
        struct kvmppc_sid_map *map;
        int r = 0;
        int hpsize = MMU_PAGE_4K;
        bool writable;
        unsigned long mmu_seq;
        struct kvm *kvm = vcpu->kvm;
        struct hpte_cache *cpte;
        unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
        unsigned long pfn;

        /* used to check for invalidations in progress */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();
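        /*
         * Note (editorial): this is the usual mmu_notifier snapshot pattern:
         * sample the sequence count before looking up the pfn, then recheck
         * it under kvm->mmu_lock (mmu_notifier_retry() below) and back off
         * with -EAGAIN if an invalidation ran in between.
         */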

        /* Get host physical address for gpa */
        pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
        if (is_error_noslot_pfn(pfn)) {
                printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
                       orig_pte->raddr);
                r = -EINVAL;
                goto out;
        }
        hpaddr = pfn << PAGE_SHIFT;

        /* and write the mapping ea -> hpa into the pt */
        vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
        map = find_sid_vsid(vcpu, vsid);
        if (!map) {
                ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
                WARN_ON(ret < 0);
                map = find_sid_vsid(vcpu, vsid);
        }
        if (!map) {
                printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
                vsid, orig_pte->eaddr);
                WARN_ON(true);
                r = -EINVAL;
                goto out;
        }

        vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);

        kvm_set_pfn_accessed(pfn);
        if (!orig_pte->may_write || !writable) {
                rflags |= PP_RXRX;
        } else {
                mark_page_dirty(vcpu->kvm, gfn);
                kvm_set_pfn_dirty(pfn);
        }

        if (!orig_pte->may_execute)
                rflags |= HPTE_R_N;
        else
                kvmppc_mmu_flush_icache(pfn);

        rflags |= pte_to_hpte_pkey_bits(0, HPTE_USE_KERNEL_KEY);
        rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg;

        /*
         * Use 64K pages if possible; otherwise, on 64K page kernels,
         * we need to transfer 4 more bits from guest real to host real addr.
         */
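        /*
         * Note (editorial, assuming a 64K-page host kernel): a 4K guest page
         * must land at the right 4K offset inside the 64K host page, so bits
         * 12-15 of the guest real address -- selected by
         * ~0xfffULL & ~PAGE_MASK -- are copied into hpaddr below.
         */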
        if (vsid & VSID_64K)
                hpsize = MMU_PAGE_64K;
        else
                hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);

        hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);

        cpte = kvmppc_mmu_hpte_cache_next(vcpu);

        spin_lock(&kvm->mmu_lock);
        if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
                r = -EAGAIN;
                goto out_unlock;
        }

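        /*
         * Note (editorial): each hash bucket holds HPTES_PER_GROUP slots.
         * The first attempt goes to the primary group; on failure the hash
         * is inverted to try the secondary group (HPTE_V_SECONDARY), and
         * from the third attempt on an existing entry is evicted with
         * hpte_remove() before inserting.
         */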
map_again:
        hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

        /* In case we tried normal mapping already, let's nuke old entries */
        if (attempt > 1 && mmu_hash_ops.hpte_remove(hpteg) < 0) {
                r = -1;
                goto out_unlock;
        }

        ret = mmu_hash_ops.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
                                       hpsize, hpsize, MMU_SEGSIZE_256M);

        if (ret == -1) {
                /* If we couldn't map a primary PTE, try a secondary */
                hash = ~hash;
                vflags ^= HPTE_V_SECONDARY;
                attempt++;
                goto map_again;
        } else if (ret < 0) {
                r = -EIO;
                goto out_unlock;
        } else {
                trace_kvm_book3s_64_mmu_map(rflags, hpteg,
                                            vpn, hpaddr, orig_pte);

                /*
                 * The mmu_hash_ops code may give us a secondary entry even
                 * though we asked for a primary. Fix up.
                 */
                if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
                        hash = ~hash;
                        hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
                }

                cpte->slot = hpteg + (ret & 7);
                cpte->host_vpn = vpn;
                cpte->pte = *orig_pte;
                cpte->pfn = pfn;
                cpte->pagesize = hpsize;

                kvmppc_mmu_hpte_cache_map(vcpu, cpte);
                cpte = NULL;
        }

out_unlock:
        spin_unlock(&kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        if (cpte)
                kvmppc_mmu_hpte_cache_free(cpte);

out:
        return r;
}

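/*
 * Note (editorial): flushing by guest vpage with an all-ones mask removes
 * the shadow PTEs of a single 4K page; for a 64K-backed segment the low
 * four vpage bits are masked off (0xffffffff0ULL) so all sub-pages of the
 * 64K page are dropped at once.
 */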
void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        u64 mask = 0xfffffffffULL;
        u64 vsid;

        vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
        if (vsid & VSID_64K)
                mask = 0xffffffff0ULL;
        kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
}

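/*
 * Note (editorial): create_sid_map() allocates a shadow VSID for a guest
 * VSID. Placement alternates between the forward slot and its mirror (see
 * find_sid_vsid() above). Host VSIDs are drawn from this vcpu's private
 * proto-VSID range; once it is exhausted, all shadow state is flushed and
 * the range is recycled from the start.
 */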
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
        unsigned long vsid_bits = VSID_BITS_65_256M;
        struct kvmppc_sid_map *map;
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        u16 sid_map_mask;
        static int backwards_map = 0;

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        /*
         * Colliding guest VSIDs would otherwise keep evicting each other
         * from the same slot, so alternate between the forward slot and
         * its mirror.
         */

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        if (backwards_map)
                sid_map_mask = SID_MAP_MASK - sid_map_mask;

        map = &to_book3s(vcpu)->sid_map[sid_map_mask];

        /* Make sure we're taking the other map next time */
        backwards_map = !backwards_map;

        /* Uh-oh ... out of mappings. Let's flush! */
        if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
                vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
                memset(vcpu_book3s->sid_map, 0,
                       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
                kvmppc_mmu_pte_flush(vcpu, 0, 0);
                kvmppc_mmu_flush_segments(vcpu);
        }

        if (mmu_has_feature(MMU_FTR_68_BIT_VA))
                vsid_bits = VSID_BITS_256M;

        map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++,
                                       VSID_MULTIPLIER_256M, vsid_bits);

        map->guest_vsid = gvsid;
        map->valid = true;

        trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid);

        return map;
}

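/*
 * Note (editorial): pick a shadow SLB slot for a new segment -- reuse the
 * slot if the ESID is already present, otherwise take a previously
 * invalidated slot, otherwise append; if the shadow SLB is full, flush all
 * segments and start over at slot 0.
 */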
static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        int i;
        int max_slb_size = 64;
        int found_inval = -1;
        int r;

        /* Are we overwriting? */
        for (i = 0; i < svcpu->slb_max; i++) {
                if (!(svcpu->slb[i].esid & SLB_ESID_V)) {
                        found_inval = i;
                } else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
                        r = i;
                        goto out;
                }
        }

        /* Found a spare entry that was invalidated before */
        if (found_inval >= 0) {
                r = found_inval;
                goto out;
        }

        /* No spare invalid entry, so create one */

        if (mmu_slb_size < 64)
                max_slb_size = mmu_slb_size;

        /* Overflowing -> purge */
        if (svcpu->slb_max == max_slb_size)
                kvmppc_mmu_flush_segments(vcpu);

        r = svcpu->slb_max;
        svcpu->slb_max++;

out:
        svcpu_put(svcpu);
        return r;
}

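/*
 * Note (editorial): build the shadow SLB entry for a guest effective
 * segment. The shift by 12 places the host VSID into the SLBE's VSID field
 * (SLB_VSID_SHIFT), and the slot number is folded into the ESID word as the
 * architected index field.
 */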
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        u64 esid = eaddr >> SID_SHIFT;
        u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
        u64 slb_vsid = SLB_VSID_USER;
        u64 gvsid;
        int slb_index;
        struct kvmppc_sid_map *map;
        int r = 0;

        slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

        if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
                /* Invalidate an entry */
                svcpu->slb[slb_index].esid = 0;
                r = -ENOENT;
                goto out;
        }

        map = find_sid_vsid(vcpu, gvsid);
        if (!map)
                map = create_sid_map(vcpu, gvsid);

        map->guest_esid = esid;

        slb_vsid |= (map->host_vsid << 12);
        slb_vsid &= ~SLB_VSID_KP;
        slb_esid |= slb_index;

#ifdef CONFIG_PPC_64K_PAGES
        /* Set host segment base page size to 64K if possible */
        if (gvsid & VSID_64K)
                slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
#endif

        svcpu->slb[slb_index].esid = slb_esid;
        svcpu->slb[slb_index].vsid = slb_vsid;

        trace_kvm_book3s_slbmte(slb_vsid, slb_esid);

out:
        svcpu_put(svcpu);
        return r;
}

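/*
 * Note (editorial): seg_mask = -seg_size relies on seg_size being a power
 * of two; two's-complement negation yields ones in exactly the bits above
 * the segment offset, e.g. -0x10000000 (256M) = 0xfffffffff0000000.
 */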
void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        ulong seg_mask = -seg_size;
        int i;

        for (i = 0; i < svcpu->slb_max; i++) {
                if ((svcpu->slb[i].esid & SLB_ESID_V) &&
                    (svcpu->slb[i].esid & seg_mask) == ea) {
                        /* Invalidate this entry */
                        svcpu->slb[i].esid = 0;
                }
        }

        svcpu_put(svcpu);
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

        svcpu->slb_max = 0;
        svcpu->slb[0].esid = 0;
        svcpu_put(svcpu);
}

void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_hpte_destroy(vcpu);
        __destroy_context(to_book3s(vcpu)->context_id[0]);
}

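/*
 * Note (editorial): each host MMU context id provides 2^ESID_BITS
 * proto-VSIDs, so the per-vcpu range below is
 * [context_id << ESID_BITS, ((context_id + 1) << ESID_BITS) - 1];
 * create_sid_map() hands these out and turns them into real VSIDs with
 * vsid_scramble().
 */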
int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int err;

        err = hash__alloc_context_id();
        if (err < 0)
                return -1;
        vcpu3s->context_id[0] = err;

        vcpu3s->proto_vsid_max = ((u64)(vcpu3s->context_id[0] + 1)
                                  << ESID_BITS) - 1;
        vcpu3s->proto_vsid_first = (u64)vcpu3s->context_id[0] << ESID_BITS;
        vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;

        kvmppc_mmu_hpte_init(vcpu);

        return 0;
}