linux/arch/powerpc/kvm/book3s_64_mmu_host.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "trace_pr.h"
#include "book3s.h"

#define PTE_SIZE 12

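/*
 * Drop one shadow HPTE: invalidate the host hash table entry that
 * backs this guest mapping.
 */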
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        mmu_hash_ops.hpte_invalidate(pte->slot, pte->host_vpn,
                                     pte->pagesize, pte->pagesize,
                                     MMU_SEGSIZE_256M, false);
}

/* We keep 512 gvsid->hvsid entries, hashing the guest VSID into the
 * array so we don't waste cycles on a linear search */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
        return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}


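/*
 * Look up the host VSID for a guest VSID.  create_sid_map() alternates
 * between the hashed slot and its mirror (SID_MAP_MASK - hash), so
 * probe both before giving up.
 */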
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        u16 sid_map_mask;

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        map = &to_book3s(vcpu)->sid_map[sid_map_mask];
        if (map->valid && (map->guest_vsid == gvsid)) {
                trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
                return map;
        }

        map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
        if (map->valid && (map->guest_vsid == gvsid)) {
                trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
                return map;
        }

        trace_kvm_book3s_slb_fail(sid_map_mask, gvsid);
        return NULL;
}

int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
                        bool iswrite)
{
        unsigned long vpn;
        kvm_pfn_t hpaddr;
        ulong hash, hpteg;
        u64 vsid;
        int ret;
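        /* 0x192 == HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWRW */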
        int rflags = 0x192;
        int vflags = 0;
        int attempt = 0;
        struct kvmppc_sid_map *map;
        int r = 0;
        int hpsize = MMU_PAGE_4K;
        bool writable;
        unsigned long mmu_seq;
        struct kvm *kvm = vcpu->kvm;
        struct hpte_cache *cpte;
        unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
        unsigned long pfn;

        /* used to check for invalidations in progress */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();

        /* Get host physical address for gpa */
        pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
        if (is_error_noslot_pfn(pfn)) {
                printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
                       orig_pte->raddr);
                r = -EINVAL;
                goto out;
        }
        hpaddr = pfn << PAGE_SHIFT;

        /* and write the mapping ea -> hpa into the pt */
        vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
        map = find_sid_vsid(vcpu, vsid);
        if (!map) {
                ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
                WARN_ON(ret < 0);
                map = find_sid_vsid(vcpu, vsid);
        }
        if (!map) {
                printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
                                vsid, orig_pte->eaddr);
                WARN_ON(true);
                r = -EINVAL;
                goto out;
        }

        vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);

        kvm_set_pfn_accessed(pfn);
        if (!orig_pte->may_write || !writable)
                rflags |= PP_RXRX;
        else {
                mark_page_dirty(vcpu->kvm, gfn);
                kvm_set_pfn_dirty(pfn);
        }

        if (!orig_pte->may_execute)
                rflags |= HPTE_R_N;
        else
                kvmppc_mmu_flush_icache(pfn);

        rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg;

        /*
         * Use 64K pages if possible; otherwise, on 64K page kernels,
         * we need to transfer 4 more bits from guest real to host real addr.
         */
        if (vsid & VSID_64K)
                hpsize = MMU_PAGE_64K;
        else
                hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);

        hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);

        cpte = kvmppc_mmu_hpte_cache_next(vcpu);

        spin_lock(&kvm->mmu_lock);
        if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
                r = -EAGAIN;
                goto out_unlock;
        }

map_again:
        hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

        /* In case we tried normal mapping already, let's nuke old entries */
        if (attempt > 1)
                if (mmu_hash_ops.hpte_remove(hpteg) < 0) {
                        r = -1;
                        goto out_unlock;
                }

        ret = mmu_hash_ops.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
                                       hpsize, hpsize, MMU_SEGSIZE_256M);

        if (ret == -1) {
                /* If we couldn't map a primary PTE, try a secondary */
                hash = ~hash;
                vflags ^= HPTE_V_SECONDARY;
                attempt++;
                goto map_again;
        } else if (ret < 0) {
                r = -EIO;
                goto out_unlock;
        } else {
                trace_kvm_book3s_64_mmu_map(rflags, hpteg,
                                            vpn, hpaddr, orig_pte);

                /*
                 * The mmu_hash_ops code may give us a secondary entry even
                 * though we asked for a primary. Fix up.
                 */
                if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
                        hash = ~hash;
                        hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
                }

                cpte->slot = hpteg + (ret & 7);
                cpte->host_vpn = vpn;
                cpte->pte = *orig_pte;
                cpte->pfn = pfn;
                cpte->pagesize = hpsize;

                kvmppc_mmu_hpte_cache_map(vcpu, cpte);
                cpte = NULL;
        }

out_unlock:
        spin_unlock(&kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        if (cpte)
                kvmppc_mmu_hpte_cache_free(cpte);

out:
        return r;
}

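/*
 * Flush the shadow PTEs for one guest page.  A 64K-segment page covers
 * sixteen 4K vpages, so mask off the low four vpage bits in that case.
 */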
void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        u64 mask = 0xfffffffffULL;
        u64 vsid;

        vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
        if (vsid & VSID_64K)
                mask = 0xffffffff0ULL;
        kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
}

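/*
 * Allocate a fresh host VSID for a guest VSID and install it in the
 * sid_map.  When the per-vcpu proto-VSID range runs dry, flush every
 * shadow PTE and segment and start over.
 */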
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
        unsigned long vsid_bits = VSID_BITS_65_256M;
        struct kvmppc_sid_map *map;
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        u16 sid_map_mask;
        static int backwards_map = 0;

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        /* Colliding guest VSIDs would otherwise always clobber the same
           slot, so alternate between the hashed slot and its mirror */

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        if (backwards_map)
                sid_map_mask = SID_MAP_MASK - sid_map_mask;

        map = &to_book3s(vcpu)->sid_map[sid_map_mask];

        /* Make sure we're taking the other map next time */
        backwards_map = !backwards_map;

        /* Uh-oh ... out of mappings. Let's flush! */
        if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
                vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
                memset(vcpu_book3s->sid_map, 0,
                       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
                kvmppc_mmu_pte_flush(vcpu, 0, 0);
                kvmppc_mmu_flush_segments(vcpu);
        }

        if (mmu_has_feature(MMU_FTR_68_BIT_VA))
                vsid_bits = VSID_BITS_256M;

        map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++,
                                       VSID_MULTIPLIER_256M, vsid_bits);

        map->guest_vsid = gvsid;
        map->valid = true;

        trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid);

        return map;
}

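/*
 * Pick a shadow SLB slot for this ESID: reuse a matching or invalidated
 * entry if there is one, otherwise grow slb_max, flushing all segments
 * first if the shadow SLB (at most 64 entries) is already full.
 */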
static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        int i;
        int max_slb_size = 64;
        int found_inval = -1;
        int r;

        /* Are we overwriting? */
        for (i = 0; i < svcpu->slb_max; i++) {
                if (!(svcpu->slb[i].esid & SLB_ESID_V))
                        found_inval = i;
                else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
                        r = i;
                        goto out;
                }
        }

        /* Found a spare entry that was invalidated before */
        if (found_inval >= 0) {
                r = found_inval;
                goto out;
        }

        /* No spare invalid entry, so create one */

        if (mmu_slb_size < 64)
                max_slb_size = mmu_slb_size;

        /* Overflowing -> purge */
        if ((svcpu->slb_max) == max_slb_size)
                kvmppc_mmu_flush_segments(vcpu);

        r = svcpu->slb_max;
        svcpu->slb_max++;

out:
        svcpu_put(svcpu);
        return r;
}

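/*
 * Install a shadow SLB entry for a guest effective address: translate
 * guest ESID -> guest VSID -> host VSID, then build the esid/vsid pair
 * the host SLB will actually use.
 */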
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        u64 esid = eaddr >> SID_SHIFT;
        u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
        u64 slb_vsid = SLB_VSID_USER;
        u64 gvsid;
        int slb_index;
        struct kvmppc_sid_map *map;
        int r = 0;

        slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

        if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
                /* Invalidate an entry */
                svcpu->slb[slb_index].esid = 0;
                r = -ENOENT;
                goto out;
        }

        map = find_sid_vsid(vcpu, gvsid);
        if (!map)
                map = create_sid_map(vcpu, gvsid);

        map->guest_esid = esid;

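        /* The VSID field of an SLB entry starts at bit 12 (SLB_VSID_SHIFT) */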
        slb_vsid |= (map->host_vsid << 12);
        slb_vsid &= ~SLB_VSID_KP;
        slb_esid |= slb_index;

#ifdef CONFIG_PPC_64K_PAGES
        /* Set host segment base page size to 64K if possible */
        if (gvsid & VSID_64K)
                slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
#endif

        svcpu->slb[slb_index].esid = slb_esid;
        svcpu->slb[slb_index].vsid = slb_vsid;

        trace_kvm_book3s_slbmte(slb_vsid, slb_esid);

out:
        svcpu_put(svcpu);
        return r;
}

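/* Invalidate any shadow SLB entries that map the given guest segment */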
void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        ulong seg_mask = -seg_size;
        int i;

        for (i = 0; i < svcpu->slb_max; i++) {
                if ((svcpu->slb[i].esid & SLB_ESID_V) &&
                    (svcpu->slb[i].esid & seg_mask) == ea) {
                        /* Invalidate this entry */
                        svcpu->slb[i].esid = 0;
                }
        }

        svcpu_put(svcpu);
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        svcpu->slb_max = 0;
        svcpu->slb[0].esid = 0;
        svcpu_put(svcpu);
}

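/* Tear down per-vcpu shadow MMU state: the HPTE cache and the host MM context */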
void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_hpte_destroy(vcpu);
        __destroy_context(to_book3s(vcpu)->context_id[0]);
}

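/*
 * Allocate a host MM context for this vcpu; its ID carves out the
 * per-vcpu proto-VSID range (2^ESID_BITS values) that create_sid_map()
 * hands out host VSIDs from.
 */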
int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int err;

        err = hash__alloc_context_id();
        if (err < 0)
                return -1;
        vcpu3s->context_id[0] = err;

        vcpu3s->proto_vsid_max = ((u64)(vcpu3s->context_id[0] + 1)
                                  << ESID_BITS) - 1;
        vcpu3s->proto_vsid_first = (u64)vcpu3s->context_id[0] << ESID_BITS;
        vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;

        kvmppc_mmu_hpte_init(vcpu);

        return 0;
}