arch/powerpc/kvm/book3s_mmu_hpte.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#include "trace_pr.h"

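/*
 * Shadow PTEs describe 4 KiB pages, so PTE_SIZE is the page shift
 * (1 << 12): the hash functions below shift or mask the page offset
 * away before hashing, e.g. eaddr 0x12345678 hashes page index 0x12345.
 */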
#define PTE_SIZE        12

static struct kmem_cache *hpte_cache;

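/*
 * Each cached shadow PTE is linked into several hash chains at once,
 * keyed by different bits of its effective address and guest virtual
 * page, so every flavour of flush below can walk a short chain of
 * candidates instead of scanning the whole cache.
 */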
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
        return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
{
        return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
                       HPTEG_HASH_BITS_PTE_LONG);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
        return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
        return hash_64((vpage & 0xffffff000ULL) >> 12,
                       HPTEG_HASH_BITS_VPTE_LONG);
}

#ifdef CONFIG_PPC_BOOK3S_64
static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage)
{
        return hash_64((vpage & 0xffffffff0ULL) >> 4,
                       HPTEG_HASH_BITS_VPTE_64K);
}
#endif

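/*
 * Register a freshly mapped shadow PTE on all lookup chains. Writers
 * are serialized by mmu_lock; lookups walk the chains locklessly
 * under RCU, hence the _rcu list primitives.
 */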
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        u64 index;
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

        trace_kvm_book3s_mmu_map(pte);

        spin_lock(&vcpu3s->mmu_lock);

        /* Add to ePTE list */
        index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
        hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);

        /* Add to ePTE_long list */
        index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
        hlist_add_head_rcu(&pte->list_pte_long,
                           &vcpu3s->hpte_hash_pte_long[index]);

        /* Add to vPTE list */
        index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
        hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);

        /* Add to vPTE_long list */
        index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
        hlist_add_head_rcu(&pte->list_vpte_long,
                           &vcpu3s->hpte_hash_vpte_long[index]);

#ifdef CONFIG_PPC_BOOK3S_64
        /* Add to vPTE_64k list */
        index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage);
        hlist_add_head_rcu(&pte->list_vpte_64k,
                           &vcpu3s->hpte_hash_vpte_64k[index]);
#endif

        vcpu3s->hpte_cache_count++;

        spin_unlock(&vcpu3s->mmu_lock);
}

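/*
 * Invalidation unlinks an entry under mmu_lock but defers the actual
 * free to an RCU grace period, so lockless chain walkers never touch
 * freed memory. The hlist_unhashed() check catches a racing
 * invalidation that unhashed the entry first.
 */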
static void free_pte_rcu(struct rcu_head *head)
{
        struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
        kmem_cache_free(hpte_cache, pte);
}

static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

        trace_kvm_book3s_mmu_invalidate(pte);

        /* Different for 32 and 64 bit */
        kvmppc_mmu_invalidate_pte(vcpu, pte);

        spin_lock(&vcpu3s->mmu_lock);

        /* pte already invalidated in between? */
        if (hlist_unhashed(&pte->list_pte)) {
                spin_unlock(&vcpu3s->mmu_lock);
                return;
        }

        hlist_del_init_rcu(&pte->list_pte);
        hlist_del_init_rcu(&pte->list_pte_long);
        hlist_del_init_rcu(&pte->list_vpte);
        hlist_del_init_rcu(&pte->list_vpte_long);
#ifdef CONFIG_PPC_BOOK3S_64
        hlist_del_init_rcu(&pte->list_vpte_64k);
#endif
        vcpu3s->hpte_cache_count--;

        spin_unlock(&vcpu3s->mmu_lock);

        call_rcu(&pte->rcu_head, free_pte_rcu);
}

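/*
 * Every entry is on some vPTE_long chain, so walking that whole table
 * visits the entire cache exactly once.
 */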
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hpte_cache *pte;
        int i;

        rcu_read_lock();

        for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
                struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

                hlist_for_each_entry_rcu(pte, list, list_vpte_long)
                        invalidate_pte(vcpu, pte);
        }

        rcu_read_unlock();
}

static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hlist_head *list;
        struct hpte_cache *pte;

        /* Find the list of entries in the map */
        list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

        rcu_read_lock();

        /* Check the list for matching entries and invalidate */
        hlist_for_each_entry_rcu(pte, list, list_pte)
                if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
                        invalidate_pte(vcpu, pte);

        rcu_read_unlock();
}

static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hlist_head *list;
        struct hpte_cache *pte;

        /* Find the list of entries in the map */
        list = &vcpu3s->hpte_hash_pte_long[
                        kvmppc_mmu_hash_pte_long(guest_ea)];

        rcu_read_lock();

        /* Check the list for matching entries and invalidate */
        hlist_for_each_entry_rcu(pte, list, list_pte_long)
                if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
                        invalidate_pte(vcpu, pte);

        rcu_read_unlock();
}

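/*
 * Dispatch an effective-address flush to whichever hash table is
 * keyed the way the caller's mask expects; a mask of 0 means "flush
 * everything". Unknown masks are a caller bug and trigger a WARN.
 */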
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
        trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask);
        guest_ea &= ea_mask;

        switch (ea_mask) {
        case ~0xfffUL:
                kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
                break;
        case 0x0ffff000:
                kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
                break;
        case 0:
                /* Doing a complete flush -> start from scratch */
                kvmppc_mmu_pte_flush_all(vcpu);
                break;
        default:
                WARN_ON(1);
                break;
        }
}

/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hlist_head *list;
        struct hpte_cache *pte;
        u64 vp_mask = 0xfffffffffULL;

        list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

        rcu_read_lock();

        /* Check the list for matching entries and invalidate */
        hlist_for_each_entry_rcu(pte, list, list_vpte)
                if ((pte->pte.vpage & vp_mask) == guest_vp)
                        invalidate_pte(vcpu, pte);

        rcu_read_unlock();
}

#ifdef CONFIG_PPC_BOOK3S_64
/* Flush with mask 0xffffffff0 */
static void kvmppc_mmu_pte_vflush_64k(struct kvm_vcpu *vcpu, u64 guest_vp)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hlist_head *list;
        struct hpte_cache *pte;
        u64 vp_mask = 0xffffffff0ULL;

        list = &vcpu3s->hpte_hash_vpte_64k[
                kvmppc_mmu_hash_vpte_64k(guest_vp)];

        rcu_read_lock();

        /* Check the list for matching entries and invalidate */
        hlist_for_each_entry_rcu(pte, list, list_vpte_64k)
                if ((pte->pte.vpage & vp_mask) == guest_vp)
                        invalidate_pte(vcpu, pte);

        rcu_read_unlock();
}
#endif

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hlist_head *list;
        struct hpte_cache *pte;
        u64 vp_mask = 0xffffff000ULL;

        list = &vcpu3s->hpte_hash_vpte_long[
                kvmppc_mmu_hash_vpte_long(guest_vp)];

        rcu_read_lock();

        /* Check the list for matching entries and invalidate */
        hlist_for_each_entry_rcu(pte, list, list_vpte_long)
                if ((pte->pte.vpage & vp_mask) == guest_vp)
                        invalidate_pte(vcpu, pte);

        rcu_read_unlock();
}

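/*
 * Same dispatch as kvmppc_mmu_pte_flush(), but keyed on the guest
 * virtual page number rather than the effective address.
 */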
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
        trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask);
        guest_vp &= vp_mask;

        switch (vp_mask) {
        case 0xfffffffffULL:
                kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
                break;
#ifdef CONFIG_PPC_BOOK3S_64
        case 0xffffffff0ULL:
                kvmppc_mmu_pte_vflush_64k(vcpu, guest_vp);
                break;
#endif
        case 0xffffff000ULL:
                kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
                break;
        default:
                WARN_ON(1);
                return;
        }
}

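/*
 * No hash table is keyed by real address, so a physical-range flush
 * has to scan the whole cache; like kvmppc_mmu_pte_flush_all(), it
 * does so via the vPTE_long table.
 */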
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hpte_cache *pte;
        int i;

        trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end);

        rcu_read_lock();

        for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
                struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

                hlist_for_each_entry_rcu(pte, list, list_vpte_long)
                        if ((pte->pte.raddr >= pa_start) &&
                            (pte->pte.raddr < pa_end))
                                invalidate_pte(vcpu, pte);
        }

        rcu_read_unlock();
}

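/*
 * Hand out a zeroed entry for a new mapping. Once the cache reaches
 * HPTEG_CACHE_NUM entries it is flushed wholesale before allocating;
 * callers still need to handle a NULL return from the allocator.
 */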
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hpte_cache *pte;

        if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
                kvmppc_mmu_pte_flush_all(vcpu);

        pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);

        return pte;
}

void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte)
{
        kmem_cache_free(hpte_cache, pte);
}

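/* Tearing down a vcpu amounts to a full flush: mask 0 selects flush_all. */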
void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_pte_flush(vcpu, 0, 0);
}

static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
        int i;

        for (i = 0; i < len; i++)
                INIT_HLIST_HEAD(&hash_list[i]);
}

int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

        /* init hpte lookup hashes */
        kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte,
                                  ARRAY_SIZE(vcpu3s->hpte_hash_pte));
        kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long,
                                  ARRAY_SIZE(vcpu3s->hpte_hash_pte_long));
        kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte,
                                  ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
        kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
                                  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
#ifdef CONFIG_PPC_BOOK3S_64
        kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_64k,
                                  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_64k));
#endif

        spin_lock_init(&vcpu3s->mmu_lock);

        return 0;
}

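/*
 * Module-level setup/teardown of the slab cache backing all
 * hpte_cache allocations.
 */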
int kvmppc_mmu_hpte_sysinit(void)
{
        /* init hpte slab cache; bail out if the cache cannot be created */
        hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
                                       sizeof(struct hpte_cache), 0, NULL);
        if (!hpte_cache)
                return -ENOMEM;

        return 0;
}

void kvmppc_mmu_hpte_sysexit(void)
{
        kmem_cache_destroy(hpte_cache);
}