linux/arch/powerpc/kvm/book3s_pr_papr.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011. Freescale Inc. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 *
 * Hypercall handling for running PAPR guests in PR KVM on Book 3S
 * processors.
 */

#include <linux/anon_inodes.h>

#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

#define HPTE_SIZE       16              /* bytes per HPT entry */

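/*
 * Translate a guest HPTE index into the address of its PTEG in the
 * guest hash table, which lives in the VMM's address space (hence the
 * copy_{from,to}_user() accessors below).  The guest's SDR1 supplies
 * the table origin (upper bits) and HTABSIZE (low 5 bits); the index
 * is scaled by HPTE_SIZE and masked so it cannot reach outside the
 * table.  E.g. with HTABSIZE = 0 (2^11 PTEGs of 128 bytes, a 256 KiB
 * table) the mask below evaluates to 0x3fff0.
 */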
static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        unsigned long pteg_addr;

        pte_index <<= 4;
        pte_index &= ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1) << 7 | 0x70;
        pteg_addr = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
        pteg_addr |= pte_index;

        return pteg_addr;
}

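/*
 * H_ENTER: install an HPTE in the guest hash table.  r4 = flags,
 * r5 = PTE index, r6/r7 = the two doublewords of the new HPTE.
 * With H_EXACT the low three bits of r5 select the slot; otherwise
 * the first invalid slot in the PTEG is used.  On success the slot
 * actually used is returned in r4.
 */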
static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
{
        long flags = kvmppc_get_gpr(vcpu, 4);
        long pte_index = kvmppc_get_gpr(vcpu, 5);
        __be64 pteg[2 * 8];
        __be64 *hpte;
        unsigned long pteg_addr, i;
        long int ret;

        i = pte_index & 7;
        pte_index &= ~7UL;
        pteg_addr = get_pteg_addr(vcpu, pte_index);

        mutex_lock(&vcpu->kvm->arch.hpt_mutex);
        ret = H_FUNCTION;
        if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)))
                goto done;
        hpte = pteg;

        ret = H_PTEG_FULL;
        if (likely((flags & H_EXACT) == 0)) {
                for (i = 0; ; ++i) {
                        if (i == 8)
                                goto done;
                        if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0)
                                break;
                        hpte += 2;
                }
        } else {
                hpte += i * 2;
                /* HPTEs are big-endian; swap before testing the valid bit */
                if (be64_to_cpu(*hpte) & HPTE_V_VALID)
                        goto done;
        }

        hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
        hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
        pteg_addr += i * HPTE_SIZE;
        ret = H_FUNCTION;
        if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE))
                goto done;
        kvmppc_set_gpr(vcpu, 4, pte_index | i);
        ret = H_SUCCESS;

 done:
        mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
        kvmppc_set_gpr(vcpu, 3, ret);

        return EMULATE_DONE;
}

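/*
 * H_REMOVE: invalidate one HPTE.  r4 = flags, r5 = PTE index, r6 =
 * AVPN for the optional H_AVPN/H_ANDCOND match.  On success the
 * removed HPTE is returned in r4/r5 and the stale translation is
 * flushed through the vcpu's mmu.tlbie hook.
 */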
static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
{
        unsigned long flags = kvmppc_get_gpr(vcpu, 4);
        unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
        unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
        unsigned long v = 0, pteg, rb;
        unsigned long pte[2];
        long int ret;

        pteg = get_pteg_addr(vcpu, pte_index);
        mutex_lock(&vcpu->kvm->arch.hpt_mutex);
        ret = H_FUNCTION;
        if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
                goto done;
        pte[0] = be64_to_cpu((__force __be64)pte[0]);
        pte[1] = be64_to_cpu((__force __be64)pte[1]);

        ret = H_NOT_FOUND;
        if ((pte[0] & HPTE_V_VALID) == 0 ||
            ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) ||
            ((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
                goto done;

        ret = H_FUNCTION;
        if (copy_to_user((void __user *)pteg, &v, sizeof(v)))
                goto done;

        rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
        vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);

        ret = H_SUCCESS;
        kvmppc_set_gpr(vcpu, 4, pte[0]);
        kvmppc_set_gpr(vcpu, 5, pte[1]);

 done:
        mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
        kvmppc_set_gpr(vcpu, 3, ret);

        return EMULATE_DONE;
}

/* Request defs for kvmppc_h_pr_bulk_remove() */
#define H_BULK_REMOVE_TYPE             0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST        0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE       0x8000000000000000ULL
#define   H_BULK_REMOVE_END            0xc000000000000000ULL
#define H_BULK_REMOVE_CODE             0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS        0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND      0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM           0x2000000000000000ULL
#define   H_BULK_REMOVE_HW             0x3000000000000000ULL
#define H_BULK_REMOVE_RC               0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS            0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE       0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND        0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN           0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX             0x00ffffffffffffffULL
#define H_BULK_REMOVE_MAX_BATCH        4

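/*
 * H_BULK_REMOVE: process up to H_BULK_REMOVE_MAX_BATCH (tsh, tsl)
 * request pairs, starting in r4/r5.  Each tsh word encodes the
 * request type, flags and PTE index; the per-entry response code is
 * written back into the same GPR, and r3 carries the overall status.
 */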
static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
{
        int i;
        int paramnr = 4;
        int ret = H_SUCCESS;

        mutex_lock(&vcpu->kvm->arch.hpt_mutex);
        for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
                unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
                unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
                unsigned long pteg, rb, flags;
                unsigned long pte[2];
                unsigned long v = 0;

                if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
                        break; /* Exit success */
                } else if ((tsh & H_BULK_REMOVE_TYPE) !=
                           H_BULK_REMOVE_REQUEST) {
                        ret = H_PARAMETER;
                        break; /* Exit fail */
                }

                tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
                tsh |= H_BULK_REMOVE_RESPONSE;

                if ((tsh & H_BULK_REMOVE_ANDCOND) &&
                    (tsh & H_BULK_REMOVE_AVPN)) {
                        tsh |= H_BULK_REMOVE_PARM;
                        kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
                        ret = H_PARAMETER;
                        break; /* Exit fail */
                }

                pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
                if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) {
                        ret = H_FUNCTION;
                        break;
                }
                pte[0] = be64_to_cpu((__force __be64)pte[0]);
                pte[1] = be64_to_cpu((__force __be64)pte[1]);

                /* tsl = AVPN */
                flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;

                if ((pte[0] & HPTE_V_VALID) == 0 ||
                    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) ||
                    ((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) {
                        tsh |= H_BULK_REMOVE_NOT_FOUND;
                } else {
                        /* Splat the pteg in (userland) hpt */
                        if (copy_to_user((void __user *)pteg, &v, sizeof(v))) {
                                ret = H_FUNCTION;
                                break;
                        }

                        rb = compute_tlbie_rb(pte[0], pte[1],
                                              tsh & H_BULK_REMOVE_PTEX);
                        vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
                        tsh |= H_BULK_REMOVE_SUCCESS;
                        tsh |= (pte[1] & (HPTE_R_C | HPTE_R_R)) << 43;
                }
                kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
        }
        mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
        kvmppc_set_gpr(vcpu, 3, ret);

        return EMULATE_DONE;
}

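/*
 * H_PROTECT: update the protection and key bits of an existing HPTE.
 * r4 = flags carrying the new pp0/pp/N/key bits, r5 = PTE index,
 * r6 = AVPN for the optional H_AVPN match.  The old translation is
 * flushed before the updated entry is written back.
 */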
static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
{
        unsigned long flags = kvmppc_get_gpr(vcpu, 4);
        unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
        unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
        unsigned long rb, pteg, r, v;
        unsigned long pte[2];
        long int ret;

        pteg = get_pteg_addr(vcpu, pte_index);
        mutex_lock(&vcpu->kvm->arch.hpt_mutex);
        ret = H_FUNCTION;
        if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
                goto done;
        pte[0] = be64_to_cpu((__force __be64)pte[0]);
        pte[1] = be64_to_cpu((__force __be64)pte[1]);

        ret = H_NOT_FOUND;
        if ((pte[0] & HPTE_V_VALID) == 0 ||
            ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn))
                goto done;

        v = pte[0];
        r = pte[1];
        r &= ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_HI |
               HPTE_R_KEY_LO);
        r |= (flags << 55) & HPTE_R_PP0;
        r |= (flags << 48) & HPTE_R_KEY_HI;
        r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

        pte[1] = r;

        rb = compute_tlbie_rb(v, r, pte_index);
        vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
        pte[0] = (__force u64)cpu_to_be64(pte[0]);
        pte[1] = (__force u64)cpu_to_be64(pte[1]);
        ret = H_FUNCTION;
        if (copy_to_user((void __user *)pteg, pte, sizeof(pte)))
                goto done;
        ret = H_SUCCESS;

 done:
        mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
        kvmppc_set_gpr(vcpu, 3, ret);

        return EMULATE_DONE;
}

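/*
 * H_LOGICAL_CI_LOAD / H_LOGICAL_CI_STORE: cache-inhibited access to a
 * logical real address.  The common helpers return H_TOO_HARD when
 * the access cannot be completed in the kernel; we then return
 * EMULATE_FAIL so the hypercall is handled elsewhere (typically by
 * userspace).
 */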
static int kvmppc_h_pr_logical_ci_load(struct kvm_vcpu *vcpu)
{
        long rc;

        rc = kvmppc_h_logical_ci_load(vcpu);
        if (rc == H_TOO_HARD)
                return EMULATE_FAIL;
        kvmppc_set_gpr(vcpu, 3, rc);
        return EMULATE_DONE;
}

static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu)
{
        long rc;

        rc = kvmppc_h_logical_ci_store(vcpu);
        if (rc == H_TOO_HARD)
                return EMULATE_FAIL;
        kvmppc_set_gpr(vcpu, 3, rc);
        return EMULATE_DONE;
}

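/*
 * TCE (IOMMU) hypercalls are forwarded to the common SPAPR TCE code
 * when CONFIG_SPAPR_TCE_IOMMU is enabled; otherwise they simply fail
 * emulation.
 */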
#ifdef CONFIG_SPAPR_TCE_IOMMU
static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
        unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
        unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
        unsigned long tce = kvmppc_get_gpr(vcpu, 6);
        long rc;

        rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
        if (rc == H_TOO_HARD)
                return EMULATE_FAIL;
        kvmppc_set_gpr(vcpu, 3, rc);
        return EMULATE_DONE;
}

static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu)
{
        unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
        unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
        unsigned long tce = kvmppc_get_gpr(vcpu, 6);
        unsigned long npages = kvmppc_get_gpr(vcpu, 7);
        long rc;

        rc = kvmppc_h_put_tce_indirect(vcpu, liobn, ioba,
                        tce, npages);
        if (rc == H_TOO_HARD)
                return EMULATE_FAIL;
        kvmppc_set_gpr(vcpu, 3, rc);
        return EMULATE_DONE;
}

static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
{
        unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
        unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
        unsigned long tce_value = kvmppc_get_gpr(vcpu, 6);
        unsigned long npages = kvmppc_get_gpr(vcpu, 7);
        long rc;

        rc = kvmppc_h_stuff_tce(vcpu, liobn, ioba, tce_value, npages);
        if (rc == H_TOO_HARD)
                return EMULATE_FAIL;
        kvmppc_set_gpr(vcpu, 3, rc);
        return EMULATE_DONE;
}

#else /* CONFIG_SPAPR_TCE_IOMMU */
static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
        return EMULATE_FAIL;
}

static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu)
{
        return EMULATE_FAIL;
}

static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
{
        return EMULATE_FAIL;
}
#endif /* CONFIG_SPAPR_TCE_IOMMU */

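/* Forward an XICS interrupt controller hypercall to the XICS emulation. */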
static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{
        long rc = kvmppc_xics_hcall(vcpu, cmd);
        kvmppc_set_gpr(vcpu, 3, rc);
        return EMULATE_DONE;
}

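/*
 * Top-level PAPR hypercall dispatcher for PR KVM.  Hypercall numbers
 * are multiples of 4, so the enabled_hcalls bitmap is indexed by
 * cmd / 4.  Anything not handled here returns EMULATE_FAIL, letting
 * the caller pass the hypercall on.
 */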
int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
{
        int rc, idx;

        if (cmd <= MAX_HCALL_OPCODE &&
            !test_bit(cmd/4, vcpu->kvm->arch.enabled_hcalls))
                return EMULATE_FAIL;

        switch (cmd) {
        case H_ENTER:
                return kvmppc_h_pr_enter(vcpu);
        case H_REMOVE:
                return kvmppc_h_pr_remove(vcpu);
        case H_PROTECT:
                return kvmppc_h_pr_protect(vcpu);
        case H_BULK_REMOVE:
                return kvmppc_h_pr_bulk_remove(vcpu);
        case H_PUT_TCE:
                return kvmppc_h_pr_put_tce(vcpu);
        case H_PUT_TCE_INDIRECT:
                return kvmppc_h_pr_put_tce_indirect(vcpu);
        case H_STUFF_TCE:
                return kvmppc_h_pr_stuff_tce(vcpu);
        case H_CEDE:
                kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
                kvm_vcpu_block(vcpu);
                kvm_clear_request(KVM_REQ_UNHALT, vcpu);
                vcpu->stat.generic.halt_wakeup++;
                return EMULATE_DONE;
        case H_LOGICAL_CI_LOAD:
                return kvmppc_h_pr_logical_ci_load(vcpu);
        case H_LOGICAL_CI_STORE:
                return kvmppc_h_pr_logical_ci_store(vcpu);
        case H_XIRR:
        case H_CPPR:
        case H_EOI:
        case H_IPI:
        case H_IPOLL:
        case H_XIRR_X:
                if (kvmppc_xics_enabled(vcpu))
                        return kvmppc_h_pr_xics_hcall(vcpu, cmd);
                break;
        case H_RTAS:
                if (list_empty(&vcpu->kvm->arch.rtas_tokens))
                        break;
                idx = srcu_read_lock(&vcpu->kvm->srcu);
                rc = kvmppc_rtas_hcall(vcpu);
                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                if (rc)
                        break;
                kvmppc_set_gpr(vcpu, 3, 0);
                return EMULATE_DONE;
        }

        return EMULATE_FAIL;
}

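/*
 * Report whether PR KVM implements a given hypercall, for the
 * hcall-enabling facility (KVM_CAP_PPC_ENABLE_HCALL).
 */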
int kvmppc_hcall_impl_pr(unsigned long cmd)
{
        switch (cmd) {
        case H_ENTER:
        case H_REMOVE:
        case H_PROTECT:
        case H_BULK_REMOVE:
        case H_PUT_TCE:
        case H_PUT_TCE_INDIRECT:
        case H_STUFF_TCE:
        case H_CEDE:
        case H_LOGICAL_CI_LOAD:
        case H_LOGICAL_CI_STORE:
#ifdef CONFIG_KVM_XICS
        case H_XIRR:
        case H_CPPR:
        case H_EOI:
        case H_IPI:
        case H_IPOLL:
        case H_XIRR_X:
#endif
                return 1;
        }
        return 0;
}

/*
 * List of hcall numbers to enable by default.
 * For compatibility with old userspace, we enable by default
 * all hcalls that were implemented before the hcall-enabling
 * facility was added.  Note this list should not include H_RTAS.
 */
static unsigned int default_hcall_list[] = {
        H_ENTER,
        H_REMOVE,
        H_PROTECT,
        H_BULK_REMOVE,
        H_PUT_TCE,
        H_CEDE,
#ifdef CONFIG_KVM_XICS
        H_XIRR,
        H_CPPR,
        H_EOI,
        H_IPI,
        H_IPOLL,
        H_XIRR_X,
#endif
        0
};

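/*
 * Seed a new VM's enabled_hcalls bitmap from the zero-terminated
 * default list above.
 */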
void kvmppc_pr_init_default_hcalls(struct kvm *kvm)
{
        int i;
        unsigned int hcall;

        for (i = 0; default_hcall_list[i]; ++i) {
                hcall = default_hcall_list[i];
                WARN_ON(!kvmppc_hcall_impl_pr(hcall));
                __set_bit(hcall / 4, kvm->arch.enabled_hcalls);
        }
}