/* linux/arch/powerpc/kvm/book3s_pr_papr.c */
/*
 * Copyright (C) 2011. Freescale Inc. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 *
 * Hypercall handling for running PAPR guests in PR KVM on Book 3S
 * processors.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
  17
  18#include <linux/anon_inodes.h>
  19
  20#include <linux/uaccess.h>
  21#include <asm/kvm_ppc.h>
  22#include <asm/kvm_book3s.h>
  23
  24#define HPTE_SIZE       16              /* bytes per HPT entry */
  25
  26static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
  27{
  28        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
  29        unsigned long pteg_addr;
  30
  31        pte_index <<= 4;
  32        pte_index &= ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1) << 7 | 0x70;
  33        pteg_addr = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
  34        pteg_addr |= pte_index;
  35
  36        return pteg_addr;
  37}
  38
  39static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
  40{
  41        long flags = kvmppc_get_gpr(vcpu, 4);
  42        long pte_index = kvmppc_get_gpr(vcpu, 5);
  43        __be64 pteg[2 * 8];
  44        __be64 *hpte;
  45        unsigned long pteg_addr, i;
  46        long int ret;
  47
  48        i = pte_index & 7;
  49        pte_index &= ~7UL;
  50        pteg_addr = get_pteg_addr(vcpu, pte_index);
  51
  52        mutex_lock(&vcpu->kvm->arch.hpt_mutex);
  53        ret = H_FUNCTION;
  54        if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)))
  55                goto done;
  56        hpte = pteg;
  57
  58        ret = H_PTEG_FULL;
  59        if (likely((flags & H_EXACT) == 0)) {
  60                for (i = 0; ; ++i) {
  61                        if (i == 8)
  62                                goto done;
  63                        if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0)
  64                                break;
  65                        hpte += 2;
  66                }
  67        } else {
  68                hpte += i * 2;
  69                if (*hpte & HPTE_V_VALID)
  70                        goto done;
  71        }
  72
  73        hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
  74        hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
  75        pteg_addr += i * HPTE_SIZE;
  76        ret = H_FUNCTION;
  77        if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE))
  78                goto done;
  79        kvmppc_set_gpr(vcpu, 4, pte_index | i);
  80        ret = H_SUCCESS;
  81
  82 done:
  83        mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
  84        kvmppc_set_gpr(vcpu, 3, ret);
  85
  86        return EMULATE_DONE;
  87}
  88
  89static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
  90{
  91        unsigned long flags= kvmppc_get_gpr(vcpu, 4);
  92        unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
  93        unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
  94        unsigned long v = 0, pteg, rb;
  95        unsigned long pte[2];
  96        long int ret;
  97
  98        pteg = get_pteg_addr(vcpu, pte_index);
  99        mutex_lock(&vcpu->kvm->arch.hpt_mutex);
 100        ret = H_FUNCTION;
 101        if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
 102                goto done;
 103        pte[0] = be64_to_cpu((__force __be64)pte[0]);
 104        pte[1] = be64_to_cpu((__force __be64)pte[1]);
 105
 106        ret = H_NOT_FOUND;
 107        if ((pte[0] & HPTE_V_VALID) == 0 ||
 108            ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) ||
 109            ((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
 110                goto done;
 111
 112        ret = H_FUNCTION;
 113        if (copy_to_user((void __user *)pteg, &v, sizeof(v)))
 114                goto done;
 115
 116        rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
 117        vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
 118
 119        ret = H_SUCCESS;
 120        kvmppc_set_gpr(vcpu, 4, pte[0]);
 121        kvmppc_set_gpr(vcpu, 5, pte[1]);
 122
 123 done:
 124        mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
 125        kvmppc_set_gpr(vcpu, 3, ret);
 126
 127        return EMULATE_DONE;
 128}
 129
/* Request defs for kvmppc_h_pr_bulk_remove() */
/* Each request doubleword (tsh) packs type, completion code, flags
 * and the PTE index into separate bit fields, defined below. */
#define H_BULK_REMOVE_TYPE             0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST        0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE       0x8000000000000000ULL
#define   H_BULK_REMOVE_END            0xc000000000000000ULL
#define H_BULK_REMOVE_CODE             0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS        0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND      0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM           0x2000000000000000ULL
#define   H_BULK_REMOVE_HW             0x3000000000000000ULL
#define H_BULK_REMOVE_RC               0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS            0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE       0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND        0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN           0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX             0x00ffffffffffffffULL
/* Up to four tsh/tsl register pairs per hypercall (r4..r11) */
#define H_BULK_REMOVE_MAX_BATCH        4
 147
/*
 * H_BULK_REMOVE: invalidate up to H_BULK_REMOVE_MAX_BATCH guest HPTEs
 * in one hypercall.
 *
 * Requests arrive in register pairs starting at r4: the first register
 * of each pair (tsh) carries the request type, flags and PTE index,
 * the second (tsl) the AVPN to match.  Per-entry status is written
 * back into the tsh register; the overall return code goes in r3.
 */
static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
{
	int i;
	int paramnr = 4;	/* first request register is r4 */
	int ret = H_SUCCESS;

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
		unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
		unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
		unsigned long pteg, rb, flags;
		unsigned long pte[2];
		unsigned long v = 0;

		/* An END marker terminates the batch early */
		if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
			break; /* Exit success */
		} else if ((tsh & H_BULK_REMOVE_TYPE) !=
			   H_BULK_REMOVE_REQUEST) {
			ret = H_PARAMETER;
			break; /* Exit fail */
		}

		/* Keep flags + index, flip the slot from request to response */
		tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
		tsh |= H_BULK_REMOVE_RESPONSE;

		/* ANDCOND and AVPN match conditions are mutually exclusive */
		if ((tsh & H_BULK_REMOVE_ANDCOND) &&
		    (tsh & H_BULK_REMOVE_AVPN)) {
			tsh |= H_BULK_REMOVE_PARM;
			kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
			ret = H_PARAMETER;
			break; /* Exit fail */
		}

		pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
		if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) {
			ret = H_FUNCTION;
			break;
		}
		pte[0] = be64_to_cpu((__force __be64)pte[0]);
		pte[1] = be64_to_cpu((__force __be64)pte[1]);

		/* tsl = AVPN */
		/* Shift the tsh flag bits down to the H_AVPN/H_ANDCOND positions */
		flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;

		if ((pte[0] & HPTE_V_VALID) == 0 ||
		    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) ||
		    ((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) {
			tsh |= H_BULK_REMOVE_NOT_FOUND;
		} else {
			/* Splat the pteg in (userland) hpt */
			if (copy_to_user((void __user *)pteg, &v, sizeof(v))) {
				ret = H_FUNCTION;
				break;
			}

			rb = compute_tlbie_rb(pte[0], pte[1],
					      tsh & H_BULK_REMOVE_PTEX);
			vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
			tsh |= H_BULK_REMOVE_SUCCESS;
			/* Report the removed entry's R/C bits back to the guest */
			tsh |= (pte[1] & (HPTE_R_C | HPTE_R_R)) << 43;
		}
		kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
	}
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}
 216
 217static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
 218{
 219        unsigned long flags = kvmppc_get_gpr(vcpu, 4);
 220        unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
 221        unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
 222        unsigned long rb, pteg, r, v;
 223        unsigned long pte[2];
 224        long int ret;
 225
 226        pteg = get_pteg_addr(vcpu, pte_index);
 227        mutex_lock(&vcpu->kvm->arch.hpt_mutex);
 228        ret = H_FUNCTION;
 229        if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
 230                goto done;
 231        pte[0] = be64_to_cpu((__force __be64)pte[0]);
 232        pte[1] = be64_to_cpu((__force __be64)pte[1]);
 233
 234        ret = H_NOT_FOUND;
 235        if ((pte[0] & HPTE_V_VALID) == 0 ||
 236            ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn))
 237                goto done;
 238
 239        v = pte[0];
 240        r = pte[1];
 241        r &= ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_HI |
 242               HPTE_R_KEY_LO);
 243        r |= (flags << 55) & HPTE_R_PP0;
 244        r |= (flags << 48) & HPTE_R_KEY_HI;
 245        r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
 246
 247        pte[1] = r;
 248
 249        rb = compute_tlbie_rb(v, r, pte_index);
 250        vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
 251        pte[0] = (__force u64)cpu_to_be64(pte[0]);
 252        pte[1] = (__force u64)cpu_to_be64(pte[1]);
 253        ret = H_FUNCTION;
 254        if (copy_to_user((void __user *)pteg, pte, sizeof(pte)))
 255                goto done;
 256        ret = H_SUCCESS;
 257
 258 done:
 259        mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
 260        kvmppc_set_gpr(vcpu, 3, ret);
 261
 262        return EMULATE_DONE;
 263}
 264
 265static int kvmppc_h_pr_logical_ci_load(struct kvm_vcpu *vcpu)
 266{
 267        long rc;
 268
 269        rc = kvmppc_h_logical_ci_load(vcpu);
 270        if (rc == H_TOO_HARD)
 271                return EMULATE_FAIL;
 272        kvmppc_set_gpr(vcpu, 3, rc);
 273        return EMULATE_DONE;
 274}
 275
 276static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu)
 277{
 278        long rc;
 279
 280        rc = kvmppc_h_logical_ci_store(vcpu);
 281        if (rc == H_TOO_HARD)
 282                return EMULATE_FAIL;
 283        kvmppc_set_gpr(vcpu, 3, rc);
 284        return EMULATE_DONE;
 285}
 286
 287#ifdef CONFIG_SPAPR_TCE_IOMMU
 288static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
 289{
 290        unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
 291        unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
 292        unsigned long tce = kvmppc_get_gpr(vcpu, 6);
 293        long rc;
 294
 295        rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
 296        if (rc == H_TOO_HARD)
 297                return EMULATE_FAIL;
 298        kvmppc_set_gpr(vcpu, 3, rc);
 299        return EMULATE_DONE;
 300}
 301
 302static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu)
 303{
 304        unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
 305        unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
 306        unsigned long tce = kvmppc_get_gpr(vcpu, 6);
 307        unsigned long npages = kvmppc_get_gpr(vcpu, 7);
 308        long rc;
 309
 310        rc = kvmppc_h_put_tce_indirect(vcpu, liobn, ioba,
 311                        tce, npages);
 312        if (rc == H_TOO_HARD)
 313                return EMULATE_FAIL;
 314        kvmppc_set_gpr(vcpu, 3, rc);
 315        return EMULATE_DONE;
 316}
 317
 318static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
 319{
 320        unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
 321        unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
 322        unsigned long tce_value = kvmppc_get_gpr(vcpu, 6);
 323        unsigned long npages = kvmppc_get_gpr(vcpu, 7);
 324        long rc;
 325
 326        rc = kvmppc_h_stuff_tce(vcpu, liobn, ioba, tce_value, npages);
 327        if (rc == H_TOO_HARD)
 328                return EMULATE_FAIL;
 329        kvmppc_set_gpr(vcpu, 3, rc);
 330        return EMULATE_DONE;
 331}
 332
 333#else /* CONFIG_SPAPR_TCE_IOMMU */
/* TCE hcalls unsupported without CONFIG_SPAPR_TCE_IOMMU; punt to userspace */
static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
	return EMULATE_FAIL;
}
 338
/* TCE hcalls unsupported without CONFIG_SPAPR_TCE_IOMMU; punt to userspace */
static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu)
{
	return EMULATE_FAIL;
}
 343
/* TCE hcalls unsupported without CONFIG_SPAPR_TCE_IOMMU; punt to userspace */
static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
{
	return EMULATE_FAIL;
}
 348#endif /* CONFIG_SPAPR_TCE_IOMMU */
 349
 350static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
 351{
 352        long rc = kvmppc_xics_hcall(vcpu, cmd);
 353        kvmppc_set_gpr(vcpu, 3, rc);
 354        return EMULATE_DONE;
 355}
 356
/*
 * Top-level PAPR hypercall dispatcher for PR KVM.
 *
 * Returns EMULATE_DONE when the hcall was handled in-kernel (guest
 * result already placed in r3), or EMULATE_FAIL to let the caller
 * forward the hcall elsewhere (e.g. to userspace).
 */
int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
{
	int rc, idx;

	/* Honour the userspace-controlled enabled-hcalls bitmap
	 * (one bit per hcall; opcodes appear to be multiples of 4) */
	if (cmd <= MAX_HCALL_OPCODE &&
	    !test_bit(cmd/4, vcpu->kvm->arch.enabled_hcalls))
		return EMULATE_FAIL;

	switch (cmd) {
	case H_ENTER:
		return kvmppc_h_pr_enter(vcpu);
	case H_REMOVE:
		return kvmppc_h_pr_remove(vcpu);
	case H_PROTECT:
		return kvmppc_h_pr_protect(vcpu);
	case H_BULK_REMOVE:
		return kvmppc_h_pr_bulk_remove(vcpu);
	case H_PUT_TCE:
		return kvmppc_h_pr_put_tce(vcpu);
	case H_PUT_TCE_INDIRECT:
		return kvmppc_h_pr_put_tce_indirect(vcpu);
	case H_STUFF_TCE:
		return kvmppc_h_pr_stuff_tce(vcpu);
	case H_CEDE:
		/* Idle the vcpu: enable external interrupts, then block
		 * until something wakes it */
		kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		vcpu->stat.halt_wakeup++;
		return EMULATE_DONE;
	case H_LOGICAL_CI_LOAD:
		return kvmppc_h_pr_logical_ci_load(vcpu);
	case H_LOGICAL_CI_STORE:
		return kvmppc_h_pr_logical_ci_store(vcpu);
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
		/* XICS hcalls only when the in-kernel XICS is active */
		if (kvmppc_xics_enabled(vcpu))
			return kvmppc_h_pr_xics_hcall(vcpu, cmd);
		break;
	case H_RTAS:
		/* No RTAS tokens registered: fall back to EMULATE_FAIL */
		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
			break;
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		rc = kvmppc_rtas_hcall(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (rc)
			break;
		kvmppc_set_gpr(vcpu, 3, 0);
		return EMULATE_DONE;
	}

	return EMULATE_FAIL;
}
 413
 414int kvmppc_hcall_impl_pr(unsigned long cmd)
 415{
 416        switch (cmd) {
 417        case H_ENTER:
 418        case H_REMOVE:
 419        case H_PROTECT:
 420        case H_BULK_REMOVE:
 421        case H_PUT_TCE:
 422        case H_PUT_TCE_INDIRECT:
 423        case H_STUFF_TCE:
 424        case H_CEDE:
 425        case H_LOGICAL_CI_LOAD:
 426        case H_LOGICAL_CI_STORE:
 427#ifdef CONFIG_KVM_XICS
 428        case H_XIRR:
 429        case H_CPPR:
 430        case H_EOI:
 431        case H_IPI:
 432        case H_IPOLL:
 433        case H_XIRR_X:
 434#endif
 435                return 1;
 436        }
 437        return 0;
 438}
 439
 440/*
 441 * List of hcall numbers to enable by default.
 442 * For compatibility with old userspace, we enable by default
 443 * all hcalls that were implemented before the hcall-enabling
 444 * facility was added.  Note this list should not include H_RTAS.
 445 */
static unsigned int default_hcall_list[] = {
	H_ENTER,
	H_REMOVE,
	H_PROTECT,
	H_BULK_REMOVE,
	H_PUT_TCE,
	H_CEDE,
#ifdef CONFIG_KVM_XICS
	H_XIRR,
	H_CPPR,
	H_EOI,
	H_IPI,
	H_IPOLL,
	H_XIRR_X,
#endif
	0	/* list terminator, see kvmppc_pr_init_default_hcalls() */
};
 463
 464void kvmppc_pr_init_default_hcalls(struct kvm *kvm)
 465{
 466        int i;
 467        unsigned int hcall;
 468
 469        for (i = 0; default_hcall_list[i]; ++i) {
 470                hcall = default_hcall_list[i];
 471                WARN_ON(!kvmppc_hcall_impl_pr(hcall));
 472                __set_bit(hcall / 4, kvm->arch.enabled_hcalls);
 473        }
 474}
 475