qemu/hw/spapr_hcall.c
#include "sysemu.h"
#include "cpu.h"
#include "dyngen-exec.h"
#include "qemu-char.h"
#include "helper_regs.h"
#include "hw/spapr.h"

#define HPTES_PER_GROUP 8

#define HPTE_V_SSIZE_SHIFT      62
#define HPTE_V_AVPN_SHIFT       7
#define HPTE_V_AVPN             0x3fffffffffffff80ULL
#define HPTE_V_AVPN_VAL(x)      (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x, y)    (!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED           0x0000000000000010ULL
#define HPTE_V_LOCK             0x0000000000000008ULL
#define HPTE_V_LARGE            0x0000000000000004ULL
#define HPTE_V_SECONDARY        0x0000000000000002ULL
#define HPTE_V_VALID            0x0000000000000001ULL

#define HPTE_R_PP0              0x8000000000000000ULL
#define HPTE_R_TS               0x4000000000000000ULL
#define HPTE_R_KEY_HI           0x3000000000000000ULL
#define HPTE_R_RPN_SHIFT        12
#define HPTE_R_RPN              0x3ffffffffffff000ULL
#define HPTE_R_FLAGS            0x00000000000003ffULL
#define HPTE_R_PP               0x0000000000000003ULL
#define HPTE_R_N                0x0000000000000004ULL
#define HPTE_R_G                0x0000000000000008ULL
#define HPTE_R_M                0x0000000000000010ULL
#define HPTE_R_I                0x0000000000000020ULL
#define HPTE_R_W                0x0000000000000040ULL
#define HPTE_R_WIMG             0x0000000000000078ULL
#define HPTE_R_C                0x0000000000000080ULL
#define HPTE_R_R                0x0000000000000100ULL
#define HPTE_R_KEY_LO           0x0000000000000e00ULL

#define HPTE_V_1TB_SEG          0x4000000000000000ULL
#define HPTE_V_VRMA_MASK        0x4001ffffff000000ULL

#define HPTE_V_HVLOCK           0x40ULL

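/*
 * Try to take an HPTE for update: fail if any of the given bits (normally
 * HPTE_V_HVLOCK, optionally also HPTE_V_VALID) are already set, otherwise
 * set the software-only HPTE_V_HVLOCK bit.  Under qemu's global lock there
 * is no real contention, so callers never actually have to spin.
 */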
static inline int lock_hpte(void *hpte, target_ulong bits)
{
    uint64_t pteh;

    pteh = ldq_p(hpte);

    /* We're protected by qemu's global lock here */
    if (pteh & bits) {
        return 0;
    }
    stq_p(hpte, pteh | HPTE_V_HVLOCK);
    return 1;
}

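/*
 * Reconstruct the RB operand for a tlbie instruction from the HPTE contents
 * and its index in the hash table, so the matching TLB entry can be
 * invalidated.  The low-order VA bits are recovered by xor-ing the hash
 * (derived from pte_index) back with the VSID bits held in the AVA field.
 */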
static target_ulong compute_tlbie_rb(target_ulong v, target_ulong r,
                                     target_ulong pte_index)
{
    target_ulong rb, va_low;

    rb = (v & ~0x7fULL) << 16; /* AVA field */
    va_low = pte_index >> 3;
    if (v & HPTE_V_SECONDARY) {
        va_low = ~va_low;
    }
    /* xor vsid from AVA */
    if (!(v & HPTE_V_1TB_SEG)) {
        va_low ^= v >> 12;
    } else {
        va_low ^= v >> 24;
    }
    va_low &= 0x7ff;
    if (v & HPTE_V_LARGE) {
        rb |= 1;                         /* L field */
#if 0 /* Disable that P7 specific bit for now */
        if (r & 0xff000) {
            /* non-16MB large page, must be 64k */
            /* (masks depend on page size) */
            rb |= 0x1000;                /* page encoding in LP field */
            rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
            rb |= (va_low & 0xfe);       /* AVAL field */
        }
#endif
    } else {
        /* 4kB page */
        rb |= (va_low & 0x7ff) << 12;   /* remaining 11b of AVA */
    }
    rb |= (v >> 54) & 0x300;            /* B field */
    return rb;
}

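/*
 * H_ENTER: insert a new entry into the guest's hashed page table.  args[]
 * carries flags, the PTE group index and the two halves of the HPTE
 * (pteh/ptel); on success the index of the slot actually used is returned
 * in args[0].
 */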
static target_ulong h_enter(CPUState *env, sPAPREnvironment *spapr,
                            target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    target_ulong page_shift = 12;
    target_ulong raddr;
    target_ulong i;
    uint8_t *hpte;

    /* only handle 4k and 16M pages for now */
    if (pteh & HPTE_V_LARGE) {
#if 0 /* We don't support 64k pages yet */
        if ((ptel & 0xf000) == 0x1000) {
            /* 64k page */
        } else
#endif
        if ((ptel & 0xff000) == 0) {
            /* 16M page */
            page_shift = 24;
            /* lowest AVA bit must be 0 for 16M pages */
            if (pteh & 0x80) {
                return H_PARAMETER;
            }
        } else {
            return H_PARAMETER;
        }
    }

    raddr = (ptel & HPTE_R_RPN) & ~((1ULL << page_shift) - 1);

    if (raddr < spapr->ram_limit) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE_R_WIMG) != HPTE_R_M) {
            return H_PARAMETER;
        }
    } else {
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        if ((ptel & (HPTE_R_W | HPTE_R_I | HPTE_R_M)) != HPTE_R_I) {
            return H_PARAMETER;
        }
    }

    /* Don't let the guest set the software-maintained bits
     * (HPTE_V_HVLOCK lives in this range) */
    pteh &= ~0x60ULL;

    if ((pte_index * HASH_PTE_SIZE_64) & ~env->htab_mask) {
        return H_PARAMETER;
    }
    if (likely((flags & H_EXACT) == 0)) {
        pte_index &= ~7ULL;
        hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);
        for (i = 0; ; ++i) {
            if (i == HPTES_PER_GROUP) {
                return H_PTEG_FULL;
            }
            if (((ldq_p(hpte) & HPTE_V_VALID) == 0) &&
                lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID)) {
                break;
            }
            hpte += HASH_PTE_SIZE_64;
        }
    } else {
        i = 0;
        hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);
        if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID)) {
            return H_PTEG_FULL;
        }
    }
    stq_p(hpte + (HASH_PTE_SIZE_64/2), ptel);
    /* eieio();  FIXME: need some sort of barrier for smp? */
    stq_p(hpte, pteh);

    assert(!(ldq_p(hpte) & HPTE_V_HVLOCK));
    args[0] = pte_index + i;
    return H_SUCCESS;
}

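/* Internal return codes for remove_hpte(); the values match the "code"
 * field reported back in each H_BULK_REMOVE translation specifier. */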
enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
};

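/*
 * Invalidate a single HPTE, subject to the H_AVPN / H_ANDCOND matching
 * conditions in flags, and flush the corresponding TLB entry.  The old V
 * and R doublewords are returned through vp/rp for the caller to report.
 */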
static target_ulong remove_hpte(CPUState *env, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    uint8_t *hpte;
    target_ulong v, r, rb;

    if ((ptex * HASH_PTE_SIZE_64) & ~env->htab_mask) {
        return REMOVE_PARM;
    }

    hpte = env->external_htab + (ptex * HASH_PTE_SIZE_64);
    while (!lock_hpte(hpte, HPTE_V_HVLOCK)) {
        /* We have no real concurrency in qemu soft-emulation, so we
         * will never actually have a contested lock */
        assert(0);
    }

    v = ldq_p(hpte);
    r = ldq_p(hpte + (HASH_PTE_SIZE_64/2));

    if ((v & HPTE_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        stq_p(hpte, v & ~HPTE_V_HVLOCK);
        assert(!(ldq_p(hpte) & HPTE_V_HVLOCK));
        return REMOVE_NOT_FOUND;
    }
    *vp = v & ~HPTE_V_HVLOCK;
    *rp = r;
    stq_p(hpte, 0);
    rb = compute_tlbie_rb(v, r, ptex);
    ppc_tlb_invalidate_one(env, rb);
    assert(!(ldq_p(hpte) & HPTE_V_HVLOCK));
    return REMOVE_SUCCESS;
}

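/*
 * H_REMOVE: remove a single HPTE.  On success the old pteh/ptel values are
 * returned in args[0]/args[1].
 */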
static target_ulong h_remove(CPUState *env, sPAPREnvironment *spapr,
                             target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    int ret;

    ret = remove_hpte(env, pte_index, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    assert(0);
}

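/*
 * H_BULK_REMOVE passes up to four translation specifiers, each a pair of
 * doublewords: the first ("tsh") encodes type, flags and PTE index, the
 * second ("tsl") is the AVPN/ANDCOND value.  The fields below decode the
 * first doubleword; the response code and reference/change bits are
 * written back into it.
 */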
#define H_BULK_REMOVE_TYPE             0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST        0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE       0x8000000000000000ULL
#define   H_BULK_REMOVE_END            0xc000000000000000ULL
#define H_BULK_REMOVE_CODE             0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS        0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND      0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM           0x2000000000000000ULL
#define   H_BULK_REMOVE_HW             0x3000000000000000ULL
#define H_BULK_REMOVE_RC               0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS            0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE       0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND        0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN           0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX             0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH        4

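/*
 * H_BULK_REMOVE: remove a batch of HPTEs.  The list terminates early at an
 * END specifier; each processed entry has its type rewritten to RESPONSE
 * with the result code and the returned R/C bits folded in.
 */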
static target_ulong h_bulk_remove(CPUState *env, sPAPREnvironment *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    int i;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(env, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE_R_C | HPTE_R_R)) << 43;
            break;

        case REMOVE_PARM:
            return H_PARAMETER;

        case REMOVE_HW:
            return H_HARDWARE;
        }
    }

    return H_SUCCESS;
}

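/*
 * H_PROTECT: update the protection and key bits of an existing HPTE.  The
 * entry is temporarily marked invalid while the TLB entry is flushed so the
 * guest never sees a half-updated translation.
 */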
static target_ulong h_protect(CPUState *env, sPAPREnvironment *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    uint8_t *hpte;
    target_ulong v, r, rb;

    if ((pte_index * HASH_PTE_SIZE_64) & ~env->htab_mask) {
        return H_PARAMETER;
    }

    hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);
    while (!lock_hpte(hpte, HPTE_V_HVLOCK)) {
        /* We have no real concurrency in qemu soft-emulation, so we
         * will never actually have a contested lock */
        assert(0);
    }

    v = ldq_p(hpte);
    r = ldq_p(hpte + (HASH_PTE_SIZE_64/2));

    if ((v & HPTE_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        stq_p(hpte, v & ~HPTE_V_HVLOCK);
        assert(!(ldq_p(hpte) & HPTE_V_HVLOCK));
        return H_NOT_FOUND;
    }

    r &= ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
           HPTE_R_KEY_HI | HPTE_R_KEY_LO);
    r |= (flags << 55) & HPTE_R_PP0;
    r |= (flags << 48) & HPTE_R_KEY_HI;
    r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
    rb = compute_tlbie_rb(v, r, pte_index);
    stq_p(hpte, v & ~HPTE_V_VALID);
    ppc_tlb_invalidate_one(env, rb);
    stq_p(hpte + (HASH_PTE_SIZE_64/2), r);
    /* Don't need a memory barrier, due to qemu's global lock */
    stq_p(hpte, v & ~HPTE_V_HVLOCK);
    assert(!(ldq_p(hpte) & HPTE_V_HVLOCK));
    return H_SUCCESS;
}

static target_ulong h_set_dabr(CPUState *env, sPAPREnvironment *spapr,
                               target_ulong opcode, target_ulong *args)
{
    /* FIXME: actually implement this */
    return H_HARDWARE;
}

#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

#define VPA_MIN_SIZE           640
#define VPA_SIZE_OFFSET        0x4
#define VPA_SHARED_PROC_OFFSET 0x9
#define VPA_SHARED_PROC_VAL    0x2

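/*
 * Register the Virtual Processor Area for a CPU.  The VPA must be
 * cache-line aligned, at least VPA_MIN_SIZE bytes long (the size is read
 * back from the area itself) and must not cross a 4k page boundary.  The
 * shared-processor byte inside the VPA is set on registration.
 */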
static target_ulong register_vpa(CPUState *env, target_ulong vpa)
{
    uint16_t size;
    uint8_t tmp;

    if (vpa == 0) {
        hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
        return H_HARDWARE;
    }

    if (vpa % env->dcache_line_size) {
        return H_PARAMETER;
    }
    /* FIXME: bounds check the address */

    size = lduw_be_phys(vpa + 0x4);

    if (size < VPA_MIN_SIZE) {
        return H_PARAMETER;
    }

    /* VPA is not allowed to cross a page boundary */
    if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    env->vpa = vpa;

    tmp = ldub_phys(env->vpa + VPA_SHARED_PROC_OFFSET);
    tmp |= VPA_SHARED_PROC_VAL;
    stb_phys(env->vpa + VPA_SHARED_PROC_OFFSET, tmp);

    return H_SUCCESS;
}

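/* The VPA cannot be torn down while an SLB shadow buffer or a dispatch
 * trace log is still registered against it. */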
static target_ulong deregister_vpa(CPUState *env, target_ulong vpa)
{
    if (env->slb_shadow) {
        return H_RESOURCE;
    }

    if (env->dispatch_trace_log) {
        return H_RESOURCE;
    }

    env->vpa = 0;
    return H_SUCCESS;
}

static target_ulong register_slb_shadow(CPUState *env, target_ulong addr)
{
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(addr + 0x4);
    if (size < 0x8) {
        return H_PARAMETER;
    }

    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!env->vpa) {
        return H_RESOURCE;
    }

    env->slb_shadow = addr;

    return H_SUCCESS;
}

static target_ulong deregister_slb_shadow(CPUState *env, target_ulong addr)
{
    env->slb_shadow = 0;
    return H_SUCCESS;
}

static target_ulong register_dtl(CPUState *env, target_ulong addr)
{
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(addr + 0x4);

    if (size < 48) {
        return H_PARAMETER;
    }

    if (!env->vpa) {
        return H_RESOURCE;
    }

    env->dispatch_trace_log = addr;
    env->dtl_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_dtl(CPUState *env, target_ulong addr)
{
    env->dispatch_trace_log = 0;
    env->dtl_size = 0;

    return H_SUCCESS;
}

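/*
 * H_REGISTER_VPA: register or deregister a per-CPU area (VPA, SLB shadow
 * buffer or dispatch trace log) for the target processor, selected by the
 * sub-function encoded in the flags argument.
 */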
static target_ulong h_register_vpa(CPUState *env, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    target_ulong vpa = args[2];
    target_ulong ret = H_PARAMETER;
    CPUState *tenv;

    for (tenv = first_cpu; tenv; tenv = tenv->next_cpu) {
        if (tenv->cpu_index == procno) {
            break;
        }
    }

    if (!tenv) {
        return H_PARAMETER;
    }

    switch (flags) {
    case FLAGS_REGISTER_VPA:
        ret = register_vpa(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_VPA:
        ret = deregister_vpa(tenv, vpa);
        break;

    case FLAGS_REGISTER_SLBSHADOW:
        ret = register_slb_shadow(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_SLBSHADOW:
        ret = deregister_slb_shadow(tenv, vpa);
        break;

    case FLAGS_REGISTER_DTL:
        ret = register_dtl(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_DTL:
        ret = deregister_dtl(tenv, vpa);
        break;
    }

    return ret;
}

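/*
 * H_CEDE: the vCPU yields to the hypervisor.  External interrupts are
 * enabled in the MSR and the CPU is halted until it has work to do again.
 */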
static target_ulong h_cede(CPUState *env, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);
    if (!cpu_has_work(env)) {
        env->halted = 1;
    }
    return H_SUCCESS;
}

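/*
 * KVMPPC_H_RTAS: qemu/KVM-PPC private hcall used to enter RTAS.  args[0] is
 * the guest real address of the RTAS argument buffer: a token, the argument
 * and return counts, then the argument words followed by the return slots.
 */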
static target_ulong h_rtas(CPUState *env, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong rtas_r3 = args[0];
    uint32_t token = ldl_be_phys(rtas_r3);
    uint32_t nargs = ldl_be_phys(rtas_r3 + 4);
    uint32_t nret = ldl_be_phys(rtas_r3 + 8);

    return spapr_rtas_call(spapr, token, nargs, rtas_r3 + 12,
                           nret, rtas_r3 + 12 + 4*nargs);
}

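/*
 * H_LOGICAL_{CI,CACHE}_LOAD/STORE: 1/2/4/8-byte accesses to guest logical
 * (real) addresses, used by SLOF and for debugging.  Under pure emulation
 * the cache-inhibited and cacheable variants behave identically.
 */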
static target_ulong h_logical_load(CPUState *env, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong size = args[0];
    target_ulong addr = args[1];

    switch (size) {
    case 1:
        args[0] = ldub_phys(addr);
        return H_SUCCESS;
    case 2:
        args[0] = lduw_phys(addr);
        return H_SUCCESS;
    case 4:
        args[0] = ldl_phys(addr);
        return H_SUCCESS;
    case 8:
        args[0] = ldq_phys(addr);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

static target_ulong h_logical_store(CPUState *env, sPAPREnvironment *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    target_ulong size = args[0];
    target_ulong addr = args[1];
    target_ulong val  = args[2];

    switch (size) {
    case 1:
        stb_phys(addr, val);
        return H_SUCCESS;
    case 2:
        stw_phys(addr, val);
        return H_SUCCESS;
    case 4:
        stl_phys(addr, val);
        return H_SUCCESS;
    case 8:
        stq_phys(addr, val);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

static target_ulong h_logical_icbi(CPUState *env, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_logical_dcbf(CPUState *env, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

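/* Dispatch tables for the hcall handlers: architected hcalls are indexed by
 * opcode / 4, qemu/KVM private hcalls by their offset from
 * KVMPPC_HCALL_BASE. */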
static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX -
                                             KVMPPC_HCALL_BASE + 1];

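/* Install the handler for an hcall opcode in the appropriate dispatch
 * table.  Architected opcodes must be multiples of 4; registering the same
 * handler twice is tolerated, conflicting handlers are not. */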
void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
{
    spapr_hcall_fn *slot;

    if (opcode <= MAX_HCALL_OPCODE) {
        assert((opcode & 0x3) == 0);

        slot = &papr_hypercall_table[opcode / 4];
    } else {
        assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));

        slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
    }

    assert(!(*slot) || (fn == *slot));
    *slot = fn;
}

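/*
 * Main hcall dispatcher, invoked when the guest makes a hypervisor system
 * call.  Hypercalls made from problem state (MSR[PR]=1) are rejected, and
 * unknown opcodes fail with H_FUNCTION.
 */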
target_ulong spapr_hypercall(CPUState *env, target_ulong opcode,
                             target_ulong *args)
{
    if (msr_pr) {
        hcall_dprintf("Hypercall made with MSR[PR]=1\n");
        return H_PRIVILEGE;
    }

    if ((opcode <= MAX_HCALL_OPCODE)
        && ((opcode & 0x3) == 0)) {
        spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];

        if (fn) {
            return fn(env, spapr, opcode, args);
        }
    } else if ((opcode >= KVMPPC_HCALL_BASE) &&
               (opcode <= KVMPPC_HCALL_MAX)) {
        spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];

        if (fn) {
            return fn(env, spapr, opcode, args);
        }
    }

    hcall_dprintf("Unimplemented hcall 0x" TARGET_FMT_lx "\n", opcode);
    return H_FUNCTION;
}

static void hypercall_init(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);

    /* hcall-dabr */
    spapr_register_hypercall(H_SET_DABR, h_set_dabr);

    /* hcall-splpar */
    spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
    spapr_register_hypercall(H_CEDE, h_cede);

    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate
     * here between the "CI" and the "CACHE" variants; they will use whatever
     * mapping attributes qemu is using. When using KVM, the kernel will
     * enforce the attributes more strongly
     */
    spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
    spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);

    /* qemu/KVM-PPC specific hcalls */
    spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);
}
device_init(hypercall_init);