qemu/hw/ppc/spapr_hcall.c
#include "sysemu/sysemu.h"
#include "cpu.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"

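/* Build the RB operand for a tlbie instruction from the two HPTE
 * doublewords (v, r) and the PTE's index in the hash table, so the
 * corresponding TLB entry can be invalidated after an HPTE update. */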
static target_ulong compute_tlbie_rb(target_ulong v, target_ulong r,
                                     target_ulong pte_index)
{
    target_ulong rb, va_low;

    rb = (v & ~0x7fULL) << 16; /* AVA field */
    va_low = pte_index >> 3;
    if (v & HPTE64_V_SECONDARY) {
        va_low = ~va_low;
    }
    /* xor vsid from AVA */
    if (!(v & HPTE64_V_1TB_SEG)) {
        va_low ^= v >> 12;
    } else {
        va_low ^= v >> 24;
    }
    va_low &= 0x7ff;
    if (v & HPTE64_V_LARGE) {
        rb |= 1;                         /* L field */
#if 0 /* Disable that P7 specific bit for now */
        if (r & 0xff000) {
            /* non-16MB large page, must be 64k */
            /* (masks depend on page size) */
            rb |= 0x1000;                /* page encoding in LP field */
            rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
            rb |= (va_low & 0xfe);       /* AVAL field */
        }
#endif
    } else {
        /* 4kB page */
        rb |= (va_low & 0x7ff) << 12;   /* remaining 11b of AVA */
    }
    rb |= (v >> 54) & 0x300;            /* B field */
    return rb;
}

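/* H_ENTER: insert an HPTE into the guest's hash page table. With
 * H_EXACT the entry goes at the exact PTE index requested; otherwise
 * the first free slot in the 8-entry PTEG is used and its index is
 * returned in args[0]. */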
static target_ulong h_enter(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                            target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    target_ulong page_shift = 12;
    target_ulong raddr;
    target_ulong i;
    hwaddr hpte;

    /* only handle 4k and 16M pages for now */
    if (pteh & HPTE64_V_LARGE) {
#if 0 /* We don't support 64k pages yet */
        if ((ptel & 0xf000) == 0x1000) {
            /* 64k page */
        } else
#endif
        if ((ptel & 0xff000) == 0) {
            /* 16M page */
            page_shift = 24;
            /* lowest AVA bit must be 0 for 16M pages */
            if (pteh & 0x80) {
                return H_PARAMETER;
            }
        } else {
            return H_PARAMETER;
        }
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << page_shift) - 1);

    if (raddr < spapr->ram_limit) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        if ((ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M)) != HPTE64_R_I) {
            return H_PARAMETER;
        }
    }

    pteh &= ~0x60ULL;

    if ((pte_index * HASH_PTE_SIZE_64) & ~env->htab_mask) {
        return H_PARAMETER;
    }
    if (likely((flags & H_EXACT) == 0)) {
        pte_index &= ~7ULL;
        hpte = pte_index * HASH_PTE_SIZE_64;
        for (i = 0; ; ++i) {
            if (i == 8) {
                return H_PTEG_FULL;
            }
            if ((ppc_hash64_load_hpte0(env, hpte) & HPTE64_V_VALID) == 0) {
                break;
            }
            hpte += HASH_PTE_SIZE_64;
        }
    } else {
        i = 0;
        hpte = pte_index * HASH_PTE_SIZE_64;
        if (ppc_hash64_load_hpte0(env, hpte) & HPTE64_V_VALID) {
            return H_PTEG_FULL;
        }
    }
    ppc_hash64_store_hpte1(env, hpte, ptel);
    /* eieio();  FIXME: need some sort of barrier for smp? */
    ppc_hash64_store_hpte0(env, hpte, pteh);

    args[0] = pte_index + i;
    return H_SUCCESS;
}

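/* Internal result codes for remove_hpte(), mapped to hypercall return
 * values by H_REMOVE and H_BULK_REMOVE. */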
enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
};

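/* Invalidate one HPTE if it is valid and matches the H_AVPN/H_ANDCOND
 * conditions in flags. The previous contents are returned through
 * *vp and *rp, and the corresponding TLB entry is flushed. */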
static target_ulong remove_hpte(CPUPPCState *env, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    hwaddr hpte;
    target_ulong v, r, rb;

    if ((ptex * HASH_PTE_SIZE_64) & ~env->htab_mask) {
        return REMOVE_PARM;
    }

    hpte = ptex * HASH_PTE_SIZE_64;

    v = ppc_hash64_load_hpte0(env, hpte);
    r = ppc_hash64_load_hpte1(env, hpte);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    ppc_hash64_store_hpte0(env, hpte, 0);
    rb = compute_tlbie_rb(v, r, ptex);
    ppc_tlb_invalidate_one(env, rb);
    return REMOVE_SUCCESS;
}

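/* H_REMOVE: remove a single HPTE, returning its old contents in
 * args[0] and args[1] on success. */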
static target_ulong h_remove(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    int ret;

    ret = remove_hpte(env, pte_index, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    assert(0);
}

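/* Bit fields of the translation specifier doublewords (the tsh/tsl
 * pairs) processed by H_BULK_REMOVE. */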
#define H_BULK_REMOVE_TYPE             0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST        0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE       0x8000000000000000ULL
#define   H_BULK_REMOVE_END            0xc000000000000000ULL
#define H_BULK_REMOVE_CODE             0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS        0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND      0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM           0x2000000000000000ULL
#define   H_BULK_REMOVE_HW             0x3000000000000000ULL
#define H_BULK_REMOVE_RC               0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS            0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE       0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND        0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN           0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX             0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH        4

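/* H_BULK_REMOVE: process up to H_BULK_REMOVE_MAX_BATCH translation
 * specifiers from args[], removing each requested HPTE and writing a
 * response code (and the old R/C bits) back into the high doubleword. */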
static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    int i;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(env, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            return H_PARAMETER;

        case REMOVE_HW:
            return H_HARDWARE;
        }
    }

    return H_SUCCESS;
}

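/* H_PROTECT: update the protection and key bits of an existing HPTE.
 * The entry is temporarily marked invalid and the TLB entry flushed
 * before the new second doubleword is written. */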
static target_ulong h_protect(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    hwaddr hpte;
    target_ulong v, r, rb;

    if ((pte_index * HASH_PTE_SIZE_64) & ~env->htab_mask) {
        return H_PARAMETER;
    }

    hpte = pte_index * HASH_PTE_SIZE_64;

    v = ppc_hash64_load_hpte0(env, hpte);
    r = ppc_hash64_load_hpte1(env, hpte);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    rb = compute_tlbie_rb(v, r, pte_index);
    ppc_hash64_store_hpte0(env, hpte, v & ~HPTE64_V_VALID);
    ppc_tlb_invalidate_one(env, rb);
    ppc_hash64_store_hpte1(env, hpte, r);
    /* Don't need a memory barrier, due to qemu's global lock */
    ppc_hash64_store_hpte0(env, hpte, v);
    return H_SUCCESS;
}

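/* H_READ: copy one HPTE (or four consecutive HPTEs with H_READ_4)
 * from the hash table into the return arguments. */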
static target_ulong h_read(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    uint8_t *hpte;
    int i, ridx, n_entries = 1;

    if ((pte_index * HASH_PTE_SIZE_64) & ~env->htab_mask) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        pte_index &= ~(3ULL);
        n_entries = 4;
    }

    hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);

    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ldq_p(hpte);
        args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2));
        hpte += HASH_PTE_SIZE_64;
    }

    return H_SUCCESS;
}

static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                               target_ulong opcode, target_ulong *args)
{
    /* FIXME: actually implement this */
    return H_HARDWARE;
}

#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

#define VPA_MIN_SIZE           640
#define VPA_SIZE_OFFSET        0x4
#define VPA_SHARED_PROC_OFFSET 0x9
#define VPA_SHARED_PROC_VAL    0x2

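/* Register a Virtual Processor Area. The VPA must be cache-line
 * aligned, at least VPA_MIN_SIZE bytes long, and must not cross a 4K
 * page boundary; the shared-processor byte is set once it is accepted. */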
static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
{
    uint16_t size;
    uint8_t tmp;

    if (vpa == 0) {
        hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
        return H_HARDWARE;
    }

    if (vpa % env->dcache_line_size) {
        return H_PARAMETER;
    }
    /* FIXME: bounds check the address */

    size = lduw_be_phys(vpa + 0x4);

    if (size < VPA_MIN_SIZE) {
        return H_PARAMETER;
    }

    /* VPA is not allowed to cross a page boundary */
    if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    env->vpa_addr = vpa;

    tmp = ldub_phys(env->vpa_addr + VPA_SHARED_PROC_OFFSET);
    tmp |= VPA_SHARED_PROC_VAL;
    stb_phys(env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);

    return H_SUCCESS;
}

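/* A VPA cannot be deregistered while an SLB shadow buffer or dispatch
 * trace log is still registered for the same virtual processor. */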
static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
{
    if (env->slb_shadow_addr) {
        return H_RESOURCE;
    }

    if (env->dtl_addr) {
        return H_RESOURCE;
    }

    env->vpa_addr = 0;
    return H_SUCCESS;
}

static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(addr + 0x4);
    if (size < 0x8) {
        return H_PARAMETER;
    }

    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->slb_shadow_addr = addr;
    env->slb_shadow_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    env->slb_shadow_addr = 0;
    env->slb_shadow_size = 0;
    return H_SUCCESS;
}

static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
{
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(addr + 0x4);

    if (size < 48) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->dtl_addr = addr;
    env->dtl_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr)
{
    env->dtl_addr = 0;
    env->dtl_size = 0;

    return H_SUCCESS;
}

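/* H_REGISTER_VPA: register or deregister the VPA, SLB shadow buffer or
 * dispatch trace log of the virtual processor selected by procno,
 * depending on the flags argument. */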
static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    target_ulong vpa = args[2];
    target_ulong ret = H_PARAMETER;
    CPUPPCState *tenv;
    CPUState *tcpu;

    tcpu = qemu_get_cpu(procno);
    if (!tcpu) {
        return H_PARAMETER;
    }
    tenv = tcpu->env_ptr;

    switch (flags) {
    case FLAGS_REGISTER_VPA:
        ret = register_vpa(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_VPA:
        ret = deregister_vpa(tenv, vpa);
        break;

    case FLAGS_REGISTER_SLBSHADOW:
        ret = register_slb_shadow(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_SLBSHADOW:
        ret = deregister_slb_shadow(tenv, vpa);
        break;

    case FLAGS_REGISTER_DTL:
        ret = register_dtl(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_DTL:
        ret = deregister_dtl(tenv, vpa);
        break;
    }

    return ret;
}

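/* H_CEDE: enable external interrupts and, if no work is pending, halt
 * the vCPU until it is woken by an interrupt. */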
static target_ulong h_cede(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);
    if (!cpu_has_work(cs)) {
        cs->halted = 1;
        env->exception_index = EXCP_HLT;
        cs->exit_request = 1;
    }
    return H_SUCCESS;
}

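/* KVMPPC_H_RTAS: private hypercall used to forward RTAS calls from the
 * guest; the address of the RTAS argument buffer is passed in args[0]. */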
static target_ulong h_rtas(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong rtas_r3 = args[0];
    uint32_t token = ldl_be_phys(rtas_r3);
    uint32_t nargs = ldl_be_phys(rtas_r3 + 4);
    uint32_t nret = ldl_be_phys(rtas_r3 + 8);

    return spapr_rtas_call(spapr, token, nargs, rtas_r3 + 12,
                           nret, rtas_r3 + 12 + 4*nargs);
}

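/* Debugger/SLOF hcalls: load or store a 1, 2, 4 or 8 byte value at a
 * guest physical (logical) address. */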
static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong size = args[0];
    target_ulong addr = args[1];

    switch (size) {
    case 1:
        args[0] = ldub_phys(addr);
        return H_SUCCESS;
    case 2:
        args[0] = lduw_phys(addr);
        return H_SUCCESS;
    case 4:
        args[0] = ldl_phys(addr);
        return H_SUCCESS;
    case 8:
        args[0] = ldq_phys(addr);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    target_ulong size = args[0];
    target_ulong addr = args[1];
    target_ulong val  = args[2];

    switch (size) {
    case 1:
        stb_phys(addr, val);
        return H_SUCCESS;
    case 2:
        stw_phys(addr, val);
        return H_SUCCESS;
    case 4:
        stl_phys(addr, val);
        return H_SUCCESS;
    case 8:
        stq_phys(addr, val);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

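/* KVMPPC_H_LOGICAL_MEMOP: copy (or bitwise-invert while copying) a
 * block of guest physical memory element by element, walking backwards
 * when the source and destination ranges overlap. */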
static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    target_ulong dst   = args[0]; /* Destination address */
    target_ulong src   = args[1]; /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op    = args[4]; /* 0 = copy, 1 = invert */
    uint64_t tmp;
    unsigned int mask = (1 << esize) - 1;
    int step = 1 << esize;

    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    if (dst >= src && dst < (src + (count << esize))) {
        dst = dst + ((count - 1) << esize);
        src = src + ((count - 1) << esize);
        step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0:
            tmp = ldub_phys(src);
            break;
        case 1:
            tmp = lduw_phys(src);
            break;
        case 2:
            tmp = ldl_phys(src);
            break;
        case 3:
            tmp = ldq_phys(src);
            break;
        default:
            return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;
        }
        switch (esize) {
        case 0:
            stb_phys(dst, tmp);
            break;
        case 1:
            stw_phys(dst, tmp);
            break;
        case 2:
            stl_phys(dst, tmp);
            break;
        case 3:
            stq_phys(dst, tmp);
            break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}

static target_ulong h_logical_icbi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

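/* Dispatch tables: one slot per PAPR hcall opcode (opcodes are
 * multiples of 4) plus a separate table for the qemu/KVM-PPC private
 * hcall range. */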
static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];

void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
{
    spapr_hcall_fn *slot;

    if (opcode <= MAX_HCALL_OPCODE) {
        assert((opcode & 0x3) == 0);

        slot = &papr_hypercall_table[opcode / 4];
    } else {
        assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));

        slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
    }

    assert(!(*slot));
    *slot = fn;
}

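/* Main hcall entry point: look up the handler for opcode in the
 * appropriate table and call it, or return H_FUNCTION if none is
 * registered. */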
target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
                             target_ulong *args)
{
    if ((opcode <= MAX_HCALL_OPCODE)
        && ((opcode & 0x3) == 0)) {
        spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    } else if ((opcode >= KVMPPC_HCALL_BASE) &&
               (opcode <= KVMPPC_HCALL_MAX)) {
        spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    }

    hcall_dprintf("Unimplemented hcall 0x" TARGET_FMT_lx "\n", opcode);
    return H_FUNCTION;
}

static void hypercall_register_types(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);

    /* hcall-dabr */
    spapr_register_hypercall(H_SET_DABR, h_set_dabr);

    /* hcall-splpar */
    spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
    spapr_register_hypercall(H_CEDE, h_cede);

    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate
     * here between the "CI" and the "CACHE" variants, they will use whatever
     * mapping attributes qemu is using. When using KVM, the kernel will
     * enforce the attributes more strongly
     */
    spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
    spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);

    /* qemu/KVM-PPC specific hcalls */
    spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);
}

type_init(hypercall_register_types)