/* qemu/hw/ppc/spapr_hcall.c — sPAPR hypercall implementations */
   1#include "qemu/osdep.h"
   2#include "qapi/error.h"
   3#include "sysemu/sysemu.h"
   4#include "cpu.h"
   5#include "helper_regs.h"
   6#include "hw/ppc/spapr.h"
   7#include "mmu-hash64.h"
   8#include "cpu-models.h"
   9#include "trace.h"
  10#include "kvm_ppc.h"
  11
/* Arguments for do_spr_sync(), passed through run_on_cpu(): the SPR
 * @spr on vCPU @cs is updated to (old & ~mask) | value. */
struct SPRSyncState {
    CPUState *cs;        /* vCPU whose SPR is modified */
    int spr;             /* SPR number */
    target_ulong value;  /* bits to OR in (caller pre-masks as needed) */
    target_ulong mask;   /* bits to clear first */
};
  18
  19static void do_spr_sync(void *arg)
  20{
  21    struct SPRSyncState *s = arg;
  22    PowerPCCPU *cpu = POWERPC_CPU(s->cs);
  23    CPUPPCState *env = &cpu->env;
  24
  25    cpu_synchronize_state(s->cs);
  26    env->spr[s->spr] &= ~s->mask;
  27    env->spr[s->spr] |= s->value;
  28}
  29
  30static void set_spr(CPUState *cs, int spr, target_ulong value,
  31                    target_ulong mask)
  32{
  33    struct SPRSyncState s = {
  34        .cs = cs,
  35        .spr = spr,
  36        .value = value,
  37        .mask = mask
  38    };
  39    run_on_cpu(cs, do_spr_sync, &s);
  40}
  41
  42static bool has_spr(PowerPCCPU *cpu, int spr)
  43{
  44    /* We can test whether the SPR is defined by checking for a valid name */
  45    return cpu->env.spr_cb[spr].name != NULL;
  46}
  47
  48static inline bool valid_pte_index(CPUPPCState *env, target_ulong pte_index)
  49{
  50    /*
  51     * hash value/pteg group index is normalized by htab_mask
  52     */
  53    if (((pte_index & ~7ULL) / HPTES_PER_GROUP) & ~env->htab_mask) {
  54        return false;
  55    }
  56    return true;
  57}
  58
  59static bool is_ram_address(sPAPRMachineState *spapr, hwaddr addr)
  60{
  61    MachineState *machine = MACHINE(spapr);
  62    MemoryHotplugState *hpms = &spapr->hotplug_memory;
  63
  64    if (addr < machine->ram_size) {
  65        return true;
  66    }
  67    if ((addr >= hpms->base)
  68        && ((addr - hpms->base) < memory_region_size(&hpms->mr))) {
  69        return true;
  70    }
  71
  72    return false;
  73}
  74
/*
 * H_ENTER: insert a new entry in the guest's hash page table.
 *
 * args[0] = flags (H_EXACT selects one specific slot)
 * args[1] = PTE index (exact slot, or PTEG base when !H_EXACT)
 * args[2] = pteh (first HPTE doubleword)
 * args[3] = ptel (second HPTE doubleword)
 * On H_SUCCESS, args[0] returns the slot actually used.
 */
static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    unsigned apshift, spshift;
    target_ulong raddr;
    target_ulong index;
    uint64_t token;

    /* Decode the actual page size from the HPTE; 0 means invalid */
    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel, &spshift);
    if (!apshift) {
        /* Bad page size encoding */
        return H_PARAMETER;
    }

    /* Real address the mapping targets, aligned to the page size */
    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);

    if (is_ram_address(spapr, raddr)) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        if ((ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M)) != HPTE64_R_I) {
            return H_PARAMETER;
        }
    }

    /* Clear the software-use bits in the first doubleword */
    pteh &= ~0x60ULL;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    index = 0;
    if (likely((flags & H_EXACT) == 0)) {
        /* No exact slot requested: scan the PTEG for a free slot */
        pte_index &= ~7ULL;
        token = ppc_hash64_start_access(cpu, pte_index);
        for (; index < 8; index++) {
            if (!(ppc_hash64_load_hpte0(cpu, token, index) & HPTE64_V_VALID)) {
                break;
            }
        }
        ppc_hash64_stop_access(cpu, token);
        if (index == 8) {
            return H_PTEG_FULL;
        }
    } else {
        /* H_EXACT: the requested slot must currently be invalid */
        token = ppc_hash64_start_access(cpu, pte_index);
        if (ppc_hash64_load_hpte0(cpu, token, 0) & HPTE64_V_VALID) {
            ppc_hash64_stop_access(cpu, token);
            return H_PTEG_FULL;
        }
        ppc_hash64_stop_access(cpu, token);
    }

    /* Commit the new entry, marking it dirty for migration */
    ppc_hash64_store_hpte(cpu, pte_index + index,
                          pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = pte_index + index;
    return H_SUCCESS;
}
 145
/* Outcome of remove_hpte(); values double as the PAPR return-code
 * field written into H_BULK_REMOVE response words (shifted << 60). */
typedef enum {
    REMOVE_SUCCESS = 0,    /* entry removed */
    REMOVE_NOT_FOUND = 1,  /* no valid/matching entry */
    REMOVE_PARM = 2,       /* bad PTE index */
    REMOVE_HW = 3,         /* hardware failure */
} RemoveResult;
 152
/*
 * Remove the HPTE at @ptex if it is valid and satisfies the H_AVPN /
 * H_ANDCOND conditions in @flags against @avpn.  On success the old
 * doublewords are returned through @vp/@rp and the stale translation
 * is flushed from the TLB.
 */
static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    CPUPPCState *env = &cpu->env;
    uint64_t token;
    target_ulong v, r;

    if (!valid_pte_index(env, ptex)) {
        return REMOVE_PARM;
    }

    token = ppc_hash64_start_access(cpu, ptex);
    v = ppc_hash64_load_hpte0(cpu, token, 0);
    r = ppc_hash64_load_hpte1(cpu, token, 0);
    ppc_hash64_stop_access(cpu, token);

    /* ~0x7f strips the low software/B bits before the AVPN compare */
    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    /* Invalidate the entry first, then flush the old translation */
    ppc_hash64_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    return REMOVE_SUCCESS;
}
 182
 183static target_ulong h_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 184                             target_ulong opcode, target_ulong *args)
 185{
 186    target_ulong flags = args[0];
 187    target_ulong pte_index = args[1];
 188    target_ulong avpn = args[2];
 189    RemoveResult ret;
 190
 191    ret = remove_hpte(cpu, pte_index, avpn, flags,
 192                      &args[0], &args[1]);
 193
 194    switch (ret) {
 195    case REMOVE_SUCCESS:
 196        return H_SUCCESS;
 197
 198    case REMOVE_NOT_FOUND:
 199        return H_NOT_FOUND;
 200
 201    case REMOVE_PARM:
 202        return H_PARAMETER;
 203
 204    case REMOVE_HW:
 205        return H_HARDWARE;
 206    }
 207
 208    g_assert_not_reached();
 209}
 210
/* Field layout of each translation-specifier high word (tsh) passed
 * to H_BULK_REMOVE: request/response type, result code, remove flags
 * and the PTE index, per PAPR. */
#define H_BULK_REMOVE_TYPE             0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST        0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE       0x8000000000000000ULL
#define   H_BULK_REMOVE_END            0xc000000000000000ULL
#define H_BULK_REMOVE_CODE             0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS        0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND      0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM           0x2000000000000000ULL
#define   H_BULK_REMOVE_HW             0x3000000000000000ULL
#define H_BULK_REMOVE_RC               0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS            0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE       0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND        0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN           0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX             0x00ffffffffffffffULL

/* Up to four (tsh, tsl) pairs fit in the hypercall argument buffer */
#define H_BULK_REMOVE_MAX_BATCH        4
 228
/*
 * H_BULK_REMOVE: process up to four remove requests packed into the
 * argument buffer as (tsh, tsl) pairs.  Each tsh is rewritten in
 * place into a response word; an END marker stops processing early.
 */
static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    int i;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        /* Keep PTEX + flags, flip the type field to "response" */
        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        /* AVPN and ANDCOND are mutually exclusive */
        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        /* >> 26 moves the bulk flag bits down to H_AVPN/H_ANDCOND */
        ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        /* Record the RemoveResult in the tsh code field */
        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            /* Return the removed entry's C and R bits to the guest */
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            return H_PARAMETER;

        case REMOVE_HW:
            return H_HARDWARE;
        }
    }

    return H_SUCCESS;
}
 274
/*
 * H_PROTECT: change the protection (PP/N) and key bits of an existing
 * HPTE.  args[0]=flags carrying the new bits, args[1]=PTE index,
 * args[2]=AVPN for the optional H_AVPN match.
 */
static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    uint64_t token;
    target_ulong v, r;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    token = ppc_hash64_start_access(cpu, pte_index);
    v = ppc_hash64_load_hpte0(cpu, token, 0);
    r = ppc_hash64_load_hpte1(cpu, token, 0);
    ppc_hash64_stop_access(cpu, token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    /* Clear the old protection/key bits, then splice in the new ones
     * from their positions in the flags argument */
    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    /* Invalidate first so no stale translation can be picked up while
     * the entry is being rewritten */
    ppc_hash64_store_hpte(cpu, pte_index,
                          (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, pte_index, v, r);
    /* Don't need a memory barrier, due to qemu's global lock */
    ppc_hash64_store_hpte(cpu, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}
 311
 312static target_ulong h_read(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 313                           target_ulong opcode, target_ulong *args)
 314{
 315    CPUPPCState *env = &cpu->env;
 316    target_ulong flags = args[0];
 317    target_ulong pte_index = args[1];
 318    uint8_t *hpte;
 319    int i, ridx, n_entries = 1;
 320
 321    if (!valid_pte_index(env, pte_index)) {
 322        return H_PARAMETER;
 323    }
 324
 325    if (flags & H_READ_4) {
 326        /* Clear the two low order bits */
 327        pte_index &= ~(3ULL);
 328        n_entries = 4;
 329    }
 330
 331    hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);
 332
 333    for (i = 0, ridx = 0; i < n_entries; i++) {
 334        args[ridx++] = ldq_p(hpte);
 335        args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2));
 336        hpte += HASH_PTE_SIZE_64;
 337    }
 338
 339    return H_SUCCESS;
 340}
 341
 342static target_ulong h_set_sprg0(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 343                                target_ulong opcode, target_ulong *args)
 344{
 345    cpu_synchronize_state(CPU(cpu));
 346    cpu->env.spr[SPR_SPRG0] = args[0];
 347
 348    return H_SUCCESS;
 349}
 350
 351static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 352                               target_ulong opcode, target_ulong *args)
 353{
 354    if (!has_spr(cpu, SPR_DABR)) {
 355        return H_HARDWARE;              /* DABR register not available */
 356    }
 357    cpu_synchronize_state(CPU(cpu));
 358
 359    if (has_spr(cpu, SPR_DABRX)) {
 360        cpu->env.spr[SPR_DABRX] = 0x3;  /* Use Problem and Privileged state */
 361    } else if (!(args[0] & 0x4)) {      /* Breakpoint Translation set? */
 362        return H_RESERVED_DABR;
 363    }
 364
 365    cpu->env.spr[SPR_DABR] = args[0];
 366    return H_SUCCESS;
 367}
 368
 369static target_ulong h_set_xdabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 370                                target_ulong opcode, target_ulong *args)
 371{
 372    target_ulong dabrx = args[1];
 373
 374    if (!has_spr(cpu, SPR_DABR) || !has_spr(cpu, SPR_DABRX)) {
 375        return H_HARDWARE;
 376    }
 377
 378    if ((dabrx & ~0xfULL) != 0 || (dabrx & H_DABRX_HYPERVISOR) != 0
 379        || (dabrx & (H_DABRX_KERNEL | H_DABRX_USER)) == 0) {
 380        return H_PARAMETER;
 381    }
 382
 383    cpu_synchronize_state(CPU(cpu));
 384    cpu->env.spr[SPR_DABRX] = dabrx;
 385    cpu->env.spr[SPR_DABR] = args[0];
 386
 387    return H_SUCCESS;
 388}
 389
 390static target_ulong h_page_init(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 391                                target_ulong opcode, target_ulong *args)
 392{
 393    target_ulong flags = args[0];
 394    hwaddr dst = args[1];
 395    hwaddr src = args[2];
 396    hwaddr len = TARGET_PAGE_SIZE;
 397    uint8_t *pdst, *psrc;
 398    target_long ret = H_SUCCESS;
 399
 400    if (flags & ~(H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE
 401                  | H_COPY_PAGE | H_ZERO_PAGE)) {
 402        qemu_log_mask(LOG_UNIMP, "h_page_init: Bad flags (" TARGET_FMT_lx "\n",
 403                      flags);
 404        return H_PARAMETER;
 405    }
 406
 407    /* Map-in destination */
 408    if (!is_ram_address(spapr, dst) || (dst & ~TARGET_PAGE_MASK) != 0) {
 409        return H_PARAMETER;
 410    }
 411    pdst = cpu_physical_memory_map(dst, &len, 1);
 412    if (!pdst || len != TARGET_PAGE_SIZE) {
 413        return H_PARAMETER;
 414    }
 415
 416    if (flags & H_COPY_PAGE) {
 417        /* Map-in source, copy to destination, and unmap source again */
 418        if (!is_ram_address(spapr, src) || (src & ~TARGET_PAGE_MASK) != 0) {
 419            ret = H_PARAMETER;
 420            goto unmap_out;
 421        }
 422        psrc = cpu_physical_memory_map(src, &len, 0);
 423        if (!psrc || len != TARGET_PAGE_SIZE) {
 424            ret = H_PARAMETER;
 425            goto unmap_out;
 426        }
 427        memcpy(pdst, psrc, len);
 428        cpu_physical_memory_unmap(psrc, len, 0, len);
 429    } else if (flags & H_ZERO_PAGE) {
 430        memset(pdst, 0, len);          /* Just clear the destination page */
 431    }
 432
 433    if (kvm_enabled() && (flags & H_ICACHE_SYNCHRONIZE) != 0) {
 434        kvmppc_dcbst_range(cpu, pdst, len);
 435    }
 436    if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) {
 437        if (kvm_enabled()) {
 438            kvmppc_icbi_range(cpu, pdst, len);
 439        } else {
 440            tb_flush(CPU(cpu));
 441        }
 442    }
 443
 444unmap_out:
 445    cpu_physical_memory_unmap(pdst, TARGET_PAGE_SIZE, 1, len);
 446    return ret;
 447}
 448
/* args[0] values accepted by H_REGISTER_VPA */
#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

/* Minimum legal VPA size, plus offsets of the fields we touch */
#define VPA_MIN_SIZE           640
#define VPA_SIZE_OFFSET        0x4
#define VPA_SHARED_PROC_OFFSET 0x9
#define VPA_SHARED_PROC_VAL    0x2
 460
 461static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
 462{
 463    CPUState *cs = CPU(ppc_env_get_cpu(env));
 464    uint16_t size;
 465    uint8_t tmp;
 466
 467    if (vpa == 0) {
 468        hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
 469        return H_HARDWARE;
 470    }
 471
 472    if (vpa % env->dcache_line_size) {
 473        return H_PARAMETER;
 474    }
 475    /* FIXME: bounds check the address */
 476
 477    size = lduw_be_phys(cs->as, vpa + 0x4);
 478
 479    if (size < VPA_MIN_SIZE) {
 480        return H_PARAMETER;
 481    }
 482
 483    /* VPA is not allowed to cross a page boundary */
 484    if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
 485        return H_PARAMETER;
 486    }
 487
 488    env->vpa_addr = vpa;
 489
 490    tmp = ldub_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET);
 491    tmp |= VPA_SHARED_PROC_VAL;
 492    stb_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);
 493
 494    return H_SUCCESS;
 495}
 496
 497static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
 498{
 499    if (env->slb_shadow_addr) {
 500        return H_RESOURCE;
 501    }
 502
 503    if (env->dtl_addr) {
 504        return H_RESOURCE;
 505    }
 506
 507    env->vpa_addr = 0;
 508    return H_SUCCESS;
 509}
 510
 511static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
 512{
 513    CPUState *cs = CPU(ppc_env_get_cpu(env));
 514    uint32_t size;
 515
 516    if (addr == 0) {
 517        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
 518        return H_HARDWARE;
 519    }
 520
 521    size = ldl_be_phys(cs->as, addr + 0x4);
 522    if (size < 0x8) {
 523        return H_PARAMETER;
 524    }
 525
 526    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
 527        return H_PARAMETER;
 528    }
 529
 530    if (!env->vpa_addr) {
 531        return H_RESOURCE;
 532    }
 533
 534    env->slb_shadow_addr = addr;
 535    env->slb_shadow_size = size;
 536
 537    return H_SUCCESS;
 538}
 539
 540static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
 541{
 542    env->slb_shadow_addr = 0;
 543    env->slb_shadow_size = 0;
 544    return H_SUCCESS;
 545}
 546
 547static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
 548{
 549    CPUState *cs = CPU(ppc_env_get_cpu(env));
 550    uint32_t size;
 551
 552    if (addr == 0) {
 553        hcall_dprintf("Can't cope with DTL at logical 0\n");
 554        return H_HARDWARE;
 555    }
 556
 557    size = ldl_be_phys(cs->as, addr + 0x4);
 558
 559    if (size < 48) {
 560        return H_PARAMETER;
 561    }
 562
 563    if (!env->vpa_addr) {
 564        return H_RESOURCE;
 565    }
 566
 567    env->dtl_addr = addr;
 568    env->dtl_size = size;
 569
 570    return H_SUCCESS;
 571}
 572
 573static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr)
 574{
 575    env->dtl_addr = 0;
 576    env->dtl_size = 0;
 577
 578    return H_SUCCESS;
 579}
 580
 581static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 582                                   target_ulong opcode, target_ulong *args)
 583{
 584    target_ulong flags = args[0];
 585    target_ulong procno = args[1];
 586    target_ulong vpa = args[2];
 587    target_ulong ret = H_PARAMETER;
 588    CPUPPCState *tenv;
 589    PowerPCCPU *tcpu;
 590
 591    tcpu = ppc_get_vcpu_by_dt_id(procno);
 592    if (!tcpu) {
 593        return H_PARAMETER;
 594    }
 595    tenv = &tcpu->env;
 596
 597    switch (flags) {
 598    case FLAGS_REGISTER_VPA:
 599        ret = register_vpa(tenv, vpa);
 600        break;
 601
 602    case FLAGS_DEREGISTER_VPA:
 603        ret = deregister_vpa(tenv, vpa);
 604        break;
 605
 606    case FLAGS_REGISTER_SLBSHADOW:
 607        ret = register_slb_shadow(tenv, vpa);
 608        break;
 609
 610    case FLAGS_DEREGISTER_SLBSHADOW:
 611        ret = deregister_slb_shadow(tenv, vpa);
 612        break;
 613
 614    case FLAGS_REGISTER_DTL:
 615        ret = register_dtl(tenv, vpa);
 616        break;
 617
 618    case FLAGS_DEREGISTER_DTL:
 619        ret = deregister_dtl(tenv, vpa);
 620        break;
 621    }
 622
 623    return ret;
 624}
 625
/*
 * H_CEDE: the vCPU yields the processor until an interrupt arrives.
 * MSR[EE] is enabled so pending interrupts can wake it.
 */
static target_ulong h_cede(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);   /* MSR changed: recompute hflags */
    if (!cpu_has_work(cs)) {
        /* Nothing pending: halt and leave the execution loop */
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cs->exit_request = 1;
    }
    return H_SUCCESS;
}
 641
 642static target_ulong h_rtas(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 643                           target_ulong opcode, target_ulong *args)
 644{
 645    target_ulong rtas_r3 = args[0];
 646    uint32_t token = rtas_ld(rtas_r3, 0);
 647    uint32_t nargs = rtas_ld(rtas_r3, 1);
 648    uint32_t nret = rtas_ld(rtas_r3, 2);
 649
 650    return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
 651                           nret, rtas_r3 + 12 + 4*nargs);
 652}
 653
 654static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 655                                   target_ulong opcode, target_ulong *args)
 656{
 657    CPUState *cs = CPU(cpu);
 658    target_ulong size = args[0];
 659    target_ulong addr = args[1];
 660
 661    switch (size) {
 662    case 1:
 663        args[0] = ldub_phys(cs->as, addr);
 664        return H_SUCCESS;
 665    case 2:
 666        args[0] = lduw_phys(cs->as, addr);
 667        return H_SUCCESS;
 668    case 4:
 669        args[0] = ldl_phys(cs->as, addr);
 670        return H_SUCCESS;
 671    case 8:
 672        args[0] = ldq_phys(cs->as, addr);
 673        return H_SUCCESS;
 674    }
 675    return H_PARAMETER;
 676}
 677
 678static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 679                                    target_ulong opcode, target_ulong *args)
 680{
 681    CPUState *cs = CPU(cpu);
 682
 683    target_ulong size = args[0];
 684    target_ulong addr = args[1];
 685    target_ulong val  = args[2];
 686
 687    switch (size) {
 688    case 1:
 689        stb_phys(cs->as, addr, val);
 690        return H_SUCCESS;
 691    case 2:
 692        stw_phys(cs->as, addr, val);
 693        return H_SUCCESS;
 694    case 4:
 695        stl_phys(cs->as, addr, val);
 696        return H_SUCCESS;
 697    case 8:
 698        stq_phys(cs->as, addr, val);
 699        return H_SUCCESS;
 700    }
 701    return H_PARAMETER;
 702}
 703
/*
 * KVMPPC_H_LOGICAL_MEMOP: copy (op=0) or invert-copy (op=1) @count
 * elements of 2^esize bytes from @src to @dst in guest physical
 * memory, handling overlapping ranges like memmove.
 */
static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong dst   = args[0]; /* Destination address */
    target_ulong src   = args[1]; /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op    = args[4]; /* 0 = copy, 1 = invert */
    uint64_t tmp;
    unsigned int mask = (1 << esize) - 1;   /* alignment mask */
    int step = 1 << esize;                  /* bytes per element */

    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    /* Both addresses must be element-aligned; op must be 0 or 1 */
    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    /* If the destination overlaps the tail of the source, copy
     * backwards so data is not clobbered before it is read */
    if (dst >= src && dst < (src + (count << esize))) {
            dst = dst + ((count - 1) << esize);
            src = src + ((count - 1) << esize);
            step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0:
            tmp = ldub_phys(cs->as, src);
            break;
        case 1:
            tmp = lduw_phys(cs->as, src);
            break;
        case 2:
            tmp = ldl_phys(cs->as, src);
            break;
        case 3:
            tmp = ldq_phys(cs->as, src);
            break;
        default:
            return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;     /* invert mode */
        }
        switch (esize) {
        case 0:
            stb_phys(cs->as, dst, tmp);
            break;
        case 1:
            stw_phys(cs->as, dst, tmp);
            break;
        case 2:
            stl_phys(cs->as, dst, tmp);
            break;
        case 3:
            stq_phys(cs->as, dst, tmp);
            break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}
 772
 773static target_ulong h_logical_icbi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 774                                   target_ulong opcode, target_ulong *args)
 775{
 776    /* Nothing to do on emulation, KVM will trap this in the kernel */
 777    return H_SUCCESS;
 778}
 779
 780static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 781                                   target_ulong opcode, target_ulong *args)
 782{
 783    /* Nothing to do on emulation, KVM will trap this in the kernel */
 784    return H_SUCCESS;
 785}
 786
 787static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
 788                                           target_ulong mflags,
 789                                           target_ulong value1,
 790                                           target_ulong value2)
 791{
 792    CPUState *cs;
 793
 794    if (value1) {
 795        return H_P3;
 796    }
 797    if (value2) {
 798        return H_P4;
 799    }
 800
 801    switch (mflags) {
 802    case H_SET_MODE_ENDIAN_BIG:
 803        CPU_FOREACH(cs) {
 804            set_spr(cs, SPR_LPCR, 0, LPCR_ILE);
 805        }
 806        spapr_pci_switch_vga(true);
 807        return H_SUCCESS;
 808
 809    case H_SET_MODE_ENDIAN_LITTLE:
 810        CPU_FOREACH(cs) {
 811            set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE);
 812        }
 813        spapr_pci_switch_vga(false);
 814        return H_SUCCESS;
 815    }
 816
 817    return H_UNSUPPORTED_FLAG;
 818}
 819
 820static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
 821                                                        target_ulong mflags,
 822                                                        target_ulong value1,
 823                                                        target_ulong value2)
 824{
 825    CPUState *cs;
 826    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
 827
 828    if (!(pcc->insns_flags2 & PPC2_ISA207S)) {
 829        return H_P2;
 830    }
 831    if (value1) {
 832        return H_P3;
 833    }
 834    if (value2) {
 835        return H_P4;
 836    }
 837
 838    if (mflags == AIL_RESERVED) {
 839        return H_UNSUPPORTED_FLAG;
 840    }
 841
 842    CPU_FOREACH(cs) {
 843        set_spr(cs, SPR_LPCR, mflags << LPCR_AIL_SHIFT, LPCR_AIL);
 844    }
 845
 846    return H_SUCCESS;
 847}
 848
 849static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 850                               target_ulong opcode, target_ulong *args)
 851{
 852    target_ulong resource = args[1];
 853    target_ulong ret = H_P2;
 854
 855    switch (resource) {
 856    case H_SET_MODE_RESOURCE_LE:
 857        ret = h_set_mode_resource_le(cpu, args[0], args[2], args[3]);
 858        break;
 859    case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
 860        ret = h_set_mode_resource_addr_trans_mode(cpu, args[0],
 861                                                  args[2], args[3]);
 862        break;
 863    }
 864
 865    return ret;
 866}
 867
 868/*
 869 * Return the offset to the requested option vector @vector in the
 870 * option vector table @table.
 871 */
 872static target_ulong cas_get_option_vector(int vector, target_ulong table)
 873{
 874    int i;
 875    char nr_vectors, nr_entries;
 876
 877    if (!table) {
 878        return 0;
 879    }
 880
 881    nr_vectors = (ldl_phys(&address_space_memory, table) >> 24) + 1;
 882    if (!vector || vector > nr_vectors) {
 883        return 0;
 884    }
 885    table++; /* skip nr option vectors */
 886
 887    for (i = 0; i < vector - 1; i++) {
 888        nr_entries = ldl_phys(&address_space_memory, table) >> 24;
 889        table += nr_entries + 2;
 890    }
 891    return table;
 892}
 893
/* Arguments for do_set_compat(), passed through run_on_cpu() */
typedef struct {
    PowerPCCPU *cpu;       /* vCPU to switch */
    uint32_t cpu_version;  /* logical PVR to set compat mode for */
    Error *err;            /* set on failure; checked by the caller */
} SetCompatState;
 899
 900static void do_set_compat(void *arg)
 901{
 902    SetCompatState *s = arg;
 903
 904    cpu_synchronize_state(CPU(s->cpu));
 905    ppc_set_compat(s->cpu, s->cpu_version, &s->err);
 906}
 907
/* Map a logical PVR to a numeric compatibility level (e.g. 2060 for
 * ISA 2.06), or 0 when @cpuver is not a recognised logical PVR. */
#define get_compat_level(cpuver) ( \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_05) ? 2050 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06) ? 2060 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06_PLUS) ? 2061 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_07) ? 2070 : 0)

/* Bit in byte 2 of option vector 5: guest supports dynamic
 * reconfiguration (hotplug) memory */
#define OV5_DRCONF_MEMORY 0x20
 915
/*
 * H_CLIENT_ARCHITECTURE_SUPPORT (CAS): negotiate the CPU
 * compatibility level and option vectors with the guest.  args[0]
 * points at the guest's PVR list, followed by the option vector
 * table; args[1]/args[2] describe the response buffer.
 */
static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
                                                  sPAPRMachineState *spapr,
                                                  target_ulong opcode,
                                                  target_ulong *args)
{
    target_ulong list = ppc64_phys_to_real(args[0]);
    target_ulong ov_table, ov5;
    PowerPCCPUClass *pcc_ = POWERPC_CPU_GET_CLASS(cpu_);
    CPUState *cs;
    bool cpu_match = false, cpu_update = true, memory_update = false;
    unsigned old_cpu_version = cpu_->cpu_version;
    unsigned compat_lvl = 0, cpu_version = 0;
    unsigned max_lvl = get_compat_level(cpu_->max_compat);
    int counter;
    char ov5_byte2;

    /* Parse PVR list */
    for (counter = 0; counter < 512; ++counter) {
        uint32_t pvr, pvr_mask;

        /* Each record is a (mask, PVR) pair of big-endian words */
        pvr_mask = ldl_be_phys(&address_space_memory, list);
        list += 4;
        pvr = ldl_be_phys(&address_space_memory, list);
        list += 4;

        trace_spapr_cas_pvr_try(pvr);
        /* Prefer a raw match of the real PVR when no compat cap set */
        if (!max_lvl &&
            ((cpu_->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask))) {
            cpu_match = true;
            cpu_version = 0;
        } else if (pvr == cpu_->cpu_version) {
            /* Exact match of the current compat mode */
            cpu_match = true;
            cpu_version = cpu_->cpu_version;
        } else if (!cpu_match) {
            /* If it is a logical PVR, try to determine the highest level */
            unsigned lvl = get_compat_level(pvr);
            if (lvl) {
                bool is205 = (pcc_->pcr_mask & PCR_COMPAT_2_05) &&
                     (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_05));
                bool is206 = (pcc_->pcr_mask & PCR_COMPAT_2_06) &&
                    ((lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06)) ||
                    (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06_PLUS)));

                if (is205 || is206) {
                    if (!max_lvl) {
                        /* User did not set the level, choose the highest */
                        if (compat_lvl <= lvl) {
                            compat_lvl = lvl;
                            cpu_version = pvr;
                        }
                    } else if (max_lvl >= lvl) {
                        /* User chose the level, don't set higher than this */
                        compat_lvl = lvl;
                        cpu_version = pvr;
                    }
                }
            }
        }
        /* Terminator record */
        if (~pvr_mask & pvr) {
            break;
        }
    }

    /* Parsing finished */
    trace_spapr_cas_pvr(cpu_->cpu_version, cpu_match,
                        cpu_version, pcc_->pcr_mask);

    /* Update CPUs */
    if (old_cpu_version != cpu_version) {
        CPU_FOREACH(cs) {
            SetCompatState s = {
                .cpu = POWERPC_CPU(cs),
                .cpu_version = cpu_version,
                .err = NULL,
            };

            /* ppc_set_compat() must run on the target vCPU's thread */
            run_on_cpu(cs, do_set_compat, &s);

            if (s.err) {
                error_report_err(s.err);
                return H_HARDWARE;
            }
        }
    }

    if (!cpu_version) {
        cpu_update = false;
    }

    /* For the future use: here @ov_table points to the first option vector */
    ov_table = list;

    ov5 = cas_get_option_vector(5, ov_table);
    if (!ov5) {
        return H_SUCCESS;
    }

    /* @list now points to OV 5 */
    ov5_byte2 = ldub_phys(&address_space_memory, ov5 + 2);
    if (ov5_byte2 & OV5_DRCONF_MEMORY) {
        memory_update = true;
    }

    /* Rebuild the device tree response; reset if it changed */
    if (spapr_h_cas_compose_response(spapr, args[1], args[2],
                                     cpu_update, memory_update)) {
        qemu_system_reset_request();
    }

    return H_SUCCESS;
}
1027
/* Dispatch table for architected PAPR hypercalls; opcodes are multiples
 * of 4 (enforced at registration), so the table is indexed by opcode / 4. */
static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
/* Dispatch table for QEMU/KVM-PPC specific hypercalls, indexed by offset
 * from KVMPPC_HCALL_BASE. */
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];
1030
1031void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
1032{
1033    spapr_hcall_fn *slot;
1034
1035    if (opcode <= MAX_HCALL_OPCODE) {
1036        assert((opcode & 0x3) == 0);
1037
1038        slot = &papr_hypercall_table[opcode / 4];
1039    } else {
1040        assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));
1041
1042        slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
1043    }
1044
1045    assert(!(*slot));
1046    *slot = fn;
1047}
1048
1049target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
1050                             target_ulong *args)
1051{
1052    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
1053
1054    if ((opcode <= MAX_HCALL_OPCODE)
1055        && ((opcode & 0x3) == 0)) {
1056        spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];
1057
1058        if (fn) {
1059            return fn(cpu, spapr, opcode, args);
1060        }
1061    } else if ((opcode >= KVMPPC_HCALL_BASE) &&
1062               (opcode <= KVMPPC_HCALL_MAX)) {
1063        spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
1064
1065        if (fn) {
1066            return fn(cpu, spapr, opcode, args);
1067        }
1068    }
1069
1070    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx "\n",
1071                  opcode);
1072    return H_FUNCTION;
1073}
1074
1075static void hypercall_register_types(void)
1076{
1077    /* hcall-pft */
1078    spapr_register_hypercall(H_ENTER, h_enter);
1079    spapr_register_hypercall(H_REMOVE, h_remove);
1080    spapr_register_hypercall(H_PROTECT, h_protect);
1081    spapr_register_hypercall(H_READ, h_read);
1082
1083    /* hcall-bulk */
1084    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);
1085
1086    /* hcall-splpar */
1087    spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
1088    spapr_register_hypercall(H_CEDE, h_cede);
1089
1090    /* processor register resource access h-calls */
1091    spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0);
1092    spapr_register_hypercall(H_SET_DABR, h_set_dabr);
1093    spapr_register_hypercall(H_SET_XDABR, h_set_xdabr);
1094    spapr_register_hypercall(H_PAGE_INIT, h_page_init);
1095    spapr_register_hypercall(H_SET_MODE, h_set_mode);
1096
1097    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differenciate
1098     * here between the "CI" and the "CACHE" variants, they will use whatever
1099     * mapping attributes qemu is using. When using KVM, the kernel will
1100     * enforce the attributes more strongly
1101     */
1102    spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
1103    spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
1104    spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
1105    spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
1106    spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
1107    spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
1108    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);
1109
1110    /* qemu/KVM-PPC specific hcalls */
1111    spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);
1112
1113    /* ibm,client-architecture-support support */
1114    spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
1115}
1116
1117type_init(hypercall_register_types)
1118