qemu/hw/ppc/spapr_hcall.c
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "sysemu/hw_accel.h"
#include "sysemu/sysemu.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "mmu-hash64.h"
#include "cpu-models.h"
#include "trace.h"
#include "kvm_ppc.h"
#include "hw/ppc/spapr_ovec.h"
#include "mmu-book3s-v3.h"
#include "hw/mem/memory-device.h"

static bool has_spr(PowerPCCPU *cpu, int spr)
{
    /* We can test whether the SPR is defined by checking for a valid name */
    return cpu->env.spr_cb[spr].name != NULL;
}

static inline bool valid_ptex(PowerPCCPU *cpu, target_ulong ptex)
{
    /*
     * hash value/pteg group index is normalized by HPT mask
     */
    if (((ptex & ~7ULL) / HPTES_PER_GROUP) & ~ppc_hash64_hpt_mask(cpu)) {
        return false;
    }
    return true;
}
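
/*
 * Worked example (an illustration, not code from the hypercall path):
 * with HPTES_PER_GROUP == 8, "ptex / 8" is the PTEG index, so a PTEX is
 * valid iff that index has no bits outside ppc_hash64_hpt_mask().  For a
 * 256MiB HPT (2^21 PTEGs of 128 bytes, mask 0x1fffff), ptex 0xfffff8
 * gives PTEG 0x1fffff and passes, while ptex 0x1000000 gives PTEG
 * 0x200000 and fails the check.
 */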

static bool is_ram_address(SpaprMachineState *spapr, hwaddr addr)
{
    MachineState *machine = MACHINE(spapr);
    DeviceMemoryState *dms = machine->device_memory;

    if (addr < machine->ram_size) {
        return true;
    }
    if ((addr >= dms->base)
        && ((addr - dms->base) < memory_region_size(&dms->mr))) {
        return true;
    }

    return false;
}

static target_ulong h_enter(PowerPCCPU *cpu, SpaprMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    unsigned apshift;
    target_ulong raddr;
    target_ulong slot;
    const ppc_hash_pte64_t *hptes;

    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel);
    if (!apshift) {
        /* Bad page size encoding */
        return H_PARAMETER;
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);

    if (is_ram_address(spapr, raddr)) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        target_ulong wimg_flags;
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        wimg_flags = (ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M));

        if (wimg_flags != HPTE64_R_I &&
            wimg_flags != (HPTE64_R_I | HPTE64_R_M)) {
            return H_PARAMETER;
        }
    }

    pteh &= ~0x60ULL;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    slot = ptex & 7ULL;
    ptex = ptex & ~7ULL;

    if (likely((flags & H_EXACT) == 0)) {
        hptes = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
        for (slot = 0; slot < 8; slot++) {
            if (!(ppc_hash64_hpte0(cpu, hptes, slot) & HPTE64_V_VALID)) {
                break;
            }
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
        if (slot == 8) {
            return H_PTEG_FULL;
        }
    } else {
        hptes = ppc_hash64_map_hptes(cpu, ptex + slot, 1);
        if (ppc_hash64_hpte0(cpu, hptes, 0) & HPTE64_V_VALID) {
            ppc_hash64_unmap_hptes(cpu, hptes, ptex + slot, 1);
            return H_PTEG_FULL;
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
    }

    spapr_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = ptex + slot;
    return H_SUCCESS;
}
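
/*
 * A note on the calling convention used above: as with the other
 * handlers in this file, h_enter() receives its inputs (flags, PTEX
 * hint, pteh, ptel) in args[0..3], which the hypercall dispatcher fills
 * from the guest's GPRs, and it returns the slot actually used back
 * through args[0].  Under PAPR the guest passes H_EXACT when the low
 * three bits of the PTEX must be honoured exactly; otherwise any free
 * slot in the addressed PTEG may be used, which is the likely() fast
 * path above.
 */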

typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;

static RemoveResult remove_hpte(PowerPCCPU *cpu,
                                target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!valid_ptex(cpu, ptex)) {
        return REMOVE_PARM;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    spapr_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    return REMOVE_SUCCESS;
}
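
/*
 * remove_hpte() is shared by h_remove() and h_bulk_remove() below.  Per
 * the condition above, H_AVPN removes the entry only if the abbreviated
 * VPN in pte0 matches avpn, while H_ANDCOND removes it only if
 * (pte0 & avpn) is zero.
 */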

static target_ulong h_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(cpu, ptex, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        check_tlb_flush(env, true);
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}

#define H_BULK_REMOVE_TYPE             0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST        0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE       0x8000000000000000ULL
#define   H_BULK_REMOVE_END            0xc000000000000000ULL
#define H_BULK_REMOVE_CODE             0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS        0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND      0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM           0x2000000000000000ULL
#define   H_BULK_REMOVE_HW             0x3000000000000000ULL
#define H_BULK_REMOVE_RC               0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS            0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE       0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND        0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN           0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX             0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH        4
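
/*
 * Each request occupies two consecutive argument doublewords: a
 * "translation specifier" (tsh) and an AVPN/condition word (tsl).  From
 * the masks above, tsh packs the request/response type in bits 63:62,
 * the completion code in bits 61:60, the R/C bits of a removed HPTE in
 * bits 59:58, the H_AVPN/H_ANDCOND flags in bits 57:56, and the PTEX in
 * the low 56 bits.
 */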

static target_ulong h_bulk_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong rc = H_SUCCESS;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            rc = H_PARAMETER;
            goto exit;

        case REMOVE_HW:
            rc = H_HARDWARE;
            goto exit;
        }
    }
 exit:
    check_tlb_flush(env, true);

    return rc;
}

static target_ulong h_protect(PowerPCCPU *cpu, SpaprMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    spapr_store_hpte(cpu, ptex,
                     (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    /* Flush the tlb */
    check_tlb_flush(env, true);
    /* Don't need a memory barrier, due to qemu's global lock */
    spapr_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}

static target_ulong h_read(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    int i, ridx, n_entries = 1;
    const ppc_hash_pte64_t *hptes;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        ptex &= ~(3ULL);
        n_entries = 4;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, n_entries);
    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ppc_hash64_hpte0(cpu, hptes, i);
        args[ridx++] = ppc_hash64_hpte1(cpu, hptes, i);
    }
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, n_entries);

    return H_SUCCESS;
}
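
/*
 * With H_READ_4 the PTEX is rounded down to a multiple of four and four
 * PTE pairs are returned, filling args[0..7]; under PAPR the dispatcher
 * copies these back to the guest's return registers (r4 upwards).
 */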

struct SpaprPendingHpt {
    /* These fields are read-only after initialization */
    int shift;
    QemuThread thread;

    /* These fields are protected by the BQL */
    bool complete;

    /* These fields are private to the preparation thread if
     * !complete, otherwise protected by the BQL */
    int ret;
    void *hpt;
};

static void free_pending_hpt(SpaprPendingHpt *pending)
{
    if (pending->hpt) {
        qemu_vfree(pending->hpt);
    }

    g_free(pending);
}

static void *hpt_prepare_thread(void *opaque)
{
    SpaprPendingHpt *pending = opaque;
    size_t size = 1ULL << pending->shift;

    pending->hpt = qemu_memalign(size, size);
    if (pending->hpt) {
        memset(pending->hpt, 0, size);
        pending->ret = H_SUCCESS;
    } else {
        pending->ret = H_NO_MEM;
    }

    qemu_mutex_lock_iothread();

    if (SPAPR_MACHINE(qdev_get_machine())->pending_hpt == pending) {
        /* Ready to go */
        pending->complete = true;
    } else {
        /* We've been cancelled, clean ourselves up */
        free_pending_hpt(pending);
    }

    qemu_mutex_unlock_iothread();
    return NULL;
}
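
/*
 * The allocation runs outside the BQL; the thread only takes the lock to
 * publish its result.  If h_resize_hpt_prepare() has meanwhile replaced
 * or cancelled this request, spapr->pending_hpt no longer points at it
 * and the thread frees everything itself, which is why
 * cancel_hpt_prepare() below can simply clear the pointer for an
 * incomplete request.
 */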

/* Must be called with BQL held */
static void cancel_hpt_prepare(SpaprMachineState *spapr)
{
    SpaprPendingHpt *pending = spapr->pending_hpt;

    /* Let the thread know it's cancelled */
    spapr->pending_hpt = NULL;

    if (!pending) {
        /* Nothing to do */
        return;
    }

    if (!pending->complete) {
        /* thread will clean itself up */
        return;
    }

    free_pending_hpt(pending);
}

/* Convert a return code from the KVM ioctl()s implementing resize HPT
 * into a PAPR hypercall return code */
static target_ulong resize_hpt_convert_rc(int ret)
{
    if (ret >= 100000) {
        return H_LONG_BUSY_ORDER_100_SEC;
    } else if (ret >= 10000) {
        return H_LONG_BUSY_ORDER_10_SEC;
    } else if (ret >= 1000) {
        return H_LONG_BUSY_ORDER_1_SEC;
    } else if (ret >= 100) {
        return H_LONG_BUSY_ORDER_100_MSEC;
    } else if (ret >= 10) {
        return H_LONG_BUSY_ORDER_10_MSEC;
    } else if (ret > 0) {
        return H_LONG_BUSY_ORDER_1_MSEC;
    }

    switch (ret) {
    case 0:
        return H_SUCCESS;
    case -EPERM:
        return H_AUTHORITY;
    case -EINVAL:
        return H_PARAMETER;
    case -ENXIO:
        return H_CLOSED;
    case -ENOSPC:
        return H_PTEG_FULL;
    case -EBUSY:
        return H_BUSY;
    case -ENOMEM:
        return H_NO_MEM;
    default:
        return H_HARDWARE;
    }
}
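
/*
 * For example, a positive return value of 250 from the ioctl maps to
 * H_LONG_BUSY_ORDER_100_MSEC above, hinting roughly how long the guest
 * should wait before retrying the hypercall.
 */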

static target_ulong h_resize_hpt_prepare(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         target_ulong opcode,
                                         target_ulong *args)
{
    target_ulong flags = args[0];
    int shift = args[1];
    SpaprPendingHpt *pending = spapr->pending_hpt;
    uint64_t current_ram_size;
    int rc;

    if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) {
        return H_AUTHORITY;
    }

    if (!spapr->htab_shift) {
        /* Radix guest, no HPT */
        return H_NOT_AVAILABLE;
    }

    trace_spapr_h_resize_hpt_prepare(flags, shift);

    if (flags != 0) {
        return H_PARAMETER;
    }

    if (shift && ((shift < 18) || (shift > 46))) {
        return H_PARAMETER;
    }

    current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size();

    /* We only allow the guest to allocate an HPT one order above what
     * we'd normally give them (to stop a small guest claiming a huge
     * chunk of resources in the HPT) */
    if (shift > (spapr_hpt_shift_for_ramsize(current_ram_size) + 1)) {
        return H_RESOURCE;
    }

    rc = kvmppc_resize_hpt_prepare(cpu, flags, shift);
    if (rc != -ENOSYS) {
        return resize_hpt_convert_rc(rc);
    }

    if (pending) {
        /* something already in progress */
        if (pending->shift == shift) {
            /* and it's suitable */
            if (pending->complete) {
                return pending->ret;
            } else {
                return H_LONG_BUSY_ORDER_100_MSEC;
            }
        }

        /* not suitable, cancel and replace */
        cancel_hpt_prepare(spapr);
    }

    if (!shift) {
        /* nothing to do */
        return H_SUCCESS;
    }

    /* start new prepare */

    pending = g_new0(SpaprPendingHpt, 1);
    pending->shift = shift;
    pending->ret = H_HARDWARE;

    qemu_thread_create(&pending->thread, "sPAPR HPT prepare",
                       hpt_prepare_thread, pending, QEMU_THREAD_DETACHED);

    spapr->pending_hpt = pending;

    /* In theory we could estimate the time more accurately based on
     * the new size, but there's not much point */
    return H_LONG_BUSY_ORDER_100_MSEC;
}
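
/*
 * A guest therefore drives a resize roughly like this (a sketch of the
 * PAPR flow, not QEMU code):
 *
 *   do {
 *       rc = H_RESIZE_HPT_PREPARE(0, new_shift);
 *   } while (H_IS_LONG_BUSY(rc));        // retry after the hinted delay
 *   if (rc == H_SUCCESS) {
 *       rc = H_RESIZE_HPT_COMMIT(0, new_shift);
 *   }
 *
 * H_IS_LONG_BUSY() is a hypothetical predicate for the H_LONG_BUSY_*
 * codes; Linux implements an equivalent retry loop in its pseries code.
 */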

static uint64_t new_hpte_load0(void *htab, uint64_t pteg, int slot)
{
    uint8_t *addr = htab;

    addr += pteg * HASH_PTEG_SIZE_64;
    addr += slot * HASH_PTE_SIZE_64;
    return ldq_p(addr);
}

static void new_hpte_store(void *htab, uint64_t pteg, int slot,
                           uint64_t pte0, uint64_t pte1)
{
    uint8_t *addr = htab;

    addr += pteg * HASH_PTEG_SIZE_64;
    addr += slot * HASH_PTE_SIZE_64;

    stq_p(addr, pte0);
    stq_p(addr + HASH_PTE_SIZE_64 / 2, pte1);
}
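
/*
 * The offset arithmetic above relies on each 64-bit HPTE being
 * HASH_PTE_SIZE_64 (16) bytes, so a full PTEG of 8 entries is
 * HASH_PTEG_SIZE_64 (128) bytes; that same factor is why the rehash
 * code below converts an HPT byte size to a PTEG count with ">> 7".
 */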

static int rehash_hpte(PowerPCCPU *cpu,
                       const ppc_hash_pte64_t *hptes,
                       void *old_hpt, uint64_t oldsize,
                       void *new_hpt, uint64_t newsize,
                       uint64_t pteg, int slot)
{
    uint64_t old_hash_mask = (oldsize >> 7) - 1;
    uint64_t new_hash_mask = (newsize >> 7) - 1;
    target_ulong pte0 = ppc_hash64_hpte0(cpu, hptes, slot);
    target_ulong pte1;
    uint64_t avpn;
    unsigned base_pg_shift;
    uint64_t hash, new_pteg, replace_pte0;

    if (!(pte0 & HPTE64_V_VALID) || !(pte0 & HPTE64_V_BOLTED)) {
        return H_SUCCESS;
    }

    pte1 = ppc_hash64_hpte1(cpu, hptes, slot);

    base_pg_shift = ppc_hash64_hpte_page_shift_noslb(cpu, pte0, pte1);
    assert(base_pg_shift); /* H_ENTER shouldn't allow a bad encoding */
    avpn = HPTE64_V_AVPN_VAL(pte0) & ~(((1ULL << base_pg_shift) - 1) >> 23);

    if (pte0 & HPTE64_V_SECONDARY) {
        pteg = ~pteg;
    }

    if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_256M) {
        uint64_t offset, vsid;

        /* We only have 28 - 23 bits of offset in avpn */
        offset = (avpn & 0x1f) << 23;
        vsid = avpn >> 5;
        /* We can find more bits from the pteg value */
        if (base_pg_shift < 23) {
            offset |= ((vsid ^ pteg) & old_hash_mask) << base_pg_shift;
        }

        hash = vsid ^ (offset >> base_pg_shift);
    } else if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_1T) {
        uint64_t offset, vsid;

        /* We only have 40 - 23 bits of seg_off in avpn */
        offset = (avpn & 0x1ffff) << 23;
        vsid = avpn >> 17;
        if (base_pg_shift < 23) {
            offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask)
                << base_pg_shift;
        }

        hash = vsid ^ (vsid << 25) ^ (offset >> base_pg_shift);
    } else {
        error_report("rehash_hpte: Bad segment size in HPTE");
        return H_HARDWARE;
    }

    new_pteg = hash & new_hash_mask;
    if (pte0 & HPTE64_V_SECONDARY) {
        assert(~pteg == (hash & old_hash_mask));
        new_pteg = ~new_pteg;
    } else {
        assert(pteg == (hash & old_hash_mask));
    }
    assert((oldsize != newsize) || (pteg == new_pteg));
    replace_pte0 = new_hpte_load0(new_hpt, new_pteg, slot);
    /*
     * Strictly speaking, we don't need all these tests, since we only
     * ever rehash bolted HPTEs.  We might in future handle non-bolted
     * HPTEs, though, so make the logic correct for those cases as
     * well.
     */
    if (replace_pte0 & HPTE64_V_VALID) {
        assert(newsize < oldsize);
        if (replace_pte0 & HPTE64_V_BOLTED) {
            if (pte0 & HPTE64_V_BOLTED) {
                /* Bolted collision, nothing we can do */
                return H_PTEG_FULL;
            } else {
                /* Discard this hpte */
                return H_SUCCESS;
            }
        }
    }

    new_hpte_store(new_hpt, new_pteg, slot, pte0, pte1);
    return H_SUCCESS;
}

static int rehash_hpt(PowerPCCPU *cpu,
                      void *old_hpt, uint64_t oldsize,
                      void *new_hpt, uint64_t newsize)
{
    uint64_t n_ptegs = oldsize >> 7;
    uint64_t pteg;
    int slot;
    int rc;

    for (pteg = 0; pteg < n_ptegs; pteg++) {
        hwaddr ptex = pteg * HPTES_PER_GROUP;
        const ppc_hash_pte64_t *hptes
            = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);

        if (!hptes) {
            return H_HARDWARE;
        }

        for (slot = 0; slot < HPTES_PER_GROUP; slot++) {
            rc = rehash_hpte(cpu, hptes, old_hpt, oldsize, new_hpt, newsize,
                             pteg, slot);
            if (rc != H_SUCCESS) {
                ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
                return rc;
            }
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
    }

    return H_SUCCESS;
}

static void do_push_sregs_to_kvm_pr(CPUState *cs, run_on_cpu_data data)
{
    int ret;

    cpu_synchronize_state(cs);

    ret = kvmppc_put_books_sregs(POWERPC_CPU(cs));
    if (ret < 0) {
        error_report("failed to push sregs to KVM: %s", strerror(-ret));
        exit(1);
    }
}

static void push_sregs_to_kvm_pr(SpaprMachineState *spapr)
{
    CPUState *cs;

    /*
     * This is a hack for the benefit of KVM PR - it abuses the SDR1
     * slot in kvm_sregs to communicate the userspace address of the
     * HPT
     */
    if (!kvm_enabled() || !spapr->htab) {
        return;
    }

    CPU_FOREACH(cs) {
        run_on_cpu(cs, do_push_sregs_to_kvm_pr, RUN_ON_CPU_NULL);
    }
}

static target_ulong h_resize_hpt_commit(PowerPCCPU *cpu,
                                        SpaprMachineState *spapr,
                                        target_ulong opcode,
                                        target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong shift = args[1];
    SpaprPendingHpt *pending = spapr->pending_hpt;
    int rc;
    size_t newsize;

    if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) {
        return H_AUTHORITY;
    }

    if (!spapr->htab_shift) {
        /* Radix guest, no HPT */
        return H_NOT_AVAILABLE;
    }

    trace_spapr_h_resize_hpt_commit(flags, shift);

    rc = kvmppc_resize_hpt_commit(cpu, flags, shift);
    if (rc != -ENOSYS) {
        rc = resize_hpt_convert_rc(rc);
        if (rc == H_SUCCESS) {
            /* Need to set the new htab_shift in the machine state */
            spapr->htab_shift = shift;
        }
        return rc;
    }

    if (flags != 0) {
        return H_PARAMETER;
    }

    if (!pending || (pending->shift != shift)) {
        /* no matching prepare */
        return H_CLOSED;
    }

    if (!pending->complete) {
        /* prepare has not completed */
        return H_BUSY;
    }

    /* Shouldn't have got past PREPARE without an HPT */
    g_assert(spapr->htab_shift);

    newsize = 1ULL << pending->shift;
    rc = rehash_hpt(cpu, spapr->htab, HTAB_SIZE(spapr),
                    pending->hpt, newsize);
    if (rc == H_SUCCESS) {
        qemu_vfree(spapr->htab);
        spapr->htab = pending->hpt;
        spapr->htab_shift = pending->shift;

        push_sregs_to_kvm_pr(spapr);

        pending->hpt = NULL; /* so it's not free()d */
    }

    /* Clean up */
    spapr->pending_hpt = NULL;
    free_pending_hpt(pending);

    return rc;
}

static target_ulong h_set_sprg0(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_SPRG0] = args[0];

    return H_SUCCESS;
}

static target_ulong h_set_dabr(PowerPCCPU *cpu, SpaprMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    if (!has_spr(cpu, SPR_DABR)) {
        return H_HARDWARE;              /* DABR register not available */
    }
    cpu_synchronize_state(CPU(cpu));

    if (has_spr(cpu, SPR_DABRX)) {
        cpu->env.spr[SPR_DABRX] = 0x3;  /* Use Problem and Privileged state */
    } else if (!(args[0] & 0x4)) {      /* Breakpoint Translation set? */
        return H_RESERVED_DABR;
    }

    cpu->env.spr[SPR_DABR] = args[0];
    return H_SUCCESS;
}

static target_ulong h_set_xdabr(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong dabrx = args[1];

    if (!has_spr(cpu, SPR_DABR) || !has_spr(cpu, SPR_DABRX)) {
        return H_HARDWARE;
    }

    if ((dabrx & ~0xfULL) != 0 || (dabrx & H_DABRX_HYPERVISOR) != 0
        || (dabrx & (H_DABRX_KERNEL | H_DABRX_USER)) == 0) {
        return H_PARAMETER;
    }

    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_DABRX] = dabrx;
    cpu->env.spr[SPR_DABR] = args[0];

    return H_SUCCESS;
}

static target_ulong h_page_init(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    hwaddr dst = args[1];
    hwaddr src = args[2];
    hwaddr len = TARGET_PAGE_SIZE;
    uint8_t *pdst, *psrc;
    target_long ret = H_SUCCESS;

    if (flags & ~(H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE
                  | H_COPY_PAGE | H_ZERO_PAGE)) {
        qemu_log_mask(LOG_UNIMP, "h_page_init: Bad flags (" TARGET_FMT_lx ")\n",
                      flags);
        return H_PARAMETER;
    }

    /* Map-in destination */
    if (!is_ram_address(spapr, dst) || (dst & ~TARGET_PAGE_MASK) != 0) {
        return H_PARAMETER;
    }
    pdst = cpu_physical_memory_map(dst, &len, 1);
    if (!pdst || len != TARGET_PAGE_SIZE) {
        return H_PARAMETER;
    }

    if (flags & H_COPY_PAGE) {
        /* Map-in source, copy to destination, and unmap source again */
        if (!is_ram_address(spapr, src) || (src & ~TARGET_PAGE_MASK) != 0) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        psrc = cpu_physical_memory_map(src, &len, 0);
        if (!psrc || len != TARGET_PAGE_SIZE) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        memcpy(pdst, psrc, len);
        cpu_physical_memory_unmap(psrc, len, 0, len);
    } else if (flags & H_ZERO_PAGE) {
        memset(pdst, 0, len);          /* Just clear the destination page */
    }

    if (kvm_enabled() && (flags & H_ICACHE_SYNCHRONIZE) != 0) {
        kvmppc_dcbst_range(cpu, pdst, len);
    }
    if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) {
        if (kvm_enabled()) {
            kvmppc_icbi_range(cpu, pdst, len);
        } else {
            tb_flush(CPU(cpu));
        }
    }

unmap_out:
    cpu_physical_memory_unmap(pdst, TARGET_PAGE_SIZE, 1, len);
    return ret;
}
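
/*
 * Note that cpu_physical_memory_map() may return a mapping shorter than
 * requested (len is an in/out parameter), e.g. when the page crosses a
 * memory region boundary, which is why the H_PARAMETER checks above
 * insist on getting the full TARGET_PAGE_SIZE back.
 */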

#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

#define VPA_MIN_SIZE           640
#define VPA_SIZE_OFFSET        0x4
#define VPA_SHARED_PROC_OFFSET 0x9
#define VPA_SHARED_PROC_VAL    0x2
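
/*
 * Only a few fields of the VPA are touched here: the big-endian size
 * halfword at VPA_SIZE_OFFSET, and the byte at VPA_SHARED_PROC_OFFSET,
 * in which register_vpa() unconditionally sets the "shared processor"
 * indication.
 */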

static target_ulong register_vpa(PowerPCCPU *cpu, target_ulong vpa)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    uint16_t size;
    uint8_t tmp;

    if (vpa == 0) {
        hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
        return H_HARDWARE;
    }

    if (vpa % env->dcache_line_size) {
        return H_PARAMETER;
    }
    /* FIXME: bounds check the address */

    size = lduw_be_phys(cs->as, vpa + VPA_SIZE_OFFSET);

    if (size < VPA_MIN_SIZE) {
        return H_PARAMETER;
    }

    /* VPA is not allowed to cross a page boundary */
    if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    spapr_cpu->vpa_addr = vpa;

    tmp = ldub_phys(cs->as, spapr_cpu->vpa_addr + VPA_SHARED_PROC_OFFSET);
    tmp |= VPA_SHARED_PROC_VAL;
    stb_phys(cs->as, spapr_cpu->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);

    return H_SUCCESS;
}

static target_ulong deregister_vpa(PowerPCCPU *cpu, target_ulong vpa)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    if (spapr_cpu->slb_shadow_addr) {
        return H_RESOURCE;
    }

    if (spapr_cpu->dtl_addr) {
        return H_RESOURCE;
    }

    spapr_cpu->vpa_addr = 0;
    return H_SUCCESS;
}

static target_ulong register_slb_shadow(PowerPCCPU *cpu, target_ulong addr)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(CPU(cpu)->as, addr + 0x4);
    if (size < 0x8) {
        return H_PARAMETER;
    }

    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!spapr_cpu->vpa_addr) {
        return H_RESOURCE;
    }

    spapr_cpu->slb_shadow_addr = addr;
    spapr_cpu->slb_shadow_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_slb_shadow(PowerPCCPU *cpu, target_ulong addr)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    spapr_cpu->slb_shadow_addr = 0;
    spapr_cpu->slb_shadow_size = 0;
    return H_SUCCESS;
}

static target_ulong register_dtl(PowerPCCPU *cpu, target_ulong addr)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(CPU(cpu)->as, addr + 0x4);

    if (size < 48) {
        return H_PARAMETER;
    }

    if (!spapr_cpu->vpa_addr) {
        return H_RESOURCE;
    }

    spapr_cpu->dtl_addr = addr;
    spapr_cpu->dtl_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_dtl(PowerPCCPU *cpu, target_ulong addr)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    spapr_cpu->dtl_addr = 0;
    spapr_cpu->dtl_size = 0;

    return H_SUCCESS;
}
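
/*
 * The H_RESOURCE returns above enforce the registration ordering: the
 * VPA must be registered before the SLB shadow buffer or dispatch trace
 * log can be, and it cannot be deregistered while either of them is
 * still registered.
 */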

static target_ulong h_register_vpa(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    target_ulong vpa = args[2];
    target_ulong ret = H_PARAMETER;
    PowerPCCPU *tcpu;

    tcpu = spapr_find_cpu(procno);
    if (!tcpu) {
        return H_PARAMETER;
    }

    switch (flags) {
    case FLAGS_REGISTER_VPA:
        ret = register_vpa(tcpu, vpa);
        break;

    case FLAGS_DEREGISTER_VPA:
        ret = deregister_vpa(tcpu, vpa);
        break;

    case FLAGS_REGISTER_SLBSHADOW:
        ret = register_slb_shadow(tcpu, vpa);
        break;

    case FLAGS_DEREGISTER_SLBSHADOW:
        ret = deregister_slb_shadow(tcpu, vpa);
        break;

    case FLAGS_REGISTER_DTL:
        ret = register_dtl(tcpu, vpa);
        break;

    case FLAGS_DEREGISTER_DTL:
        ret = deregister_dtl(tcpu, vpa);
        break;
    }

    return ret;
}

static target_ulong h_cede(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);
    if (!cpu_has_work(cs)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cs->exit_request = 1;
    }
    return H_SUCCESS;
}

static target_ulong h_rtas(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong rtas_r3 = args[0];
    uint32_t token = rtas_ld(rtas_r3, 0);
    uint32_t nargs = rtas_ld(rtas_r3, 1);
    uint32_t nret = rtas_ld(rtas_r3, 2);

    return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
                           nret, rtas_r3 + 12 + 4 * nargs);
}
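
/*
 * The RTAS argument buffer addressed by the guest is laid out as
 * consecutive big-endian 32-bit words: token, nargs, nret, then nargs
 * input words starting at byte offset 12 and nret return words straight
 * after them, which is where the "+ 12" and "+ 12 + 4 * nargs" above
 * come from.
 */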

static target_ulong h_logical_load(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong size = args[0];
    target_ulong addr = args[1];

    switch (size) {
    case 1:
        args[0] = ldub_phys(cs->as, addr);
        return H_SUCCESS;
    case 2:
        args[0] = lduw_phys(cs->as, addr);
        return H_SUCCESS;
    case 4:
        args[0] = ldl_phys(cs->as, addr);
        return H_SUCCESS;
    case 8:
        args[0] = ldq_phys(cs->as, addr);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

static target_ulong h_logical_store(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong size = args[0];
    target_ulong addr = args[1];
    target_ulong val  = args[2];

    switch (size) {
    case 1:
        stb_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 2:
        stw_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 4:
        stl_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 8:
        stq_phys(cs->as, addr, val);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

static target_ulong h_logical_memop(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong dst   = args[0]; /* Destination address */
    target_ulong src   = args[1]; /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op    = args[4]; /* 0 = copy, 1 = invert */
    uint64_t tmp;
    unsigned int mask = (1 << esize) - 1;
    int step = 1 << esize;

    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    if (dst >= src && dst < (src + (count << esize))) {
        dst = dst + ((count - 1) << esize);
        src = src + ((count - 1) << esize);
        step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0:
            tmp = ldub_phys(cs->as, src);
            break;
        case 1:
            tmp = lduw_phys(cs->as, src);
            break;
        case 2:
            tmp = ldl_phys(cs->as, src);
            break;
        case 3:
            tmp = ldq_phys(cs->as, src);
            break;
        default:
            return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;
        }
        switch (esize) {
        case 0:
            stb_phys(cs->as, dst, tmp);
            break;
        case 1:
            stw_phys(cs->as, dst, tmp);
            break;
        case 2:
            stl_phys(cs->as, dst, tmp);
            break;
        case 3:
            stq_phys(cs->as, dst, tmp);
            break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}
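
/*
 * The direction flip above is the usual memmove() trick: when the
 * destination overlaps the source from above, copying backwards (from
 * the last element) ensures each element is read before it is
 * overwritten.
 */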

static target_ulong h_logical_icbi(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_logical_dcbf(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
                                           target_ulong mflags,
                                           target_ulong value1,
                                           target_ulong value2)
{
    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    switch (mflags) {
    case H_SET_MODE_ENDIAN_BIG:
        spapr_set_all_lpcrs(0, LPCR_ILE);
        spapr_pci_switch_vga(true);
        return H_SUCCESS;

    case H_SET_MODE_ENDIAN_LITTLE:
        spapr_set_all_lpcrs(LPCR_ILE, LPCR_ILE);
        spapr_pci_switch_vga(false);
        return H_SUCCESS;
    }

    return H_UNSUPPORTED_FLAG;
}

static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
                                                        target_ulong mflags,
                                                        target_ulong value1,
                                                        target_ulong value2)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!(pcc->insns_flags2 & PPC2_ISA207S)) {
        return H_P2;
    }
    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    if (mflags == AIL_RESERVED) {
        return H_UNSUPPORTED_FLAG;
    }

    spapr_set_all_lpcrs(mflags << LPCR_AIL_SHIFT, LPCR_AIL);

    return H_SUCCESS;
}

static target_ulong h_set_mode(PowerPCCPU *cpu, SpaprMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    target_ulong resource = args[1];
    target_ulong ret = H_P2;

    switch (resource) {
    case H_SET_MODE_RESOURCE_LE:
        ret = h_set_mode_resource_le(cpu, args[0], args[2], args[3]);
        break;
    case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
        ret = h_set_mode_resource_addr_trans_mode(cpu, args[0],
                                                  args[2], args[3]);
        break;
    }

    return ret;
}

static target_ulong h_clean_slb(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx
                  " (H_CLEAN_SLB)\n", opcode);
    return H_FUNCTION;
}

static target_ulong h_invalidate_pid(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                     target_ulong opcode, target_ulong *args)
{
    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx
                  " (H_INVALIDATE_PID)\n", opcode);
    return H_FUNCTION;
}

static void spapr_check_setup_free_hpt(SpaprMachineState *spapr,
                                       uint64_t patbe_old, uint64_t patbe_new)
{
    /*
     * We have 4 Options:
     * HASH->HASH || RADIX->RADIX || NOTHING->RADIX : Do Nothing
     * HASH->RADIX                                  : Free HPT
     * RADIX->HASH                                  : Allocate HPT
     * NOTHING->HASH                                : Allocate HPT
     * Note: NOTHING is the case where we told the guest it could choose
     *       later, so we assumed radix, and it has now called
     *       H_REG_PROC_TBL
     */

    if ((patbe_old & PATE1_GR) == (patbe_new & PATE1_GR)) {
        /* We assume RADIX, so this catches all the "Do Nothing" cases */
    } else if (!(patbe_old & PATE1_GR)) {
        /* HASH->RADIX : Free HPT */
        spapr_free_hpt(spapr);
    } else if (!(patbe_new & PATE1_GR)) {
        /* RADIX->HASH || NOTHING->HASH : Allocate HPT */
        spapr_setup_hpt_and_vrma(spapr);
    }
}

#define FLAGS_MASK              0x01FULL
#define FLAG_MODIFY             0x10
#define FLAG_REGISTER           0x08
#define FLAG_RADIX              0x04
#define FLAG_HASH_PROC_TBL      0x02
#define FLAG_GTSE               0x01

static target_ulong h_register_process_table(PowerPCCPU *cpu,
                                             SpaprMachineState *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong proc_tbl = args[1];
    target_ulong page_size = args[2];
    target_ulong table_size = args[3];
    target_ulong update_lpcr = 0;
    uint64_t cproc;

    if (flags & ~FLAGS_MASK) { /* Check no reserved bits are set */
        return H_PARAMETER;
    }
    if (flags & FLAG_MODIFY) {
        if (flags & FLAG_REGISTER) {
            if (flags & FLAG_RADIX) { /* Register new RADIX process table */
                if (proc_tbl & 0xfff || proc_tbl >> 60) {
                    return H_P2;
                } else if (page_size) {
                    return H_P3;
                } else if (table_size > 24) {
                    return H_P4;
                }
                cproc = PATE1_GR | proc_tbl | table_size;
            } else { /* Register new HPT process table */
                if (flags & FLAG_HASH_PROC_TBL) { /* Hash with Segment Tables */
                    /* TODO - Not Supported */
                    /* Technically caused by flag bits => H_PARAMETER */
                    return H_PARAMETER;
                } else { /* Hash with SLB */
                    if (proc_tbl >> 38) {
                        return H_P2;
                    } else if (page_size & ~0x7) {
                        return H_P3;
                    } else if (table_size > 24) {
                        return H_P4;
                    }
                }
                cproc = (proc_tbl << 25) | page_size << 5 | table_size;
            }

        } else { /* Deregister current process table */
            /*
             * Set to benign value: (current GR) | 0. This allows
             * deregistration in KVM to succeed even if the radix bit
             * in flags doesn't match the radix bit in the old PATE.
             */
            cproc = spapr->patb_entry & PATE1_GR;
        }
    } else { /* Maintain current registration */
        if (!(flags & FLAG_RADIX) != !(spapr->patb_entry & PATE1_GR)) {
            /* Technically caused by flag bits => H_PARAMETER */
            return H_PARAMETER; /* Existing Process Table Mismatch */
        }
        cproc = spapr->patb_entry;
    }

    /* Check if we need to setup OR free the hpt */
    spapr_check_setup_free_hpt(spapr, spapr->patb_entry, cproc);

    spapr->patb_entry = cproc; /* Save new process table */

    /* Update the UPRT, HR and GTSE bits in the LPCR for all cpus */
    if (flags & FLAG_RADIX) {   /* Radix must use process tables, also set HR */
        update_lpcr |= (LPCR_UPRT | LPCR_HR);
    } else if (flags & FLAG_HASH_PROC_TBL) { /* Hash with process tables */
        update_lpcr |= LPCR_UPRT;
    }
    if (flags & FLAG_GTSE) {    /* Guest translation shootdown enable */
        update_lpcr |= LPCR_GTSE;
    }

    spapr_set_all_lpcrs(update_lpcr, LPCR_UPRT | LPCR_HR | LPCR_GTSE);

    if (kvm_enabled()) {
        return kvmppc_configure_v3_mmu(cpu, flags & FLAG_RADIX,
                                       flags & FLAG_GTSE, cproc);
    }
    return H_SUCCESS;
}

#define H_SIGNAL_SYS_RESET_ALL         -1
#define H_SIGNAL_SYS_RESET_ALLBUTSELF  -2

static target_ulong h_signal_sys_reset(PowerPCCPU *cpu,
                                       SpaprMachineState *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    target_long target = args[0];
    CPUState *cs;

    if (target < 0) {
        /* Broadcast */
        if (target < H_SIGNAL_SYS_RESET_ALLBUTSELF) {
            return H_PARAMETER;
        }

        CPU_FOREACH(cs) {
            PowerPCCPU *c = POWERPC_CPU(cs);

            if (target == H_SIGNAL_SYS_RESET_ALLBUTSELF) {
                if (c == cpu) {
                    continue;
                }
            }
            run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
        }
        return H_SUCCESS;

    } else {
        /* Unicast */
        cs = CPU(spapr_find_cpu(target));
        if (cs) {
            run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
            return H_SUCCESS;
        }
        return H_PARAMETER;
    }
}

static uint32_t cas_check_pvr(SpaprMachineState *spapr, PowerPCCPU *cpu,
                              target_ulong *addr, bool *raw_mode_supported,
                              Error **errp)
{
    bool explicit_match = false; /* Matched the CPU's real PVR */
    uint32_t max_compat = spapr->max_compat_pvr;
    uint32_t best_compat = 0;
    int i;

    /*
     * We scan the supplied table of PVRs looking for two things
     *   1. Is our real CPU PVR in the list?
     *   2. What's the "best" listed logical PVR
     */
    for (i = 0; i < 512; ++i) {
        uint32_t pvr, pvr_mask;

        pvr_mask = ldl_be_phys(&address_space_memory, *addr);
        pvr = ldl_be_phys(&address_space_memory, *addr + 4);
        *addr += 8;

        if (~pvr_mask & pvr) {
            break; /* Terminator record */
        }

        if ((cpu->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask)) {
            explicit_match = true;
        } else {
            if (ppc_check_compat(cpu, pvr, best_compat, max_compat)) {
                best_compat = pvr;
            }
        }
    }

    if ((best_compat == 0) && (!explicit_match || max_compat)) {
        /* We couldn't find a suitable compatibility mode, and either
         * the guest doesn't support "raw" mode for this CPU, or raw
         * mode is disabled because a maximum compat mode is set */
        error_setg(errp, "Couldn't negotiate a suitable PVR during CAS");
        return 0;
    }

    *raw_mode_supported = explicit_match;

    /* Parsing finished */
    trace_spapr_cas_pvr(cpu->compat_pvr, explicit_match, best_compat);

    return best_compat;
}
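
/*
 * The table scanned above is the list of (mask, PVR) pairs the guest
 * supplies to ibm,client-architecture-support: a record whose value has
 * bits set outside its mask terminates the list, a record matching the
 * real PVR enables "raw" mode, and logical PVRs become candidates for a
 * compatibility mode subject to the machine's max-cpu-compat limit.
 */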
1503
1504static target_ulong h_client_architecture_support(PowerPCCPU *cpu,
1505                                                  SpaprMachineState *spapr,
1506                                                  target_ulong opcode,
1507                                                  target_ulong *args)
1508{
1509    /* Working address in data buffer */
1510    target_ulong addr = ppc64_phys_to_real(args[0]);
1511    target_ulong ov_table;
1512    uint32_t cas_pvr;
1513    SpaprOptionVector *ov1_guest, *ov5_guest, *ov5_cas_old, *ov5_updates;
1514    bool guest_radix;
1515    Error *local_err = NULL;
1516    bool raw_mode_supported = false;
1517    bool guest_xive;
1518
1519    cas_pvr = cas_check_pvr(spapr, cpu, &addr, &raw_mode_supported, &local_err);
1520    if (local_err) {
1521        error_report_err(local_err);
1522        return H_HARDWARE;
1523    }
1524
1525    /* Update CPUs */
1526    if (cpu->compat_pvr != cas_pvr) {
1527        ppc_set_compat_all(cas_pvr, &local_err);
1528        if (local_err) {
1529            /* We fail to set compat mode (likely because running with KVM PR),
1530             * but maybe we can fallback to raw mode if the guest supports it.
1531             */
1532            if (!raw_mode_supported) {
1533                error_report_err(local_err);
1534                return H_HARDWARE;
1535            }
1536            error_free(local_err);
1537            local_err = NULL;
1538        }
1539    }
1540
1541    /* For the future use: here @ov_table points to the first option vector */
1542    ov_table = addr;
1543
1544    ov1_guest = spapr_ovec_parse_vector(ov_table, 1);
1545    ov5_guest = spapr_ovec_parse_vector(ov_table, 5);
1546    if (spapr_ovec_test(ov5_guest, OV5_MMU_BOTH)) {
1547        error_report("guest requested hash and radix MMU, which is invalid.");
1548        exit(EXIT_FAILURE);
1549    }
1550    if (spapr_ovec_test(ov5_guest, OV5_XIVE_BOTH)) {
1551        error_report("guest requested an invalid interrupt mode");
1552        exit(EXIT_FAILURE);
1553    }
1554
1555    /* The radix/hash bit in byte 24 requires special handling: */
1556    guest_radix = spapr_ovec_test(ov5_guest, OV5_MMU_RADIX_300);
1557    spapr_ovec_clear(ov5_guest, OV5_MMU_RADIX_300);
1558
1559    guest_xive = spapr_ovec_test(ov5_guest, OV5_XIVE_EXPLOIT);
1560
1561    /*
1562     * HPT resizing is a bit of a special case, because when enabled
1563     * we assume an HPT guest will support it until it says it
1564     * doesn't, instead of assuming it won't support it until it says
1565     * it does.  Strictly speaking that approach could break for
1566     * guests which don't make a CAS call, but those are so old we
1567     * don't care about them.  Without that assumption we'd have to
1568     * make at least a temporary allocation of an HPT sized for max
1569     * memory, which could be impossibly difficult under KVM HV if
1570     * maxram is large.
1571     */
1572    if (!guest_radix && !spapr_ovec_test(ov5_guest, OV5_HPT_RESIZE)) {
1573        int maxshift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size);
1574
1575        if (spapr->resize_hpt == SPAPR_RESIZE_HPT_REQUIRED) {
1576            error_report(
1577                "h_client_architecture_support: Guest doesn't support HPT resizing, but resize-hpt=required");
1578            exit(1);
1579        }
1580
1581        if (spapr->htab_shift < maxshift) {
1582            /* Guest doesn't know about HPT resizing, so we
1583             * pre-emptively resize for the maximum permitted RAM.  At
1584             * the point this is called, nothing should have been
1585             * entered into the existing HPT */
1586            spapr_reallocate_hpt(spapr, maxshift, &error_fatal);
1587            push_sregs_to_kvm_pr(spapr);
1588        }
1589    }
1590
1591    /* NOTE: there are actually a number of ov5 bits where input from the
1592     * guest is always zero, and the platform/QEMU enables them independently
1593     * of guest input. To model these properly we'd want some sort of mask,
1594     * but since they only currently apply to memory migration as defined
1595     * by LoPAPR 1.1, 14.5.4.8, which QEMU doesn't implement, we don't need
1596     * to worry about this for now.
1597     */
1598    ov5_cas_old = spapr_ovec_clone(spapr->ov5_cas);
1599
1600    /* also clear the radix/hash bit from the current ov5_cas bits to
1601     * be in sync with the newly ov5 bits. Else the radix bit will be
1602     * seen as being removed and this will generate a reset loop
1603     */
1604    spapr_ovec_clear(ov5_cas_old, OV5_MMU_RADIX_300);
1605
1606    /* full range of negotiated ov5 capabilities */
1607    spapr_ovec_intersect(spapr->ov5_cas, spapr->ov5, ov5_guest);
1608    spapr_ovec_cleanup(ov5_guest);
1609    /* capabilities that have been added since CAS-generated guest reset.
1610     * if capabilities have since been removed, generate another reset
1611     */
1612    ov5_updates = spapr_ovec_new();
1613    spapr->cas_reboot = spapr_ovec_diff(ov5_updates,
1614                                        ov5_cas_old, spapr->ov5_cas);
1615    /* Now that processing is finished, set the radix/hash bit for the
1616     * guest if it requested a valid mode; otherwise terminate the boot. */
1617    if (guest_radix) {
1618        if (kvm_enabled() && !kvmppc_has_cap_mmu_radix()) {
1619            error_report("Guest requested unavailable MMU mode (radix).");
1620            exit(EXIT_FAILURE);
1621        }
1622        spapr_ovec_set(spapr->ov5_cas, OV5_MMU_RADIX_300);
1623    } else {
1624        if (kvm_enabled() && kvmppc_has_cap_mmu_radix()
1625            && !kvmppc_has_cap_mmu_hash_v3()) {
1626            error_report("Guest requested unavailable MMU mode (hash).");
1627            exit(EXIT_FAILURE);
1628        }
1629    }
1630    spapr->cas_legacy_guest_workaround = !spapr_ovec_test(ov1_guest,
1631                                                          OV1_PPC_3_00);
1632    if (!spapr->cas_reboot) {
1633        /* If spapr_machine_reset() did not set up a HPT but one is necessary
1634         * (because the guest isn't going to use radix) then set it up here. */
1635        if ((spapr->patb_entry & PATE1_GR) && !guest_radix) {
1636            /* legacy hash or new hash: */
1637            spapr_setup_hpt_and_vrma(spapr);
1638        }
1639        spapr->cas_reboot =
1640            (spapr_h_cas_compose_response(spapr, args[1], args[2],
1641                                          ov5_updates) != 0);
1642    }
1643
1644    /*
1645     * Ensure the guest asks for an interrupt mode we support; otherwise
1646     * terminate the boot.
1647     */
1648    if (guest_xive) {
1649        if (spapr->irq->ov5 == SPAPR_OV5_XIVE_LEGACY) {
1650            error_report(
1651"Guest requested unavailable interrupt mode (XIVE), try the ic-mode=xive or ic-mode=dual machine property");
1652            exit(EXIT_FAILURE);
1653        }
1654    } else {
1655        if (spapr->irq->ov5 == SPAPR_OV5_XIVE_EXPLOIT) {
1656            error_report(
1657"Guest requested unavailable interrupt mode (XICS), either don't set the ic-mode machine property or try ic-mode=xics or ic-mode=dual");
1658            exit(EXIT_FAILURE);
1659        }
1660    }

    /*
     * Generate a machine reset when we have an update of the
     * interrupt mode. Only required when the machine supports both
     * modes, i.e. ic-mode=dual (SPAPR_OV5_XIVE_BOTH).
     */
    if (!spapr->cas_reboot) {
        spapr->cas_reboot = spapr_ovec_test(ov5_updates, OV5_XIVE_EXPLOIT)
            && (spapr->irq->ov5 & SPAPR_OV5_XIVE_BOTH);
    }

    spapr_ovec_cleanup(ov5_updates);

    if (spapr->cas_reboot) {
        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
    }

    return H_SUCCESS;
}

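/*
 * H_HOME_NODE_ASSOCIATIVITY: return the associativity domain identifiers
 * for a given processor, in the same sequence as the "ibm,associativity"
 * device tree property. The guest passes flags in args[0] and the
 * processor number in args[1]; on success, args[0..5] are overwritten
 * with the associativity list, packed two 32-bit entries per register
 * and padded with -1. For example, procno 2 on NUMA node 1 yields
 * { (0,0), (0,1), (2,-1), -1, -1, -1 }.
 */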
static target_ulong h_home_node_associativity(PowerPCCPU *cpu,
                                              SpaprMachineState *spapr,
                                              target_ulong opcode,
                                              target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    PowerPCCPU *tcpu;
    int idx;

    /* only support procno from H_REGISTER_VPA */
    if (flags != 0x1) {
        return H_FUNCTION;
    }

    tcpu = spapr_find_cpu(procno);
    if (tcpu == NULL) {
        return H_P2;
    }

    /* sequence is the same as in the "ibm,associativity" property */

    idx = 0;
#define ASSOCIATIVITY(a, b) (((uint64_t)(a) << 32) | \
                             ((uint64_t)(b) & 0xffffffff))
    args[idx++] = ASSOCIATIVITY(0, 0);
    args[idx++] = ASSOCIATIVITY(0, tcpu->node_id);
    args[idx++] = ASSOCIATIVITY(procno, -1);
    for ( ; idx < 6; idx++) {
        args[idx] = -1;
    }
#undef ASSOCIATIVITY

    return H_SUCCESS;
}

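/*
 * H_GET_CPU_CHARACTERISTICS: advertise the speculative-execution
 * (Spectre/Meltdown class) mitigation characteristics and recommended
 * behaviours to the guest. The bits reported here are derived from the
 * spapr capabilities (cap-cfpc, cap-sbbc, cap-ibs, cap-ccf-assist).
 */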
static target_ulong h_get_cpu_characteristics(PowerPCCPU *cpu,
                                              SpaprMachineState *spapr,
                                              target_ulong opcode,
                                              target_ulong *args)
{
    uint64_t characteristics = H_CPU_CHAR_HON_BRANCH_HINTS &
                               ~H_CPU_CHAR_THR_RECONF_TRIG;
    uint64_t behaviour = H_CPU_BEHAV_FAVOUR_SECURITY;
    uint8_t safe_cache = spapr_get_cap(spapr, SPAPR_CAP_CFPC);
    uint8_t safe_bounds_check = spapr_get_cap(spapr, SPAPR_CAP_SBBC);
    uint8_t safe_indirect_branch = spapr_get_cap(spapr, SPAPR_CAP_IBS);
    uint8_t count_cache_flush_assist = spapr_get_cap(spapr,
                                                     SPAPR_CAP_CCF_ASSIST);

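    /*
     * Each capability is tri-state: SPAPR_CAP_BROKEN (vulnerable, no
     * mitigation advertised), SPAPR_CAP_WORKAROUND (a software
     * workaround is available) or SPAPR_CAP_FIXED (fixed in hardware);
     * the IBS cap additionally distinguishes the FIXED_NA, FIXED_CCD
     * and FIXED_IBS variants handled below.
     */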
    switch (safe_cache) {
    case SPAPR_CAP_WORKAROUND:
        characteristics |= H_CPU_CHAR_L1D_FLUSH_ORI30;
        characteristics |= H_CPU_CHAR_L1D_FLUSH_TRIG2;
        characteristics |= H_CPU_CHAR_L1D_THREAD_PRIV;
        behaviour |= H_CPU_BEHAV_L1D_FLUSH_PR;
        break;
    case SPAPR_CAP_FIXED:
        break;
    default: /* broken */
        assert(safe_cache == SPAPR_CAP_BROKEN);
        behaviour |= H_CPU_BEHAV_L1D_FLUSH_PR;
        break;
    }

    switch (safe_bounds_check) {
    case SPAPR_CAP_WORKAROUND:
        characteristics |= H_CPU_CHAR_SPEC_BAR_ORI31;
        behaviour |= H_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
        break;
    case SPAPR_CAP_FIXED:
        break;
    default: /* broken */
        assert(safe_bounds_check == SPAPR_CAP_BROKEN);
        behaviour |= H_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
        break;
    }

    switch (safe_indirect_branch) {
    case SPAPR_CAP_FIXED_NA:
        break;
    case SPAPR_CAP_FIXED_CCD:
        characteristics |= H_CPU_CHAR_CACHE_COUNT_DIS;
        break;
    case SPAPR_CAP_FIXED_IBS:
        characteristics |= H_CPU_CHAR_BCCTRL_SERIALISED;
        break;
    case SPAPR_CAP_WORKAROUND:
        behaviour |= H_CPU_BEHAV_FLUSH_COUNT_CACHE;
        if (count_cache_flush_assist) {
            characteristics |= H_CPU_CHAR_BCCTR_FLUSH_ASSIST;
        }
        break;
    default: /* broken */
        assert(safe_indirect_branch == SPAPR_CAP_BROKEN);
        break;
    }

    args[0] = characteristics;
    args[1] = behaviour;
    return H_SUCCESS;
}

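/*
 * KVMPPC_H_UPDATE_DT: the guest firmware (SLOF) hands an updated device
 * tree blob back to QEMU, which keeps a copy so that its view of the
 * device tree stays in sync with the firmware's modifications. The blob
 * is size- and consistency-checked before it replaces the stored one.
 */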
static target_ulong h_update_dt(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong dt = ppc64_phys_to_real(args[0]);
    struct fdt_header hdr = { 0 };
    unsigned cb;
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    void *fdt;

    cpu_physical_memory_read(dt, &hdr, sizeof(hdr));
    cb = fdt32_to_cpu(hdr.totalsize);

    if (!smc->update_dt_enabled) {
        return H_SUCCESS;
    }

    /* Check that the fdt did not grow out of proportion */
    if (cb > spapr->fdt_initial_size * 2) {
        trace_spapr_update_dt_failed_size(spapr->fdt_initial_size, cb,
                                          fdt32_to_cpu(hdr.magic));
        return H_PARAMETER;
    }

    fdt = g_malloc0(cb);
    cpu_physical_memory_read(dt, fdt, cb);

    /* Check the fdt consistency */
    if (fdt_check_full(fdt, cb)) {
        trace_spapr_update_dt_failed_check(spapr->fdt_initial_size, cb,
                                           fdt32_to_cpu(hdr.magic));
        g_free(fdt); /* Don't leak the rejected blob */
        return H_PARAMETER;
    }

    g_free(spapr->fdt_blob);
    spapr->fdt_size = cb;
    spapr->fdt_blob = fdt;
    trace_spapr_update_dt(cb);

    return H_SUCCESS;
}

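/*
 * Hypercall dispatch tables. PAPR hcall opcodes are multiples of 4, so
 * papr_hypercall_table is indexed by opcode / 4; QEMU/KVM-specific
 * hcalls occupy their own range starting at KVMPPC_HCALL_BASE and are
 * kept in a separate table.
 */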
static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX -
                                             KVMPPC_HCALL_BASE + 1];

void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
{
    spapr_hcall_fn *slot;

    if (opcode <= MAX_HCALL_OPCODE) {
        assert((opcode & 0x3) == 0);

        slot = &papr_hypercall_table[opcode / 4];
    } else {
        assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));

        slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
    }

    assert(!(*slot));
    *slot = fn;
}

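/*
 * Central dispatcher: look the opcode up in the appropriate table and
 * invoke the registered handler; log and return H_FUNCTION if no
 * handler has been registered for the opcode.
 */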
target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
                             target_ulong *args)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());

    if ((opcode <= MAX_HCALL_OPCODE)
        && ((opcode & 0x3) == 0)) {
        spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    } else if ((opcode >= KVMPPC_HCALL_BASE) &&
               (opcode <= KVMPPC_HCALL_MAX)) {
        spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    }

    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx "\n",
                  opcode);
    return H_FUNCTION;
}

static void hypercall_register_types(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);

    /* hcall-hpt-resize */
    spapr_register_hypercall(H_RESIZE_HPT_PREPARE, h_resize_hpt_prepare);
    spapr_register_hypercall(H_RESIZE_HPT_COMMIT, h_resize_hpt_commit);

    /* hcall-splpar */
    spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
    spapr_register_hypercall(H_CEDE, h_cede);
    spapr_register_hypercall(H_SIGNAL_SYS_RESET, h_signal_sys_reset);

    /* processor register resource access h-calls */
    spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0);
    spapr_register_hypercall(H_SET_DABR, h_set_dabr);
    spapr_register_hypercall(H_SET_XDABR, h_set_xdabr);
    spapr_register_hypercall(H_PAGE_INIT, h_page_init);
    spapr_register_hypercall(H_SET_MODE, h_set_mode);

    /* In Memory Table MMU h-calls */
    spapr_register_hypercall(H_CLEAN_SLB, h_clean_slb);
    spapr_register_hypercall(H_INVALIDATE_PID, h_invalidate_pid);
    spapr_register_hypercall(H_REGISTER_PROC_TBL, h_register_process_table);

    /* hcall-get-cpu-characteristics */
    spapr_register_hypercall(H_GET_CPU_CHARACTERISTICS,
                             h_get_cpu_characteristics);

1908    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differenciate
1909     * here between the "CI" and the "CACHE" variants, they will use whatever
1910     * mapping attributes qemu is using. When using KVM, the kernel will
1911     * enforce the attributes more strongly
1912     */
    spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
    spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);

    /* qemu/KVM-PPC specific hcalls */
    spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);

    /* ibm,client-architecture-support support */
    spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);

    spapr_register_hypercall(KVMPPC_H_UPDATE_DT, h_update_dt);

    /* Virtual Processor Home Node */
    spapr_register_hypercall(H_HOME_NODE_ASSOCIATIVITY,
                             h_home_node_associativity);
}

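/*
 * type_init() arranges for hypercall_register_types() to run during
 * QEMU's module initialisation, before any machine is instantiated.
 */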
type_init(hypercall_register_types)