qemu/hw/ppc/spapr_hcall.c
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "sysemu/hw_accel.h"
#include "sysemu/runstate.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "exec/exec-all.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "mmu-hash64.h"
#include "cpu-models.h"
#include "trace.h"
#include "kvm_ppc.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr_ovec.h"
#include "hw/ppc/spapr_numa.h"
#include "mmu-book3s-v3.h"
#include "hw/mem/memory-device.h"

bool is_ram_address(SpaprMachineState *spapr, hwaddr addr)
{
    MachineState *machine = MACHINE(spapr);
    DeviceMemoryState *dms = machine->device_memory;

    if (addr < machine->ram_size) {
        return true;
    }
    if ((addr >= dms->base)
        && ((addr - dms->base) < memory_region_size(&dms->mr))) {
        return true;
    }

    return false;
}
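
/*
 * Note on is_ram_address() above: a guest "logical" address is
 * considered RAM-backed if it falls either in base RAM or in the
 * device-memory (memory hotplug) window. This relies on the sPAPR
 * machine having initialized machine->device_memory; if that ever
 * became optional, a NULL check on dms would be needed here.
 */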

/* Convert a return code from the KVM ioctl()s implementing resize HPT
 * into a PAPR hypercall return code */
static target_ulong resize_hpt_convert_rc(int ret)
{
    if (ret >= 100000) {
        return H_LONG_BUSY_ORDER_100_SEC;
    } else if (ret >= 10000) {
        return H_LONG_BUSY_ORDER_10_SEC;
    } else if (ret >= 1000) {
        return H_LONG_BUSY_ORDER_1_SEC;
    } else if (ret >= 100) {
        return H_LONG_BUSY_ORDER_100_MSEC;
    } else if (ret >= 10) {
        return H_LONG_BUSY_ORDER_10_MSEC;
    } else if (ret > 0) {
        return H_LONG_BUSY_ORDER_1_MSEC;
    }

    switch (ret) {
    case 0:
        return H_SUCCESS;
    case -EPERM:
        return H_AUTHORITY;
    case -EINVAL:
        return H_PARAMETER;
    case -ENXIO:
        return H_CLOSED;
    case -ENOSPC:
        return H_PTEG_FULL;
    case -EBUSY:
        return H_BUSY;
    case -ENOMEM:
        return H_NO_MEM;
    default:
        return H_HARDWARE;
    }
}
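
/*
 * For illustration: a positive return value from those ioctl()s is an
 * estimate of the outstanding work (interpreted here as roughly
 * milliseconds), which the ladder above rounds into the nearest
 * H_LONG_BUSY_ORDER_* band. E.g. a hypothetical ret of 250 lands in
 * the ">= 100" band and becomes H_LONG_BUSY_ORDER_100_MSEC, telling
 * the guest to retry after that order of delay.
 */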

static target_ulong h_resize_hpt_prepare(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         target_ulong opcode,
                                         target_ulong *args)
{
    target_ulong flags = args[0];
    int shift = args[1];
    uint64_t current_ram_size;
    int rc;

    if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) {
        return H_AUTHORITY;
    }

    if (!spapr->htab_shift) {
        /* Radix guest, no HPT */
        return H_NOT_AVAILABLE;
    }

    trace_spapr_h_resize_hpt_prepare(flags, shift);

    if (flags != 0) {
        return H_PARAMETER;
    }

    if (shift && ((shift < 18) || (shift > 46))) {
        return H_PARAMETER;
    }

    current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size();

    /* We only allow the guest to allocate an HPT one order above what
     * we'd normally give them (to stop a small guest claiming a huge
     * chunk of resources in the HPT) */
    if (shift > (spapr_hpt_shift_for_ramsize(current_ram_size) + 1)) {
        return H_RESOURCE;
    }

    rc = kvmppc_resize_hpt_prepare(cpu, flags, shift);
    if (rc != -ENOSYS) {
        return resize_hpt_convert_rc(rc);
    }

    if (kvm_enabled()) {
        return H_HARDWARE;
    }

    return softmmu_resize_hpt_prepare(cpu, spapr, shift);
}
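
/*
 * Rough sketch of the guest-side protocol (PAPR hcall-hpt-resize, as
 * implemented by the prepare/commit pair in this file); the loop below
 * is hypothetical pseudocode, not a QEMU API:
 *
 *   do {
 *       rc = H_RESIZE_HPT_PREPARE(flags = 0, shift);
 *   } while (rc is one of the H_LONG_BUSY_* codes);
 *   if (rc == H_SUCCESS) {
 *       rc = H_RESIZE_HPT_COMMIT(flags = 0, shift);
 *   }
 */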

static void do_push_sregs_to_kvm_pr(CPUState *cs, run_on_cpu_data data)
{
    int ret;

    cpu_synchronize_state(cs);

    ret = kvmppc_put_books_sregs(POWERPC_CPU(cs));
    if (ret < 0) {
        error_report("failed to push sregs to KVM: %s", strerror(-ret));
        exit(1);
    }
}

void push_sregs_to_kvm_pr(SpaprMachineState *spapr)
{
    CPUState *cs;

    /*
     * This is a hack for the benefit of KVM PR - it abuses the SDR1
     * slot in kvm_sregs to communicate the userspace address of the
     * HPT
     */
    if (!kvm_enabled() || !spapr->htab) {
        return;
    }

    CPU_FOREACH(cs) {
        run_on_cpu(cs, do_push_sregs_to_kvm_pr, RUN_ON_CPU_NULL);
    }
}

static target_ulong h_resize_hpt_commit(PowerPCCPU *cpu,
                                        SpaprMachineState *spapr,
                                        target_ulong opcode,
                                        target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong shift = args[1];
    int rc;

    if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) {
        return H_AUTHORITY;
    }

    if (!spapr->htab_shift) {
        /* Radix guest, no HPT */
        return H_NOT_AVAILABLE;
    }

    trace_spapr_h_resize_hpt_commit(flags, shift);

    rc = kvmppc_resize_hpt_commit(cpu, flags, shift);
    if (rc != -ENOSYS) {
        rc = resize_hpt_convert_rc(rc);
        if (rc == H_SUCCESS) {
            /* Need to set the new htab_shift in the machine state */
            spapr->htab_shift = shift;
        }
        return rc;
    }

    if (kvm_enabled()) {
        return H_HARDWARE;
    }

    return softmmu_resize_hpt_commit(cpu, spapr, flags, shift);
}

static target_ulong h_set_sprg0(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_SPRG0] = args[0];

    return H_SUCCESS;
}

static target_ulong h_set_dabr(PowerPCCPU *cpu, SpaprMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    if (!ppc_has_spr(cpu, SPR_DABR)) {
        return H_HARDWARE;              /* DABR register not available */
    }
    cpu_synchronize_state(CPU(cpu));

    if (ppc_has_spr(cpu, SPR_DABRX)) {
        cpu->env.spr[SPR_DABRX] = 0x3;  /* Use Problem and Privileged state */
    } else if (!(args[0] & 0x4)) {      /* Breakpoint Translation set? */
        return H_RESERVED_DABR;
    }

    cpu->env.spr[SPR_DABR] = args[0];
    return H_SUCCESS;
}

static target_ulong h_set_xdabr(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong dabrx = args[1];

    if (!ppc_has_spr(cpu, SPR_DABR) || !ppc_has_spr(cpu, SPR_DABRX)) {
        return H_HARDWARE;
    }

    if ((dabrx & ~0xfULL) != 0 || (dabrx & H_DABRX_HYPERVISOR) != 0
        || (dabrx & (H_DABRX_KERNEL | H_DABRX_USER)) == 0) {
        return H_PARAMETER;
    }

    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_DABRX] = dabrx;
    cpu->env.spr[SPR_DABR] = args[0];

    return H_SUCCESS;
}

static target_ulong h_page_init(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    hwaddr dst = args[1];
    hwaddr src = args[2];
    hwaddr len = TARGET_PAGE_SIZE;
    uint8_t *pdst, *psrc;
    target_long ret = H_SUCCESS;

    if (flags & ~(H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE
                  | H_COPY_PAGE | H_ZERO_PAGE)) {
        qemu_log_mask(LOG_UNIMP, "h_page_init: Bad flags (" TARGET_FMT_lx ")\n",
                      flags);
        return H_PARAMETER;
    }

    /* Map-in destination */
    if (!is_ram_address(spapr, dst) || (dst & ~TARGET_PAGE_MASK) != 0) {
        return H_PARAMETER;
    }
    pdst = cpu_physical_memory_map(dst, &len, true);
    if (!pdst || len != TARGET_PAGE_SIZE) {
        return H_PARAMETER;
    }

    if (flags & H_COPY_PAGE) {
        /* Map-in source, copy to destination, and unmap source again */
        if (!is_ram_address(spapr, src) || (src & ~TARGET_PAGE_MASK) != 0) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        psrc = cpu_physical_memory_map(src, &len, false);
        if (!psrc || len != TARGET_PAGE_SIZE) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        memcpy(pdst, psrc, len);
        cpu_physical_memory_unmap(psrc, len, 0, len);
    } else if (flags & H_ZERO_PAGE) {
        memset(pdst, 0, len);          /* Just clear the destination page */
    }

    if (kvm_enabled() && (flags & H_ICACHE_SYNCHRONIZE) != 0) {
        kvmppc_dcbst_range(cpu, pdst, len);
    }
    if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) {
        if (kvm_enabled()) {
            kvmppc_icbi_range(cpu, pdst, len);
        } else {
            tb_flush(CPU(cpu));
        }
    }

unmap_out:
    cpu_physical_memory_unmap(pdst, TARGET_PAGE_SIZE, 1, len);
    return ret;
}
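
/*
 * Typical H_PAGE_INIT uses, as handled above: a guest zeroes a page
 * with flags = H_ZERO_PAGE, or copies one with flags = H_COPY_PAGE,
 * optionally ORing in H_ICACHE_SYNCHRONIZE / H_ICACHE_INVALIDATE when
 * the destination may contain executable code. Both src and dst must
 * be page-aligned addresses within guest RAM.
 */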

#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL
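
/*
 * A reading of the constants above: the nibble at bits 44-47 of the
 * flags argument selects the sub-function, with 0x2 = VPA, 0x4 = DTL,
 * 0x6 = SLB shadow, and 0x8 ORed in to turn a registration into a
 * deregistration.
 */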

static target_ulong register_vpa(PowerPCCPU *cpu, target_ulong vpa)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    uint16_t size;
    uint8_t tmp;

    if (vpa == 0) {
        hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
        return H_HARDWARE;
    }

    if (vpa % env->dcache_line_size) {
        return H_PARAMETER;
    }
    /* FIXME: bounds check the address */

    size = lduw_be_phys(cs->as, vpa + 0x4);

    if (size < VPA_MIN_SIZE) {
        return H_PARAMETER;
    }

    /* VPA is not allowed to cross a page boundary */
    if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    spapr_cpu->vpa_addr = vpa;

    tmp = ldub_phys(cs->as, spapr_cpu->vpa_addr + VPA_SHARED_PROC_OFFSET);
    tmp |= VPA_SHARED_PROC_VAL;
    stb_phys(cs->as, spapr_cpu->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);

    return H_SUCCESS;
}

static target_ulong deregister_vpa(PowerPCCPU *cpu, target_ulong vpa)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    if (spapr_cpu->slb_shadow_addr) {
        return H_RESOURCE;
    }

    if (spapr_cpu->dtl_addr) {
        return H_RESOURCE;
    }

    spapr_cpu->vpa_addr = 0;
    return H_SUCCESS;
}

static target_ulong register_slb_shadow(PowerPCCPU *cpu, target_ulong addr)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(CPU(cpu)->as, addr + 0x4);
    if (size < 0x8) {
        return H_PARAMETER;
    }

    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!spapr_cpu->vpa_addr) {
        return H_RESOURCE;
    }

    spapr_cpu->slb_shadow_addr = addr;
    spapr_cpu->slb_shadow_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_slb_shadow(PowerPCCPU *cpu, target_ulong addr)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    spapr_cpu->slb_shadow_addr = 0;
    spapr_cpu->slb_shadow_size = 0;
    return H_SUCCESS;
}

static target_ulong register_dtl(PowerPCCPU *cpu, target_ulong addr)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(CPU(cpu)->as, addr + 0x4);

    if (size < 48) {
        return H_PARAMETER;
    }

    if (!spapr_cpu->vpa_addr) {
        return H_RESOURCE;
    }

    spapr_cpu->dtl_addr = addr;
    spapr_cpu->dtl_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_dtl(PowerPCCPU *cpu, target_ulong addr)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    spapr_cpu->dtl_addr = 0;
    spapr_cpu->dtl_size = 0;

    return H_SUCCESS;
}

static target_ulong h_register_vpa(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    target_ulong vpa = args[2];
    target_ulong ret = H_PARAMETER;
    PowerPCCPU *tcpu;

    tcpu = spapr_find_cpu(procno);
    if (!tcpu) {
        return H_PARAMETER;
    }

    switch (flags) {
    case FLAGS_REGISTER_VPA:
        ret = register_vpa(tcpu, vpa);
        break;

    case FLAGS_DEREGISTER_VPA:
        ret = deregister_vpa(tcpu, vpa);
        break;

    case FLAGS_REGISTER_SLBSHADOW:
        ret = register_slb_shadow(tcpu, vpa);
        break;

    case FLAGS_DEREGISTER_SLBSHADOW:
        ret = deregister_slb_shadow(tcpu, vpa);
        break;

    case FLAGS_REGISTER_DTL:
        ret = register_dtl(tcpu, vpa);
        break;

    case FLAGS_DEREGISTER_DTL:
        ret = deregister_dtl(tcpu, vpa);
        break;
    }

    return ret;
}
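
/*
 * Ordering constraints enforced by the helpers above: the VPA must be
 * registered before the SLB shadow buffer or the dispatch trace log
 * (register_slb_shadow() and register_dtl() fail with H_RESOURCE
 * otherwise), and conversely deregister_vpa() refuses to tear down the
 * VPA while either of them is still registered.
 */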

static target_ulong h_cede(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);

    if (spapr_cpu->prod) {
        spapr_cpu->prod = false;
        return H_SUCCESS;
    }

    if (!cpu_has_work(cs)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cs->exit_request = 1;
    }

    return H_SUCCESS;
}

/*
 * Confer to self, aka join. Cede could use the same pattern as well, if
 * EXCP_HLT can be changed to EXCP_HALTED.
 */
static target_ulong h_confer_self(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    if (spapr_cpu->prod) {
        spapr_cpu->prod = false;
        return H_SUCCESS;
    }
    cs->halted = 1;
    cs->exception_index = EXCP_HALTED;
    cs->exit_request = 1;

    return H_SUCCESS;
}

static target_ulong h_join(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs;
    bool last_unjoined = true;

    if (env->msr & (1ULL << MSR_EE)) {
        return H_BAD_MODE;
    }

    /*
     * Must not join the last CPU running. Interestingly, no such restriction
     * for H_CONFER-to-self, but that is probably not intended to be used
     * when H_JOIN is available.
     */
    CPU_FOREACH(cs) {
        PowerPCCPU *c = POWERPC_CPU(cs);
        CPUPPCState *e = &c->env;
        if (c == cpu) {
            continue;
        }

        /* Don't have a way to indicate joined, so use halted && MSR[EE]=0 */
        if (!cs->halted || (e->msr & (1ULL << MSR_EE))) {
            last_unjoined = false;
            break;
        }
    }
    if (last_unjoined) {
        return H_CONTINUE;
    }

    return h_confer_self(cpu);
}

static target_ulong h_confer(PowerPCCPU *cpu, SpaprMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    target_long target = args[0];
    uint32_t dispatch = args[1];
    CPUState *cs = CPU(cpu);
    SpaprCpuState *spapr_cpu;

    /*
     * -1 means confer to all other CPUs without dispatch counter check,
     *  otherwise it's a targeted confer.
     */
    if (target != -1) {
        PowerPCCPU *target_cpu = spapr_find_cpu(target);
        uint32_t target_dispatch;

        if (!target_cpu) {
            return H_PARAMETER;
        }

        /*
         * target == self is a special case, we wait until prodded, without
         * dispatch counter check.
         */
        if (cpu == target_cpu) {
            return h_confer_self(cpu);
        }

        spapr_cpu = spapr_cpu_state(target_cpu);
        if (!spapr_cpu->vpa_addr || ((dispatch & 1) == 0)) {
            return H_SUCCESS;
        }

        target_dispatch = ldl_be_phys(cs->as,
                                  spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
        if (target_dispatch != dispatch) {
            return H_SUCCESS;
        }

        /*
         * The targeted confer does not do anything special beyond yielding
         * the current vCPU, but even this should be better than nothing.
         * At least for single-threaded tcg, it gives the target a chance to
         * run before we run again. Multi-threaded tcg does not really do
         * anything with EXCP_YIELD yet.
         */
    }

    cs->exception_index = EXCP_YIELD;
    cs->exit_request = 1;
    cpu_loop_exit(cs);

    return H_SUCCESS;
}

static target_ulong h_prod(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_long target = args[0];
    PowerPCCPU *tcpu;
    CPUState *cs;
    SpaprCpuState *spapr_cpu;

    tcpu = spapr_find_cpu(target);
    cs = CPU(tcpu);
    if (!cs) {
        return H_PARAMETER;
    }

    spapr_cpu = spapr_cpu_state(tcpu);
    spapr_cpu->prod = true;
    cs->halted = 0;
    qemu_cpu_kick(cs);

    return H_SUCCESS;
}

static target_ulong h_rtas(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong rtas_r3 = args[0];
    uint32_t token = rtas_ld(rtas_r3, 0);
    uint32_t nargs = rtas_ld(rtas_r3, 1);
    uint32_t nret = rtas_ld(rtas_r3, 2);

    return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
                           nret, rtas_r3 + 12 + 4 * nargs);
}
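
/*
 * For reference, the RTAS argument buffer at rtas_r3 read above is
 * laid out as 32-bit big-endian words:
 *
 *   word 0:             token
 *   word 1:             nargs
 *   word 2:             nret
 *   words 3..2+nargs:   call arguments (byte offset 12 onwards)
 *   following words:    nret return slots, starting at 12 + 4 * nargs
 */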

static target_ulong h_logical_load(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong size = args[0];
    target_ulong addr = args[1];

    switch (size) {
    case 1:
        args[0] = ldub_phys(cs->as, addr);
        return H_SUCCESS;
    case 2:
        args[0] = lduw_phys(cs->as, addr);
        return H_SUCCESS;
    case 4:
        args[0] = ldl_phys(cs->as, addr);
        return H_SUCCESS;
    case 8:
        args[0] = ldq_phys(cs->as, addr);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

static target_ulong h_logical_store(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong size = args[0];
    target_ulong addr = args[1];
    target_ulong val  = args[2];

    switch (size) {
    case 1:
        stb_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 2:
        stw_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 4:
        stl_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 8:
        stq_phys(cs->as, addr, val);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

static target_ulong h_logical_memop(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong dst   = args[0]; /* Destination address */
    target_ulong src   = args[1]; /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op    = args[4]; /* 0 = copy, 1 = invert */
    uint64_t tmp;
    unsigned int mask = (1 << esize) - 1;
    int step = 1 << esize;

    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    if (dst >= src && dst < (src + (count << esize))) {
        dst = dst + ((count - 1) << esize);
        src = src + ((count - 1) << esize);
        step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0:
            tmp = ldub_phys(cs->as, src);
            break;
        case 1:
            tmp = lduw_phys(cs->as, src);
            break;
        case 2:
            tmp = ldl_phys(cs->as, src);
            break;
        case 3:
            tmp = ldq_phys(cs->as, src);
            break;
        default:
            return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;
        }
        switch (esize) {
        case 0:
            stb_phys(cs->as, dst, tmp);
            break;
        case 1:
            stw_phys(cs->as, dst, tmp);
            break;
        case 2:
            stl_phys(cs->as, dst, tmp);
            break;
        case 3:
            stq_phys(cs->as, dst, tmp);
            break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}
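
/*
 * Note the memmove()-style overlap handling above: if the destination
 * range overlaps the source and starts at or above it, the copy walks
 * backwards from the last element so that source elements are read
 * before they are overwritten.
 */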

static target_ulong h_logical_icbi(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_logical_dcbf(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong mflags,
                                           target_ulong value1,
                                           target_ulong value2)
{
    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    switch (mflags) {
    case H_SET_MODE_ENDIAN_BIG:
        spapr_set_all_lpcrs(0, LPCR_ILE);
        spapr_pci_switch_vga(spapr, true);
        return H_SUCCESS;

    case H_SET_MODE_ENDIAN_LITTLE:
        spapr_set_all_lpcrs(LPCR_ILE, LPCR_ILE);
        spapr_pci_switch_vga(spapr, false);
        return H_SUCCESS;
    }

    return H_UNSUPPORTED_FLAG;
}

static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
                                                        target_ulong mflags,
                                                        target_ulong value1,
                                                        target_ulong value2)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!(pcc->insns_flags2 & PPC2_ISA207S)) {
        return H_P2;
    }
    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    if (mflags == 1) {
        /* AIL=1 is reserved in POWER8/POWER9/POWER10 */
        return H_UNSUPPORTED_FLAG;
    }

    if (mflags == 2 && (pcc->insns_flags2 & PPC2_ISA310)) {
        /* AIL=2 is reserved in POWER10 (ISA v3.1) */
        return H_UNSUPPORTED_FLAG;
    }

    spapr_set_all_lpcrs(mflags << LPCR_AIL_SHIFT, LPCR_AIL);

    return H_SUCCESS;
}
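
/*
 * Here mflags is the AIL (Alternate Interrupt Location) value that is
 * propagated into LPCR[AIL] on every vCPU: 0 takes interrupts with
 * relocation off at the traditional vectors, while 3 takes them with
 * relocation on at the high vectors; 1 is always reserved and 2 is
 * additionally reserved from ISA v3.1 on, as checked above.
 */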

static target_ulong h_set_mode(PowerPCCPU *cpu, SpaprMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    target_ulong resource = args[1];
    target_ulong ret = H_P2;

    switch (resource) {
    case H_SET_MODE_RESOURCE_LE:
        ret = h_set_mode_resource_le(cpu, spapr, args[0], args[2], args[3]);
        break;
    case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
        ret = h_set_mode_resource_addr_trans_mode(cpu, args[0],
                                                  args[2], args[3]);
        break;
    }

    return ret;
}

static target_ulong h_clean_slb(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x"TARGET_FMT_lx"%s\n",
                  opcode, " (H_CLEAN_SLB)");
    return H_FUNCTION;
}

static target_ulong h_invalidate_pid(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                     target_ulong opcode, target_ulong *args)
{
    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x"TARGET_FMT_lx"%s\n",
                  opcode, " (H_INVALIDATE_PID)");
    return H_FUNCTION;
}

static void spapr_check_setup_free_hpt(SpaprMachineState *spapr,
                                       uint64_t patbe_old, uint64_t patbe_new)
{
    /*
     * We have 4 Options:
     * HASH->HASH || RADIX->RADIX || NOTHING->RADIX : Do Nothing
     * HASH->RADIX                                  : Free HPT
     * RADIX->HASH                                  : Allocate HPT
     * NOTHING->HASH                                : Allocate HPT
     * Note: NOTHING implies the case where we said the guest could choose
     *       later and so assumed radix and now it's called H_REG_PROC_TBL
     */

    if ((patbe_old & PATE1_GR) == (patbe_new & PATE1_GR)) {
        /* We assume RADIX, so this catches all the "Do Nothing" cases */
    } else if (!(patbe_old & PATE1_GR)) {
        /* HASH->RADIX : Free HPT */
        spapr_free_hpt(spapr);
    } else if (!(patbe_new & PATE1_GR)) {
        /* RADIX->HASH || NOTHING->HASH : Allocate HPT */
        spapr_setup_hpt(spapr);
    }
}

#define FLAGS_MASK              0x01FULL
#define FLAG_MODIFY             0x10
#define FLAG_REGISTER           0x08
#define FLAG_RADIX              0x04
#define FLAG_HASH_PROC_TBL      0x02
#define FLAG_GTSE               0x01

static target_ulong h_register_process_table(PowerPCCPU *cpu,
                                             SpaprMachineState *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong proc_tbl = args[1];
    target_ulong page_size = args[2];
    target_ulong table_size = args[3];
    target_ulong update_lpcr = 0;
    uint64_t cproc;

    if (flags & ~FLAGS_MASK) { /* Check no reserved bits are set */
        return H_PARAMETER;
    }
    if (flags & FLAG_MODIFY) {
        if (flags & FLAG_REGISTER) {
            if (flags & FLAG_RADIX) { /* Register new RADIX process table */
                if (proc_tbl & 0xfff || proc_tbl >> 60) {
                    return H_P2;
                } else if (page_size) {
                    return H_P3;
                } else if (table_size > 24) {
                    return H_P4;
                }
                cproc = PATE1_GR | proc_tbl | table_size;
            } else { /* Register new HPT process table */
                if (flags & FLAG_HASH_PROC_TBL) { /* Hash with Segment Tables */
                    /* TODO - Not Supported */
                    /* Technically caused by flag bits => H_PARAMETER */
                    return H_PARAMETER;
                } else { /* Hash with SLB */
                    if (proc_tbl >> 38) {
                        return H_P2;
                    } else if (page_size & ~0x7) {
                        return H_P3;
                    } else if (table_size > 24) {
                        return H_P4;
                    }
                }
                cproc = (proc_tbl << 25) | page_size << 5 | table_size;
            }

        } else { /* Deregister current process table */
            /*
             * Set to benign value: (current GR) | 0. This allows
             * deregistration in KVM to succeed even if the radix bit
             * in flags doesn't match the radix bit in the old PATE.
             */
            cproc = spapr->patb_entry & PATE1_GR;
        }
    } else { /* Maintain current registration */
        if (!(flags & FLAG_RADIX) != !(spapr->patb_entry & PATE1_GR)) {
            /* Technically caused by flag bits => H_PARAMETER */
            return H_PARAMETER; /* Existing Process Table Mismatch */
        }
        cproc = spapr->patb_entry;
    }

    /* Check if we need to set up or free the HPT */
    spapr_check_setup_free_hpt(spapr, spapr->patb_entry, cproc);

    spapr->patb_entry = cproc; /* Save new process table */

    /* Update the UPRT, HR and GTSE bits in the LPCR for all cpus */
    if (flags & FLAG_RADIX) {   /* Radix must use process tables, also set HR */
        update_lpcr |= (LPCR_UPRT | LPCR_HR);
    } else if (flags & FLAG_HASH_PROC_TBL) { /* Hash with process tables */
        update_lpcr |= LPCR_UPRT;
    }
    if (flags & FLAG_GTSE) {    /* Guest translation shootdown enable */
        update_lpcr |= LPCR_GTSE;
    }

    spapr_set_all_lpcrs(update_lpcr, LPCR_UPRT | LPCR_HR | LPCR_GTSE);

    if (kvm_enabled()) {
        return kvmppc_configure_v3_mmu(cpu, flags & FLAG_RADIX,
                                       flags & FLAG_GTSE, cproc);
    }
    return H_SUCCESS;
}
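
/*
 * For illustration only (one valid combination among several): a radix
 * guest with guest translation shootdown enabled would call
 * H_REGISTER_PROC_TBL with
 *
 *   flags      = FLAG_MODIFY | FLAG_REGISTER | FLAG_RADIX | FLAG_GTSE
 *   proc_tbl   = 4K-aligned guest real address of the process table
 *   page_size  = 0
 *   table_size = log2(table size in bytes) - 12, at most 24
 *
 * which the code above folds into a PATE1 value with PATE1_GR set.
 */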

#define H_SIGNAL_SYS_RESET_ALL         -1
#define H_SIGNAL_SYS_RESET_ALLBUTSELF  -2

static target_ulong h_signal_sys_reset(PowerPCCPU *cpu,
                                       SpaprMachineState *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    target_long target = args[0];
    CPUState *cs;

    if (target < 0) {
        /* Broadcast */
        if (target < H_SIGNAL_SYS_RESET_ALLBUTSELF) {
            return H_PARAMETER;
        }

        CPU_FOREACH(cs) {
            PowerPCCPU *c = POWERPC_CPU(cs);

            if (target == H_SIGNAL_SYS_RESET_ALLBUTSELF) {
                if (c == cpu) {
                    continue;
                }
            }
            run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
        }
        return H_SUCCESS;

    } else {
        /* Unicast */
        cs = CPU(spapr_find_cpu(target));
        if (cs) {
            run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
            return H_SUCCESS;
        }
        return H_PARAMETER;
    }
}

/* Returns either a logical PVR or zero if none was found */
static uint32_t cas_check_pvr(PowerPCCPU *cpu, uint32_t max_compat,
                              target_ulong *addr, bool *raw_mode_supported)
{
    bool explicit_match = false; /* Matched the CPU's real PVR */
    uint32_t best_compat = 0;
    int i;

    /*
     * We scan the supplied table of PVRs looking for two things
     *   1. Is our real CPU PVR in the list?
     *   2. What's the "best" listed logical PVR
     */
    for (i = 0; i < 512; ++i) {
        uint32_t pvr, pvr_mask;

        pvr_mask = ldl_be_phys(&address_space_memory, *addr);
        pvr = ldl_be_phys(&address_space_memory, *addr + 4);
        *addr += 8;

        if (~pvr_mask & pvr) {
            break; /* Terminator record */
        }

        if ((cpu->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask)) {
            explicit_match = true;
        } else {
            if (ppc_check_compat(cpu, pvr, best_compat, max_compat)) {
                best_compat = pvr;
            }
        }
    }

    *raw_mode_supported = explicit_match;

    /* Parsing finished */
    trace_spapr_cas_pvr(cpu->compat_pvr, explicit_match, best_compat);

    return best_compat;
}
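
/*
 * The table scanned by cas_check_pvr() is the list of (PVR mask, PVR
 * value) pairs the guest passes to ibm,client-architecture-support;
 * an entry whose value has bits set outside its mask (~pvr_mask & pvr)
 * terminates the list. The "best" logical PVR is the newest one the
 * host can actually offer, as filtered by ppc_check_compat().
 */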

static
target_ulong do_client_architecture_support(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong vec,
                                            target_ulong fdt_bufsize)
{
    target_ulong ov_table; /* Working address in data buffer */
    uint32_t cas_pvr;
    SpaprOptionVector *ov1_guest, *ov5_guest;
    bool guest_radix;
    bool raw_mode_supported = false;
    bool guest_xive;
    CPUState *cs;
    void *fdt;
    uint32_t max_compat = spapr->max_compat_pvr;

    /* CAS is supposed to be called early when only the boot vCPU is active. */
    CPU_FOREACH(cs) {
        if (cs == CPU(cpu)) {
            continue;
        }
        if (!cs->halted) {
            warn_report("guest has multiple active vCPUs at CAS, which is not allowed");
            return H_MULTI_THREADS_ACTIVE;
        }
    }

    cas_pvr = cas_check_pvr(cpu, max_compat, &vec, &raw_mode_supported);
    if (!cas_pvr && (!raw_mode_supported || max_compat)) {
        /*
         * We couldn't find a suitable compatibility mode, and either
         * the guest doesn't support "raw" mode for this CPU, or "raw"
         * mode is disabled because a maximum compat mode is set.
         */
        error_report("Couldn't negotiate a suitable PVR during CAS");
        return H_HARDWARE;
    }

    /* Update CPUs */
    if (cpu->compat_pvr != cas_pvr) {
        Error *local_err = NULL;

        if (ppc_set_compat_all(cas_pvr, &local_err) < 0) {
            /*
             * We failed to set the compat mode (likely because we're
             * running with KVM PR), but maybe we can fall back to raw
             * mode if the guest supports it.
             */
            if (!raw_mode_supported) {
                error_report_err(local_err);
                return H_HARDWARE;
            }
            error_free(local_err);
        }
    }

    /* From here on, @ov_table points to the first option vector */
    ov_table = vec;

    ov1_guest = spapr_ovec_parse_vector(ov_table, 1);
    if (!ov1_guest) {
        warn_report("guest didn't provide option vector 1");
        return H_PARAMETER;
    }
    ov5_guest = spapr_ovec_parse_vector(ov_table, 5);
    if (!ov5_guest) {
        spapr_ovec_cleanup(ov1_guest);
        warn_report("guest didn't provide option vector 5");
        return H_PARAMETER;
    }
    if (spapr_ovec_test(ov5_guest, OV5_MMU_BOTH)) {
        error_report("guest requested hash and radix MMU, which is invalid.");
        exit(EXIT_FAILURE);
    }
    if (spapr_ovec_test(ov5_guest, OV5_XIVE_BOTH)) {
        error_report("guest requested an invalid interrupt mode");
        exit(EXIT_FAILURE);
    }

    guest_radix = spapr_ovec_test(ov5_guest, OV5_MMU_RADIX_300);

    guest_xive = spapr_ovec_test(ov5_guest, OV5_XIVE_EXPLOIT);

    /*
     * HPT resizing is a bit of a special case, because when enabled
     * we assume an HPT guest will support it until it says it
     * doesn't, instead of assuming it won't support it until it says
     * it does.  Strictly speaking that approach could break for
     * guests which don't make a CAS call, but those are so old we
     * don't care about them.  Without that assumption we'd have to
     * make at least a temporary allocation of an HPT sized for max
     * memory, which could be impossibly difficult under KVM HV if
     * maxram is large.
     */
    if (!guest_radix && !spapr_ovec_test(ov5_guest, OV5_HPT_RESIZE)) {
        int maxshift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size);

        if (spapr->resize_hpt == SPAPR_RESIZE_HPT_REQUIRED) {
            error_report(
                "h_client_architecture_support: Guest doesn't support HPT resizing, but resize-hpt=required");
            exit(1);
        }

        if (spapr->htab_shift < maxshift) {
            /* Guest doesn't know about HPT resizing, so we
             * pre-emptively resize for the maximum permitted RAM.  At
             * the point this is called, nothing should have been
             * entered into the existing HPT */
            spapr_reallocate_hpt(spapr, maxshift, &error_fatal);
            push_sregs_to_kvm_pr(spapr);
        }
    }

    /* NOTE: there are actually a number of ov5 bits where input from the
     * guest is always zero, and the platform/QEMU enables them independently
     * of guest input. To model these properly we'd want some sort of mask,
     * but since they only currently apply to memory migration as defined
     * by LoPAPR 1.1, 14.5.4.8, which QEMU doesn't implement, we don't need
     * to worry about this for now.
     */

    /* full range of negotiated ov5 capabilities */
    spapr_ovec_intersect(spapr->ov5_cas, spapr->ov5, ov5_guest);
    spapr_ovec_cleanup(ov5_guest);

    spapr_check_mmu_mode(guest_radix);

    spapr->cas_pre_isa3_guest = !spapr_ovec_test(ov1_guest, OV1_PPC_3_00);
    spapr_ovec_cleanup(ov1_guest);

    /*
     * Check for NUMA affinity conditions now that we know which NUMA
     * affinity the guest will use.
     */
    spapr_numa_associativity_check(spapr);

    /*
     * Ensure the guest asks for an interrupt mode we support;
     * otherwise terminate the boot.
     */
    if (guest_xive) {
        if (!spapr->irq->xive) {
            error_report(
"Guest requested unavailable interrupt mode (XIVE), try the ic-mode=xive or ic-mode=dual machine property");
            exit(EXIT_FAILURE);
        }
    } else {
        if (!spapr->irq->xics) {
            error_report(
"Guest requested unavailable interrupt mode (XICS), either don't set the ic-mode machine property or try ic-mode=xics or ic-mode=dual");
            exit(EXIT_FAILURE);
        }
    }

    spapr_irq_update_active_intc(spapr);

    /*
     * Process all pending hot-plug/unplug requests now. An updated full
     * rendered FDT will be returned to the guest.
     */
    spapr_drc_reset_all(spapr);
    spapr_clear_pending_hotplug_events(spapr);

    /*
     * If spapr_machine_reset() did not set up a HPT but one is necessary
     * (because the guest isn't going to use radix) then set it up here.
     */
    if ((spapr->patb_entry & PATE1_GR) && !guest_radix) {
        /* legacy hash or new hash: */
        spapr_setup_hpt(spapr);
    }

    fdt = spapr_build_fdt(spapr, spapr->vof != NULL, fdt_bufsize);
    g_free(spapr->fdt_blob);
    spapr->fdt_size = fdt_totalsize(fdt);
    spapr->fdt_initial_size = spapr->fdt_size;
    spapr->fdt_blob = fdt;

    return H_SUCCESS;
}

static target_ulong h_client_architecture_support(PowerPCCPU *cpu,
                                                  SpaprMachineState *spapr,
                                                  target_ulong opcode,
                                                  target_ulong *args)
{
    target_ulong vec = ppc64_phys_to_real(args[0]);
    target_ulong fdt_buf = args[1];
    target_ulong fdt_bufsize = args[2];
    target_ulong ret;
    SpaprDeviceTreeUpdateHeader hdr = { .version_id = 1 };

    if (fdt_bufsize < sizeof(hdr)) {
        error_report("SLOF provided insufficient CAS buffer "
                     TARGET_FMT_lu " (min: %zu)", fdt_bufsize, sizeof(hdr));
        exit(EXIT_FAILURE);
    }

    fdt_bufsize -= sizeof(hdr);

    ret = do_client_architecture_support(cpu, spapr, vec, fdt_bufsize);
    if (ret == H_SUCCESS) {
        _FDT((fdt_pack(spapr->fdt_blob)));
        spapr->fdt_size = fdt_totalsize(spapr->fdt_blob);
        spapr->fdt_initial_size = spapr->fdt_size;

        cpu_physical_memory_write(fdt_buf, &hdr, sizeof(hdr));
        cpu_physical_memory_write(fdt_buf + sizeof(hdr), spapr->fdt_blob,
                                  spapr->fdt_size);
        trace_spapr_cas_continue(spapr->fdt_size + sizeof(hdr));
    }

    return ret;
}
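
/*
 * The CAS response written back to fdt_buf above is a small
 * SpaprDeviceTreeUpdateHeader immediately followed by the packed,
 * freshly rebuilt FDT; the guest firmware is expected to re-fetch the
 * device tree from that buffer.
 */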

target_ulong spapr_vof_client_architecture_support(MachineState *ms,
                                                   CPUState *cs,
                                                   target_ulong ovec_addr)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(ms);

    target_ulong ret = do_client_architecture_support(POWERPC_CPU(cs), spapr,
                                                      ovec_addr, FDT_MAX_SIZE);

    /*
     * This adds stdout and generates phandles for boottime and CAS FDTs.
     * It is alright to update the FDT here as do_client_architecture_support()
     * does not pack it.
     */
    spapr_vof_client_dt_finalize(spapr, spapr->fdt_blob);

    return ret;
}

static target_ulong h_get_cpu_characteristics(PowerPCCPU *cpu,
                                              SpaprMachineState *spapr,
                                              target_ulong opcode,
                                              target_ulong *args)
{
    uint64_t characteristics = H_CPU_CHAR_HON_BRANCH_HINTS &
                               ~H_CPU_CHAR_THR_RECONF_TRIG;
    uint64_t behaviour = H_CPU_BEHAV_FAVOUR_SECURITY;
    uint8_t safe_cache = spapr_get_cap(spapr, SPAPR_CAP_CFPC);
    uint8_t safe_bounds_check = spapr_get_cap(spapr, SPAPR_CAP_SBBC);
    uint8_t safe_indirect_branch = spapr_get_cap(spapr, SPAPR_CAP_IBS);
    uint8_t count_cache_flush_assist = spapr_get_cap(spapr,
                                                     SPAPR_CAP_CCF_ASSIST);

    switch (safe_cache) {
    case SPAPR_CAP_WORKAROUND:
        characteristics |= H_CPU_CHAR_L1D_FLUSH_ORI30;
        characteristics |= H_CPU_CHAR_L1D_FLUSH_TRIG2;
        characteristics |= H_CPU_CHAR_L1D_THREAD_PRIV;
        behaviour |= H_CPU_BEHAV_L1D_FLUSH_PR;
        break;
    case SPAPR_CAP_FIXED:
        behaviour |= H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY;
        behaviour |= H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS;
        break;
    default: /* broken */
        assert(safe_cache == SPAPR_CAP_BROKEN);
        behaviour |= H_CPU_BEHAV_L1D_FLUSH_PR;
        break;
    }

    switch (safe_bounds_check) {
    case SPAPR_CAP_WORKAROUND:
        characteristics |= H_CPU_CHAR_SPEC_BAR_ORI31;
        behaviour |= H_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
        break;
    case SPAPR_CAP_FIXED:
        break;
    default: /* broken */
        assert(safe_bounds_check == SPAPR_CAP_BROKEN);
        behaviour |= H_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
        break;
    }

    switch (safe_indirect_branch) {
    case SPAPR_CAP_FIXED_NA:
        break;
    case SPAPR_CAP_FIXED_CCD:
        characteristics |= H_CPU_CHAR_CACHE_COUNT_DIS;
        break;
    case SPAPR_CAP_FIXED_IBS:
        characteristics |= H_CPU_CHAR_BCCTRL_SERIALISED;
        break;
    case SPAPR_CAP_WORKAROUND:
        behaviour |= H_CPU_BEHAV_FLUSH_COUNT_CACHE;
        if (count_cache_flush_assist) {
            characteristics |= H_CPU_CHAR_BCCTR_FLUSH_ASSIST;
        }
        break;
    default: /* broken */
        assert(safe_indirect_branch == SPAPR_CAP_BROKEN);
        break;
    }

    args[0] = characteristics;
    args[1] = behaviour;
    return H_SUCCESS;
}

static target_ulong h_update_dt(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong dt = ppc64_phys_to_real(args[0]);
    struct fdt_header hdr = { 0 };
    unsigned cb;
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    void *fdt;

    cpu_physical_memory_read(dt, &hdr, sizeof(hdr));
    cb = fdt32_to_cpu(hdr.totalsize);

    if (!smc->update_dt_enabled) {
        return H_SUCCESS;
    }

    /* Check that the fdt did not grow out of proportion */
    if (cb > spapr->fdt_initial_size * 2) {
        trace_spapr_update_dt_failed_size(spapr->fdt_initial_size, cb,
                                          fdt32_to_cpu(hdr.magic));
        return H_PARAMETER;
    }

    fdt = g_malloc0(cb);
    cpu_physical_memory_read(dt, fdt, cb);

    /* Check the fdt consistency */
    if (fdt_check_full(fdt, cb)) {
        trace_spapr_update_dt_failed_check(spapr->fdt_initial_size, cb,
                                           fdt32_to_cpu(hdr.magic));
        return H_PARAMETER;
    }

    g_free(spapr->fdt_blob);
    spapr->fdt_size = cb;
    spapr->fdt_blob = fdt;
    trace_spapr_update_dt(cb);

    return H_SUCCESS;
}

static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];
static spapr_hcall_fn svm_hypercall_table[(SVM_HCALL_MAX - SVM_HCALL_BASE) / 4 + 1];

void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
{
    spapr_hcall_fn *slot;

    if (opcode <= MAX_HCALL_OPCODE) {
        assert((opcode & 0x3) == 0);

        slot = &papr_hypercall_table[opcode / 4];
    } else if (opcode >= SVM_HCALL_BASE && opcode <= SVM_HCALL_MAX) {
        /* we only have SVM-related hcall numbers assigned in multiples of 4 */
        assert((opcode & 0x3) == 0);

        slot = &svm_hypercall_table[(opcode - SVM_HCALL_BASE) / 4];
    } else {
        assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));

        slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
    }

    assert(!(*slot));
    *slot = fn;
}

target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
                             target_ulong *args)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());

    if ((opcode <= MAX_HCALL_OPCODE)
        && ((opcode & 0x3) == 0)) {
        spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    } else if ((opcode >= SVM_HCALL_BASE) &&
               (opcode <= SVM_HCALL_MAX)) {
        spapr_hcall_fn fn = svm_hypercall_table[(opcode - SVM_HCALL_BASE) / 4];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    } else if ((opcode >= KVMPPC_HCALL_BASE) &&
               (opcode <= KVMPPC_HCALL_MAX)) {
        spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    }

    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx "\n",
                  opcode);
    return H_FUNCTION;
}

#ifndef CONFIG_TCG
static target_ulong h_softmmu(PowerPCCPU *cpu, SpaprMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    g_assert_not_reached();
}

static void hypercall_register_softmmu(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_softmmu);
    spapr_register_hypercall(H_REMOVE, h_softmmu);
    spapr_register_hypercall(H_PROTECT, h_softmmu);
    spapr_register_hypercall(H_READ, h_softmmu);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_softmmu);
}
#else
static void hypercall_register_softmmu(void)
{
    /* DO NOTHING */
}
#endif

static void hypercall_register_types(void)
{
    hypercall_register_softmmu();

    /* hcall-hpt-resize */
    spapr_register_hypercall(H_RESIZE_HPT_PREPARE, h_resize_hpt_prepare);
    spapr_register_hypercall(H_RESIZE_HPT_COMMIT, h_resize_hpt_commit);

    /* hcall-splpar */
    spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
    spapr_register_hypercall(H_CEDE, h_cede);
    spapr_register_hypercall(H_CONFER, h_confer);
    spapr_register_hypercall(H_PROD, h_prod);

    /* hcall-join */
    spapr_register_hypercall(H_JOIN, h_join);

    spapr_register_hypercall(H_SIGNAL_SYS_RESET, h_signal_sys_reset);

    /* processor register resource access h-calls */
    spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0);
    spapr_register_hypercall(H_SET_DABR, h_set_dabr);
    spapr_register_hypercall(H_SET_XDABR, h_set_xdabr);
    spapr_register_hypercall(H_PAGE_INIT, h_page_init);
    spapr_register_hypercall(H_SET_MODE, h_set_mode);

    /* In Memory Table MMU h-calls */
    spapr_register_hypercall(H_CLEAN_SLB, h_clean_slb);
    spapr_register_hypercall(H_INVALIDATE_PID, h_invalidate_pid);
    spapr_register_hypercall(H_REGISTER_PROC_TBL, h_register_process_table);

    /* hcall-get-cpu-characteristics */
    spapr_register_hypercall(H_GET_CPU_CHARACTERISTICS,
                             h_get_cpu_characteristics);

    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate
     * here between the "CI" and the "CACHE" variants, they will use whatever
     * mapping attributes qemu is using. When using KVM, the kernel will
     * enforce the attributes more strongly.
     */
    spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
    spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);

    /* qemu/KVM-PPC specific hcalls */
    spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);

    /* ibm,client-architecture-support support */
    spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);

    spapr_register_hypercall(KVMPPC_H_UPDATE_DT, h_update_dt);
}

type_init(hypercall_register_types)