qemu/hw/ppc/spapr_hcall.c
#include "sysemu/sysemu.h"
#include "cpu.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"
#include "cpu-models.h"
#include "trace.h"
#include "kvm_ppc.h"

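/*
 * Helper to update an SPR on a (possibly remote) vCPU.  run_on_cpu()
 * executes do_spr_sync() in the context of the target CPU, so the
 * read-modify-write of the SPR happens in that vCPU's own thread.
 */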
struct SPRSyncState {
    CPUState *cs;
    int spr;
    target_ulong value;
    target_ulong mask;
};

static void do_spr_sync(void *arg)
{
    struct SPRSyncState *s = arg;
    PowerPCCPU *cpu = POWERPC_CPU(s->cs);
    CPUPPCState *env = &cpu->env;

    cpu_synchronize_state(s->cs);
    env->spr[s->spr] &= ~s->mask;
    env->spr[s->spr] |= s->value;
}

static void set_spr(CPUState *cs, int spr, target_ulong value,
                    target_ulong mask)
{
    struct SPRSyncState s = {
        .cs = cs,
        .spr = spr,
        .value = value,
        .mask = mask
    };
    run_on_cpu(cs, do_spr_sync, &s);
}

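/*
 * Build the RB operand for a tlbie from an HPTE's first doubleword (v),
 * second doubleword (r) and its index in the hash table.  The low-order
 * VA bits that are not kept in the AVA field are reconstructed from the
 * PTEG index; the result is what ppc_tlb_invalidate_one() expects.
 */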
static target_ulong compute_tlbie_rb(target_ulong v, target_ulong r,
                                     target_ulong pte_index)
{
    target_ulong rb, va_low;

    rb = (v & ~0x7fULL) << 16; /* AVA field */
    va_low = pte_index >> 3;
    if (v & HPTE64_V_SECONDARY) {
        va_low = ~va_low;
    }
    /* xor vsid from AVA */
    if (!(v & HPTE64_V_1TB_SEG)) {
        va_low ^= v >> 12;
    } else {
        va_low ^= v >> 24;
    }
    va_low &= 0x7ff;
    if (v & HPTE64_V_LARGE) {
        rb |= 1;                         /* L field */
#if 0 /* Disable that P7 specific bit for now */
        if (r & 0xff000) {
            /* non-16MB large page, must be 64k */
            /* (masks depend on page size) */
            rb |= 0x1000;                /* page encoding in LP field */
            rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
            rb |= (va_low & 0xfe);       /* AVAL field */
        }
#endif
    } else {
        /* 4kB page */
        rb |= (va_low & 0x7ff) << 12;   /* remaining 11b of AVA */
    }
    rb |= (v >> 54) & 0x300;            /* B field */
    return rb;
}

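/*
 * Check that a guest-supplied PTE index falls inside the hash table:
 * the PTEG number (index / HPTES_PER_GROUP) must not exceed htab_mask.
 */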
static inline bool valid_pte_index(CPUPPCState *env, target_ulong pte_index)
{
    /*
     * hash value/pteg group index is normalized by htab_mask
     */
    if (((pte_index & ~7ULL) / HPTES_PER_GROUP) & ~env->htab_mask) {
        return false;
    }
    return true;
}

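/*
 * H_ENTER: insert a new HPTE.  args[] holds the flags, the PTE index and
 * the two doublewords of the new entry.  Without H_EXACT the low bits of
 * the index are ignored and a free slot within the PTEG is chosen; with
 * H_EXACT the exact slot must be free.  On success the index actually
 * used is returned in args[0].
 */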
static target_ulong h_enter(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                            target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    target_ulong page_shift = 12;
    target_ulong raddr;
    target_ulong index;
    uint64_t token;

    /* only handle 4k and 16M pages for now */
    if (pteh & HPTE64_V_LARGE) {
#if 0 /* We don't support 64k pages yet */
        if ((ptel & 0xf000) == 0x1000) {
            /* 64k page */
        } else
#endif
        if ((ptel & 0xff000) == 0) {
            /* 16M page */
            page_shift = 24;
            /* lowest AVA bit must be 0 for 16M pages */
            if (pteh & 0x80) {
                return H_PARAMETER;
            }
        } else {
            return H_PARAMETER;
        }
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << page_shift) - 1);

    if (raddr < spapr->ram_limit) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        if ((ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M)) != HPTE64_R_I) {
            return H_PARAMETER;
        }
    }

    pteh &= ~0x60ULL;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    index = 0;
    if (likely((flags & H_EXACT) == 0)) {
        pte_index &= ~7ULL;
        token = ppc_hash64_start_access(cpu, pte_index);
        for (; index < 8; index++) {
            if ((ppc_hash64_load_hpte0(env, token, index) & HPTE64_V_VALID) == 0) {
                break;
            }
        }
        ppc_hash64_stop_access(token);
        if (index == 8) {
            return H_PTEG_FULL;
        }
    } else {
        token = ppc_hash64_start_access(cpu, pte_index);
        if (ppc_hash64_load_hpte0(env, token, 0) & HPTE64_V_VALID) {
            ppc_hash64_stop_access(token);
            return H_PTEG_FULL;
        }
        ppc_hash64_stop_access(token);
    }

    ppc_hash64_store_hpte(env, pte_index + index,
                          pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = pte_index + index;
    return H_SUCCESS;
}

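/*
 * Common HPTE removal logic shared by H_REMOVE and H_BULK_REMOVE.  The
 * RemoveResult values match the two-bit status codes that H_BULK_REMOVE
 * stores into each response doubleword.
 */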
typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;

static RemoveResult remove_hpte(CPUPPCState *env, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    uint64_t token;
    target_ulong v, r, rb;

    if (!valid_pte_index(env, ptex)) {
        return REMOVE_PARM;
    }

    token = ppc_hash64_start_access(ppc_env_get_cpu(env), ptex);
    v = ppc_hash64_load_hpte0(env, token, 0);
    r = ppc_hash64_load_hpte1(env, token, 0);
    ppc_hash64_stop_access(token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    ppc_hash64_store_hpte(env, ptex, HPTE64_V_HPTE_DIRTY, 0);
    rb = compute_tlbie_rb(v, r, ptex);
    ppc_tlb_invalidate_one(env, rb);
    return REMOVE_SUCCESS;
}

static target_ulong h_remove(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(env, pte_index, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}

#define H_BULK_REMOVE_TYPE             0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST        0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE       0x8000000000000000ULL
#define   H_BULK_REMOVE_END            0xc000000000000000ULL
#define H_BULK_REMOVE_CODE             0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS        0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND      0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM           0x2000000000000000ULL
#define   H_BULK_REMOVE_HW             0x3000000000000000ULL
#define H_BULK_REMOVE_RC               0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS            0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE       0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND        0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN           0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX             0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH        4

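/*
 * H_BULK_REMOVE: up to four (translation specifier, AVPN) pairs are
 * passed in args[].  Each specifier is rewritten in place into a
 * response containing the status code and the R/C bits of the removed
 * entry; a specifier of type END terminates the list early.
 */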
static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    int i;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(env, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            return H_PARAMETER;

        case REMOVE_HW:
            return H_HARDWARE;
        }
    }

    return H_SUCCESS;
}

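/*
 * H_PROTECT: update the protection and key bits of an existing HPTE.
 * The entry is temporarily marked invalid while the second doubleword is
 * rewritten, and the stale translation is flushed with a tlbie.
 */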
static target_ulong h_protect(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    uint64_t token;
    target_ulong v, r, rb;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    token = ppc_hash64_start_access(cpu, pte_index);
    v = ppc_hash64_load_hpte0(env, token, 0);
    r = ppc_hash64_load_hpte1(env, token, 0);
    ppc_hash64_stop_access(token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    rb = compute_tlbie_rb(v, r, pte_index);
    ppc_hash64_store_hpte(env, pte_index,
                          (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_tlb_invalidate_one(env, rb);
    /* Don't need a memory barrier, due to qemu's global lock */
    ppc_hash64_store_hpte(env, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}

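/*
 * H_READ: return the contents of an HPTE in args[0]/args[1].  With the
 * H_READ_4 flag, four consecutive entries starting at a 4-aligned index
 * are returned in args[0..7].  Note that this reads the in-QEMU copy of
 * the hash table (env->external_htab) directly.
 */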
static target_ulong h_read(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    uint8_t *hpte;
    int i, ridx, n_entries = 1;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        pte_index &= ~(3ULL);
        n_entries = 4;
    }

    hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);

    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ldq_p(hpte);
        args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2));
        hpte += HASH_PTE_SIZE_64;
    }

    return H_SUCCESS;
}

static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                               target_ulong opcode, target_ulong *args)
{
    /* FIXME: actually implement this */
    return H_HARDWARE;
}

#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

#define VPA_MIN_SIZE           640
#define VPA_SIZE_OFFSET        0x4
#define VPA_SHARED_PROC_OFFSET 0x9
#define VPA_SHARED_PROC_VAL    0x2

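/*
 * H_REGISTER_VPA helpers.  The Virtual Processor Area, SLB shadow buffer
 * and dispatch trace log are per-vCPU structures in guest memory: their
 * sizes are read back from the guest-supplied buffers, sanity checked,
 * and the guest physical addresses recorded in the CPU state.  A VPA
 * must be registered before an SLB shadow or DTL, and cannot be
 * deregistered while either of those is still active.
 */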
static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint16_t size;
    uint8_t tmp;

    if (vpa == 0) {
        hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
        return H_HARDWARE;
    }

    if (vpa % env->dcache_line_size) {
        return H_PARAMETER;
    }
    /* FIXME: bounds check the address */

    size = lduw_be_phys(cs->as, vpa + 0x4);

    if (size < VPA_MIN_SIZE) {
        return H_PARAMETER;
    }

    /* VPA is not allowed to cross a page boundary */
    if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    env->vpa_addr = vpa;

    tmp = ldub_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET);
    tmp |= VPA_SHARED_PROC_VAL;
    stb_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);

    return H_SUCCESS;
}

static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
{
    if (env->slb_shadow_addr) {
        return H_RESOURCE;
    }

    if (env->dtl_addr) {
        return H_RESOURCE;
    }

    env->vpa_addr = 0;
    return H_SUCCESS;
}

static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);
    if (size < 0x8) {
        return H_PARAMETER;
    }

    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->slb_shadow_addr = addr;
    env->slb_shadow_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    env->slb_shadow_addr = 0;
    env->slb_shadow_size = 0;
    return H_SUCCESS;
}

static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);

    if (size < 48) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->dtl_addr = addr;
    env->dtl_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr)
{
    env->dtl_addr = 0;
    env->dtl_size = 0;

    return H_SUCCESS;
}

static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    target_ulong vpa = args[2];
    target_ulong ret = H_PARAMETER;
    CPUPPCState *tenv;
    PowerPCCPU *tcpu;

    tcpu = ppc_get_vcpu_by_dt_id(procno);
    if (!tcpu) {
        return H_PARAMETER;
    }
    tenv = &tcpu->env;

    switch (flags) {
    case FLAGS_REGISTER_VPA:
        ret = register_vpa(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_VPA:
        ret = deregister_vpa(tenv, vpa);
        break;

    case FLAGS_REGISTER_SLBSHADOW:
        ret = register_slb_shadow(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_SLBSHADOW:
        ret = deregister_slb_shadow(tenv, vpa);
        break;

    case FLAGS_REGISTER_DTL:
        ret = register_dtl(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_DTL:
        ret = deregister_dtl(tenv, vpa);
        break;
    }

    return ret;
}

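/*
 * H_CEDE: the guest vCPU gives up the processor.  External interrupts
 * are enabled (MSR_EE) and, if there is no pending work, the vCPU is
 * halted until the next interrupt wakes it.
 */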
static target_ulong h_cede(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);
    if (!cpu_has_work(cs)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cs->exit_request = 1;
    }
    return H_SUCCESS;
}

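/*
 * KVMPPC_H_RTAS: private hypercall used by the guest's RTAS blob to
 * forward RTAS calls to QEMU.  args[0] is the guest physical address of
 * the RTAS argument buffer: token, nargs, nret, followed by nargs input
 * words and nret output words.
 */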
static target_ulong h_rtas(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong rtas_r3 = args[0];
    uint32_t token = rtas_ld(rtas_r3, 0);
    uint32_t nargs = rtas_ld(rtas_r3, 1);
    uint32_t nret = rtas_ld(rtas_r3, 2);

    return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
                           nret, rtas_r3 + 12 + 4*nargs);
}

static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong size = args[0];
    target_ulong addr = args[1];

    switch (size) {
    case 1:
        args[0] = ldub_phys(cs->as, addr);
        return H_SUCCESS;
    case 2:
        args[0] = lduw_phys(cs->as, addr);
        return H_SUCCESS;
    case 4:
        args[0] = ldl_phys(cs->as, addr);
        return H_SUCCESS;
    case 8:
        args[0] = ldq_phys(cs->as, addr);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong size = args[0];
    target_ulong addr = args[1];
    target_ulong val  = args[2];

    switch (size) {
    case 1:
        stb_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 2:
        stw_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 4:
        stl_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 8:
        stq_phys(cs->as, addr, val);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

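/*
 * KVMPPC_H_LOGICAL_MEMOP: copy (op 0) or copy-with-invert (op 1) 'count'
 * elements of 2^esize bytes from src to dst in guest physical memory.
 * When the regions overlap with dst above src, the copy runs backwards
 * so that source data is not overwritten before it is read.
 */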
static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong dst   = args[0]; /* Destination address */
    target_ulong src   = args[1]; /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op    = args[4]; /* 0 = copy, 1 = invert */
    uint64_t tmp;
    unsigned int mask = (1 << esize) - 1;
    int step = 1 << esize;

    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    if (dst >= src && dst < (src + (count << esize))) {
            dst = dst + ((count - 1) << esize);
            src = src + ((count - 1) << esize);
            step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0:
            tmp = ldub_phys(cs->as, src);
            break;
        case 1:
            tmp = lduw_phys(cs->as, src);
            break;
        case 2:
            tmp = ldl_phys(cs->as, src);
            break;
        case 3:
            tmp = ldq_phys(cs->as, src);
            break;
        default:
            return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;
        }
        switch (esize) {
        case 0:
            stb_phys(cs->as, dst, tmp);
            break;
        case 1:
            stw_phys(cs->as, dst, tmp);
            break;
        case 2:
            stl_phys(cs->as, dst, tmp);
            break;
        case 3:
            stq_phys(cs->as, dst, tmp);
            break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}

static target_ulong h_logical_icbi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

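/*
 * H_SET_MODE, interrupt-endianness resource: set or clear LPCR[ILE] on
 * every CPU so that interrupts are taken in the requested endianness.
 */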
static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
                                           target_ulong mflags,
                                           target_ulong value1,
                                           target_ulong value2)
{
    CPUState *cs;

    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    switch (mflags) {
    case H_SET_MODE_ENDIAN_BIG:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, 0, LPCR_ILE);
        }
        return H_SUCCESS;

    case H_SET_MODE_ENDIAN_LITTLE:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE);
        }
        return H_SUCCESS;
    }

    return H_UNSUPPORTED_FLAG;
}

static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
                                                        target_ulong mflags,
                                                        target_ulong value1,
                                                        target_ulong value2)
{
    CPUState *cs;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    target_ulong prefix;

    if (!(pcc->insns_flags2 & PPC2_ISA207S)) {
        return H_P2;
    }
    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    switch (mflags) {
    case H_SET_MODE_ADDR_TRANS_NONE:
        prefix = 0;
        break;
    case H_SET_MODE_ADDR_TRANS_0001_8000:
        prefix = 0x18000;
        break;
    case H_SET_MODE_ADDR_TRANS_C000_0000_0000_4000:
        prefix = 0xC000000000004000ULL;
        break;
    default:
        return H_UNSUPPORTED_FLAG;
    }

    CPU_FOREACH(cs) {
        /* Apply the new AIL mode and exception prefix to each CPU,
         * not just the one making the hcall */
        CPUPPCState *env = &POWERPC_CPU(cs)->env;

        set_spr(cs, SPR_LPCR, mflags << LPCR_AIL_SHIFT, LPCR_AIL);
        env->excp_prefix = prefix;
    }

    return H_SUCCESS;
}

static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                               target_ulong opcode, target_ulong *args)
{
    target_ulong resource = args[1];
    target_ulong ret = H_P2;

    switch (resource) {
    case H_SET_MODE_RESOURCE_LE:
        ret = h_set_mode_resource_le(cpu, args[0], args[2], args[3]);
        break;
    case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
        ret = h_set_mode_resource_addr_trans_mode(cpu, args[0],
                                                  args[2], args[3]);
        break;
    }

    return ret;
}

typedef struct {
    PowerPCCPU *cpu;
    uint32_t cpu_version;
    int ret;
} SetCompatState;

static void do_set_compat(void *arg)
{
    SetCompatState *s = arg;

    cpu_synchronize_state(CPU(s->cpu));
    s->ret = ppc_set_compat(s->cpu, s->cpu_version);
}

#define get_compat_level(cpuver) ( \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_05) ? 2050 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06) ? 2060 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06_PLUS) ? 2061 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_07) ? 2070 : 0)

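/*
 * KVMPPC_H_CAS, backing ibm,client-architecture-support: walk the list
 * of (PVR mask, PVR) candidates supplied by the guest, pick either the
 * raw CPU PVR or the best supported logical (compatibility) PVR, switch
 * every CPU into that compat mode, and request a system reset when
 * spapr_h_cas_compose_response() indicates the device tree response
 * requires one.
 */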
static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
                                                  sPAPREnvironment *spapr,
                                                  target_ulong opcode,
                                                  target_ulong *args)
{
    target_ulong list = args[0];
    PowerPCCPUClass *pcc_ = POWERPC_CPU_GET_CLASS(cpu_);
    CPUState *cs;
    bool cpu_match = false;
    unsigned old_cpu_version = cpu_->cpu_version;
    unsigned compat_lvl = 0, cpu_version = 0;
    unsigned max_lvl = get_compat_level(cpu_->max_compat);
    int counter;

    /* Parse PVR list */
    for (counter = 0; counter < 512; ++counter) {
        uint32_t pvr, pvr_mask;

        pvr_mask = rtas_ld(list, 0);
        list += 4;
        pvr = rtas_ld(list, 0);
        list += 4;

        trace_spapr_cas_pvr_try(pvr);
        if (!max_lvl &&
            ((cpu_->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask))) {
            cpu_match = true;
            cpu_version = 0;
        } else if (pvr == cpu_->cpu_version) {
            cpu_match = true;
            cpu_version = cpu_->cpu_version;
        } else if (!cpu_match) {
            /* If it is a logical PVR, try to determine the highest level */
            unsigned lvl = get_compat_level(pvr);
            if (lvl) {
                bool is205 = (pcc_->pcr_mask & PCR_COMPAT_2_05) &&
                     (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_05));
                bool is206 = (pcc_->pcr_mask & PCR_COMPAT_2_06) &&
                    ((lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06)) ||
                    (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06_PLUS)));

                if (is205 || is206) {
                    if (!max_lvl) {
                        /* User did not set the level, choose the highest */
                        if (compat_lvl <= lvl) {
                            compat_lvl = lvl;
                            cpu_version = pvr;
                        }
                    } else if (max_lvl >= lvl) {
                        /* User chose the level, don't set higher than this */
                        compat_lvl = lvl;
                        cpu_version = pvr;
                    }
                }
            }
        }
        /* Terminator record */
        if (~pvr_mask & pvr) {
            break;
        }
    }

    /* For future use: at this point @list points to the first capability */

    /* Parsing finished */
    trace_spapr_cas_pvr(cpu_->cpu_version, cpu_match,
                        cpu_version, pcc_->pcr_mask);

    /* Update CPUs */
    if (old_cpu_version != cpu_version) {
        CPU_FOREACH(cs) {
            SetCompatState s = {
                .cpu = POWERPC_CPU(cs),
                .cpu_version = cpu_version,
                .ret = 0
            };

            run_on_cpu(cs, do_set_compat, &s);

            if (s.ret < 0) {
                fprintf(stderr, "Unable to set compatibility mode\n");
                return H_HARDWARE;
            }
        }
    }

    if (!cpu_version) {
        return H_SUCCESS;
    }

    if (!list) {
        return H_SUCCESS;
    }

    if (spapr_h_cas_compose_response(args[1], args[2])) {
        qemu_system_reset_request();
    }

    return H_SUCCESS;
}

static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];

void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
{
    spapr_hcall_fn *slot;

    if (opcode <= MAX_HCALL_OPCODE) {
        assert((opcode & 0x3) == 0);

        slot = &papr_hypercall_table[opcode / 4];
    } else {
        assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));

        slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
    }

    assert(!(*slot));
    *slot = fn;
}

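/*
 * Hypercall dispatch: architected PAPR hcalls are indexed by opcode / 4
 * in papr_hypercall_table, while QEMU/KVM private hcalls live in a
 * separate table indexed relative to KVMPPC_HCALL_BASE.  Unknown opcodes
 * fail with H_FUNCTION.
 */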
target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
                             target_ulong *args)
{
    if ((opcode <= MAX_HCALL_OPCODE)
        && ((opcode & 0x3) == 0)) {
        spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    } else if ((opcode >= KVMPPC_HCALL_BASE) &&
               (opcode <= KVMPPC_HCALL_MAX)) {
        spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    }

    hcall_dprintf("Unimplemented hcall 0x" TARGET_FMT_lx "\n", opcode);
    return H_FUNCTION;
}

static void hypercall_register_types(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);

    /* hcall-dabr */
    spapr_register_hypercall(H_SET_DABR, h_set_dabr);

    /* hcall-splpar */
    spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
    spapr_register_hypercall(H_CEDE, h_cede);

    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate
     * here between the "CI" and the "CACHE" variants, they will use whatever
     * mapping attributes QEMU is using. When using KVM, the kernel will
     * enforce the attributes more strongly.
     */
    spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
    spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);

    /* qemu/KVM-PPC specific hcalls */
    spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);

    spapr_register_hypercall(H_SET_MODE, h_set_mode);

    /* ibm,client-architecture-support support */
    spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
}

type_init(hypercall_register_types)