qemu/target/s390x/misc_helper.c
/*
 *  S/390 misc helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "internal.h"
#include "exec/memory.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#if !defined(CONFIG_USER_ONLY)
#include "sysemu/cpus.h"
#include "sysemu/sysemu.h"
#include "hw/s390x/ebcdic.h"
#include "hw/s390x/s390-virtio-hcall.h"
#include "hw/s390x/sclp.h"
#endif

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

/* Raise an exception dynamically from a helper function.  */
void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp,
                                     uintptr_t retaddr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cs->exception_index = EXCP_PGM;
    env->int_pgm_code = excp;
    env->int_pgm_ilen = ILEN_AUTO;

    /* Use the (ultimate) caller's address to find the insn that trapped.  */
    cpu_restore_state(cs, retaddr);

    cpu_loop_exit(cs);
}

/* Raise an exception statically from a TB.  */
void HELPER(exception)(CPUS390XState *env, uint32_t excp)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    HELPER_LOG("%s: exception %d\n", __func__, excp);
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

#ifndef CONFIG_USER_ONLY

/* SCLP service call */
uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    qemu_mutex_lock_iothread();
    int r = sclp_service_call(env, r1, r2);
    if (r < 0) {
        program_interrupt(env, -r, 4);
        r = 0;
    }
    qemu_mutex_unlock_iothread();
    return r;
}

void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
{
    uint64_t r;

    switch (num) {
    case 0x500:
        /* KVM hypercall */
        qemu_mutex_lock_iothread();
        r = s390_virtio_hypercall(env);
        qemu_mutex_unlock_iothread();
        break;
    case 0x44:
        /* yield */
        r = 0;
        break;
    case 0x308:
        /* ipl */
        qemu_mutex_lock_iothread();
        handle_diag_308(env, r1, r3);
        qemu_mutex_unlock_iothread();
        r = 0;
        break;
    case 0x288:
        /* time bomb (watchdog) */
        r = handle_diag_288(env, r1, r3);
        break;
    default:
        r = -1;
        break;
    }

    if (r) {
        program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
    }
}

/* Set Prefix */
void HELPER(spx)(CPUS390XState *env, uint64_t a1)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint32_t prefix = a1 & 0x7fffe000;

    env->psa = prefix;
    HELPER_LOG("prefix: %#x\n", prefix);
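    /* Changing the prefix remaps the lowest 8k of storage (the prefix
       area), so drop any cached translations for those two pages.  */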
    tlb_flush_page(cs, 0);
    tlb_flush_page(cs, TARGET_PAGE_SIZE);
}

/* Store Clock */
uint64_t HELPER(stck)(CPUS390XState *env)
{
    uint64_t time;

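    /* Guest TOD clock = TOD offset + elapsed virtual time since the TOD
       base, converted from nanoseconds into TOD-clock format.  */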
    time = env->tod_offset +
        time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - env->tod_basetime);

    return time;
}

/* Set Clock Comparator */
void HELPER(sckc)(CPUS390XState *env, uint64_t time)
{
    if (time == -1ULL) {
        return;
    }

    env->ckc = time;

    /* difference between origins */
    time -= env->tod_offset;

    /* nanoseconds */
    time = tod2time(time);

    timer_mod(env->tod_timer, env->tod_basetime + time);
}

/* Store Clock Comparator */
uint64_t HELPER(stckc)(CPUS390XState *env)
{
    return env->ckc;
}

/* Set CPU Timer */
void HELPER(spt)(CPUS390XState *env, uint64_t time)
{
    if (time == -1ULL) {
        return;
    }

    /* nanoseconds */
    time = tod2time(time);

    env->cputm = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time;

    timer_mod(env->cpu_timer, env->cputm);
}

/* Store CPU Timer */
uint64_t HELPER(stpt)(CPUS390XState *env)
{
    return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
}

/* Store System Information */
uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0,
                      uint64_t r0, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    int cc = 0;
    int sel1, sel2;

    if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
        ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
        /* valid function code, invalid reserved bits */
        program_interrupt(env, PGM_SPECIFICATION, 4);
    }

    sel1 = r0 & STSI_R0_SEL1_MASK;
    sel2 = r1 & STSI_R1_SEL2_MASK;

    /* XXX: spec exception if sysib is not 4k-aligned */

    switch (r0 & STSI_LEVEL_MASK) {
    case STSI_LEVEL_1:
        if ((sel1 == 1) && (sel2 == 1)) {
            /* Basic Machine Configuration */
            struct sysib_111 sysib;
            char type[5] = {};

            memset(&sysib, 0, sizeof(sysib));
            ebcdic_put(sysib.manuf, "QEMU            ", 16);
            /* same as machine type number in STORE CPU ID, but in EBCDIC */
            snprintf(type, ARRAY_SIZE(type), "%X", cpu->model->def->type);
            ebcdic_put(sysib.type, type, 4);
            /* model number (not stored in STORE CPU ID for z/Architecture) */
            ebcdic_put(sysib.model, "QEMU            ", 16);
            ebcdic_put(sysib.sequence, "QEMU            ", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
        } else if ((sel1 == 2) && (sel2 == 1)) {
            /* Basic Machine CPU */
            struct sysib_121 sysib;

            memset(&sysib, 0, sizeof(sysib));
            /* XXX make different for different CPUs? */
            ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            stw_p(&sysib.cpu_addr, env->core_id);
            cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* Basic Machine CPUs */
            struct sysib_122 sysib;

            memset(&sysib, 0, sizeof(sysib));
            stl_p(&sysib.capability, 0x443afc29);
            /* XXX change when SMP comes */
            stw_p(&sysib.total_cpus, 1);
            stw_p(&sysib.active_cpus, 1);
            stw_p(&sysib.standby_cpus, 0);
            stw_p(&sysib.reserved_cpus, 0);
            cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
        } else {
            cc = 3;
        }
        break;
    case STSI_LEVEL_2:
        {
            if ((sel1 == 2) && (sel2 == 1)) {
                /* LPAR CPU */
                struct sysib_221 sysib;

                memset(&sysib, 0, sizeof(sysib));
                /* XXX make different for different CPUs? */
                ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
                ebcdic_put(sysib.plant, "QEMU", 4);
                stw_p(&sysib.cpu_addr, env->core_id);
                stw_p(&sysib.cpu_id, 0);
                cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
            } else if ((sel1 == 2) && (sel2 == 2)) {
                /* LPAR CPUs */
                struct sysib_222 sysib;

                memset(&sysib, 0, sizeof(sysib));
                stw_p(&sysib.lpar_num, 0);
                sysib.lcpuc = 0;
                /* XXX change when SMP comes */
                stw_p(&sysib.total_cpus, 1);
                stw_p(&sysib.conf_cpus, 1);
                stw_p(&sysib.standby_cpus, 0);
                stw_p(&sysib.reserved_cpus, 0);
                ebcdic_put(sysib.name, "QEMU    ", 8);
                stl_p(&sysib.caf, 1000);
                stw_p(&sysib.dedicated_cpus, 0);
                stw_p(&sysib.shared_cpus, 0);
                cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
            } else {
                cc = 3;
            }
            break;
        }
    case STSI_LEVEL_3:
        {
            if ((sel1 == 2) && (sel2 == 2)) {
                /* VM CPUs */
                struct sysib_322 sysib;

                memset(&sysib, 0, sizeof(sysib));
                sysib.count = 1;
                /* XXX change when SMP comes */
                stw_p(&sysib.vm[0].total_cpus, 1);
                stw_p(&sysib.vm[0].conf_cpus, 1);
                stw_p(&sysib.vm[0].standby_cpus, 0);
                stw_p(&sysib.vm[0].reserved_cpus, 0);
                ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
                stl_p(&sysib.vm[0].caf, 1000);
                ebcdic_put(sysib.vm[0].cpi, "KVM/Linux       ", 16);
                cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
            } else {
                cc = 3;
            }
            break;
        }
    case STSI_LEVEL_CURRENT:
        env->regs[0] = STSI_LEVEL_3;
        break;
    default:
        cc = 3;
        break;
    }

    return cc;
}

uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
                      uint32_t r3)
{
    int cc;

    /* TODO: needed to inject interrupts  - push further down */
    qemu_mutex_lock_iothread();
    cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3);
    qemu_mutex_unlock_iothread();

    return cc;
}
#endif

#ifndef CONFIG_USER_ONLY
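/*
 * I/O instruction helpers: forward the channel-subsystem instructions to
 * the corresponding ioinst_handle_*() routine, taking the BQL because
 * they touch I/O device state.
 */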
void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_xsch(cpu, r1);
    qemu_mutex_unlock_iothread();
}

void HELPER(csch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_csch(cpu, r1);
    qemu_mutex_unlock_iothread();
}

void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_hsch(cpu, r1);
    qemu_mutex_unlock_iothread();
}

void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_msch(cpu, r1, inst >> 16);
    qemu_mutex_unlock_iothread();
}

void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_rchp(cpu, r1);
    qemu_mutex_unlock_iothread();
}

void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_rsch(cpu, r1);
    qemu_mutex_unlock_iothread();
}

void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_ssch(cpu, r1, inst >> 16);
    qemu_mutex_unlock_iothread();
}

void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_stsch(cpu, r1, inst >> 16);
    qemu_mutex_unlock_iothread();
}

void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_tsch(cpu, r1, inst >> 16);
    qemu_mutex_unlock_iothread();
}

void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_chsc(cpu, inst >> 16);
    qemu_mutex_unlock_iothread();
}
#endif

#ifndef CONFIG_USER_ONLY
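/*
 * PER (Program-Event Recording) helpers, called from translated code to
 * record branch and instruction-fetch events and to raise the resulting
 * PER program interruptions.
 */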
void HELPER(per_check_exception)(CPUS390XState *env)
{
    uint32_t ilen;

    if (env->per_perc_atmid) {
        /*
         * FIXME: ILEN_AUTO is most probably the right thing to use. ilen
         * always has to match the instruction referenced in the PSW. E.g.
         * if a PER interrupt is triggered via EXECUTE, we have to use ilen
         * of EXECUTE, while per_address contains the target of EXECUTE.
         */
        ilen = get_ilen(cpu_ldub_code(env, env->per_address));
        program_interrupt(env, PGM_PER, ilen);
    }
}

/* Check if an address is within the PER starting address and the PER
   ending address.  The address range might loop.  */
static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
{
    if (env->cregs[10] <= env->cregs[11]) {
        return env->cregs[10] <= addr && addr <= env->cregs[11];
    } else {
        return env->cregs[10] <= addr || addr <= env->cregs[11];
    }
}

void HELPER(per_branch)(CPUS390XState *env, uint64_t from, uint64_t to)
{
    if ((env->cregs[9] & PER_CR9_EVENT_BRANCH)) {
        if (!(env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS)
            || get_per_in_range(env, to)) {
            env->per_address = from;
            env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env);
        }
    }
}

void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr)
{
    if ((env->cregs[9] & PER_CR9_EVENT_IFETCH) && get_per_in_range(env, addr)) {
        env->per_address = addr;
        env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env);

        /* If the instruction has to be nullified, trigger the
           exception immediately. */
        if (env->cregs[9] & PER_CR9_EVENT_NULLIFICATION) {
            CPUState *cs = CPU(s390_env_get_cpu(env));

            env->per_perc_atmid |= PER_CODE_EVENT_NULLIFICATION;
            env->int_pgm_code = PGM_PER;
            env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, addr));

            cs->exception_index = EXCP_PGM;
            cpu_loop_exit(cs);
        }
    }
}
#endif

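/*
 * Facility bits as presented to the guest by STFL/STFLE.  The block is
 * filled lazily by prepare_stfl(); used_stfl_bytes tracks the highest
 * non-zero byte so STFLE can report the required list length.
 */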
static uint8_t stfl_bytes[2048];
static unsigned int used_stfl_bytes;

static void prepare_stfl(void)
{
    static bool initialized;
    int i;

    /* racy, but we don't care, the same values are always written */
    if (initialized) {
        return;
    }

    s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
    for (i = 0; i < sizeof(stfl_bytes); i++) {
        if (stfl_bytes[i]) {
            used_stfl_bytes = i + 1;
        }
    }
    initialized = true;
}

#ifndef CONFIG_USER_ONLY
void HELPER(stfl)(CPUS390XState *env)
{
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);
    prepare_stfl();
    memcpy(&lowcore->stfl_fac_list, stfl_bytes, sizeof(lowcore->stfl_fac_list));
    cpu_unmap_lowcore(lowcore);
}
#endif

uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
{
    const uintptr_t ra = GETPC();
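    /* Bits 56-63 of r0 hold the number of doublewords provided by the
       program, minus 1; they are updated below to the number of
       doublewords needed, minus 1.  */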
    const int count_bytes = ((env->regs[0] & 0xff) + 1) * 8;
    const int max_bytes = ROUND_UP(used_stfl_bytes, 8);
    int i;

    if (addr & 0x7) {
        cpu_restore_state(ENV_GET_CPU(env), ra);
        program_interrupt(env, PGM_SPECIFICATION, 4);
    }

    prepare_stfl();
    for (i = 0; i < count_bytes; ++i) {
        cpu_stb_data_ra(env, addr + i, stfl_bytes[i], ra);
    }

    env->regs[0] = deposit64(env->regs[0], 0, 8, (max_bytes / 8) - 1);
    return count_bytes >= max_bytes ? 0 : 3;
}
