qemu/target/s390x/excp_helper.c
/*
 * s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "hw/s390x/s390_flic.h"
#include "hw/boards.h"
#endif

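/*
 * Deliver a program interrupt from TCG: restore the guest state from the
 * host return address, log the event, record the interruption code and
 * leave the cpu loop so the interrupt is delivered on the way out.
 */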
void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env,
                                              uint32_t code, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, ra, true);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code);
    cpu_loop_exit(cs);
}

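/*
 * Deliver a DATA program interrupt. The data-exception code (DXC) is
 * stored into the lowcore and, when AFP is enabled in CR0, also into
 * the DXC field of the FPC.
 */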
void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                           uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ra);
}

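/*
 * Deliver a VECTOR PROCESSING program interrupt. The vector-exception
 * code (VXC) shares the lowcore field and the FPC byte with the DXC.
 */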
void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
                                             uintptr_t ra)
{
    g_assert(vxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Always store the VXC into the lowcore, without AFP it is undefined */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), vxc);
#endif

    /* Always store the VXC into the FPC, without AFP it is undefined */
    env->fpc = deposit32(env->fpc, 8, 8, vxc);
    tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra);
}

void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}

#if defined(CONFIG_USER_ONLY)

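/*
 * In user-only mode there is no lowcore and no PSW swapping; delivery
 * just clears the exception so the user-mode cpu_loop can pick up the
 * details (e.g. __excp_addr) from the CPU state.
 */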
void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING);
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    cpu_loop_exit_restore(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */

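/* Map a QEMU MMU index to the architectural address-space control (ASC). */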
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

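/*
 * Translate a guest virtual address for a TLB fill: run the DAT (or real
 * address) translation, verify that the result is backed by RAM, and on
 * success install the mapping. On failure, either report it to a probing
 * caller or deliver the resulting program interrupt.
 */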
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc, tec;
    int prot, excp;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    vaddr = address;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec);
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec);
    } else {
        g_assert_not_reached();
    }

    /* check for out-of-RAM accesses */
    if (!excp &&
        !address_space_access_valid(&address_space_memory, raddr,
                                    TARGET_PAGE_SIZE, access_type,
                                    MEMTXATTRS_UNSPECIFIED)) {
        MachineState *ms = MACHINE(qdev_get_machine());
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n",
                      __func__, (uint64_t)raddr, (uint64_t)ms->ram_size);
        excp = PGM_ADDRESSING;
        tec = 0; /* unused */
    }

    env->tlb_fill_exc = excp;
    env->tlb_fill_tec = tec;

    if (!excp) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                      __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
        tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;
    }

    if (excp != PGM_ADDRESSING) {
        stq_phys(env_cpu(env)->as,
                 env->psa + offsetof(LowCore, trans_exc_code), tec);
    }

    /*
     * For data accesses, ILEN will be filled in from the unwind info,
     * within cpu_loop_exit_restore.  For code accesses, retaddr == 0,
     * and so unwinding will not occur.  However, ILEN is also undefined
     * for that case -- we choose to set ILEN = 2.
     */
    env->int_pgm_ilen = 2;
    trigger_pgm_exception(env, excp);
    cpu_loop_exit_restore(cs, retaddr);
}

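/*
 * Deliver a pending program interrupt: advance the PSW past the
 * instruction for non-nullifying conditions, merge in a concurrent PER
 * event if one is pending, store the old PSW and interruption data into
 * the lowcore and load the program-new PSW.
 */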
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

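/*
 * Deliver a SUPERVISOR CALL interrupt: store the SVC interruption code
 * and instruction length into the lowcore, swap in the SVC new PSW, and
 * follow up with a PER program interrupt if a PER event is pending.
 */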
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

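/*
 * Deliver the highest-priority pending external interrupt that is
 * enabled by its subclass mask in CR0: emergency signal, external call,
 * clock comparator, CPU timer, then service signal.
 */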
static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = env_archcpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        MachineState *ms = MACHINE(qdev_get_machine());
        unsigned int max_cpus = ms->smp.max_cpus;

        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

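/*
 * Deliver an I/O interrupt: dequeue the highest-priority I/O interrupt
 * allowed by CR6 from the FLIC, store its identification into the
 * lowcore and swap in the I/O new PSW.
 */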
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    load_psw(env, mask, addr);
}

typedef struct MchkExtSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);

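/*
 * Store the 32 vector registers into the machine-check extended save
 * area. Returns 0 on success and -EFAULT if the guest-designated area
 * cannot be mapped in full.
 */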
static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr len = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *sa;
    int i;

    sa = cpu_physical_memory_map(mcesao, &len, true);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(MchkExtSaveArea)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    for (i = 0; i < 32; i++) {
        sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]);
        sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}

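/*
 * Deliver a channel-report machine check: save the register state and
 * (if possible) the vector registers, store the machine-check
 * interruption code (MCIC) and swap in the machine-check new PSW.
 */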
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment are 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i));
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

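/*
 * Deliver the pending exception or interrupt. Floating and local
 * interrupts are tried in priority order (machine check, external, I/O,
 * restart, stop), and delivery loops until nothing further can be
 * delivered; afterwards the hard-interrupt and halted state is updated.
 */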
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}

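/*
 * Called by the cpu-exec loop when CPU_INTERRUPT_HARD is set. Delivery
 * is suppressed while an EXECUTE target is in flight, since the target
 * instruction must complete together with EXECUTE itself.
 */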
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered. Go back to sleep. */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}

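/*
 * Handle a watchpoint hit for PER storage-alteration events: record the
 * PER event data and re-execute the instruction without watchpoints so
 * that the PER program interrupt is raised through the normal path.
 */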
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This turns out to be true except when the
           MVCP and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception.  */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr);
}

static void QEMU_NORETURN monitor_event(CPUS390XState *env,
                                        uint64_t monitor_code,
                                        uint8_t monitor_class, uintptr_t ra)
{
    /* Store the Monitor Code and the Monitor Class Number into the lowcore */
    stq_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, monitor_code), monitor_code);
    stw_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, mon_class_num), monitor_class);

    tcg_s390_program_interrupt(env, PGM_MONITOR, ra);
}

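/*
 * MONITOR CALL: a monitor event is only recognized when the monitor-mask
 * bit in CR8 for the requested class (0-15) is set.
 */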
void HELPER(monitor_call)(CPUS390XState *env, uint64_t monitor_code,
                          uint32_t monitor_class)
{
    g_assert(monitor_class <= 0xff);

    if (env->cregs[8] & (0x8000 >> monitor_class)) {
        monitor_event(env, monitor_code, monitor_class, GETPC());
    }
}

#endif /* !CONFIG_USER_ONLY */