qemu/target/s390x/excp_helper.c
/*
 * s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "hw/s390x/s390_flic.h"
#endif

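/*
 * Trigger a program interrupt from TCG: unwind the CPU state to the guest
 * instruction that raised it, record the interruption code and ilen, and
 * exit to the main loop.  This function never returns.
 */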
void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, uint32_t code,
                                              int ilen, uintptr_t ra)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cpu_restore_state(cs, ra, true);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code, ilen);
    cpu_loop_exit(cs);
}

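/*
 * Raise a DATA program interrupt.  The data-exception code (DXC) is stored
 * into the lowcore and, if AFP-register control is enabled in CR0, also
 * into byte 2 of the FPC.
 */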
void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                           uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(CPU(s390_env_get_cpu(env))->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ILEN_AUTO, ra);
}

void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING, ILEN_AUTO);
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

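/*
 * Translate a guest virtual address and install the result in the TLB.
 * Returns 0 on success; returns 1 after a program interrupt has been
 * triggered, leaving delivery of the exception to the caller.
 */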
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr, int size,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc;
    int prot;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, orig_vaddr, rw, mmu_idx);

    vaddr = orig_vaddr;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
            return 1;
        }
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        if (mmu_translate_real(env, vaddr, rw, &raddr, &prot)) {
            return 1;
        }
    } else {
        abort();
    }

    /* check for out-of-RAM access */
    if (!address_space_access_valid(&address_space_memory, raddr,
                                    TARGET_PAGE_SIZE, rw,
                                    MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n",
                      __func__, (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
            __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr & TARGET_PAGE_MASK, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

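/*
 * Deliver a program interruption through the lowcore: store the
 * interruption code/ilen and the old PSW, then load the program new PSW.
 * Pending PER events are merged into the same interrupt.
 */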
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

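/*
 * Deliver a SUPERVISOR CALL interruption.  The stored old PSW points past
 * the SVC instruction, so execution resumes after it.
 */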
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

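    /*
     * External interruption conditions are polled in descending priority
     * order: emergency signal, external call, clock comparator, CPU timer,
     * then service signal.
     */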
    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

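/*
 * Deliver one pending I/O interruption.  The FLIC queue is filtered by the
 * I/O-interruption subclass mask in CR6.
 */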
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    load_psw(env, mask, addr);
}

typedef struct MchkExtSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);

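/*
 * Store the 32 vector registers into the machine-check extended save
 * area.  Fails with -EFAULT if the 1k area cannot be mapped in full.
 */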
static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr len = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *sa;
    int i;

    sa = cpu_physical_memory_map(mcesao, &len, 1);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(MchkExtSaveArea)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    for (i = 0; i < 32; i++) {
        sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0].ll);
        sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1].ll);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}

static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment are 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

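/*
 * Deliver the exception in cs->exception_index, or pick the pending
 * interruption with the highest priority (machine check, external, I/O,
 * restart, stop), and loop until nothing deliverable is left.
 */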
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but none deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}

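/*
 * Entry point for the TCG main loop.  Returns true if an interrupt was
 * delivered; delivery is suppressed while the target of an EXECUTE
 * instruction is in flight.
 */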
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered. Go back to sleep. */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}

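/*
 * PER storage-alteration events are implemented with TCG watchpoints; on
 * a watchpoint hit, convert it into a pending PER event.
 */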
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space
           used to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This turns out to be true except when the
           MVCP and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, and its delivery will call load_psw, which
           recomputes the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception.  */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, retaddr);
}

#endif /* CONFIG_USER_ONLY */