qemu/target-s390x/helper.c
/*
 *  S/390 helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/gdbstub.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

//#define DEBUG_S390
//#define DEBUG_S390_STDOUT

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif


#ifndef CONFIG_USER_ONLY
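/* Timer callbacks: mark the corresponding interrupt as pending and kick
   the CPU so the main loop delivers it.  */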
void s390x_tod_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_TOD;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}

void s390x_cpu_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_CPUTIMER;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
#endif

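/* Note: cpu_model and errp are currently unused; this always instantiates
   the default TYPE_S390_CPU object.  */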
S390CPU *cpu_s390x_create(const char *cpu_model, Error **errp)
{
    return S390_CPU(object_new(TYPE_S390_CPU));
}

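/* Create and realize a CPU with the given id; on any error the CPU object
   is unreferenced and NULL is returned.  */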
S390CPU *s390x_new_cpu(const char *cpu_model, int64_t id, Error **errp)
{
    S390CPU *cpu;
    Error *err = NULL;

    cpu = cpu_s390x_create(cpu_model, &err);
    if (err != NULL) {
        goto out;
    }

    object_property_set_int(OBJECT(cpu), id, "id", &err);
    if (err != NULL) {
        goto out;
    }
    object_property_set_bool(OBJECT(cpu), true, "realized", &err);

out:
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(cpu));
        cpu = NULL;
    }
    return cpu;
}

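/* Convenience wrapper for linux-user: errors are reported rather than
   propagated, and CPU ids are assigned sequentially.  */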
S390CPU *cpu_s390x_init(const char *cpu_model)
{
    Error *err = NULL;
    S390CPU *cpu;
    /* Used to track the CPU ID for linux-user only */
    static int64_t next_cpu_id;

    cpu = s390x_new_cpu(cpu_model, next_cpu_id++, &err);
    if (err) {
        error_report_err(err);
    }
    return cpu;
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

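/* For user-only emulation every fault is an addressing exception; the
   faulting address is stashed where cpu_loop can find it.  */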
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    cs->exception_index = EXCP_PGM;
    cpu->env.int_pgm_code = PGM_ADDRESSING;
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

/* Be sure to exit the TB after this call! */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cs->exception_index = EXCP_PGM;
    env->int_pgm_code = code;
    env->int_pgm_ilen = ilen;
}

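/* Softmmu fault handler: translate the page through mmu_translate() and,
   on success, install the mapping in the TLB.  Returns 1 if a program
   exception was raised instead.  */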
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check out of RAM access */
    if (raddr >= ram_size) {
        DPRINTF("%s: raddr %" PRIx64 " >= ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
            __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

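/* Debug (gdbstub) address translation: walk the MMU without raising
   exceptions and return the real address, or -1 if unmapped.  */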
hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false)) {
        return -1;
    }
    return raddr;
}

hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
{
    hwaddr phys_addr;
    target_ulong page;

    page = vaddr & TARGET_PAGE_MASK;
    phys_addr = cpu_get_phys_page_debug(cs, page);
    phys_addr += (vaddr & ~TARGET_PAGE_MASK);

    return phys_addr;
}

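/* Install a new PSW.  Updates the TCG condition code, recomputes the PER
   watchpoints if the PER mask changed, and halts the CPU when the wait
   bit is set (requesting system shutdown when that leaves no CPU
   running).  */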
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;
    if (tcg_enabled()) {
        env->cc_op = (mask >> 44) & 3;
    }

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);
        if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
            qemu_system_shutdown_request();
#endif
        }
    }
}

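/* Return the PSW mask with the condition-code field folded back in from
   the lazily evaluated TCG cc state.  */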
static uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    if (tcg_enabled()) {
        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                             env->cc_vr);

        r &= ~PSW_MASK_CC;
        assert(!(env->cc_op & ~3));
        r |= (uint64_t)env->cc_op << 44;
    }

    return r;
}

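/* Map the prefix area (lowcore) of the current CPU for writing; aborts if
   the full structure cannot be mapped contiguously.  */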
static LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    if (len < sizeof(LowCore)) {
        cpu_abort(CPU(cpu), "Could not map lowcore\n");
    }

    return lowcore;
}

static void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}

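/* All interrupt delivery below follows the same pattern: store the old
   PSW into the lowcore, fetch the new PSW from it, and load it.  */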
void do_restart_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
    addr = be64_to_cpu(lowcore->restart_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

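/* Deliver a program interrupt.  The instruction length may still need to
   be determined from the opcode at the current PSW; ILEN_LATER_INC also
   advances the PSW past the faulting instruction.  */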
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    switch (ilen) {
    case ILEN_LATER:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        break;
    case ILEN_LATER_INC:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        env->psw.addr += ilen;
        break;
    default:
        assert(ilen == 2 || ilen == 4 || ilen == 6);
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}

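/* Deliver a supervisor-call interrupt; the stored old PSW points past the
   SVC instruction.  */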
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

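/* Deliver one queued external interrupt and pop it from the queue;
   INTERRUPT_EXT is cleared once the queue is empty.  */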
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

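/* Deliver the first queued I/O interrupt whose subclass is enabled in
   CR6; INTERRUPT_IO stays pending as long as any queue still holds
   entries.  */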
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}

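/* Deliver a channel-report-word machine check: save the full register
   state into the lowcore save areas before switching PSWs.  */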
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

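/* Top-level interrupt dispatcher: pick a pending interrupt class that is
   enabled in the PSW (machine check first, then external, then I/O) and
   deliver it.  External interrupt codes 0x1004 and 0x1005 are the clock
   comparator and the CPU timer, respectively.  */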
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_CPUTIMER;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}

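/* A CPU_INTERRUPT_HARD request is only serviced here while external
   interrupts are enabled in the PSW; the other classes are gated inside
   s390_cpu_do_interrupt itself.  */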
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

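/* Rebuild the PER storage-alteration watchpoints from CR9..CR11.  The
   range in CR10/CR11 is inclusive and may wrap around the address
   space.  */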
void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* We are called when the watchpoints have changed. First
       remove them all.  */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if the storage-alteration event is not enabled.  */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /* We can't create a watchpoint spanning the whole memory range, so
           split it into two parts.  */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range wraps around, create two watchpoints.  */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
    } else {
        /* Default case, create a single watchpoint.  */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}

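/* Debug exception handler: record a hit PER watchpoint as a pending PER
   event, then drop the watchpoints and re-execute the instruction so the
   store completes; the resulting PER exception reloads the PSW, which
   recomputes the watchpoints.  */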
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This is true except when the MVCP and
           MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}
#endif /* CONFIG_USER_ONLY */