qemu/target-s390x/helper.c
/*
 *  S/390 helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/gdbstub.h"
#include "qemu/timer.h"
#include "exec/cpu_ldst.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

//#define DEBUG_S390
//#define DEBUG_S390_STDOUT

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif


#ifndef CONFIG_USER_ONLY
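/* Timer callbacks, run when the corresponding QEMU timer fires.  They only
 * mark the interrupt as pending and kick the CPU; the actual delivery
 * happens later in s390_cpu_do_interrupt().  */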
void s390x_tod_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_TOD;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}

void s390x_cpu_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_CPUTIMER;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
#endif

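/* CPU creation helpers.  Note that cpu_model is currently ignored here;
 * every CPU is instantiated as the generic TYPE_S390_CPU.  */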
S390CPU *cpu_s390x_create(const char *cpu_model, Error **errp)
{
    S390CPU *cpu;

    cpu = S390_CPU(object_new(TYPE_S390_CPU));

    return cpu;
}

S390CPU *s390x_new_cpu(const char *cpu_model, int64_t id, Error **errp)
{
    S390CPU *cpu;
    Error *err = NULL;

    cpu = cpu_s390x_create(cpu_model, &err);
    if (err != NULL) {
        goto out;
    }

    object_property_set_int(OBJECT(cpu), id, "id", &err);
    if (err != NULL) {
        goto out;
    }
    object_property_set_bool(OBJECT(cpu), true, "realized", &err);

out:
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(cpu));
        cpu = NULL;
    }
    return cpu;
}

S390CPU *cpu_s390x_init(const char *cpu_model)
{
    Error *err = NULL;
    S390CPU *cpu;
    /* Used to track the CPU ID; linux-user only.  */
    static int64_t next_cpu_id;

    cpu = s390x_new_cpu(cpu_model, next_cpu_id++, &err);
    if (err) {
        error_report_err(err);
    }
    return cpu;
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    cs->exception_index = EXCP_PGM;
    cpu->env.int_pgm_code = PGM_ADDRESSING;
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

/* Be sure to exit the TB after this call!  */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cs->exception_index = EXCP_PGM;
    env->int_pgm_code = code;
    env->int_pgm_ilen = ilen;
}

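/* Softmmu fault handler: translate the (page-aligned) virtual address via
 * the dynamic address translation code, verify that the resulting real
 * address lies within guest RAM, and install the mapping in the TLB.
 * Returns 0 on success, 1 if a program exception has been triggered.  */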
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* Check for an out-of-RAM access.  */
    if (raddr > ram_size) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

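/* Debug (e.g. gdbstub) address translation helpers.  The final 'false'
 * argument to mmu_translate() suppresses exception injection, so these
 * lookups have no architectural side effects.  */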
hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false)) {
        return -1;
    }
    return raddr;
}

hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
{
    hwaddr phys_addr;
    target_ulong page;

    page = vaddr & TARGET_PAGE_MASK;
    phys_addr = cpu_get_phys_page_debug(cs, page);
    phys_addr += (vaddr & ~TARGET_PAGE_MASK);

    return phys_addr;
}

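/* Install a new PSW.  The condition code is kept in sync for TCG (it lives
 * in bits 44-45 of the mask), PER watchpoints are recomputed when the PER
 * bit changes, and entering a wait state with no CPU left running shuts
 * the machine down.  */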
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;
    if (tcg_enabled()) {
        env->cc_op = (mask >> 44) & 3;
    }

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);
        if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
            qemu_system_shutdown_request();
#endif
        }
    }
}

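/* Return the architected PSW mask.  Under TCG the condition code is
 * computed lazily, so fold the current cc_op/src/dst/vr state back into
 * bits 44-45 before handing the mask out.  */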
static uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    if (tcg_enabled()) {
        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                             env->cc_vr);

        r &= ~PSW_MASK_CC;
        assert(!(env->cc_op & ~3));
        r |= (uint64_t)env->cc_op << 44;
    }

    return r;
}

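/* Map/unmap the lowcore, i.e. the first sizeof(LowCore) bytes of the area
 * designated by the prefix register, for interrupt delivery.  A partial
 * mapping is fatal.  */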
static LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    if (len < sizeof(LowCore)) {
        cpu_abort(CPU(cpu), "Could not map lowcore\n");
    }

    return lowcore;
}

static void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}

void do_restart_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
    addr = be64_to_cpu(lowcore->restart_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

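/* Deliver a program interrupt: determine the instruction length if it was
 * deferred at trigger time (ILEN_LATER re-derives it from the opcode; the
 * _INC variant also advances the PSW past the instruction), store the old
 * PSW and interrupt code in the lowcore, and load the program-new PSW.
 * Pending PER events are merged into the same exception.  */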
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    switch (ilen) {
    case ILEN_LATER:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        break;
    case ILEN_LATER_INC:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        env->psw.addr += ilen;
        break;
    default:
        assert(ilen == 2 || ilen == 4 || ilen == 6);
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}

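/* Deliver a supervisor-call interrupt: store the SVC code, instruction
 * length and old PSW (pointing past the SVC instruction) in the lowcore,
 * then load the svc-new PSW.  */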
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

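/* Deliver the external interrupt at the top of the queue: store its code
 * and parameters along with the old PSW in the lowcore, pop the queue, and
 * load the external-new PSW.  INTERRUPT_EXT is cleared once the queue is
 * empty.  */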
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

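/* Deliver an I/O interrupt.  Each interruption subclass (ISC) has its own
 * queue; an interrupt is only presented if the corresponding subclass-mask
 * bit in CR6 is set.  The first deliverable interrupt found is stored in
 * the lowcore and the io-new PSW is loaded; INTERRUPT_IO stays pending as
 * long as any queue still holds an entry.  */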
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}

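/* Deliver a machine-check interrupt.  Only channel-report-pending (type 1)
 * machine checks are supported, and only when the CRW-pending subclass mask
 * in CR14 allows them.  The architected register save areas and a fixed
 * machine-check interruption code are stored in the lowcore before the
 * mcck-new PSW is loaded.  */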
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

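/* Top-level interrupt dispatcher.  If no exception is already set, pick
 * the highest-priority pending interrupt class that the current PSW mask
 * enables (machine check, then external, then I/O), then deliver whatever
 * ended up in exception_index.  */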
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_CPUTIMER;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}

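/* Note that only the external-interrupt mask is checked here before
 * attempting delivery; s390_cpu_do_interrupt() re-checks the mask for
 * each individual interrupt class.  */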
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

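/* Map the PER storage-alteration event range (CR10..CR11) onto TCG
 * watchpoints.  The range is inclusive and may wrap around the top of the
 * address space, hence the multiple cases below.  */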
void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* We are called when the watchpoints have changed.  First
       remove them all.  */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if storage-alteration event is not enabled.  */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /* We can't create a watchpoint spanning the whole memory range, so
           split it into two parts.  */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range wraps, so create two watchpoints.  */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
    } else {
        /* Default case: create a single watchpoint.  */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}

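/* Debug exception handler: turn a watchpoint hit into a PER
 * storage-alteration event and re-execute the faulting instruction.  */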
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This turns out to be true except when the
           MVCP and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw() which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_resume_from_signal(cs, NULL);
    }
}
#endif /* CONFIG_USER_ONLY */