qemu/target/s390x/helper.c
/*
 *  S/390 helpers - sysemu only
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "exec/gdbstub.h"
#include "qemu/timer.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/pv.h"
#include "sysemu/hw_accel.h"
#include "sysemu/runstate.h"
#include "sysemu/tcg.h"

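/*
 * QEMU timer callbacks: inject a clock-comparator respectively CPU-timer
 * interrupt into the CPU that was passed as the opaque pointer.
 */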
void s390x_tod_timer(void *opaque)
{
    cpu_inject_clock_comparator((S390CPU *) opaque);
}

void s390x_cpu_timer(void *opaque)
{
    cpu_inject_cpu_timer((S390CPU *) opaque);
}

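/*
 * Debug (e.g. gdbstub) address translation: return the absolute address of
 * the page containing @vaddr, or -1 if translation fails.
 */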
hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t tec;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    /* We want to read the code (e.g., see what we are single-stepping). */
    if (asc != PSW_ASC_HOME) {
        asc = PSW_ASC_PRIMARY;
    }

    /*
     * We want to read code even if IEP is active. Use MMU_DATA_LOAD instead
     * of MMU_INST_FETCH.
     */
    if (mmu_translate(env, vaddr, MMU_DATA_LOAD, asc, &raddr, &prot, &tec)) {
        return -1;
    }
    return raddr;
}

hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
{
    hwaddr phys_addr;
    target_ulong page;

    page = vaddr & TARGET_PAGE_MASK;
    phys_addr = cpu_get_phys_page_debug(cs, page);
    phys_addr += (vaddr & ~TARGET_PAGE_MASK);

    return phys_addr;
}

static inline bool is_special_wait_psw(uint64_t psw_addr)
{
    /* signal quiesce */
    return (psw_addr & 0xfffUL) == 0xfffUL;
}

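/*
 * Called when a CPU enters a disabled wait. If no CPU is left running,
 * either shut down the guest (special quiesce PSW) or report a guest panic
 * (any other disabled-wait PSW).
 */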
void s390_handle_wait(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);

    if (s390_cpu_halt(cpu) == 0) {
        if (is_special_wait_psw(cpu->env.psw.addr)) {
            qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
        } else {
            cpu->env.crash_reason = S390_CRASH_REASON_DISABLED_WAIT;
            qemu_system_guest_panicked(cpu_get_crash_info(cs));
        }
    }
}

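/*
 * Map the guest's lowcore (the prefix area pointed to by env->psa) into
 * host memory; calls cpu_abort() if it cannot be mapped completely.
 */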
LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, true);

    if (len < sizeof(LowCore)) {
        cpu_abort(env_cpu(env), "Could not map lowcore\n");
    }

    return lowcore;
}

void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}

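/*
 * Deliver a restart interrupt: store the current PSW into the restart-old
 * PSW slot of the lowcore and load the new PSW from the restart-new slot.
 */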
void do_restart_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->restart_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
    addr = be64_to_cpu(lowcore->restart_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    env->pending_int &= ~INTERRUPT_RESTART;

    s390_cpu_set_psw(env, mask, addr);
}

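/*
 * Rebuild the PER storage-alteration watchpoints from control registers
 * 9 (event mask), 10 (range start) and 11 (range end).
 */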
void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /*
     * We are called when the watchpoints have changed. First
     * remove them all.
     */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if storage-alteration event is not enabled.  */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /*
         * We can't create a watchpoint spanning the whole memory range, so
         * split it into two parts.
         */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range wraps around, create two watchpoints.  */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);

    } else {
        /* Default case, create a single watchpoint.  */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}

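/* Layout of the 512-byte SIGP store-status save area. */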
typedef struct SigpSaveArea {
    uint64_t    fprs[16];                       /* 0x0000 */
    uint64_t    grs[16];                        /* 0x0080 */
    PSW         psw;                            /* 0x0100 */
    uint8_t     pad_0x0110[0x0118 - 0x0110];    /* 0x0110 */
    uint32_t    prefix;                         /* 0x0118 */
    uint32_t    fpc;                            /* 0x011c */
    uint8_t     pad_0x0120[0x0124 - 0x0120];    /* 0x0120 */
    uint32_t    todpr;                          /* 0x0124 */
    uint64_t    cputm;                          /* 0x0128 */
    uint64_t    ckc;                            /* 0x0130 */
    uint8_t     pad_0x0138[0x0140 - 0x0138];    /* 0x0138 */
    uint32_t    ars[16];                        /* 0x0140 */
    uint64_t    crs[16];                        /* 0x0180 */
} SigpSaveArea;
QEMU_BUILD_BUG_ON(sizeof(SigpSaveArea) != 512);

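/*
 * Store the CPU status into the 512-byte save area at @addr; with
 * @store_arch set, additionally write the architectural-mode ID to the
 * lowcore.
 */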
int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
{
    static const uint8_t ar_id = 1;
    SigpSaveArea *sa;
    hwaddr len = sizeof(*sa);
    int i;

    /* For PVMs, storing will occur when this CPU enters SIE again */
    if (s390_is_pv()) {
        return 0;
    }

    sa = cpu_physical_memory_map(addr, &len, true);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(*sa)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    if (store_arch) {
        cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
    }
    for (i = 0; i < 16; ++i) {
        sa->fprs[i] = cpu_to_be64(*get_freg(&cpu->env, i));
    }
    for (i = 0; i < 16; ++i) {
        sa->grs[i] = cpu_to_be64(cpu->env.regs[i]);
    }
    sa->psw.addr = cpu_to_be64(cpu->env.psw.addr);
    sa->psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(&cpu->env));
    sa->prefix = cpu_to_be32(cpu->env.psa);
    sa->fpc = cpu_to_be32(cpu->env.fpc);
    sa->todpr = cpu_to_be32(cpu->env.todpr);
    sa->cputm = cpu_to_be64(cpu->env.cputm);
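    /* only bits 0-55 of the clock comparator are stored */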
    sa->ckc = cpu_to_be64(cpu->env.ckc >> 8);
    for (i = 0; i < 16; ++i) {
        sa->ars[i] = cpu_to_be32(cpu->env.aregs[i]);
    }
    for (i = 0; i < 16; ++i) {
        sa->crs[i] = cpu_to_be64(cpu->env.cregs[i]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);

    return 0;
}

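/*
 * Layout of the 4 KiB additional-status save area (vector registers and
 * guarded-storage control block).
 */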
typedef struct SigpAdtlSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
    uint64_t    gscb[4];                          /* 0x0400 */
    uint8_t     pad_0x0420[0x1000 - 0x0420];      /* 0x0420 */
} SigpAdtlSaveArea;
QEMU_BUILD_BUG_ON(sizeof(SigpAdtlSaveArea) != 4096);

#define ADTL_GS_MIN_SIZE 2048 /* minimal size of adtl save area for GS */
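/*
 * Store the additional status (vector registers and, if the save area is
 * large enough, the guarded-storage control block) at @addr.
 */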
int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len)
{
    SigpAdtlSaveArea *sa;
    hwaddr save = len;
    int i;

    sa = cpu_physical_memory_map(addr, &save, true);
    if (!sa) {
        return -EFAULT;
    }
    if (save != len) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    if (s390_has_feat(S390_FEAT_VECTOR)) {
        for (i = 0; i < 32; i++) {
            sa->vregs[i][0] = cpu_to_be64(cpu->env.vregs[i][0]);
            sa->vregs[i][1] = cpu_to_be64(cpu->env.vregs[i][1]);
        }
    }
    if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) && len >= ADTL_GS_MIN_SIZE) {
        for (i = 0; i < 4; i++) {
            sa->gscb[i] = cpu_to_be64(cpu->env.gscb[i]);
        }
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}