linux/arch/s390/kvm/sigp.c
/*
 * sigp.c - handling interprocessor communication
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include "gaccess.h"
#include "kvm-s390.h"

/* sigp order codes */
#define SIGP_SENSE             0x01
#define SIGP_EXTERNAL_CALL     0x02
#define SIGP_EMERGENCY         0x03
#define SIGP_START             0x04
#define SIGP_STOP              0x05
#define SIGP_RESTART           0x06
#define SIGP_STOP_STORE_STATUS 0x09
#define SIGP_INITIAL_CPU_RESET 0x0b
#define SIGP_CPU_RESET         0x0c
#define SIGP_SET_PREFIX        0x0d
#define SIGP_STORE_STATUS_ADDR 0x0e
#define SIGP_SET_ARCH          0x12
#define SIGP_SENSE_RUNNING     0x15

/* cpu status bits */
#define SIGP_STAT_EQUIPMENT_CHECK   0x80000000UL
#define SIGP_STAT_NOT_RUNNING       0x00000400UL
#define SIGP_STAT_INCORRECT_STATE   0x00000200UL
#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
#define SIGP_STAT_EXT_CALL_PENDING  0x00000080UL
#define SIGP_STAT_STOPPED           0x00000040UL
#define SIGP_STAT_OPERATOR_INTERV   0x00000020UL
#define SIGP_STAT_CHECK_STOP        0x00000010UL
#define SIGP_STAT_INOPERATIVE       0x00000004UL
#define SIGP_STAT_INVALID_ORDER     0x00000002UL
#define SIGP_STAT_RECEIVER_CHECK    0x00000001UL


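/*
 * Handle the SIGP SENSE order: return condition code 3 if the addressed
 * cpu does not exist, otherwise store the sensed status (the stopped bit,
 * or an empty status word if the target is not stopped) in the low word
 * of the status register and return condition code 1.
 */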
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
                        unsigned long *reg)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return 3; /* not operational */

        spin_lock(&fi->lock);
        if (fi->local_int[cpu_addr] == NULL)
                rc = 3; /* not operational */
        else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
                  & CPUSTAT_STOPPED)) {
                *reg &= 0xffffffff00000000UL;
                rc = 1; /* status stored */
        } else {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STAT_STOPPED;
                rc = 1; /* status stored */
        }
        spin_unlock(&fi->lock);

        VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
        return rc;
}

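/*
 * Handle the SIGP EMERGENCY SIGNAL order: queue an emergency-signal
 * external interrupt carrying the sender's cpu address on the target
 * vcpu and wake it up. Returns 0 (order accepted) or 3 (not operational).
 */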
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_interrupt_info *inti;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return 3; /* not operational */

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        inti->type = KVM_S390_INT_EMERGENCY;
        inti->emerg.code = vcpu->vcpu_id;

        spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = 3; /* not operational */
                kfree(inti);
                goto unlock;
        }
        spin_lock_bh(&li->lock);
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&li->wq);
        spin_unlock_bh(&li->lock);
        rc = 0; /* order accepted */
        VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
unlock:
        spin_unlock(&fi->lock);
        return rc;
}

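/*
 * Handle the SIGP EXTERNAL CALL order: like the emergency signal above,
 * but queues an external-call interrupt with the calling cpu's address.
 */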
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_interrupt_info *inti;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return 3; /* not operational */

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        inti->type = KVM_S390_INT_EXTERNAL_CALL;
        inti->extcall.code = vcpu->vcpu_id;

        spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = 3; /* not operational */
                kfree(inti);
                goto unlock;
        }
        spin_lock_bh(&li->lock);
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&li->wq);
        spin_unlock_bh(&li->lock);
        rc = 0; /* order accepted */
        VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
unlock:
        spin_unlock(&fi->lock);
        return rc;
}

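/*
 * Queue a stop interrupt on the given local interrupt list and record the
 * requested action bits (stop, or store status on stop). GFP_ATOMIC is
 * used because __sigp_stop calls this with the floating interrupt lock
 * held.
 */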
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
        struct kvm_s390_interrupt_info *inti;

        inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
        if (!inti)
                return -ENOMEM;
        inti->type = KVM_S390_SIGP_STOP;

        spin_lock_bh(&li->lock);
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
        li->action_bits |= action;
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&li->wq);
        spin_unlock_bh(&li->lock);

        return 0; /* order accepted */
}

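/*
 * Handle the SIGP STOP (AND STORE STATUS) orders: look up the target vcpu
 * under the floating interrupt lock and inject a stop request with the
 * given action bits.
 */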
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return 3; /* not operational */

        spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = 3; /* not operational */
                goto unlock;
        }

        rc = __inject_sigp_stop(li, action);

unlock:
        spin_unlock(&fi->lock);
        VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
        return rc;
}

int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        return __inject_sigp_stop(li, action);
}

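/*
 * Handle the SIGP SET ARCHITECTURE order: architecture modes 1 and 2 are
 * accepted, mode 0 is reported as not operational and any other value is
 * rejected with -EOPNOTSUPP.
 */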
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
        int rc;

        switch (parameter & 0xff) {
        case 0:
                rc = 3; /* not operational */
                break;
        case 1:
        case 2:
                rc = 0; /* order accepted */
                break;
        default:
                rc = -EOPNOTSUPP;
        }
        return rc;
}

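/*
 * Handle the SIGP SET PREFIX order: check that both pages of the new
 * prefix area are backed by guest memory, then queue a set-prefix
 * interrupt for the target vcpu. The target must exist and be stopped;
 * otherwise status is stored with the incorrect-state bit set.
 */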
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
                             unsigned long *reg)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li = NULL;
        struct kvm_s390_interrupt_info *inti;
        int rc;
        u8 tmp;

        /* make sure that the new value is valid memory */
        address = address & 0x7fffe000u;
        if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
           copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
                *reg |= SIGP_STAT_INVALID_PARAMETER;
                return 1; /* invalid parameter */
        }

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return 2; /* busy */

        spin_lock(&fi->lock);
        if (cpu_addr < KVM_MAX_VCPUS)
                li = fi->local_int[cpu_addr];

        if (li == NULL) {
                rc = 1; /* incorrect state */
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STAT_INCORRECT_STATE;
                kfree(inti);
                goto out_fi;
        }

        spin_lock_bh(&li->lock);
        /* cpu must be in stopped state */
        if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
                rc = 1; /* incorrect state */
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STAT_INCORRECT_STATE;
                kfree(inti);
                goto out_li;
        }

        inti->type = KVM_S390_SIGP_SET_PREFIX;
        inti->prefix.address = address;

        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&li->wq);
        rc = 0; /* order accepted */

        VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
        spin_unlock_bh(&li->lock);
out_fi:
        spin_unlock(&fi->lock);
        return rc;
}

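/*
 * Handle the SIGP SENSE RUNNING STATUS order: report whether the target
 * vcpu is currently running; the not-running status bit is stored when it
 * is not. Returns condition code 3 if the target is not operational.
 */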
static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
                                unsigned long *reg)
{
        int rc;
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return 3; /* not operational */

        spin_lock(&fi->lock);
        if (fi->local_int[cpu_addr] == NULL)
                rc = 3; /* not operational */
        else {
                if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
                    & CPUSTAT_RUNNING) {
                        /* running */
                        rc = 0; /* order accepted */
                } else {
                        /* not running */
                        *reg &= 0xffffffff00000000UL;
                        *reg |= SIGP_STAT_NOT_RUNNING;
                        rc = 1; /* status stored */
                }
        }
        spin_unlock(&fi->lock);

        VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
                   rc);

        return rc;
}

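/*
 * Intercept handler for the SIGP instruction: decode the order code from
 * the second-operand address, the cpu address from r3 and the parameter
 * from the odd register of the r1 pair, dispatch to the helper for the
 * order and store the resulting condition code in bits 18-19 of the guest
 * PSW mask. Orders that user space must handle (such as restart) are
 * passed up as -EOPNOTSUPP.
 */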
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
        int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int r3 = vcpu->arch.sie_block->ipa & 0x000f;
        int base2 = vcpu->arch.sie_block->ipb >> 28;
        int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
        u32 parameter;
        u16 cpu_addr = vcpu->arch.guest_gprs[r3];
        u8 order_code;
        int rc;

        /* sigp in userspace can exit */
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_PRIVILEGED_OPERATION);

        order_code = disp2;
        if (base2)
                order_code += vcpu->arch.guest_gprs[base2];

        if (r1 % 2)
                parameter = vcpu->arch.guest_gprs[r1];
        else
                parameter = vcpu->arch.guest_gprs[r1 + 1];

        switch (order_code) {
        case SIGP_SENSE:
                vcpu->stat.instruction_sigp_sense++;
                rc = __sigp_sense(vcpu, cpu_addr,
                                  &vcpu->arch.guest_gprs[r1]);
                break;
        case SIGP_EXTERNAL_CALL:
                vcpu->stat.instruction_sigp_external_call++;
                rc = __sigp_external_call(vcpu, cpu_addr);
                break;
        case SIGP_EMERGENCY:
                vcpu->stat.instruction_sigp_emergency++;
                rc = __sigp_emergency(vcpu, cpu_addr);
                break;
        case SIGP_STOP:
                vcpu->stat.instruction_sigp_stop++;
                rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
                break;
        case SIGP_STOP_STORE_STATUS:
                vcpu->stat.instruction_sigp_stop++;
                rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP);
                break;
        case SIGP_SET_ARCH:
                vcpu->stat.instruction_sigp_arch++;
                rc = __sigp_set_arch(vcpu, parameter);
                break;
        case SIGP_SET_PREFIX:
                vcpu->stat.instruction_sigp_prefix++;
                rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
                                       &vcpu->arch.guest_gprs[r1]);
                break;
        case SIGP_SENSE_RUNNING:
                vcpu->stat.instruction_sigp_sense_running++;
                rc = __sigp_sense_running(vcpu, cpu_addr,
                                          &vcpu->arch.guest_gprs[r1]);
                break;
        case SIGP_RESTART:
                vcpu->stat.instruction_sigp_restart++;
                /* user space must know about restart */
        default:
                return -EOPNOTSUPP;
        }

        if (rc < 0)
                return rc;

        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
        vcpu->arch.sie_block->gpsw.mask |= (rc & 3ul) << 44;
        return 0;
}