linux/arch/s390/kvm/priv.c
/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *cpup;
	s64 hostclk, val;
	int i, rc;
	u64 op2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	if (store_tod_clock(&hostclk)) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
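	/*
	 * The guest TOD is the host TOD plus a per-vcpu epoch, so SCK is
	 * emulated by computing a new epoch for every vcpu instead of
	 * touching the host clock. The low six bits are masked off,
	 * presumably because they lie below the settable resolution.
	 */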
	val = (val - hostclk) & ~0x3fUL;

	mutex_lock(&vcpu->kvm->lock);
	kvm_for_each_vcpu(i, cpup, vcpu->kvm)
		cpup->arch.sie_block->epoch = val;
	mutex_unlock(&vcpu->kvm->lock);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

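	/* The mask keeps an 8k-aligned address below 2 GB; the remaining
	 * bits of the operand are ignored. */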
	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* store the value */
	rc = write_guest(vcpu, operand2, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

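/*
 * Storage keys are handled lazily: the ICTL bits make ISKE, SSKE and
 * RRBE intercept until their first use, at which point real storage
 * keys are enabled for the host mm and the intercepts are dropped so
 * later key instructions run directly under SIE.
 */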
static void __skey_check_enable(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
		return;

	s390_enable_skey();
	trace_kvm_s390_skey_related_inst(vcpu);
	vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
}

static int handle_skey(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_storage_key++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	__skey_check_enable(vcpu);

	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(*psw).p)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
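	/*
	 * Wait until the IPTE lock is free again, then rewind the PSW so
	 * the guest re-executes the interlocked instruction.
	 */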
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	psw->addr = __rewind_psw(*psw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

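	/*
	 * TEST BLOCK zeroes the addressed 4k block and indicates its
	 * usability: condition code 0 with r0 == 0 means the block is
	 * usable.
	 */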
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_protection(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int cc, rc;
	u64 addr;

	rc = 0;
	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
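	/*
	 * Try to dequeue one pending I/O interrupt matching the
	 * interruption subclasses enabled in CR6. CC0 means none was
	 * pending; CC1 means an interruption code is made available.
	 */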
	cc = 0;
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti)
		goto no_interrupt;
	cc = 1;
	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, &tpi_data, len);
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
			rc = -EFAULT;
	}
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (!rc)
		kfree(inti);
	else
		kvm_s390_reinject_io_int(vcpu->kvm, inti);
no_interrupt:
	/* Set condition code and we're done. */
	if (!rc)
		kvm_s390_set_psw_cc(vcpu, cc);
	return rc ? -EFAULT : 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
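		/* TPI is opcode 0xb236, TSCH is 0xb235. */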
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			    vfacilities, 4);
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 5, "store facility list value %x",
		   *(unsigned int *) vfacilities);
	trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
	return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
	/* Check whether the new psw is enabled for machine checks. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
		kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

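/*
 * A PSW is valid if no unassigned mask bits are set, the address fits
 * the addressing mode (24-bit, 31-bit with BA, or 64-bit with EA+BA;
 * EA without BA is invalid) and the instruction address is even.
 */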
int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
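	/*
	 * Expand the 8-byte short-format PSW into the 16-byte extended
	 * format: the base bit must be one and is dropped, and the
	 * addressing-mode bit moves from the address word into the mask.
	 */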
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->arch.stidp_data;
	u64 operand2;
	int rc;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
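	/* Shift the existing entries down to free slot 0 for this KVM. */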
	for (n = mem->count - 1; n > 0; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

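	/* Function code 0 queries the current configuration level (3 = VM). */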
	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones that we can handle in the kernel;
	 * anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

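/* Bit fields of the r1 operand of PFMF (PERFORM FRAME MANAGEMENT FUNCTION). */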
#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;
	unsigned long start, end;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!MACHINE_HAS_PFMF)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if the host supports it */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* No support for conditional-SSKE */
	if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_protection(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

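	/* The frame-size code selects 4k or 1M frames; 2G (EDAT2) is not supported. */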
	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	/*
	 * We don't support EDAT2:
	 * case 0x00002000:
	 *	end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
	 *	break;
	 */
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}
	while (start < end) {
		unsigned long useraddr, abs_addr;

		/* Translate guest address to host address */
		if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0)
			abs_addr = kvm_s390_real_to_abs(vcpu, start);
		else
			abs_addr = start;
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			__skey_check_enable(vcpu);
			if (set_guest_storage_key(current->mm, useraddr,
					vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
					vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
		vcpu->run->s.regs.gprs[reg2] = end;
	return 0;
}

static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* Number of entries in the cbrlo list; expected to be 0x1ff. */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo, cbrle;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!kvm_s390_cmma_enabled(vcpu->kvm))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

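	/*
	 * The SIE block's cbrlo points to a page holding the addresses of
	 * guest pages released via ESSA; walk the list and zap the host
	 * mappings so the backing memory can be reclaimed.
	 */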
	/* Rewind PSW to repeat the ESSA instruction */
	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i) {
		cbrle = cbrlo[i];
		if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE))
			/* invalid entry */
			break;
		/* try to free backing */
		__gmap_zap(cbrle, gmap);
	}
	up_read(&gmap->mm->mmap_sem);
	if (i < entries)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 val = 0;
	int reg, rc;
	u64 ga;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

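	/* LCTL replaces only bits 32-63 of each control register from r1 to r3. */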
	reg = reg1;
	do {
		rc = read_guest(vcpu, ga, &val, sizeof(val));
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= val;
		ga += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 ga;
	u32 val;
	int reg, rc;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	do {
		val = vcpu->arch.sie_block->gcr[reg] & 0x00000000fffffffful;
		rc = write_guest(vcpu, ga, &val, sizeof(val));
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
		ga += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 ga, val;
	int reg, rc;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	do {
		rc = read_guest(vcpu, ga, &val, sizeof(val));
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
		vcpu->arch.sie_block->gcr[reg] = val;
		ga += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 ga, val;
	int reg, rc;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	do {
		val = vcpu->arch.sie_block->gcr[reg];
		rc = write_guest(vcpu, ga, &val, sizeof(val));
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
		ga += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x25] = handle_stctg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

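	/*
	 * TPROT reports the protection state of the addressed page: CC0 if
	 * fetch and store are permitted, CC1 if only fetch is permitted and
	 * CC3 if no translation is available. CC2 (key-controlled
	 * protection) never occurs here since storage keys are not handled.
	 */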
	/*
	 * We only handle the Linux memory detection case:
	 * access key == 0, everything else goes to userspace.
	 */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, &gpa, 1);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, &gpa, 0);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

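	/* Bits 48-63 of r0 become the guest's TOD programmable field. */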
	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}