linux/arch/s390/kvm/kvm-s390.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * definitions for kvm on s390
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#ifndef ARCH_S390_KVM_S390_H
#define ARCH_S390_KVM_S390_H

#include <linux/hrtimer.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/lockdep.h>
#include <asm/facility.h>
#include <asm/processor.h>
#include <asm/sclp.h>

/* Transactional Memory Execution related macros */
#define IS_TE_ENABLED(vcpu)     (((vcpu)->arch.sie_block->ecb & ECB_TE))
#define TDB_FORMAT1             1
#define IS_ITDB_VALID(vcpu)     ((*(char *)(vcpu)->arch.sie_block->itdba == TDB_FORMAT1))

extern debug_info_t *kvm_s390_dbf;
extern debug_info_t *kvm_s390_dbf_uv;

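/*
 * Debug event helpers: KVM_EVENT logs to the global kvm-s390 debug
 * feature, VM_EVENT and VCPU_EVENT log to the per-VM debug feature
 * (VCPU_EVENT also records the vcpu id and the guest PSW), and
 * KVM_UV_EVENT logs ultravisor events both per-VM and to the global
 * UV debug feature, tagged with the user space pid of the VM.
 */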
#define KVM_UV_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
        debug_sprintf_event((d_kvm)->arch.dbf, d_loglevel, d_string "\n", \
          d_args); \
        debug_sprintf_event(kvm_s390_dbf_uv, d_loglevel, \
                            "%d: " d_string "\n", (d_kvm)->userspace_pid, \
                            d_args); \
} while (0)

#define KVM_EVENT(d_loglevel, d_string, d_args...)\
do { \
        debug_sprintf_event(kvm_s390_dbf, d_loglevel, d_string "\n", \
          d_args); \
} while (0)

#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
        debug_sprintf_event((d_kvm)->arch.dbf, d_loglevel, d_string "\n", \
          d_args); \
} while (0)

#define VCPU_EVENT(d_vcpu, d_loglevel, d_string, d_args...)\
do { \
        debug_sprintf_event((d_vcpu)->kvm->arch.dbf, d_loglevel, \
          "%02d[%016lx-%016lx]: " d_string "\n", (d_vcpu)->vcpu_id, \
          (d_vcpu)->arch.sie_block->gpsw.mask, (d_vcpu)->arch.sie_block->gpsw.addr,\
          d_args); \
} while (0)

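/*
 * cpuflags is inspected by the SIE instruction and can be changed from
 * other CPUs while the vCPU is running, so the CPUSTAT_* bits must be
 * manipulated atomically.
 */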
static inline void kvm_s390_set_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
        atomic_or(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline void kvm_s390_clear_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
        atomic_andnot(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline bool kvm_s390_test_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
        return (atomic_read(&vcpu->arch.sie_block->cpuflags) & flags) == flags;
}

static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
{
        return kvm_s390_test_cpuflags(vcpu, CPUSTAT_STOPPED);
}

static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
{
        return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
}

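/*
 * For user-controlled VMs (CONFIG_KVM_S390_UCONTROL) the kernel does
 * not set up kvm->arch.gmap; user space manages the guest address
 * spaces itself.
 */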
static inline int kvm_is_ucontrol(struct kvm *kvm)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if (kvm->arch.gmap)
                return 0;
        return 1;
#else
        return 0;
#endif
}

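/*
 * The prefix area of a guest spans 8k (two pages), so the SIE block
 * stores the guest prefix address shifted right by 13 bits.
 */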
#define GUEST_PREFIX_SHIFT 13
static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
}

static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
{
        VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id,
                   prefix);
        vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
}

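/*
 * The text of an intercepted instruction is made available in the ipb
 * field of the SIE block; the helpers below extract base registers,
 * displacements and register numbers from it for the various
 * instruction formats.
 */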
static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
{
        u32 base2 = vcpu->arch.sie_block->ipb >> 28;
        u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

        if (ar)
                *ar = base2;

        return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
                                              u64 *address1, u64 *address2,
                                              u8 *ar_b1, u8 *ar_b2)
{
        u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
        u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
        u32 base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
        u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff;

        *address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
        *address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;

        if (ar_b1)
                *ar_b1 = base1;
        if (ar_b2)
                *ar_b2 = base2;
}

static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
{
        if (r1)
                *r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
        if (r2)
                *r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
}

static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, u8 *ar)
{
        u32 base2 = vcpu->arch.sie_block->ipb >> 28;
        u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
                        ((vcpu->arch.sie_block->ipb & 0xff00) << 4);

        /* The displacement is a 20-bit _SIGNED_ value */
        if (disp2 & 0x80000)
                disp2 += 0xfff00000;

        if (ar)
                *ar = base2;

        return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
}

static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, u8 *ar)
{
        u32 base2 = vcpu->arch.sie_block->ipb >> 28;
        u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

        if (ar)
                *ar = base2;

        return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

/* Set the condition code in the guest program status word */
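/* (the cc occupies PSW bits 18-19, hence the shift by 44 into the mask) */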
static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
{
        vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44);
        vcpu->arch.sie_block->gpsw.mask |= cc << 44;
}

/* test availability of facility in a kvm instance */
static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{
        return __test_facility(nr, kvm->arch.model.fac_mask) &&
                __test_facility(nr, kvm->arch.model.fac_list);
}

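/*
 * Facility bits are numbered MSB first within each byte: facility 0
 * is bit 0x80 of byte 0 of the facility list, facility 8 is bit 0x80
 * of byte 1, and so on.
 */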
static inline int set_kvm_facility(u64 *fac_list, unsigned long nr)
{
        unsigned char *ptr;

        if (nr >= MAX_FACILITY_BIT)
                return -EINVAL;
        ptr = (unsigned char *) fac_list + (nr >> 3);
        *ptr |= (0x80UL >> (nr & 7));
        return 0;
}

static inline int test_kvm_cpu_feat(struct kvm *kvm, unsigned long nr)
{
        WARN_ON_ONCE(nr >= KVM_S390_VM_CPU_FEAT_NR_BITS);
        return test_bit_inv(nr, kvm->arch.cpu_feat);
}

/* are cpu states controlled by user space */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
        return kvm->arch.user_cpu_state_ctrl != 0;
}

/* implemented in pv.c */
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
                              u16 *rrc);
int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
                       unsigned long tweak, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state);

static inline u64 kvm_s390_pv_get_handle(struct kvm *kvm)
{
        return kvm->arch.pv.handle;
}

static inline u64 kvm_s390_pv_cpu_get_handle(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.pv.handle;
}

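/*
 * A VM or vCPU is protected exactly if the ultravisor has assigned it
 * a handle; the lockdep assertions document under which lock the
 * handle may be checked.
 */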
static inline bool kvm_s390_pv_is_protected(struct kvm *kvm)
{
        lockdep_assert_held(&kvm->lock);
        return !!kvm_s390_pv_get_handle(kvm);
}

static inline bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu)
{
        lockdep_assert_held(&vcpu->mutex);
        return !!kvm_s390_pv_cpu_get_handle(vcpu);
}

/* implemented in interrupt.c */
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
void kvm_s390_clear_float_irqs(struct kvm *kvm);
int __must_check kvm_s390_inject_vm(struct kvm *kvm,
                                    struct kvm_s390_interrupt *s390int);
int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_irq *irq);

static inline int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
                                           struct kvm_s390_pgm_info *pgm_info)
{
        struct kvm_s390_irq irq = {
                .type = KVM_S390_PROGRAM_INT,
                .u.pgm = *pgm_info,
        };

        return kvm_s390_inject_vcpu(vcpu, &irq);
}

static inline int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
        struct kvm_s390_irq irq = {
                .type = KVM_S390_PROGRAM_INT,
                .u.pgm.code = code,
        };

        return kvm_s390_inject_vcpu(vcpu, &irq);
}
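
/*
 * Typical use in an instruction handler, e.g. when a privileged
 * instruction was issued from the problem state:
 *
 *	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 *		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 */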

struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
                                                    u64 isc_mask, u32 schid);
int kvm_s390_reinject_io_int(struct kvm *kvm,
                             struct kvm_s390_interrupt_info *inti);
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);

/* implemented in intercept.c */
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu);
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);

static inline void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilen)
{
        struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;

        sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilen);
}
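
/* forwarding the PSW is simply rewinding it by a negative ilen */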
static inline void kvm_s390_forward_psw(struct kvm_vcpu *vcpu, int ilen)
{
        kvm_s390_rewind_psw(vcpu, -ilen);
}

static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
{
        /* don't inject PER events if we re-execute the instruction */
        vcpu->arch.sie_block->icptstatus &= ~0x02;
        kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
}

int handle_sthyi(struct kvm_vcpu *vcpu);

/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e3(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu);

/* implemented in vsie.c */
int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
                                 unsigned long end);
void kvm_s390_vsie_init(struct kvm *kvm);
void kvm_s390_vsie_destroy(struct kvm *kvm);

/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);

/* implemented in kvm-s390.c */
void kvm_s390_set_tod_clock(struct kvm *kvm,
                            const struct kvm_s390_vm_tod_clock *gtod);
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu);
void exit_sie(struct kvm_vcpu *vcpu);
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm);
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu);

/* implemented in diag.c */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);

static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
{
        int i;
        struct kvm_vcpu *vcpu;

        WARN_ON(!mutex_is_locked(&kvm->lock));
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_s390_vcpu_block(vcpu);
}

static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
{
        int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_s390_vcpu_unblock(vcpu);
}

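/* the guest TOD is the host TOD plus the per-VM epoch offset */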
static inline u64 kvm_s390_get_tod_clock_fast(struct kvm *kvm)
{
        u64 rc;

        preempt_disable();
        rc = get_tod_clock_fast() + kvm->arch.epoch;
        preempt_enable();
        return rc;
}

/**
 * kvm_s390_inject_prog_cond - conditionally inject a program check
 * @vcpu: virtual cpu
 * @rc: original return/error code
 *
 * This function is supposed to be used after regular guest access functions
 * failed, to conditionally inject a program check to a vcpu. The typical
 * pattern would look like
 *
 * rc = write_guest(vcpu, addr, data, len);
 * if (rc)
 *	return kvm_s390_inject_prog_cond(vcpu, rc);
 *
 * A negative return code from guest access functions implies an internal error
 * such as out of memory. In these cases no program check should be injected
 * to the guest.
 * A positive value implies that an exception happened while accessing a guest's
 * memory. In this case all data belonging to the corresponding program check
 * has been stored in vcpu->arch.pgm and can be injected with
 * kvm_s390_inject_prog_irq().
 *
 * Returns: - the original @rc value if @rc was negative (internal error)
 *	    - zero if @rc was already zero
 *	    - zero or error code from injecting if @rc was positive
 *	      (program check injected to @vcpu)
 */
static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc)
{
        if (rc <= 0)
                return rc;
        return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
                        struct kvm_s390_irq *s390irq);

/* implemented in interrupt.c */
int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop);
int psw_extint_disabled(struct kvm_vcpu *vcpu);
void kvm_s390_destroy_adapters(struct kvm *kvm);
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
extern struct kvm_device_ops kvm_flic_ops;
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
                           void __user *buf, int len);
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
                           __u8 __user *buf, int len);
void kvm_s390_gisa_init(struct kvm *kvm);
void kvm_s390_gisa_clear(struct kvm *kvm);
void kvm_s390_gisa_destroy(struct kvm *kvm);
int kvm_s390_gib_init(u8 nisc);
void kvm_s390_gib_destroy(void);

/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu);
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
                            struct kvm_guest_debug *dbg);
void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);

/* support for Basic/Extended SCA handling */
static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
{
        struct bsca_block *sca = kvm->arch.sca; /* SCA version doesn't matter */

        return &sca->ipte_control;
}

static inline int kvm_s390_use_sca_entries(void)
{
        /*
         * Without SIGP interpretation, only SRS interpretation (if available)
         * might use the entries. By not setting the entries and keeping them
         * invalid, hardware will not access them but intercept.
         */
        return sclp.has_sigpif;
}

void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
                                     struct mcck_volatile_info *mcck_info);

/**
 * kvm_s390_vcpu_crypto_reset_all
 *
 * @kvm: the KVM guest
 *
 * Reset the crypto attributes for each vcpu. This can be done while the
 * vcpus are running, as each vcpu will be removed from SIE before the
 * crypto attributes are reset and then restored to SIE afterward.
 *
 * Note: kvm->lock must be held while calling this function.
 */
void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm);

/**
 * diag9c_forwarding_hz
 *
 * Set the maximum number of diag9c forwarding operations per second.
 */
extern unsigned int diag9c_forwarding_hz;

#endif /* ARCH_S390_KVM_S390_H */