linux/arch/s390/kvm/gaccess.h
/*
 * gaccess.h - access guest memory
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"

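/*
 * Translate a guest real address into a user-space pointer into guest
 * memory.  The first two pages of guest storage are subject to
 * prefixing: accesses below 2 * PAGE_SIZE are redirected to the prefix
 * area, and accesses into the prefix area are redirected to absolute
 * pages 0 and 1.  After the swap, the guest memory start offset
 * (gmsor) is added to form the user address.  Returns
 * ERR_PTR(-EFAULT) for addresses outside guest memory.
 */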
static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
                                               unsigned long guestaddr)
{
        unsigned long prefix  = vcpu->arch.sie_block->prefix;
        unsigned long origin  = vcpu->arch.sie_block->gmsor;
        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);

        if (guestaddr < 2 * PAGE_SIZE)
                guestaddr += prefix;
        else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
                guestaddr -= prefix;

        /* memsize is the first byte past guest memory, hence ">=" */
        if (guestaddr >= memsize)
                return (void __user __force *) ERR_PTR(-EFAULT);

        guestaddr += origin;

        return (void __user *) guestaddr;
}

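/*
 * get_guest_u64/u32/u16/u8 read one naturally aligned value from a
 * guest real address and return 0 on success or -EFAULT if the address
 * is outside guest memory.  A sketch of a hypothetical caller:
 *
 *      u64 val;
 *      int rc = get_guest_u64(vcpu, addr, &val);
 *      if (rc)
 *              return rc;
 */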
static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u64 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 7);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u64 __user *) uptr);
}

static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u32 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 3);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u32 __user *) uptr);
}

static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u16 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 1);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u16 __user *) uptr);
}

static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                               u8 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u8 __user *) uptr);
}

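/*
 * put_guest_u64/u32/u16/u8 are the store-side counterparts: they write
 * one naturally aligned value to a guest real address and return 0 or
 * -EFAULT.
 */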
static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u64 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 7);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u64 __user *) uptr);
}

static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u32 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 3);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u32 __user *) uptr);
}

static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u16 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 1);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u16 __user *) uptr);
}

static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                               u8 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u8 __user *) uptr);
}

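/*
 * Byte-wise fallback used when a copy range straddles the low-core or
 * prefix-area boundaries: going through put_guest_u8() applies the
 * prefix swap for each byte individually.
 */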
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
                                       unsigned long guestdest,
                                       const void *from, unsigned long n)
{
        int rc;
        unsigned long i;
        const u8 *data = from;

        for (i = 0; i < n; i++) {
                rc = put_guest_u8(vcpu, guestdest++, *(data++));
                if (rc < 0)
                        return rc;
        }
        return 0;
}

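/*
 * Copy from the host into guest real storage.  If the range crosses
 * neither page 0/1 nor the prefix area, the address is translated once
 * and a single copy_to_user() suffices; otherwise the byte-wise slow
 * path above is taken.
 */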
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
                                const void *from, unsigned long n)
{
        unsigned long prefix  = vcpu->arch.sie_block->prefix;
        unsigned long origin  = vcpu->arch.sie_block->gmsor;
        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);

        if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
                goto slowpath;

        if ((guestdest < prefix) && (guestdest + n > prefix))
                goto slowpath;

        if ((guestdest < prefix + 2 * PAGE_SIZE)
            && (guestdest + n > prefix + 2 * PAGE_SIZE))
                goto slowpath;

        if (guestdest < 2 * PAGE_SIZE)
                guestdest += prefix;
        else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
                guestdest -= prefix;

        if (guestdest + n > memsize)
                return -EFAULT;

        if (guestdest + n < guestdest)
                return -EFAULT;

        guestdest += origin;

        /* copy_to_user() returns the number of uncopied bytes, not -errno */
        if (copy_to_user((void __user *) guestdest, from, n))
                return -EFAULT;
        return 0;
slowpath:
        return __copy_to_guest_slow(vcpu, guestdest, from, n);
}

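/*
 * Byte-wise fallback for copies out of guest storage that straddle the
 * low-core or prefix-area boundaries.
 */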
static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
                                         unsigned long guestsrc,
                                         unsigned long n)
{
        int rc;
        unsigned long i;
        u8 *data = to;

        for (i = 0; i < n; i++) {
                rc = get_guest_u8(vcpu, guestsrc++, data++);
                if (rc < 0)
                        return rc;
        }
        return 0;
}

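/*
 * Copy from guest real storage into the host, mirroring copy_to_guest():
 * a single copy_from_user() on the fast path, byte-wise otherwise.
 */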
static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
                                  unsigned long guestsrc, unsigned long n)
{
        unsigned long prefix  = vcpu->arch.sie_block->prefix;
        unsigned long origin  = vcpu->arch.sie_block->gmsor;
        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);

        if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
                goto slowpath;

        if ((guestsrc < prefix) && (guestsrc + n > prefix))
                goto slowpath;

        if ((guestsrc < prefix + 2 * PAGE_SIZE)
            && (guestsrc + n > prefix + 2 * PAGE_SIZE))
                goto slowpath;

        if (guestsrc < 2 * PAGE_SIZE)
                guestsrc += prefix;
        else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
                guestsrc -= prefix;

        if (guestsrc + n > memsize)
                return -EFAULT;

        if (guestsrc + n < guestsrc)
                return -EFAULT;

        guestsrc += origin;

        /* copy_from_user() returns the number of uncopied bytes, not -errno */
        if (copy_from_user(to, (void __user *) guestsrc, n))
                return -EFAULT;
        return 0;
slowpath:
        return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}

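/*
 * Write to guest absolute storage: unlike copy_to_guest(), no prefix
 * swap is applied; the address is only offset by the guest origin.
 */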
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
                                         unsigned long guestdest,
                                         const void *from, unsigned long n)
{
        unsigned long origin  = vcpu->arch.sie_block->gmsor;
        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);

        if (guestdest + n > memsize)
                return -EFAULT;

        if (guestdest + n < guestdest)
                return -EFAULT;

        guestdest += origin;

        if (copy_to_user((void __user *) guestdest, from, n))
                return -EFAULT;
        return 0;
}

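/* Read from guest absolute storage; again no prefixing is applied. */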
static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
                                           unsigned long guestsrc,
                                           unsigned long n)
{
        unsigned long origin  = vcpu->arch.sie_block->gmsor;
        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);

        if (guestsrc + n > memsize)
                return -EFAULT;

        if (guestsrc + n < guestsrc)
                return -EFAULT;

        guestsrc += origin;

        if (copy_from_user(to, (void __user *) guestsrc, n))
                return -EFAULT;
        return 0;
}
#endif /* __KVM_S390_GACCESS_H */