qemu/target/i386/xsave_helper.c
/*
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"

#include "cpu.h"

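/*
 * x86_cpu_xsave_all_areas: serialize the CPU's FPU/SSE/AVX/MPX/AVX-512/PKRU
 * state from CPUX86State into an XSAVE-format buffer of buflen bytes, using
 * the per-component offsets recorded in x86_ext_save_areas.
 */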
void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen)
{
    CPUX86State *env = &cpu->env;
    const ExtSaveArea *e, *f;
    int i;

    X86LegacyXSaveArea *legacy;
    X86XSaveHeader *header;
    uint16_t cwd, swd, twd;

    memset(buf, 0, buflen);

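    /*
     * The legacy FXSAVE image sits at the x87 component's offset;
     * the XSAVE header follows immediately after it.
     */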
    e = &x86_ext_save_areas[XSTATE_FP_BIT];

    legacy = buf + e->offset;
    header = buf + e->offset + sizeof(*legacy);

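    /*
     * Pack the x87 control word, the status word (with the stack-top index
     * merged into bits 11..13) and the abridged 8-bit tag word
     * (bit set = register valid) into the legacy area.
     */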
    twd = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i) {
        twd |= (!env->fptags[i]) << i;
    }
    legacy->fcw = cwd;
    legacy->fsw = swd;
    legacy->ftw = twd;
    legacy->fpop = env->fpop;
    legacy->fpip = env->fpip;
    legacy->fpdp = env->fpdp;
    memcpy(&legacy->fpregs, env->fpregs,
           sizeof(env->fpregs));
    legacy->mxcsr = env->mxcsr;

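    /* Low 128 bits of each vector register go into the legacy XMM slots. */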
    for (i = 0; i < CPU_NB_REGS; i++) {
        uint8_t *xmm = legacy->xmm_regs[i];

        stq_p(xmm,     env->xmm_regs[i].ZMM_Q(0));
        stq_p(xmm + 8, env->xmm_regs[i].ZMM_Q(1));
    }

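    /* XSTATE_BV advertises which state components the buffer contains. */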
    header->xstate_bv = env->xstate_bv;

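    /* AVX: the upper 128 bits of each YMM register, if the component exists. */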
    e = &x86_ext_save_areas[XSTATE_YMM_BIT];
    if (e->size && e->offset) {
        XSaveAVX *avx;

        avx = buf + e->offset;

        for (i = 0; i < CPU_NB_REGS; i++) {
            uint8_t *ymmh = avx->ymmh[i];

            stq_p(ymmh,     env->xmm_regs[i].ZMM_Q(2));
            stq_p(ymmh + 8, env->xmm_regs[i].ZMM_Q(3));
        }
    }

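    /* MPX: bound registers (BNDREGS) and the BNDCFGU/BNDSTATUS pair (BNDCSR). */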
    e = &x86_ext_save_areas[XSTATE_BNDREGS_BIT];
    if (e->size && e->offset) {
        XSaveBNDREG *bndreg;
        XSaveBNDCSR *bndcsr;

        f = &x86_ext_save_areas[XSTATE_BNDCSR_BIT];
        assert(f->size);
        assert(f->offset);

        bndreg = buf + e->offset;
        bndcsr = buf + f->offset;

        memcpy(&bndreg->bnd_regs, env->bnd_regs,
               sizeof(env->bnd_regs));
        bndcsr->bndcsr = env->bndcs_regs;
    }

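    /*
     * AVX-512: opmask registers k0..k7, the upper 256 bits of ZMM0..ZMM15,
     * and (64-bit targets only) the full ZMM16..ZMM31 registers.
     */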
    e = &x86_ext_save_areas[XSTATE_OPMASK_BIT];
    if (e->size && e->offset) {
        XSaveOpmask *opmask;
        XSaveZMM_Hi256 *zmm_hi256;
#ifdef TARGET_X86_64
        XSaveHi16_ZMM *hi16_zmm;
#endif

        f = &x86_ext_save_areas[XSTATE_ZMM_Hi256_BIT];
        assert(f->size);
        assert(f->offset);

        opmask = buf + e->offset;
        zmm_hi256 = buf + f->offset;

        memcpy(&opmask->opmask_regs, env->opmask_regs,
               sizeof(env->opmask_regs));

        for (i = 0; i < CPU_NB_REGS; i++) {
            uint8_t *zmmh = zmm_hi256->zmm_hi256[i];

            stq_p(zmmh,      env->xmm_regs[i].ZMM_Q(4));
            stq_p(zmmh + 8,  env->xmm_regs[i].ZMM_Q(5));
            stq_p(zmmh + 16, env->xmm_regs[i].ZMM_Q(6));
            stq_p(zmmh + 24, env->xmm_regs[i].ZMM_Q(7));
        }

#ifdef TARGET_X86_64
        f = &x86_ext_save_areas[XSTATE_Hi16_ZMM_BIT];
        assert(f->size);
        assert(f->offset);

        hi16_zmm = buf + f->offset;

        memcpy(&hi16_zmm->hi16_zmm, &env->xmm_regs[16],
               16 * sizeof(env->xmm_regs[16]));
#endif
    }

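    /* Protection Keys: the PKRU register (64-bit targets only). */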
#ifdef TARGET_X86_64
    e = &x86_ext_save_areas[XSTATE_PKRU_BIT];
    if (e->size && e->offset) {
        XSavePKRU *pkru = buf + e->offset;

        memcpy(pkru, &env->pkru, sizeof(env->pkru));
    }
#endif
}

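/*
 * x86_cpu_xrstor_all_areas: the inverse of x86_cpu_xsave_all_areas; load
 * CPUX86State from an XSAVE-format buffer, component by component.
 */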
void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen)
{
    CPUX86State *env = &cpu->env;
    const ExtSaveArea *e, *f, *g;
    int i;

    const X86LegacyXSaveArea *legacy;
    const X86XSaveHeader *header;
    uint16_t cwd, swd, twd;

    e = &x86_ext_save_areas[XSTATE_FP_BIT];

    legacy = buf + e->offset;
    header = buf + e->offset + sizeof(*legacy);

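    /*
     * Unpack the legacy FXSAVE image: recover the stack-top index from
     * status-word bits 11..13 and expand the abridged tag word
     * (bit set = register valid) back into fptags[].
     */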
    cwd = legacy->fcw;
    swd = legacy->fsw;
    twd = legacy->ftw;
    env->fpop = legacy->fpop;
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((twd >> i) & 1);
    }
    env->fpip = legacy->fpip;
    env->fpdp = legacy->fpdp;
    env->mxcsr = legacy->mxcsr;
    memcpy(env->fpregs, &legacy->fpregs,
           sizeof(env->fpregs));

    for (i = 0; i < CPU_NB_REGS; i++) {
        const uint8_t *xmm = legacy->xmm_regs[i];

        env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
        env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm + 8);
    }

    env->xstate_bv = header->xstate_bv;

    e = &x86_ext_save_areas[XSTATE_YMM_BIT];
    if (e->size && e->offset) {
        const XSaveAVX *avx;

        avx = buf + e->offset;
        for (i = 0; i < CPU_NB_REGS; i++) {
            const uint8_t *ymmh = avx->ymmh[i];

            env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
            env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh + 8);
        }
    }

    e = &x86_ext_save_areas[XSTATE_BNDREGS_BIT];
    if (e->size && e->offset) {
        const XSaveBNDREG *bndreg;
        const XSaveBNDCSR *bndcsr;

        f = &x86_ext_save_areas[XSTATE_BNDCSR_BIT];
        assert(f->size);
        assert(f->offset);

        bndreg = buf + e->offset;
        bndcsr = buf + f->offset;

        memcpy(env->bnd_regs, &bndreg->bnd_regs,
               sizeof(env->bnd_regs));
        env->bndcs_regs = bndcsr->bndcsr;
    }

    e = &x86_ext_save_areas[XSTATE_OPMASK_BIT];
    if (e->size && e->offset) {
        const XSaveOpmask *opmask;
        const XSaveZMM_Hi256 *zmm_hi256;
#ifdef TARGET_X86_64
        const XSaveHi16_ZMM *hi16_zmm;
#endif

        f = &x86_ext_save_areas[XSTATE_ZMM_Hi256_BIT];
        assert(f->size);
        assert(f->offset);

        g = &x86_ext_save_areas[XSTATE_Hi16_ZMM_BIT];
        assert(g->size);
        assert(g->offset);

        opmask = buf + e->offset;
        zmm_hi256 = buf + f->offset;
#ifdef TARGET_X86_64
        hi16_zmm = buf + g->offset;
#endif

        memcpy(env->opmask_regs, &opmask->opmask_regs,
               sizeof(env->opmask_regs));

        for (i = 0; i < CPU_NB_REGS; i++) {
            const uint8_t *zmmh = zmm_hi256->zmm_hi256[i];

            env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
            env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh + 8);
            env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh + 16);
            env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh + 24);
        }

#ifdef TARGET_X86_64
        memcpy(&env->xmm_regs[16], &hi16_zmm->hi16_zmm,
               16 * sizeof(env->xmm_regs[16]));
#endif
    }

#ifdef TARGET_X86_64
    e = &x86_ext_save_areas[XSTATE_PKRU_BIT];
    if (e->size && e->offset) {
        const XSavePKRU *pkru;

        pkru = buf + e->offset;
        memcpy(&env->pkru, pkru, sizeof(env->pkru));
    }
#endif
}

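/*
 * Usage sketch (illustrative only, not part of this file): accelerator glue
 * is expected to pass a buffer sized for the guest's XSAVE area; the field
 * names below are assumptions about the caller, not defined here.
 *
 *     x86_cpu_xsave_all_areas(cpu, env->xsave_buf, env->xsave_buf_len);
 *     // ... hand the buffer to the hypervisor, run, read it back ...
 *     x86_cpu_xrstor_all_areas(cpu, env->xsave_buf, env->xsave_buf_len);
 */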