qemu/target/i386/xsave_helper.c
/*
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"

#include "cpu.h"

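/*
 * Pack the guest FPU, SSE and extended processor state from CPUX86State
 * into an XSAVE-format buffer of buflen bytes, one state component at a
 * time.
 */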
void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen)
{
    CPUX86State *env = &cpu->env;
    const ExtSaveArea *e, *f;
    int i;

    X86LegacyXSaveArea *legacy;
    X86XSaveHeader *header;
    uint16_t cwd, swd, twd;

    memset(buf, 0, buflen);

    e = &x86_ext_save_areas[XSTATE_FP_BIT];

    legacy = buf + e->offset;
    header = buf + e->offset + sizeof(*legacy);

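    /*
     * Legacy FXSAVE region: fold the FPU top-of-stack pointer into the
     * status word and convert the per-register tag flags into the
     * abridged tag word (one "valid" bit per x87 register).
     */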
    twd = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i) {
        twd |= (!env->fptags[i]) << i;
    }
    legacy->fcw = cwd;
    legacy->fsw = swd;
    legacy->ftw = twd;
    legacy->fpop = env->fpop;
    legacy->fpip = env->fpip;
    legacy->fpdp = env->fpdp;
    memcpy(&legacy->fpregs, env->fpregs,
           sizeof(env->fpregs));
    legacy->mxcsr = env->mxcsr;

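    /* Low 128 bits of each vector register live in the legacy XMM slots. */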
    for (i = 0; i < CPU_NB_REGS; i++) {
        uint8_t *xmm = legacy->xmm_regs[i];

        stq_p(xmm,     env->xmm_regs[i].ZMM_Q(0));
        stq_p(xmm + 8, env->xmm_regs[i].ZMM_Q(1));
    }

    header->xstate_bv = env->xstate_bv;

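    /* AVX: upper 128 bits (the YMM high halves) of each vector register. */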
    e = &x86_ext_save_areas[XSTATE_YMM_BIT];
    if (e->size && e->offset) {
        XSaveAVX *avx;

        avx = buf + e->offset;

        for (i = 0; i < CPU_NB_REGS; i++) {
            uint8_t *ymmh = avx->ymmh[i];

            stq_p(ymmh,     env->xmm_regs[i].ZMM_Q(2));
            stq_p(ymmh + 8, env->xmm_regs[i].ZMM_Q(3));
        }
    }

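    /* MPX: bound registers plus the bounds configuration/status register. */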
    e = &x86_ext_save_areas[XSTATE_BNDREGS_BIT];
    if (e->size && e->offset) {
        XSaveBNDREG *bndreg;
        XSaveBNDCSR *bndcsr;

        f = &x86_ext_save_areas[XSTATE_BNDCSR_BIT];
        assert(f->size);
        assert(f->offset);

        bndreg = buf + e->offset;
        bndcsr = buf + f->offset;

        memcpy(&bndreg->bnd_regs, env->bnd_regs,
               sizeof(env->bnd_regs));
        bndcsr->bndcsr = env->bndcs_regs;
    }

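    /*
     * AVX-512: opmask registers, the upper 256 bits of ZMM0-ZMM15 and,
     * on 64-bit targets, ZMM16-ZMM31 in the Hi16_ZMM component.
     */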
    e = &x86_ext_save_areas[XSTATE_OPMASK_BIT];
    if (e->size && e->offset) {
        XSaveOpmask *opmask;
        XSaveZMM_Hi256 *zmm_hi256;
#ifdef TARGET_X86_64
        XSaveHi16_ZMM *hi16_zmm;
#endif

        f = &x86_ext_save_areas[XSTATE_ZMM_Hi256_BIT];
        assert(f->size);
        assert(f->offset);

        opmask = buf + e->offset;
        zmm_hi256 = buf + f->offset;

        memcpy(&opmask->opmask_regs, env->opmask_regs,
               sizeof(env->opmask_regs));

        for (i = 0; i < CPU_NB_REGS; i++) {
            uint8_t *zmmh = zmm_hi256->zmm_hi256[i];

            stq_p(zmmh,      env->xmm_regs[i].ZMM_Q(4));
            stq_p(zmmh + 8,  env->xmm_regs[i].ZMM_Q(5));
            stq_p(zmmh + 16, env->xmm_regs[i].ZMM_Q(6));
            stq_p(zmmh + 24, env->xmm_regs[i].ZMM_Q(7));
        }

#ifdef TARGET_X86_64
        f = &x86_ext_save_areas[XSTATE_Hi16_ZMM_BIT];
        assert(f->size);
        assert(f->offset);

        hi16_zmm = buf + f->offset;

        memcpy(&hi16_zmm->hi16_zmm, &env->xmm_regs[16],
               16 * sizeof(env->xmm_regs[16]));
#endif
    }

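    /* Components that exist only on 64-bit targets: PKRU and AMX tile state. */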
#ifdef TARGET_X86_64
    e = &x86_ext_save_areas[XSTATE_PKRU_BIT];
    if (e->size && e->offset) {
        XSavePKRU *pkru = buf + e->offset;

        memcpy(pkru, &env->pkru, sizeof(env->pkru));
    }

    e = &x86_ext_save_areas[XSTATE_XTILE_CFG_BIT];
    if (e->size && e->offset) {
        XSaveXTILECFG *tilecfg = buf + e->offset;

        memcpy(tilecfg, &env->xtilecfg, sizeof(env->xtilecfg));
    }

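    /*
     * Tile data is copied only when the caller's buffer is large enough
     * to hold the XTILEDATA component.
     */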
    e = &x86_ext_save_areas[XSTATE_XTILE_DATA_BIT];
    if (e->size && e->offset && buflen >= e->size + e->offset) {
        XSaveXTILEDATA *tiledata = buf + e->offset;

        memcpy(tiledata, &env->xtiledata, sizeof(env->xtiledata));
    }
#endif
}

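/*
 * Unpack an XSAVE-format buffer of buflen bytes back into CPUX86State,
 * mirroring x86_cpu_xsave_all_areas() component by component.
 */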
void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen)
{
    CPUX86State *env = &cpu->env;
    const ExtSaveArea *e, *f, *g;
    int i;

    const X86LegacyXSaveArea *legacy;
    const X86XSaveHeader *header;
    uint16_t cwd, swd, twd;

    e = &x86_ext_save_areas[XSTATE_FP_BIT];

    legacy = buf + e->offset;
    header = buf + e->offset + sizeof(*legacy);

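    /*
     * Legacy FXSAVE region: recover the FPU top-of-stack pointer from the
     * status word and expand the abridged tag word back into per-register
     * tag flags.
     */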
    cwd = legacy->fcw;
    swd = legacy->fsw;
    twd = legacy->ftw;
    env->fpop = legacy->fpop;
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((twd >> i) & 1);
    }
    env->fpip = legacy->fpip;
    env->fpdp = legacy->fpdp;
    env->mxcsr = legacy->mxcsr;
    memcpy(env->fpregs, &legacy->fpregs,
           sizeof(env->fpregs));

    for (i = 0; i < CPU_NB_REGS; i++) {
        const uint8_t *xmm = legacy->xmm_regs[i];

        env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
        env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm + 8);
    }

    env->xstate_bv = header->xstate_bv;

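    /* AVX: restore the upper 128 bits of each vector register. */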
    e = &x86_ext_save_areas[XSTATE_YMM_BIT];
    if (e->size && e->offset) {
        const XSaveAVX *avx;

        avx = buf + e->offset;
        for (i = 0; i < CPU_NB_REGS; i++) {
            const uint8_t *ymmh = avx->ymmh[i];

            env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
            env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh + 8);
        }
    }

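    /* MPX: restore the bound registers and BNDCSR. */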
    e = &x86_ext_save_areas[XSTATE_BNDREGS_BIT];
    if (e->size && e->offset) {
        const XSaveBNDREG *bndreg;
        const XSaveBNDCSR *bndcsr;

        f = &x86_ext_save_areas[XSTATE_BNDCSR_BIT];
        assert(f->size);
        assert(f->offset);

        bndreg = buf + e->offset;
        bndcsr = buf + f->offset;

        memcpy(env->bnd_regs, &bndreg->bnd_regs,
               sizeof(env->bnd_regs));
        env->bndcs_regs = bndcsr->bndcsr;
    }

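    /*
     * AVX-512: restore the opmask registers, the upper 256 bits of
     * ZMM0-ZMM15 and, on 64-bit targets, ZMM16-ZMM31.
     */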
    e = &x86_ext_save_areas[XSTATE_OPMASK_BIT];
    if (e->size && e->offset) {
        const XSaveOpmask *opmask;
        const XSaveZMM_Hi256 *zmm_hi256;
#ifdef TARGET_X86_64
        const XSaveHi16_ZMM *hi16_zmm;
#endif

        f = &x86_ext_save_areas[XSTATE_ZMM_Hi256_BIT];
        assert(f->size);
        assert(f->offset);

        g = &x86_ext_save_areas[XSTATE_Hi16_ZMM_BIT];
        assert(g->size);
        assert(g->offset);

        opmask = buf + e->offset;
        zmm_hi256 = buf + f->offset;
#ifdef TARGET_X86_64
        hi16_zmm = buf + g->offset;
#endif

        memcpy(env->opmask_regs, &opmask->opmask_regs,
               sizeof(env->opmask_regs));

        for (i = 0; i < CPU_NB_REGS; i++) {
            const uint8_t *zmmh = zmm_hi256->zmm_hi256[i];

            env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
            env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh + 8);
            env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh + 16);
            env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh + 24);
        }

#ifdef TARGET_X86_64
        memcpy(&env->xmm_regs[16], &hi16_zmm->hi16_zmm,
               16 * sizeof(env->xmm_regs[16]));
#endif
    }

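    /* Components that exist only on 64-bit targets: PKRU and AMX tile state. */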
#ifdef TARGET_X86_64
    e = &x86_ext_save_areas[XSTATE_PKRU_BIT];
    if (e->size && e->offset) {
        const XSavePKRU *pkru;

        pkru = buf + e->offset;
        memcpy(&env->pkru, pkru, sizeof(env->pkru));
    }

    e = &x86_ext_save_areas[XSTATE_XTILE_CFG_BIT];
    if (e->size && e->offset) {
        const XSaveXTILECFG *tilecfg = buf + e->offset;

        memcpy(&env->xtilecfg, tilecfg, sizeof(env->xtilecfg));
    }

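    /*
     * Tile data is restored only when the source buffer is large enough
     * to hold the XTILEDATA component.
     */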
    e = &x86_ext_save_areas[XSTATE_XTILE_DATA_BIT];
    if (e->size && e->offset && buflen >= e->size + e->offset) {
        const XSaveXTILEDATA *tiledata = buf + e->offset;

        memcpy(&env->xtiledata, tiledata, sizeof(env->xtiledata));
    }
#endif
}