qemu/linux-user/aarch64/target_prctl.h
/*
 * AArch64 specific prctl functions for linux-user
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#ifndef AARCH64_TARGET_PRCTL_H
#define AARCH64_TARGET_PRCTL_H

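/*
 * Each helper below implements one AArch64-specific prctl() option for
 * linux-user emulation.  The self-referential "#define foo foo" after
 * each function marks the helper as implemented, so the generic prctl
 * dispatch in linux-user/syscall.c can detect at preprocessing time
 * which helpers this target provides and fall back to a stub otherwise.
 */
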
static abi_long do_prctl_sve_get_vl(CPUArchState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    if (cpu_isar_feature(aa64_sve, cpu)) {
        /* PSTATE.SM is always unset on syscall entry. */
        return sve_vq(env) * 16;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_sve_get_vl do_prctl_sve_get_vl
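
/*
 * Illustrative guest-side use of the helper above (not part of this
 * header): a guest reads its current SVE vector length in bytes with
 *
 *     long vl = prctl(PR_SVE_GET_VL);
 *
 * which reaches do_prctl_sve_get_vl and yields e.g. 16 for VQ=1, or
 * -EINVAL when the emulated cpu lacks SVE.
 */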

static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2)
{
    /*
     * We cannot support either PR_SVE_SET_VL_ONEXEC or PR_SVE_VL_INHERIT.
     * Note the kernel definition of sve_vl_valid allows for VQ=512,
     * i.e. VL=8192, even though the current architectural maximum is VQ=16.
     */
    if (cpu_isar_feature(aa64_sve, env_archcpu(env))
        && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
        uint32_t vq, old_vq;

        /* PSTATE.SM is always unset on syscall entry. */
        old_vq = sve_vq(env);

        /*
         * Bound the value of arg2, so that we know that it fits into
         * the 4-bit field in ZCR_EL1.  Rely on the hflags rebuild to
         * sort out the length supported by the cpu.
         */
        vq = MAX(arg2 / 16, 1);
        vq = MIN(vq, ARM_MAX_VQ);
        env->vfp.zcr_el[1] = vq - 1;
        arm_rebuild_hflags(env);

        vq = sve_vq(env);
        if (vq < old_vq) {
            aarch64_sve_narrow_vq(env, vq);
        }
        return vq * 16;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_sve_set_vl do_prctl_sve_set_vl
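
/*
 * Illustrative guest-side use (not part of this header):
 *
 *     long vl = prctl(PR_SVE_SET_VL, 64);  // request VL = 64 bytes (VQ = 4)
 *
 * The value returned by the helper above is the vector length actually
 * granted, which may be smaller than requested when the emulated cpu
 * supports fewer quadwords; the 64 is only an example value.
 */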

static abi_long do_prctl_sme_get_vl(CPUArchState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    if (cpu_isar_feature(aa64_sme, cpu)) {
        return sme_vq(env) * 16;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_sme_get_vl do_prctl_sme_get_vl
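
/*
 * Illustrative guest-side use (not part of this header):
 *
 *     long svl = prctl(PR_SME_GET_VL);
 *
 * returns the current streaming (SME) vector length in bytes, or
 * -EINVAL when the emulated cpu lacks SME.
 */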

static abi_long do_prctl_sme_set_vl(CPUArchState *env, abi_long arg2)
{
    /*
     * We cannot support either PR_SME_SET_VL_ONEXEC or PR_SME_VL_INHERIT.
     * Note the kernel definition of sve_vl_valid allows for VQ=512,
     * i.e. VL=8192, even though the architectural maximum is VQ=16.
     */
    if (cpu_isar_feature(aa64_sme, env_archcpu(env))
        && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
        int vq, old_vq;

        old_vq = sme_vq(env);

        /*
         * Bound the value of vq, so that we know that it fits into
         * the 4-bit field in SMCR_EL1.  Because PSTATE.SM is cleared
         * on syscall entry, we are not modifying the current SVE
         * vector length.
         */
        vq = MAX(arg2 / 16, 1);
        vq = MIN(vq, 16);
        env->vfp.smcr_el[1] =
            FIELD_DP64(env->vfp.smcr_el[1], SMCR, LEN, vq - 1);

        /* Delay rebuilding hflags until we know if ZA must change. */
        vq = sve_vqm1_for_el_sm(env, 0, true) + 1;

        if (vq != old_vq) {
            /*
             * PSTATE.ZA state is cleared on any change to SVL.
             * Clearing ZA (and the new SVL) is reflected in the cached
             * hflags, so rebuild them here.  The current SVE vector
             * length is unaffected because PSTATE.SM was cleared on
             * syscall entry.
             */
            env->svcr = FIELD_DP64(env->svcr, SVCR, ZA, 0);
            arm_rebuild_hflags(env);
        }
        return vq * 16;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_sme_set_vl do_prctl_sme_set_vl
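
/*
 * Illustrative guest-side use (not part of this header):
 *
 *     long svl = prctl(PR_SME_SET_VL, 32);  // request SVL = 32 bytes (VQ = 2)
 *
 * As above, the helper returns the streaming vector length actually
 * granted; note that any change of SVL discards the guest's ZA state,
 * matching the kernel's behaviour.
 */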

static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2)
{
    ARMCPU *cpu = env_archcpu(env);

    if (cpu_isar_feature(aa64_pauth, cpu)) {
        int all = (PR_PAC_APIAKEY | PR_PAC_APIBKEY |
                   PR_PAC_APDAKEY | PR_PAC_APDBKEY | PR_PAC_APGAKEY);
        int ret = 0;
        Error *err = NULL;

        if (arg2 == 0) {
            arg2 = all;
        } else if (arg2 & ~all) {
            return -TARGET_EINVAL;
        }
        if (arg2 & PR_PAC_APIAKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apia,
                                        sizeof(ARMPACKey), &err);
        }
        if (arg2 & PR_PAC_APIBKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apib,
                                        sizeof(ARMPACKey), &err);
        }
        if (arg2 & PR_PAC_APDAKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apda,
                                        sizeof(ARMPACKey), &err);
        }
        if (arg2 & PR_PAC_APDBKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apdb,
                                        sizeof(ARMPACKey), &err);
        }
        if (arg2 & PR_PAC_APGAKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apga,
                                        sizeof(ARMPACKey), &err);
        }
        if (ret != 0) {
            /*
             * Some unknown failure in the crypto.  The best
             * we can do is log it and fail the syscall.
             * The real syscall cannot fail this way.
             */
            qemu_log_mask(LOG_UNIMP, "PR_PAC_RESET_KEYS: Crypto failure: %s",
                          error_get_pretty(err));
            error_free(err);
            return -TARGET_EIO;
        }
        return 0;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_reset_keys do_prctl_reset_keys
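
/*
 * Illustrative guest-side use (not part of this header): a guest asks
 * for fresh pointer-authentication keys with
 *
 *     prctl(PR_PAC_RESET_KEYS, 0, 0, 0, 0);                // all keys
 *     prctl(PR_PAC_RESET_KEYS, PR_PAC_APIAKEY, 0, 0, 0);   // just APIAKey
 *
 * where a zero mask selects every key, as handled above.
 */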

static abi_long do_prctl_set_tagged_addr_ctrl(CPUArchState *env, abi_long arg2)
{
    abi_ulong valid_mask = PR_TAGGED_ADDR_ENABLE;
    ARMCPU *cpu = env_archcpu(env);

    if (cpu_isar_feature(aa64_mte, cpu)) {
        valid_mask |= PR_MTE_TCF_MASK;
        valid_mask |= PR_MTE_TAG_MASK;
    }

    if (arg2 & ~valid_mask) {
        return -TARGET_EINVAL;
    }
    env->tagged_addr_enable = arg2 & PR_TAGGED_ADDR_ENABLE;

    if (cpu_isar_feature(aa64_mte, cpu)) {
        switch (arg2 & PR_MTE_TCF_MASK) {
        case PR_MTE_TCF_NONE:
        case PR_MTE_TCF_SYNC:
        case PR_MTE_TCF_ASYNC:
            break;
        default:
            return -TARGET_EINVAL;
        }

        /*
         * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
         * Note that the syscall values are consistent with hw.
         */
        env->cp15.sctlr_el[1] =
            deposit64(env->cp15.sctlr_el[1], 38, 2, arg2 >> PR_MTE_TCF_SHIFT);

        /*
         * Write PR_MTE_TAG to GCR_EL1[Exclude].
         * Note that the syscall uses an include mask,
         * and hardware uses an exclude mask -- invert.
         */
        env->cp15.gcr_el1 =
            deposit64(env->cp15.gcr_el1, 0, 16, ~arg2 >> PR_MTE_TAG_SHIFT);
        arm_rebuild_hflags(env);
    }
    return 0;
}
#define do_prctl_set_tagged_addr_ctrl do_prctl_set_tagged_addr_ctrl
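
/*
 * Illustrative guest-side use (not part of this header), mirroring the
 * example in the kernel's MTE documentation:
 *
 *     prctl(PR_SET_TAGGED_ADDR_CTRL,
 *           PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
 *           (0xfffe << PR_MTE_TAG_SHIFT),
 *           0, 0, 0);
 *
 * enables tagged addresses, synchronous tag check faults, and allows
 * IRG to generate any tag except 0.
 */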

static abi_long do_prctl_get_tagged_addr_ctrl(CPUArchState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    abi_long ret = 0;

    if (env->tagged_addr_enable) {
        ret |= PR_TAGGED_ADDR_ENABLE;
    }
    if (cpu_isar_feature(aa64_mte, cpu)) {
        /* See do_prctl_set_tagged_addr_ctrl. */
        ret |= extract64(env->cp15.sctlr_el[1], 38, 2) << PR_MTE_TCF_SHIFT;
        ret = deposit64(ret, PR_MTE_TAG_SHIFT, 16, ~env->cp15.gcr_el1);
    }
    return ret;
}
#define do_prctl_get_tagged_addr_ctrl do_prctl_get_tagged_addr_ctrl
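
/*
 * Illustrative guest-side use (not part of this header):
 *
 *     long ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
 *
 * returns the same bit layout accepted by PR_SET_TAGGED_ADDR_CTRL,
 * reconstructed above from SCTLR_EL1.TCF0 and GCR_EL1.Exclude.
 */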

#endif /* AARCH64_TARGET_PRCTL_H */