qemu/linux-user/aarch64/signal.c
/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
#include "signal-common.h"
#include "linux-user/trace.h"

struct target_sigcontext {
    uint64_t fault_address;
    /* AArch64 registers */
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
    /* 4K reserved for FP/SIMD state and future expansion */
    char __reserved[4096] __attribute__((__aligned__(16)));
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    /* glibc uses a 1024-bit sigset_t */
    char __unused[1024 / 8 - sizeof(target_sigset_t)];
    /* last for future expansion */
    struct target_sigcontext tuc_mcontext;
};

/*
 * Header to be used at the beginning of structures extending the user
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
 */
struct target_aarch64_ctx {
    uint32_t magic;
    uint32_t size;
};

#define TARGET_FPSIMD_MAGIC 0x46508001

struct target_fpsimd_context {
    struct target_aarch64_ctx head;
    uint32_t fpsr;
    uint32_t fpcr;
    uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
};

#define TARGET_EXTRA_MAGIC  0x45585401

struct target_extra_context {
    struct target_aarch64_ctx head;
    uint64_t datap; /* 16-byte aligned pointer to extra space cast to __u64 */
    uint32_t size; /* size in bytes of the extra space */
    uint32_t reserved[3];
};

#define TARGET_SVE_MAGIC    0x53564501

struct target_sve_context {
    struct target_aarch64_ctx head;
    uint16_t vl;
    uint16_t flags;
    uint16_t reserved[2];
    /* The actual SVE data immediately follows.  It is laid out
     * according to TARGET_SVE_SIG_{Z,P}REG_OFFSET, based off of
     * the original struct pointer.
     */
};

#define TARGET_SVE_VQ_BYTES  16

#define TARGET_SVE_SIG_ZREG_SIZE(VQ)  ((VQ) * TARGET_SVE_VQ_BYTES)
#define TARGET_SVE_SIG_PREG_SIZE(VQ)  ((VQ) * (TARGET_SVE_VQ_BYTES / 8))

#define TARGET_SVE_SIG_REGS_OFFSET \
    QEMU_ALIGN_UP(sizeof(struct target_sve_context), TARGET_SVE_VQ_BYTES)
#define TARGET_SVE_SIG_ZREG_OFFSET(VQ, N) \
    (TARGET_SVE_SIG_REGS_OFFSET + TARGET_SVE_SIG_ZREG_SIZE(VQ) * (N))
#define TARGET_SVE_SIG_PREG_OFFSET(VQ, N) \
    (TARGET_SVE_SIG_ZREG_OFFSET(VQ, 32) + TARGET_SVE_SIG_PREG_SIZE(VQ) * (N))
#define TARGET_SVE_SIG_FFR_OFFSET(VQ) \
    (TARGET_SVE_SIG_PREG_OFFSET(VQ, 16))
#define TARGET_SVE_SIG_CONTEXT_SIZE(VQ) \
    (TARGET_SVE_SIG_PREG_OFFSET(VQ, 17))

#define TARGET_SVE_SIG_FLAG_SM  1

#define TARGET_ZA_MAGIC        0x54366345

struct target_za_context {
    struct target_aarch64_ctx head;
    uint16_t vl;
    uint16_t reserved[3];
    /* The actual ZA data immediately follows. */
};

#define TARGET_ZA_SIG_REGS_OFFSET \
    QEMU_ALIGN_UP(sizeof(struct target_za_context), TARGET_SVE_VQ_BYTES)
#define TARGET_ZA_SIG_ZAV_OFFSET(VQ, N) \
    (TARGET_ZA_SIG_REGS_OFFSET + (VQ) * TARGET_SVE_VQ_BYTES * (N))
#define TARGET_ZA_SIG_CONTEXT_SIZE(VQ) \
    TARGET_ZA_SIG_ZAV_OFFSET(VQ, VQ * TARGET_SVE_VQ_BYTES)

struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
};

struct target_rt_frame_record {
    uint64_t fp;
    uint64_t lr;
};

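/*
 * Fill in the core of the signal frame: flags, link, the alternate
 * signal stack, the 31 general registers plus SP, PC and PSTATE,
 * the fault address, and the saved signal mask.
 */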
static void target_setup_general_frame(struct target_rt_sigframe *sf,
                                       CPUARMState *env, target_sigset_t *set)
{
    int i;

    __put_user(0, &sf->uc.tuc_flags);
    __put_user(0, &sf->uc.tuc_link);

    target_save_altstack(&sf->uc.tuc_stack, env);

    for (i = 0; i < 31; i++) {
        __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }
    __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);

    __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
    }
}

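/*
 * Write the FP/SIMD record: FPSR, FPCR and the 32 vector registers.
 * Each vreg is a 128-bit value in target byte order, so on a
 * big-endian target the two 64-bit halves trade places.
 */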
static void target_setup_fpsimd_record(struct target_fpsimd_context *fpsimd,
                                       CPUARMState *env)
{
    int i;

    __put_user(TARGET_FPSIMD_MAGIC, &fpsimd->head.magic);
    __put_user(sizeof(struct target_fpsimd_context), &fpsimd->head.size);
    __put_user(vfp_get_fpsr(env), &fpsimd->fpsr);
    __put_user(vfp_get_fpcr(env), &fpsimd->fpcr);

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#if TARGET_BIG_ENDIAN
        __put_user(q[0], &fpsimd->vregs[i * 2 + 1]);
        __put_user(q[1], &fpsimd->vregs[i * 2]);
#else
        __put_user(q[0], &fpsimd->vregs[i * 2]);
        __put_user(q[1], &fpsimd->vregs[i * 2 + 1]);
#endif
    }
}

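/*
 * Write an EXTRA record pointing at the overflow area that holds
 * any records which did not fit in the 4K __reserved space.
 */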
static void target_setup_extra_record(struct target_extra_context *extra,
                                      uint64_t datap, uint32_t extra_size)
{
    __put_user(TARGET_EXTRA_MAGIC, &extra->head.magic);
    __put_user(sizeof(struct target_extra_context), &extra->head.size);
    __put_user(datap, &extra->datap);
    __put_user(extra_size, &extra->size);
}

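/* Write the terminator record: magic and size both zero. */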
static void target_setup_end_record(struct target_aarch64_ctx *end)
{
    __put_user(0, &end->magic);
    __put_user(0, &end->size);
}

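/*
 * Write the SVE record: vector length, the streaming-mode flag, and
 * the Z, P and FFR registers at their ABI-defined offsets.
 */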
static void target_setup_sve_record(struct target_sve_context *sve,
                                    CPUARMState *env, int size)
{
    int i, j, vq = sve_vq(env);

    memset(sve, 0, sizeof(*sve));
    __put_user(TARGET_SVE_MAGIC, &sve->head.magic);
    __put_user(size, &sve->head.size);
    __put_user(vq * TARGET_SVE_VQ_BYTES, &sve->vl);
    if (FIELD_EX64(env->svcr, SVCR, SM)) {
        __put_user(TARGET_SVE_SIG_FLAG_SM, &sve->flags);
    }

    /* Note that SVE regs are stored as a byte stream, with each byte element
     * at a subsequent address.  This corresponds to a little-endian store
     * of our 64-bit hunks.
     */
    for (i = 0; i < 32; ++i) {
        uint64_t *z = (void *)sve + TARGET_SVE_SIG_ZREG_OFFSET(vq, i);
        for (j = 0; j < vq * 2; ++j) {
            __put_user_e(env->vfp.zregs[i].d[j], z + j, le);
        }
    }
    for (i = 0; i <= 16; ++i) {
        uint16_t *p = (void *)sve + TARGET_SVE_SIG_PREG_OFFSET(vq, i);
        for (j = 0; j < vq; ++j) {
            uint64_t r = env->vfp.pregs[i].p[j >> 2];
            __put_user_e(r >> ((j & 3) * 16), p + j, le);
        }
    }
}

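/*
 * Write the ZA record.  When PSTATE.ZA is off only the header goes
 * out (size == TARGET_ZA_SIG_CONTEXT_SIZE(0)); otherwise the full
 * ZA storage follows, one horizontal vector per row.
 */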
static void target_setup_za_record(struct target_za_context *za,
                                   CPUARMState *env, int size)
{
    int vq = sme_vq(env);
    int vl = vq * TARGET_SVE_VQ_BYTES;
    int i, j;

    memset(za, 0, sizeof(*za));
    __put_user(TARGET_ZA_MAGIC, &za->head.magic);
    __put_user(size, &za->head.size);
    __put_user(vl, &za->vl);

    if (size == TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
        return;
    }
    assert(size == TARGET_ZA_SIG_CONTEXT_SIZE(vq));

    /*
     * Note that ZA vectors are stored as a byte stream,
     * with each byte element at a subsequent address.
     */
    for (i = 0; i < vl; ++i) {
        uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
        for (j = 0; j < vq * 2; ++j) {
            __put_user_e(env->zarray[i].d[j], z + j, le);
        }
    }
}

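/*
 * Restore the signal mask, general registers, SP, PC and PSTATE
 * from the ucontext.
 */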
static void target_restore_general_frame(CPUARMState *env,
                                         struct target_rt_sigframe *sf)
{
    sigset_t set;
    uint64_t pstate;
    int i;

    target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
    set_sigmask(&set);

    for (i = 0; i < 31; i++) {
        __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }

    __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
    pstate_write(env, pstate);
}

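/*
 * Restore FPSR, FPCR and the 32 vector registers, mirroring the
 * half-swap done in target_setup_fpsimd_record.
 */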
static void target_restore_fpsimd_record(CPUARMState *env,
                                         struct target_fpsimd_context *fpsimd)
{
    uint32_t fpsr, fpcr;
    int i;

    __get_user(fpsr, &fpsimd->fpsr);
    vfp_set_fpsr(env, fpsr);
    __get_user(fpcr, &fpsimd->fpcr);
    vfp_set_fpcr(env, fpcr);

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#if TARGET_BIG_ENDIAN
        __get_user(q[0], &fpsimd->vregs[i * 2 + 1]);
        __get_user(q[1], &fpsimd->vregs[i * 2]);
#else
        __get_user(q[0], &fpsimd->vregs[i * 2]);
        __get_user(q[1], &fpsimd->vregs[i * 2 + 1]);
#endif
    }
}

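/*
 * Restore SVE state from the record.  Returns false, and the caller
 * delivers SIGSEGV, if the record is malformed: an unsupported mode,
 * a mismatched vector length, or a truncated body.
 */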
static bool target_restore_sve_record(CPUARMState *env,
                                      struct target_sve_context *sve,
                                      int size, int *svcr)
{
    int i, j, vl, vq, flags;
    bool sm;

    __get_user(vl, &sve->vl);
    __get_user(flags, &sve->flags);

    sm = flags & TARGET_SVE_SIG_FLAG_SM;

    /* The cpu must support Streaming or Non-streaming SVE. */
    if (sm
        ? !cpu_isar_feature(aa64_sme, env_archcpu(env))
        : !cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        return false;
    }

    /*
     * Note that we cannot use sve_vq() because that depends on the
     * current setting of PSTATE.SM, not the state to be restored.
     */
    vq = sve_vqm1_for_el_sm(env, 0, sm) + 1;

    /* Reject mismatched VL. */
    if (vl != vq * TARGET_SVE_VQ_BYTES) {
        return false;
    }

    /* Accept empty record -- used to clear PSTATE.SM. */
    if (size <= sizeof(*sve)) {
        return true;
    }

    /* Reject non-empty but incomplete record. */
    if (size < TARGET_SVE_SIG_CONTEXT_SIZE(vq)) {
        return false;
    }

    *svcr = FIELD_DP64(*svcr, SVCR, SM, sm);

    /*
     * Note that SVE regs are stored as a byte stream, with each byte element
     * at a subsequent address.  This corresponds to a little-endian load
     * of our 64-bit hunks.
     */
    for (i = 0; i < 32; ++i) {
        uint64_t *z = (void *)sve + TARGET_SVE_SIG_ZREG_OFFSET(vq, i);
        for (j = 0; j < vq * 2; ++j) {
            __get_user_e(env->vfp.zregs[i].d[j], z + j, le);
        }
    }
    for (i = 0; i <= 16; ++i) {
        uint16_t *p = (void *)sve + TARGET_SVE_SIG_PREG_OFFSET(vq, i);
        for (j = 0; j < vq; ++j) {
            uint16_t r;
            __get_user_e(r, p + j, le);
            if (j & 3) {
                env->vfp.pregs[i].p[j >> 2] |= (uint64_t)r << ((j & 3) * 16);
            } else {
                env->vfp.pregs[i].p[j >> 2] = r;
            }
        }
    }
    return true;
}

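/*
 * Restore ZA state from the record, setting SVCR.ZA if the record
 * carries data.  An empty record merely leaves PSTATE.ZA clear.
 */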
static bool target_restore_za_record(CPUARMState *env,
                                     struct target_za_context *za,
                                     int size, int *svcr)
{
    int i, j, vl, vq;

    if (!cpu_isar_feature(aa64_sme, env_archcpu(env))) {
        return false;
    }

    __get_user(vl, &za->vl);
    vq = sme_vq(env);

    /* Reject mismatched VL. */
    if (vl != vq * TARGET_SVE_VQ_BYTES) {
        return false;
    }

    /* Accept empty record -- used to clear PSTATE.ZA. */
    if (size <= TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
        return true;
    }

    /* Reject non-empty but incomplete record. */
    if (size < TARGET_ZA_SIG_CONTEXT_SIZE(vq)) {
        return false;
    }

    *svcr = FIELD_DP64(*svcr, SVCR, ZA, 1);

    for (i = 0; i < vl; ++i) {
        uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
        for (j = 0; j < vq * 2; ++j) {
            __get_user_e(env->zarray[i].d[j], z + j, le);
        }
    }
    return true;
}

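/*
 * Walk the records in __reserved, following an EXTRA record at most
 * once, validate each against its magic and size, then restore them.
 * Returns 0 on success, 1 on any malformed frame.
 */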
static int target_restore_sigframe(CPUARMState *env,
                                   struct target_rt_sigframe *sf)
{
    struct target_aarch64_ctx *ctx, *extra = NULL;
    struct target_fpsimd_context *fpsimd = NULL;
    struct target_sve_context *sve = NULL;
    struct target_za_context *za = NULL;
    uint64_t extra_datap = 0;
    bool used_extra = false;
    int sve_size = 0;
    int za_size = 0;
    int svcr = 0;

    target_restore_general_frame(env, sf);

    ctx = (struct target_aarch64_ctx *)sf->uc.tuc_mcontext.__reserved;
    while (ctx) {
        uint32_t magic, size, extra_size;

        __get_user(magic, &ctx->magic);
        __get_user(size, &ctx->size);
        switch (magic) {
        case 0:
            if (size != 0) {
                goto err;
            }
            if (used_extra) {
                ctx = NULL;
            } else {
                ctx = extra;
                used_extra = true;
            }
            continue;

        case TARGET_FPSIMD_MAGIC:
            if (fpsimd || size != sizeof(struct target_fpsimd_context)) {
                goto err;
            }
            fpsimd = (struct target_fpsimd_context *)ctx;
            break;

        case TARGET_SVE_MAGIC:
            if (sve || size < sizeof(struct target_sve_context)) {
                goto err;
            }
            sve = (struct target_sve_context *)ctx;
            sve_size = size;
            break;

        case TARGET_ZA_MAGIC:
            if (za || size < sizeof(struct target_za_context)) {
                goto err;
            }
            za = (struct target_za_context *)ctx;
            za_size = size;
            break;

        case TARGET_EXTRA_MAGIC:
            if (extra || size != sizeof(struct target_extra_context)) {
                goto err;
            }
            __get_user(extra_datap,
                       &((struct target_extra_context *)ctx)->datap);
            __get_user(extra_size,
                       &((struct target_extra_context *)ctx)->size);
            extra = lock_user(VERIFY_READ, extra_datap, extra_size, 0);
            if (!extra) {
                return 1;
            }
            break;

        default:
            /* Unknown record -- we certainly didn't generate it.
             * Did we in fact get out of sync?
             */
            goto err;
        }
        ctx = (void *)ctx + size;
    }

    /* Require FPSIMD always.  */
    if (fpsimd) {
        target_restore_fpsimd_record(env, fpsimd);
    } else {
        goto err;
    }

    /* SVE data, if present, overwrites FPSIMD data.  */
    if (sve && !target_restore_sve_record(env, sve, sve_size, &svcr)) {
        goto err;
    }
    if (za && !target_restore_za_record(env, za, za_size, &svcr)) {
        goto err;
    }
    if (env->svcr != svcr) {
        env->svcr = svcr;
        arm_rebuild_hflags(env);
    }
    unlock_user(extra, extra_datap, 0);
    return 0;

 err:
    unlock_user(extra, extra_datap, 0);
    return 1;
}

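/*
 * Compute the address of the frame on the (possibly alternate)
 * signal stack, aligned down to 16 bytes.
 */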
static abi_ulong get_sigframe(struct target_sigaction *ka,
                              CPUARMState *env, int size)
{
    abi_ulong sp;

    sp = target_sigsp(get_sp_from_cpustate(env), ka);

    sp = (sp - size) & ~15;

    return sp;
}

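/*
 * Running tally of the frame layout: how much of the standard
 * __reserved space and of the extra overflow area each record uses.
 */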
typedef struct {
    int total_size;
    int extra_base;
    int extra_size;
    int std_end_ofs;
    int extra_ofs;
    int extra_end_ofs;
} target_sigframe_layout;

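/*
 * Reserve this_size bytes for a record, spilling into the extra
 * area once the standard space can no longer also hold the end
 * marker.  Returns the record's offset within the frame.
 */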
static int alloc_sigframe_space(int this_size, target_sigframe_layout *l)
{
    /* Make sure there will always be space for the end marker.  */
    const int std_size = sizeof(struct target_rt_sigframe)
                         - sizeof(struct target_aarch64_ctx);
    int this_loc = l->total_size;

    if (l->extra_base) {
        /* Once we have begun an extra space, all allocations go there.  */
        l->extra_size += this_size;
    } else if (this_size + this_loc > std_size) {
        /* This allocation does not fit in the standard space.  */
        /* Allocate the extra record.  */
        l->extra_ofs = this_loc;
        l->total_size += sizeof(struct target_extra_context);

        /* Allocate the standard end record.  */
        l->std_end_ofs = l->total_size;
        l->total_size += sizeof(struct target_aarch64_ctx);

        /* Allocate the requested record.  */
        l->extra_base = this_loc = l->total_size;
        l->extra_size = this_size;
    }
    l->total_size += this_size;

    return this_loc;
}

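/*
 * Build the complete signal frame: lay out the records, write them,
 * push the unwind pair, and redirect the CPU to the handler.
 */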
static void target_setup_frame(int usig, struct target_sigaction *ka,
                               target_siginfo_t *info, target_sigset_t *set,
                               CPUARMState *env)
{
    target_sigframe_layout layout = {
        /* Begin with the size pointing to the reserved space.  */
        .total_size = offsetof(struct target_rt_sigframe,
                               uc.tuc_mcontext.__reserved),
    };
    int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0;
    int sve_size = 0, za_size = 0;
    struct target_rt_sigframe *frame;
    struct target_rt_frame_record *fr;
    abi_ulong frame_addr, return_addr;

    /* FPSIMD record is always in the standard space.  */
    fpsimd_ofs = alloc_sigframe_space(sizeof(struct target_fpsimd_context),
                                      &layout);

    /* SVE state needs saving only if it exists.  */
    if (cpu_isar_feature(aa64_sve, env_archcpu(env)) ||
        cpu_isar_feature(aa64_sme, env_archcpu(env))) {
        sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(sve_vq(env)), 16);
        sve_ofs = alloc_sigframe_space(sve_size, &layout);
    }
    if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
        /* ZA state needs saving only if it is enabled.  */
        if (FIELD_EX64(env->svcr, SVCR, ZA)) {
            za_size = TARGET_ZA_SIG_CONTEXT_SIZE(sme_vq(env));
        } else {
            za_size = TARGET_ZA_SIG_CONTEXT_SIZE(0);
        }
        za_ofs = alloc_sigframe_space(za_size, &layout);
    }

    if (layout.extra_ofs) {
        /* Reserve space for the extra end marker.  The standard end marker
         * will have been allocated when we allocated the extra record.
         */
        layout.extra_end_ofs
            = alloc_sigframe_space(sizeof(struct target_aarch64_ctx), &layout);
    } else {
        /* Reserve space for the standard end marker.
         * Do not use alloc_sigframe_space because we cheat
         * std_size therein to reserve space for this.
         */
        layout.std_end_ofs = layout.total_size;
        layout.total_size += sizeof(struct target_aarch64_ctx);
    }

    /* We must always provide at least the standard 4K reserved space,
     * even if we don't use all of it (this is part of the ABI)
     */
    layout.total_size = MAX(layout.total_size,
                            sizeof(struct target_rt_sigframe));

    /*
     * Reserve space for the standard frame unwind pair: fp, lr.
     * Despite the name this is not a "real" record within the frame.
     */
    fr_ofs = layout.total_size;
    layout.total_size += sizeof(struct target_rt_frame_record);

    frame_addr = get_sigframe(ka, env, layout.total_size);
    trace_user_setup_frame(env, frame_addr);
    frame = lock_user(VERIFY_WRITE, frame_addr, layout.total_size, 0);
    if (!frame) {
        goto give_sigsegv;
    }

    target_setup_general_frame(frame, env, set);
    target_setup_fpsimd_record((void *)frame + fpsimd_ofs, env);
    target_setup_end_record((void *)frame + layout.std_end_ofs);
    if (layout.extra_ofs) {
        target_setup_extra_record((void *)frame + layout.extra_ofs,
                                  frame_addr + layout.extra_base,
                                  layout.extra_size);
        target_setup_end_record((void *)frame + layout.extra_end_ofs);
    }
    if (sve_ofs) {
        target_setup_sve_record((void *)frame + sve_ofs, env, sve_size);
    }
    if (za_ofs) {
        target_setup_za_record((void *)frame + za_ofs, env, za_size);
    }

    /* Set up the stack frame for unwinding.  */
    fr = (void *)frame + fr_ofs;
    __put_user(env->xregs[29], &fr->fp);
    __put_user(env->xregs[30], &fr->lr);

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        return_addr = ka->sa_restorer;
    } else {
        return_addr = default_rt_sigreturn;
    }
    env->xregs[0] = usig;
    env->xregs[29] = frame_addr + fr_ofs;
    env->xregs[30] = return_addr;
    env->xregs[31] = frame_addr;
    env->pc = ka->_sa_handler;

    /* Invoke the signal handler as if by indirect call.  */
    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        env->btype = 2;
    }

    /* Invoke the signal handler with both SM and ZA disabled. */
    aarch64_set_svcr(env, 0, R_SVCR_SM_MASK | R_SVCR_ZA_MASK);

    if (info) {
        tswap_siginfo(&frame->info, info);
        env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
        env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    }

    unlock_user(frame, frame_addr, layout.total_size);
    return;

 give_sigsegv:
    unlock_user(frame, frame_addr, layout.total_size);
    force_sigsegv(usig);
}

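/*
 * AArch64 uses a single frame format for both entry points;
 * the non-RT variant simply passes no siginfo.
 */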
void setup_rt_frame(int sig, struct target_sigaction *ka,
                    target_siginfo_t *info, target_sigset_t *set,
                    CPUARMState *env)
{
    target_setup_frame(sig, ka, info, set, env);
}

void setup_frame(int sig, struct target_sigaction *ka,
                 target_sigset_t *set, CPUARMState *env)
{
    target_setup_frame(sig, ka, 0, set, env);
}

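/*
 * Handle rt_sigreturn(2): reject a misaligned SP, then restore the
 * frame that SP points at and switch back to the saved context.
 */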
long do_rt_sigreturn(CPUARMState *env)
{
    struct target_rt_sigframe *frame = NULL;
    abi_ulong frame_addr = env->xregs[31];

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 15) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (target_restore_sigframe(env, frame)) {
        goto badframe;
    }

    target_restore_altstack(&frame->uc.tuc_stack, env);

    unlock_user_struct(frame, frame_addr, 0);
    return -QEMU_ESIGRETURN;

 badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -QEMU_ESIGRETURN;
}

long do_sigreturn(CPUARMState *env)
{
    return do_rt_sigreturn(env);
}

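/*
 * Install the default sigreturn trampoline; it becomes the return
 * address for handlers registered without SA_RESTORER.
 */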
void setup_sigtramp(abi_ulong sigtramp_page)
{
    uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 8, 0);
    assert(tramp != NULL);

    /*
     * mov x8,#__NR_rt_sigreturn; svc #0
     * Since these are instructions they need to be put as little-endian
     * regardless of target default or current CPU endianness.
     */
    __put_user_e(0xd2801168, &tramp[0], le);
    __put_user_e(0xd4000001, &tramp[1], le);

    default_rt_sigreturn = sigtramp_page;
    unlock_user(tramp, sigtramp_page, 8);
}