/*
 * Emulation of Linux signals for the AArch64 (linux-user) target.
 */
#include "qemu/osdep.h"
#include "qemu.h"
#include "signal-common.h"
#include "linux-user/trace.h"

struct target_sigcontext {
    uint64_t fault_address;
    /* AArch64 registers */
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
    /* 4K reserved for FP/SIMD state and future expansion */
    char __reserved[4096] __attribute__((__aligned__(16)));
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    /* glibc uses a 1024-bit sigset_t */
    char __unused[1024 / 8 - sizeof(target_sigset_t)];
    /* last for future expansion */
    struct target_sigcontext tuc_mcontext;
};

/*
 * Header to be used at the beginning of structures extending the user
 * context.  Such structures must be placed after the rt_sigframe on the
 * stack and be 16-byte aligned.  The last structure must be a dummy one
 * with the magic and size set to 0.
 */
struct target_aarch64_ctx {
    uint32_t magic;
    uint32_t size;
};

#define TARGET_FPSIMD_MAGIC 0x46508001

struct target_fpsimd_context {
    struct target_aarch64_ctx head;
    uint32_t fpsr;
    uint32_t fpcr;
    uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
};

#define TARGET_EXTRA_MAGIC 0x45585401

struct target_extra_context {
    struct target_aarch64_ctx head;
    uint64_t datap;  /* 16-byte aligned pointer to extra space cast to __u64 */
    uint32_t size;   /* size in bytes of the extra space */
    uint32_t reserved[3];
};

#define TARGET_SVE_MAGIC 0x53564501

struct target_sve_context {
    struct target_aarch64_ctx head;
    uint16_t vl;
    uint16_t reserved[3];
    /* The actual SVE data immediately follows.  It is laid out
     * according to TARGET_SVE_SIG_{Z,P,F}REG_OFFSET, based off of
     * the original struct pointer.
     */
};

#define TARGET_SVE_VQ_BYTES 16

#define TARGET_SVE_SIG_ZREG_SIZE(VQ)  ((VQ) * TARGET_SVE_VQ_BYTES)
#define TARGET_SVE_SIG_PREG_SIZE(VQ)  ((VQ) * (TARGET_SVE_VQ_BYTES / 8))

#define TARGET_SVE_SIG_REGS_OFFSET \
    QEMU_ALIGN_UP(sizeof(struct target_sve_context), TARGET_SVE_VQ_BYTES)
#define TARGET_SVE_SIG_ZREG_OFFSET(VQ, N) \
    (TARGET_SVE_SIG_REGS_OFFSET + TARGET_SVE_SIG_ZREG_SIZE(VQ) * (N))
#define TARGET_SVE_SIG_PREG_OFFSET(VQ, N) \
    (TARGET_SVE_SIG_ZREG_OFFSET(VQ, 32) + TARGET_SVE_SIG_PREG_SIZE(VQ) * (N))
#define TARGET_SVE_SIG_FFR_OFFSET(VQ) \
    (TARGET_SVE_SIG_PREG_OFFSET(VQ, 16))
#define TARGET_SVE_SIG_CONTEXT_SIZE(VQ) \
    (TARGET_SVE_SIG_PREG_OFFSET(VQ, 17))

struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
};

struct target_rt_frame_record {
    uint64_t fp;
    uint64_t lr;
    uint32_t tramp[2];
};

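/*
 * Fill in the fixed part of the frame: flags, link, alternate signal
 * stack, the general-purpose registers, SP, PC, PSTATE, the faulting
 * address, and the saved signal mask of the interrupted context.
 */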
static void target_setup_general_frame(struct target_rt_sigframe *sf,
                                       CPUARMState *env, target_sigset_t *set)
{
    int i;

    __put_user(0, &sf->uc.tuc_flags);
    __put_user(0, &sf->uc.tuc_link);

    target_save_altstack(&sf->uc.tuc_stack, env);

    for (i = 0; i < 31; i++) {
        __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }
    __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);

    __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
    }
}

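/*
 * Write the FPSIMD record: FPSR, FPCR and the 32 128-bit vector
 * registers, stored as pairs of 64-bit halves in target byte order.
 */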
static void target_setup_fpsimd_record(struct target_fpsimd_context *fpsimd,
                                       CPUARMState *env)
{
    int i;

    __put_user(TARGET_FPSIMD_MAGIC, &fpsimd->head.magic);
    __put_user(sizeof(struct target_fpsimd_context), &fpsimd->head.size);
    __put_user(vfp_get_fpsr(env), &fpsimd->fpsr);
    __put_user(vfp_get_fpcr(env), &fpsimd->fpcr);

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#ifdef TARGET_WORDS_BIGENDIAN
        __put_user(q[0], &fpsimd->vregs[i * 2 + 1]);
        __put_user(q[1], &fpsimd->vregs[i * 2]);
#else
        __put_user(q[0], &fpsimd->vregs[i * 2]);
        __put_user(q[1], &fpsimd->vregs[i * 2 + 1]);
#endif
    }
}

static void target_setup_extra_record(struct target_extra_context *extra,
                                      uint64_t datap, uint32_t extra_size)
{
    __put_user(TARGET_EXTRA_MAGIC, &extra->head.magic);
    __put_user(sizeof(struct target_extra_context), &extra->head.size);
    __put_user(datap, &extra->datap);
    __put_user(extra_size, &extra->size);
}

static void target_setup_end_record(struct target_aarch64_ctx *end)
{
    __put_user(0, &end->magic);
    __put_user(0, &end->size);
}

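/*
 * Write the SVE record: the vector length in bytes, then the Z, P and
 * FFR registers at their TARGET_SVE_SIG_*_OFFSET positions.
 */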
static void target_setup_sve_record(struct target_sve_context *sve,
                                    CPUARMState *env, int vq, int size)
{
    int i, j;

    __put_user(TARGET_SVE_MAGIC, &sve->head.magic);
    __put_user(size, &sve->head.size);
    __put_user(vq * TARGET_SVE_VQ_BYTES, &sve->vl);

    /* Note that SVE regs are stored as a byte stream, with each byte element
     * at a subsequent address.  This corresponds to a little-endian store
     * of our 64-bit hunks.
     */
    for (i = 0; i < 32; ++i) {
        uint64_t *z = (void *)sve + TARGET_SVE_SIG_ZREG_OFFSET(vq, i);
        for (j = 0; j < vq * 2; ++j) {
            __put_user_e(env->vfp.zregs[i].d[j], z + j, le);
        }
    }

    /* The P regs and FFR are likewise a little-endian byte stream,
     * but handled in 16-bit units.
     */
    for (i = 0; i <= 16; ++i) {
        uint16_t *p = (void *)sve + TARGET_SVE_SIG_PREG_OFFSET(vq, i);
        for (j = 0; j < vq; ++j) {
            uint64_t r = env->vfp.pregs[i].p[j >> 2];
            __put_user_e(r >> ((j & 3) * 16), p + j, le);
        }
    }
}

static void target_restore_general_frame(CPUARMState *env,
                                         struct target_rt_sigframe *sf)
{
    sigset_t set;
    uint64_t pstate;
    int i;

    target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
    set_sigmask(&set);

    for (i = 0; i < 31; i++) {
        __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }

    __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
    pstate_write(env, pstate);
}

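/* Restore FPSR, FPCR and the 32 vector registers from the FPSIMD record. */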
static void target_restore_fpsimd_record(CPUARMState *env,
                                         struct target_fpsimd_context *fpsimd)
{
    uint32_t fpsr, fpcr;
    int i;

    __get_user(fpsr, &fpsimd->fpsr);
    vfp_set_fpsr(env, fpsr);
    __get_user(fpcr, &fpsimd->fpcr);
    vfp_set_fpcr(env, fpcr);

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#ifdef TARGET_WORDS_BIGENDIAN
        __get_user(q[0], &fpsimd->vregs[i * 2 + 1]);
        __get_user(q[1], &fpsimd->vregs[i * 2]);
#else
        __get_user(q[0], &fpsimd->vregs[i * 2]);
        __get_user(q[1], &fpsimd->vregs[i * 2 + 1]);
#endif
    }
}

static void target_restore_sve_record(CPUARMState *env,
                                      struct target_sve_context *sve, int vq)
{
    int i, j;

    /* Note that SVE regs are stored as a byte stream, with each byte element
     * at a subsequent address.  This corresponds to a little-endian load
     * of our 64-bit hunks.
     */
    for (i = 0; i < 32; ++i) {
        uint64_t *z = (void *)sve + TARGET_SVE_SIG_ZREG_OFFSET(vq, i);
        for (j = 0; j < vq * 2; ++j) {
            __get_user_e(env->vfp.zregs[i].d[j], z + j, le);
        }
    }

    /* The P regs and FFR are reassembled from little-endian 16-bit units
     * back into our 64-bit hunks.
     */
    for (i = 0; i <= 16; ++i) {
        uint16_t *p = (void *)sve + TARGET_SVE_SIG_PREG_OFFSET(vq, i);
        for (j = 0; j < vq; ++j) {
            uint16_t r;
            __get_user_e(r, p + j, le);
            if (j & 3) {
                env->vfp.pregs[i].p[j >> 2] |= (uint64_t)r << ((j & 3) * 16);
            } else {
                env->vfp.pregs[i].p[j >> 2] = r;
            }
        }
    }
}

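/*
 * Walk the list of records in __reserved (and in any extra space linked
 * via an extra record), restoring each one.  Returns non-zero if the
 * frame is malformed.
 */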
static int target_restore_sigframe(CPUARMState *env,
                                   struct target_rt_sigframe *sf)
{
    struct target_aarch64_ctx *ctx, *extra = NULL;
    struct target_fpsimd_context *fpsimd = NULL;
    struct target_sve_context *sve = NULL;
    uint64_t extra_datap = 0;
    bool used_extra = false;
    bool err = false;
    int vq = 0, sve_size = 0;

    target_restore_general_frame(env, sf);

    ctx = (struct target_aarch64_ctx *)sf->uc.tuc_mcontext.__reserved;
    while (ctx) {
        uint32_t magic, size, extra_size;

        __get_user(magic, &ctx->magic);
        __get_user(size, &ctx->size);
        switch (magic) {
        case 0:
            if (size != 0) {
                err = true;
                goto exit;
            }
            if (used_extra) {
                ctx = NULL;
            } else {
                ctx = extra;
                used_extra = true;
            }
            continue;

        case TARGET_FPSIMD_MAGIC:
            if (fpsimd || size != sizeof(struct target_fpsimd_context)) {
                err = true;
                goto exit;
            }
            fpsimd = (struct target_fpsimd_context *)ctx;
            break;

        case TARGET_SVE_MAGIC:
            if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
                vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
                if (!sve && size == sve_size) {
                    sve = (struct target_sve_context *)ctx;
                    break;
                }
            }
            err = true;
            goto exit;

        case TARGET_EXTRA_MAGIC:
            if (extra || size != sizeof(struct target_extra_context)) {
                err = true;
                goto exit;
            }
            __get_user(extra_datap,
                       &((struct target_extra_context *)ctx)->datap);
            __get_user(extra_size,
                       &((struct target_extra_context *)ctx)->size);
            extra = lock_user(VERIFY_READ, extra_datap, extra_size, 0);
            break;

        default:
            /* Unknown record -- we certainly didn't generate it.
             * Did we in fact get out of sync?
             */
            err = true;
            goto exit;
        }
        ctx = (void *)ctx + size;
    }

    /* Require FPSIMD always.  */
    if (fpsimd) {
        target_restore_fpsimd_record(env, fpsimd);
    } else {
        err = true;
    }

    /* SVE data, if present, overwrites FPSIMD data.  */
    if (sve) {
        target_restore_sve_record(env, sve, vq);
    }

 exit:
    unlock_user(extra, extra_datap, 0);
    return err;
}

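/* Choose the stack for the frame (honouring SA_ONSTACK) and 16-byte align it. */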
static abi_ulong get_sigframe(struct target_sigaction *ka,
                              CPUARMState *env, int size)
{
    abi_ulong sp;

    sp = target_sigsp(get_sp_from_cpustate(env), ka);

    sp = (sp - size) & ~15;

    return sp;
}

typedef struct {
    /* Total size of the frame laid out so far.  */
    int total_size;
    /* Offset and size of the "extra" (overflow) area.  */
    int extra_base;
    int extra_size;
    /* Offsets of the standard end marker, the extra record, and the
     * end marker within the extra area.  */
    int std_end_ofs;
    int extra_ofs;
    int extra_end_ofs;
} target_sigframe_layout;

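/*
 * Reserve space for a record of this_size bytes within the frame layout,
 * spilling into the "extra" area once the standard __reserved space is
 * full.  Returns the offset of the new record from the start of the frame.
 */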
static int alloc_sigframe_space(int this_size, target_sigframe_layout *l)
{
    /* Make sure there will always be space for the end marker.  */
    const int std_size = sizeof(struct target_rt_sigframe)
                         - sizeof(struct target_aarch64_ctx);
    int this_loc = l->total_size;

    if (l->extra_base) {
        /* Once we have begun an extra space, all allocations go there.  */
        l->extra_size += this_size;
    } else if (this_size + this_loc > std_size) {
        /* This allocation does not fit in the standard space.  */
        /* Allocate the extra record.  */
        l->extra_ofs = this_loc;
        l->total_size += sizeof(struct target_extra_context);

        /* Allocate the standard end record.  */
        l->std_end_ofs = l->total_size;
        l->total_size += sizeof(struct target_aarch64_ctx);

        /* Allocate the requested record.  */
        l->extra_base = this_loc = l->total_size;
        l->extra_size = this_size;
    }
    l->total_size += this_size;

    return this_loc;
}

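/*
 * Build the complete rt signal frame on the target stack and redirect the
 * CPU to the handler, with X0 = signal number and, when info is supplied,
 * X1 = &info and X2 = &ucontext.
 */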
static void target_setup_frame(int usig, struct target_sigaction *ka,
                               target_siginfo_t *info, target_sigset_t *set,
                               CPUARMState *env)
{
    target_sigframe_layout layout = {
        /* Begin with the size pointing to the reserved space.  */
        .total_size = offsetof(struct target_rt_sigframe,
                               uc.tuc_mcontext.__reserved),
    };
    int fpsimd_ofs, fr_ofs, sve_ofs = 0, vq = 0, sve_size = 0;
    struct target_rt_sigframe *frame;
    struct target_rt_frame_record *fr;
    abi_ulong frame_addr, return_addr;

    /* FPSIMD record is always in the standard space.  */
    fpsimd_ofs = alloc_sigframe_space(sizeof(struct target_fpsimd_context),
                                      &layout);

    /* SVE state needs saving only if it exists.  */
    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        vq = (env->vfp.zcr_el[1] & 0xf) + 1;
        sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
        sve_ofs = alloc_sigframe_space(sve_size, &layout);
    }

    if (layout.extra_ofs) {
        /* Reserve space for the extra end marker.  The standard end marker
         * will have been allocated when we allocated the extra record.
         */
        layout.extra_end_ofs
            = alloc_sigframe_space(sizeof(struct target_aarch64_ctx), &layout);
    } else {
        /* Reserve space for the standard end marker.
         * Do not use alloc_sigframe_space because std_size therein
         * already reserves space for this.
         */
        layout.std_end_ofs = layout.total_size;
        layout.total_size += sizeof(struct target_aarch64_ctx);
    }

    /* We must always provide at least the standard 4K reserved space,
     * even if we do not use all of it (this is part of the ABI).
     */
    layout.total_size = MAX(layout.total_size,
                            sizeof(struct target_rt_sigframe));

    /*
     * Reserve space for the return code.  On a real system this would
     * be within the VDSO.  So, despite the name this is not a "real"
     * record within the frame.
     */
    fr_ofs = layout.total_size;
    layout.total_size += sizeof(struct target_rt_frame_record);

    frame_addr = get_sigframe(ka, env, layout.total_size);
    trace_user_setup_frame(env, frame_addr);
    frame = lock_user(VERIFY_WRITE, frame_addr, layout.total_size, 0);
    if (!frame) {
        goto give_sigsegv;
    }

    target_setup_general_frame(frame, env, set);
    target_setup_fpsimd_record((void *)frame + fpsimd_ofs, env);
    target_setup_end_record((void *)frame + layout.std_end_ofs);
    if (layout.extra_ofs) {
        target_setup_extra_record((void *)frame + layout.extra_ofs,
                                  frame_addr + layout.extra_base,
                                  layout.extra_size);
        target_setup_end_record((void *)frame + layout.extra_end_ofs);
    }
    if (sve_ofs) {
        target_setup_sve_record((void *)frame + sve_ofs, env, vq, sve_size);
    }

    /* Set up the stack frame for unwinding.  */
    fr = (void *)frame + fr_ofs;
    __put_user(env->xregs[29], &fr->fp);
    __put_user(env->xregs[30], &fr->lr);

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        return_addr = ka->sa_restorer;
    } else {
        /*
         * mov x8,#__NR_rt_sigreturn; svc #0
         * Since these are instructions they need to be put as little-endian
         * regardless of target default or current CPU endianness.
         */
        __put_user_e(0xd2801168, &fr->tramp[0], le);
        __put_user_e(0xd4000001, &fr->tramp[1], le);
        return_addr = frame_addr + fr_ofs
            + offsetof(struct target_rt_frame_record, tramp);
    }
    env->xregs[0] = usig;
    env->xregs[29] = frame_addr + fr_ofs;
    env->xregs[30] = return_addr;
    env->xregs[31] = frame_addr;
    env->pc = ka->_sa_handler;

    /* Invoke the signal handler as if by indirect call.  */
    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        env->btype = 2;
    }

    if (info) {
        tswap_siginfo(&frame->info, info);
        env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
        env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    }

    unlock_user(frame, frame_addr, layout.total_size);
    return;

 give_sigsegv:
    unlock_user(frame, frame_addr, layout.total_size);
    force_sigsegv(usig);
}

void setup_rt_frame(int sig, struct target_sigaction *ka,
                    target_siginfo_t *info, target_sigset_t *set,
                    CPUARMState *env)
{
    target_setup_frame(sig, ka, info, set, env);
}

void setup_frame(int sig, struct target_sigaction *ka,
                 target_sigset_t *set, CPUARMState *env)
{
    target_setup_frame(sig, ka, 0, set, env);
}

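/*
 * Handle rt_sigreturn: restore the saved context from the frame at SP,
 * or deliver SIGSEGV if the frame is misaligned or malformed.
 */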
long do_rt_sigreturn(CPUARMState *env)
{
    struct target_rt_sigframe *frame = NULL;
    abi_ulong frame_addr = env->xregs[31];

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 15) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (target_restore_sigframe(env, frame)) {
        goto badframe;
    }

    target_restore_altstack(&frame->uc.tuc_stack, env);

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

 badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

long do_sigreturn(CPUARMState *env)
{
    return do_rt_sigreturn(env);
}