/* linux/arch/arm64/kernel/perf_callchain.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * arm64 callchain support
   4 *
   5 * Copyright (C) 2015 ARM Limited
   6 */
   7#include <linux/perf_event.h>
   8#include <linux/stacktrace.h>
   9#include <linux/uaccess.h>
  10
  11#include <asm/pointer_auth.h>
  12
/*
 * AArch64 frame record as laid out on the user stack: the previous
 * frame pointer followed by the saved link register.  Packed so the
 * struct layout exactly matches the on-stack record with no padding.
 */
struct frame_tail {
	struct frame_tail	__user *fp;	/* previous frame record */
	unsigned long		lr;		/* saved return address */
} __attribute__((packed));
  17
  18/*
  19 * Get the return address for a single stackframe and return a pointer to the
  20 * next frame tail.
  21 */
  22static struct frame_tail __user *
  23user_backtrace(struct frame_tail __user *tail,
  24               struct perf_callchain_entry_ctx *entry)
  25{
  26        struct frame_tail buftail;
  27        unsigned long err;
  28        unsigned long lr;
  29
  30        /* Also check accessibility of one struct frame_tail beyond */
  31        if (!access_ok(tail, sizeof(buftail)))
  32                return NULL;
  33
  34        pagefault_disable();
  35        err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
  36        pagefault_enable();
  37
  38        if (err)
  39                return NULL;
  40
  41        lr = ptrauth_strip_insn_pac(buftail.lr);
  42
  43        perf_callchain_store(entry, lr);
  44
  45        /*
  46         * Frame pointers should strictly progress back up the stack
  47         * (towards higher addresses).
  48         */
  49        if (tail >= buftail.fp)
  50                return NULL;
  51
  52        return buftail.fp;
  53}
  54
  55#ifdef CONFIG_COMPAT
  56/*
  57 * The registers we're interested in are at the end of the variable
  58 * length saved register structure. The fp points at the end of this
  59 * structure so the address of this struct is:
  60 * (struct compat_frame_tail *)(xxx->fp)-1
  61 *
  62 * This code has been adapted from the ARM OProfile support.
  63 */
/*
 * AArch32 (compat) frame record.  Packed so the struct layout exactly
 * matches the 32-bit on-stack record with no padding.
 */
struct compat_frame_tail {
	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
	u32		sp; /* saved stack pointer */
	u32		lr; /* saved return address */
} __attribute__((packed));
  69
  70static struct compat_frame_tail __user *
  71compat_user_backtrace(struct compat_frame_tail __user *tail,
  72                      struct perf_callchain_entry_ctx *entry)
  73{
  74        struct compat_frame_tail buftail;
  75        unsigned long err;
  76
  77        /* Also check accessibility of one struct frame_tail beyond */
  78        if (!access_ok(tail, sizeof(buftail)))
  79                return NULL;
  80
  81        pagefault_disable();
  82        err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
  83        pagefault_enable();
  84
  85        if (err)
  86                return NULL;
  87
  88        perf_callchain_store(entry, buftail.lr);
  89
  90        /*
  91         * Frame pointers should strictly progress back up the stack
  92         * (towards higher addresses).
  93         */
  94        if (tail + 1 >= (struct compat_frame_tail __user *)
  95                        compat_ptr(buftail.fp))
  96                return NULL;
  97
  98        return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
  99}
 100#endif /* CONFIG_COMPAT */
 101
 102void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 103                         struct pt_regs *regs)
 104{
 105        if (perf_guest_state()) {
 106                /* We don't support guest os callchain now */
 107                return;
 108        }
 109
 110        perf_callchain_store(entry, regs->pc);
 111
 112        if (!compat_user_mode(regs)) {
 113                /* AARCH64 mode */
 114                struct frame_tail __user *tail;
 115
 116                tail = (struct frame_tail __user *)regs->regs[29];
 117
 118                while (entry->nr < entry->max_stack &&
 119                       tail && !((unsigned long)tail & 0x7))
 120                        tail = user_backtrace(tail, entry);
 121        } else {
 122#ifdef CONFIG_COMPAT
 123                /* AARCH32 compat mode */
 124                struct compat_frame_tail __user *tail;
 125
 126                tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
 127
 128                while ((entry->nr < entry->max_stack) &&
 129                        tail && !((unsigned long)tail & 0x3))
 130                        tail = compat_user_backtrace(tail, entry);
 131#endif
 132        }
 133}
 134
 135static bool callchain_trace(void *data, unsigned long pc)
 136{
 137        struct perf_callchain_entry_ctx *entry = data;
 138        return perf_callchain_store(entry, pc) == 0;
 139}
 140
 141void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 142                           struct pt_regs *regs)
 143{
 144        if (perf_guest_state()) {
 145                /* We don't support guest os callchain now */
 146                return;
 147        }
 148
 149        arch_stack_walk(callchain_trace, entry, current, regs);
 150}
 151
 152unsigned long perf_instruction_pointer(struct pt_regs *regs)
 153{
 154        if (perf_guest_state())
 155                return perf_guest_get_ip();
 156
 157        return instruction_pointer(regs);
 158}
 159
 160unsigned long perf_misc_flags(struct pt_regs *regs)
 161{
 162        unsigned int guest_state = perf_guest_state();
 163        int misc = 0;
 164
 165        if (guest_state) {
 166                if (guest_state & PERF_GUEST_USER)
 167                        misc |= PERF_RECORD_MISC_GUEST_USER;
 168                else
 169                        misc |= PERF_RECORD_MISC_GUEST_KERNEL;
 170        } else {
 171                if (user_mode(regs))
 172                        misc |= PERF_RECORD_MISC_USER;
 173                else
 174                        misc |= PERF_RECORD_MISC_KERNEL;
 175        }
 176
 177        return misc;
 178}
 179