/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_H
#define _LINUX_CONTEXT_TRACKING_H

#include <linux/sched.h>
#include <linux/vtime.h>
#include <linux/context_tracking_state.h>
#include <linux/instrumentation.h>

#include <asm/ptrace.h>


#ifdef CONFIG_CONTEXT_TRACKING
extern void context_tracking_cpu_set(int cpu);

/* Called with interrupts disabled. */
extern void __context_tracking_enter(enum ctx_state state);
extern void __context_tracking_exit(enum ctx_state state);

extern void context_tracking_enter(enum ctx_state state);
extern void context_tracking_exit(enum ctx_state state);
extern void context_tracking_user_enter(void);
extern void context_tracking_user_exit(void);

/*
 * Irq-safe wrappers: context_tracking_enter/exit() save and restore the
 * interrupt state internally.
 */
static inline void user_enter(void)
{
	if (context_tracking_enabled())
		context_tracking_enter(CONTEXT_USER);
}

static inline void user_exit(void)
{
	if (context_tracking_enabled())
		context_tracking_exit(CONTEXT_USER);
}

/* Called with interrupts disabled. */
static __always_inline void user_enter_irqoff(void)
{
	if (context_tracking_enabled())
		__context_tracking_enter(CONTEXT_USER);
}

static __always_inline void user_exit_irqoff(void)
{
	if (context_tracking_enabled())
		__context_tracking_exit(CONTEXT_USER);
}

/*
 * Temporarily leave the current context on exception entry and return the
 * previous state so that exception_exit() can restore it.
 */
static inline enum ctx_state exception_enter(void)
{
	enum ctx_state prev_ctx;

	if (!context_tracking_enabled())
		return 0;

	prev_ctx = this_cpu_read(context_tracking.state);
	if (prev_ctx != CONTEXT_KERNEL)
		context_tracking_exit(prev_ctx);

	return prev_ctx;
}

static inline void exception_exit(enum ctx_state prev_ctx)
{
	if (context_tracking_enabled()) {
		if (prev_ctx != CONTEXT_KERNEL)
			context_tracking_enter(prev_ctx);
	}
}


/**
 * ct_state() - return the current context tracking state if known
 *
 * Returns the current CPU's context tracking state if context tracking
 * is enabled. If context tracking is disabled, returns CONTEXT_DISABLED.
 * This should be used primarily for debugging.
 */
static __always_inline enum ctx_state ct_state(void)
{
	return context_tracking_enabled() ?
		this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
}
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
#endif /* !CONFIG_CONTEXT_TRACKING */

#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))

#ifdef CONFIG_CONTEXT_TRACKING_FORCE
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
#endif /* CONFIG_CONTEXT_TRACKING_FORCE */


#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
/* must be called with irqs disabled */
static __always_inline void guest_enter_irqoff(void)
{
	instrumentation_begin();
	if (vtime_accounting_enabled_this_cpu())
		vtime_guest_enter(current);
	else
		current->flags |= PF_VCPU;
	instrumentation_end();

	if (context_tracking_enabled())
		__context_tracking_enter(CONTEXT_GUEST);

	/*
	 * KVM does not hold any references to RCU-protected data when it
	 * switches the CPU into guest mode. In fact, switching to guest mode
	 * is very similar to exiting to userspace from RCU's point of view.
	 * In addition, the CPU may stay in guest mode for quite a long time
	 * (up to one time slice). Let's treat guest mode as a quiescent
	 * state, just like we do with user-mode execution.
	 */
	if (!context_tracking_enabled_this_cpu()) {
		instrumentation_begin();
		rcu_virt_note_context_switch(smp_processor_id());
		instrumentation_end();
	}
}

static __always_inline void guest_exit_irqoff(void)
{
	if (context_tracking_enabled())
		__context_tracking_exit(CONTEXT_GUEST);

	instrumentation_begin();
	if (vtime_accounting_enabled_this_cpu())
		vtime_guest_exit(current);
	else
		current->flags &= ~PF_VCPU;
	instrumentation_end();
}

#else
static __always_inline void guest_enter_irqoff(void)
{
	/*
	 * This runs in ioctl context, so it's safe to assume that the
	 * pending cputime to flush is system time (stime).
	 */
	instrumentation_begin();
	vtime_account_kernel(current);
	current->flags |= PF_VCPU;
	rcu_virt_note_context_switch(smp_processor_id());
	instrumentation_end();
}

static __always_inline void guest_exit_irqoff(void)
{
	instrumentation_begin();
	/* Flush the cputime we spent on the guest */
	vtime_account_kernel(current);
	current->flags &= ~PF_VCPU;
	instrumentation_end();
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */

/* Irq-safe wrapper around guest_exit_irqoff(). */
static inline void guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit_irqoff();
	local_irq_restore(flags);
}

#endif
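/*
 * Illustrative sketch, not part of the original header: a typical caller of
 * exception_enter()/exception_exit(). Architectures whose entry code cannot
 * statically tell whether an exception interrupted user mode bracket their
 * handlers like this so that context tracking sees the transition.
 * example_exception_handler() and handle_the_exception() are hypothetical
 * names used for illustration only.
 *
 *	static void example_exception_handler(struct pt_regs *regs)
 *	{
 *		enum ctx_state prev_state = exception_enter();
 *
 *		handle_the_exception(regs);
 *		exception_exit(prev_state);
 *	}
 */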