#ifndef _LINUX_CONTEXT_TRACKING_H
#define _LINUX_CONTEXT_TRACKING_H

#include <linux/sched.h>
#include <linux/vtime.h>
#include <linux/context_tracking_state.h>
#include <asm/ptrace.h>

#ifdef CONFIG_CONTEXT_TRACKING
extern void context_tracking_cpu_set(int cpu);

/* Called with interrupts disabled. */
extern void __context_tracking_enter(enum ctx_state state);
extern void __context_tracking_exit(enum ctx_state state);

extern void context_tracking_enter(enum ctx_state state);
extern void context_tracking_exit(enum ctx_state state);
extern void context_tracking_user_enter(void);
extern void context_tracking_user_exit(void);

static inline void user_enter(void)
{
	if (context_tracking_is_enabled())
		context_tracking_enter(CONTEXT_USER);
}

static inline void user_exit(void)
{
	if (context_tracking_is_enabled())
		context_tracking_exit(CONTEXT_USER);
}

/* Called with interrupts disabled. */
static inline void user_enter_irqoff(void)
{
	if (context_tracking_is_enabled())
		__context_tracking_enter(CONTEXT_USER);
}

static inline void user_exit_irqoff(void)
{
	if (context_tracking_is_enabled())
		__context_tracking_exit(CONTEXT_USER);
}

static inline enum ctx_state exception_enter(void)
{
	enum ctx_state prev_ctx;

	if (!context_tracking_is_enabled())
		return 0;

	prev_ctx = this_cpu_read(context_tracking.state);
	if (prev_ctx != CONTEXT_KERNEL)
		context_tracking_exit(prev_ctx);

	return prev_ctx;
}

static inline void exception_exit(enum ctx_state prev_ctx)
{
	if (context_tracking_is_enabled()) {
		if (prev_ctx != CONTEXT_KERNEL)
			context_tracking_enter(prev_ctx);
	}
}

/**
 * ct_state() - return the current context tracking state if known
 *
 * Returns the current cpu's context tracking state if context tracking
 * is enabled. If context tracking is disabled, returns
 * CONTEXT_DISABLED. This should be used primarily for debugging.
 */
static inline enum ctx_state ct_state(void)
{
	return context_tracking_is_enabled() ?
		this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
}
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
#endif /* !CONFIG_CONTEXT_TRACKING */

#define CT_WARN_ON(cond) WARN_ON(context_tracking_is_enabled() && (cond))

#ifdef CONFIG_CONTEXT_TRACKING_FORCE
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
#endif /* CONFIG_CONTEXT_TRACKING_FORCE */
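
/*
 * Illustrative sketch (not part of the original header): an architecture's
 * exception or fault handler typically brackets its work with
 * exception_enter() and exception_exit() so that context tracking knows the
 * CPU is executing in the kernel and can restore the previous state on
 * return. The handler name below is hypothetical and only shows the pairing:
 *
 *	void do_example_trap(struct pt_regs *regs, long error_code)
 *	{
 *		enum ctx_state prev_state = exception_enter();
 *
 *		... handle the exception in kernel context ...
 *
 *		exception_exit(prev_state);
 *	}
 */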

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
/* must be called with irqs disabled */
static inline void guest_enter_irqoff(void)
{
	if (vtime_accounting_cpu_enabled())
		vtime_guest_enter(current);
	else
		current->flags |= PF_VCPU;

	if (context_tracking_is_enabled())
		__context_tracking_enter(CONTEXT_GUEST);

	/*
	 * KVM does not hold any references to rcu protected data when it
	 * switches CPU into a guest mode. In fact switching to a guest mode
	 * is very similar to exiting to userspace from rcu point of view. In
	 * addition CPU may stay in a guest mode for quite a long time (up to
	 * one time slice). Let's treat guest mode as a quiescent state, just
	 * like we do with user-mode execution.
	 */
	if (!context_tracking_cpu_is_enabled())
		rcu_virt_note_context_switch(smp_processor_id());
}

static inline void guest_exit_irqoff(void)
{
	if (context_tracking_is_enabled())
		__context_tracking_exit(CONTEXT_GUEST);

	if (vtime_accounting_cpu_enabled())
		vtime_guest_exit(current);
	else
		current->flags &= ~PF_VCPU;
}

#else
static inline void guest_enter_irqoff(void)
{
	/*
	 * This is running in ioctl context so it's safe
	 * to assume that it's the stime pending cputime
	 * to flush.
	 */
	vtime_account_system(current);
	current->flags |= PF_VCPU;
	rcu_virt_note_context_switch(smp_processor_id());
}

static inline void guest_exit_irqoff(void)
{
	/* Flush the cputime we spent on the guest */
	vtime_account_system(current);
	current->flags &= ~PF_VCPU;
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */

static inline void guest_enter(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_enter_irqoff();
	local_irq_restore(flags);
}

static inline void guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit_irqoff();
	local_irq_restore(flags);
}

#endif
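
/*
 * Illustrative sketch (not part of the original header): a hypervisor's run
 * loop typically brackets the actual guest entry with guest_enter() and
 * guest_exit(), or the _irqoff variants when interrupts are already
 * disabled, so that cputime accounting and RCU treat time spent in the
 * guest much like time spent in userspace. The names below are hypothetical
 * and only show the pairing:
 *
 *	static int example_vcpu_run(struct example_vcpu *vcpu)
 *	{
 *		guest_enter();
 *		example_arch_enter_guest(vcpu);
 *		guest_exit();
 *		return 0;
 *	}
 */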