/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count could in theory be the same as the number of
 * interrupts in the system, but we run all interrupt handlers with
 * interrupts disabled, so we cannot have nesting interrupts. Though
 * there are a few palaeontologic drivers which reenable interrupts in
 * the handler, so we need more than one bit here.
 *
 *         PREEMPT_MASK:	0x000000ff
 *         SOFTIRQ_MASK:	0x0000ff00
 *         HARDIRQ_MASK:	0x000f0000
 *             NMI_MASK:	0x00100000
 * PREEMPT_NEED_RESCHED:	0x80000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	1

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS)     << NMI_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)

#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)

#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/*
 * Disable preemption until the scheduler is running -- use an unconditional
 * value so that it also works on !PREEMPT_COUNT kernels.
 *
 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
 */
#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET

/*
 * Initial preempt_count value; reflects the preempt_count schedule invariant
 * which states that during context switches:
 *
 *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
 *
 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
 * Note: See finish_task_switch().
 */
#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 *
 * in_irq()             - We're in (hard) IRQ context
 * in_softirq()         - We have BH disabled, or are processing softirqs
 * in_interrupt()       - We're in NMI, IRQ or SoftIRQ context, or have BH disabled
 * in_serving_softirq() - We're in softirq context
 * in_nmi()             - We're in NMI context
 * in_task()            - We're in task context
 *
 * Note: due to the BH-disabled confusion, in_softirq() and in_interrupt()
 * really should not be used in new code.
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
#define in_nmi()		(preempt_count() & NMI_MASK)
#define in_task()		(!(preempt_count() & \
				   (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
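
/*
 * Worked illustration (not part of the kernel API; the value below is
 * made up for the sake of the arithmetic): how a raw preempt_count
 * decomposes under the masks above.
 *
 *	unsigned int pc = 0x00010102;
 *
 *	pc & PREEMPT_MASK;	// 0x02: two nested preempt_disable()s
 *	pc & SOFTIRQ_MASK;	// 0x0100: SOFTIRQ_OFFSET, serving a softirq
 *	pc & HARDIRQ_MASK;	// 0x00010000: one hardirq in progress
 *	pc & NMI_MASK;		// 0x0: not in NMI
 *
 * For this value in_irq(), in_softirq(), in_interrupt() and
 * in_serving_softirq() are all true, while in_task() is false.
 * Note that the softirq field counts in two different units:
 * local_bh_disable() adds SOFTIRQ_DISABLE_OFFSET (0x200), whereas
 * actually running softirq handlers adds SOFTIRQ_OFFSET (0x100),
 * which is how in_serving_softirq() tells the two cases apart.
 */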

/*
 * The preempt_count offset after preempt_disable();
 */
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET	0
#endif

/*
 * The preempt_count offset after spin_lock()
 */
#define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET

/*
 * The preempt_count offset needed for things like:
 *
 *  spin_lock_bh()
 *
 * which needs to disable both preemption (CONFIG_PREEMPT_COUNT) and
 * softirqs, such that unlock sequences of:
 *
 *  spin_unlock();
 *  local_bh_enable();
 *
 * work as expected.
 */
#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)

/*
 * Are we running in atomic context? WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels. Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	(preempt_count() != 0)

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 */
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
	({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#define preemptible()	(preempt_count() == 0 && !irqs_disabled())

#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_notrace(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched(0)) \
		__preempt_schedule(); \
} while (0)

#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#define preempt_check_resched() do { } while (0)
#endif /* CONFIG_PREEMPT */

#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that things like get_user/put_user, which can fault
 * and schedule, do not migrate into our preempt-protected region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()
#define preemptible()				0

#endif /* CONFIG_PREEMPT_COUNT */
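
/*
 * Usage sketch (illustrative only; my_percpu_counter is a hypothetical
 * per-CPU variable, not something defined by this header):
 *
 *	preempt_disable();
 *	__this_cpu_inc(my_percpu_counter);	// no CPU migration here
 *	preempt_enable();			// may reschedule if needed
 *
 * The calls nest and must balance: each preempt_disable() needs a
 * matching preempt_enable(), and sleeping between them is a bug
 * (see the in_atomic() warning above).
 */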

#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu: cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and sched_out are called under different
 * contexts. sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled. This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}

#endif

#endif /* __LINUX_PREEMPT_H */
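
/*
 * Usage sketch for the preempt-notifier API above (illustrative only;
 * struct my_ctx, my_sched_in and my_sched_out are hypothetical names).
 * The notifier is typically embedded in a larger structure and
 * recovered with container_of(), e.g.:
 *
 *	struct my_ctx {
 *		struct preempt_notifier pn;
 *		int state;
 *	};
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		struct my_ctx *ctx = container_of(pn, struct my_ctx, pn);
 *		// restore per-CPU state for ctx; runs with irqs enabled
 *	}
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *
 * and then, from the task that wants the callbacks:
 *
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&ctx->pn, &my_ops);
 *	preempt_notifier_register(&ctx->pn);
 */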