linux/include/linux/preempt.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count could in theory be the same as the number of
 * interrupts in the system, but we run all interrupt handlers with
 * interrupts disabled, so we cannot have nesting interrupts. Though
 * there are a few palaeontologic drivers which reenable interrupts in
 * the handler, so we need more than one bit here.
 *
 *         PREEMPT_MASK:        0x000000ff
 *         SOFTIRQ_MASK:        0x0000ff00
 *         HARDIRQ_MASK:        0x000f0000
 *             NMI_MASK:        0x00f00000
 * PREEMPT_NEED_RESCHED:        0x80000000
 */
#define PREEMPT_BITS    8
#define SOFTIRQ_BITS    8
#define HARDIRQ_BITS    4
#define NMI_BITS        4

#define PREEMPT_SHIFT   0
#define SOFTIRQ_SHIFT   (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT   (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT       (HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)   ((1UL << (x))-1)

#define PREEMPT_MASK    (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK    (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK    (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK        (__IRQ_MASK(NMI_BITS)     << NMI_SHIFT)

#define PREEMPT_OFFSET  (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET  (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET  (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET      (1UL << NMI_SHIFT)

#define SOFTIRQ_DISABLE_OFFSET  (2 * SOFTIRQ_OFFSET)
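/*
 * Worked example (illustrative, not part of the original header), assuming
 * CONFIG_PREEMPT_COUNT and !CONFIG_PREEMPT_RT: while a softirq handler is
 * running on this CPU, after two nested preempt_disable() calls we would see
 *
 *    preempt_count() == SOFTIRQ_OFFSET + 2 == 0x102
 *
 * and the individual fields can be recovered with the masks above:
 *
 *    unsigned int pc      = preempt_count();
 *    unsigned int depth   = (pc & PREEMPT_MASK) >> PREEMPT_SHIFT;  // 2
 *    unsigned int softirq = (pc & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT;  // 1
 */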

#define PREEMPT_DISABLED        (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/*
 * Disable preemption until the scheduler is running -- use an unconditional
 * value so that it also works on !PREEMPT_COUNT kernels.
 *
 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
 */
#define INIT_PREEMPT_COUNT      PREEMPT_OFFSET

/*
 * Initial preempt_count value; reflects the preempt_count schedule invariant
 * which states that during context switches:
 *
 *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
 *
 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
 * Note: See finish_task_switch().
 */
#define FORK_PREEMPT_COUNT      (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>

#define nmi_count()     (preempt_count() & NMI_MASK)
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#ifdef CONFIG_PREEMPT_RT
# define softirq_count()        (current->softirq_disable_cnt & SOFTIRQ_MASK)
#else
# define softirq_count()        (preempt_count() & SOFTIRQ_MASK)
#endif
#define irq_count()     (nmi_count() | hardirq_count() | softirq_count())

/*
 * Macros to retrieve the current execution context:
 *
 * in_nmi()             - We're in NMI context
 * in_hardirq()         - We're in hard IRQ context
 * in_serving_softirq() - We're in softirq context
 * in_task()            - We're in task context
 */
#define in_nmi()                (nmi_count())
#define in_hardirq()            (hardirq_count())
#define in_serving_softirq()    (softirq_count() & SOFTIRQ_OFFSET)
#define in_task()               (!(in_nmi() | in_hardirq() | in_serving_softirq()))
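/*
 * Example (illustrative, not part of the original header): code that can be
 * reached from several contexts may use these macros to decide whether
 * sleeping operations are permitted, e.g. when choosing allocation flags:
 *
 *    gfp_t gfp = in_task() ? GFP_KERNEL : GFP_ATOMIC;
 *    buf = kmalloc(size, gfp);
 *
 * Note that in_task() only rules out NMI/hardirq/softirq context; it does
 * not guarantee that sleeping is safe (preemption or interrupts may still
 * be disabled by the caller).
 */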

/*
 * The following macros are deprecated and should not be used in new code:
 * in_irq()       - Obsolete version of in_hardirq()
 * in_softirq()   - We have BH disabled, or are processing softirqs
 * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
 */
#define in_irq()                (hardirq_count())
#define in_softirq()            (softirq_count())
#define in_interrupt()          (irq_count())
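/*
 * Illustrative note (not part of the original header): in_softirq() is also
 * non-zero in plain task context while bottom halves are merely disabled,
 * whereas in_serving_softirq() is only non-zero while a softirq handler is
 * actually running:
 *
 *    local_bh_disable();
 *    // in_softirq() != 0 here, but in_serving_softirq() == 0
 *    local_bh_enable();
 *
 * This ambiguity is one reason new code should prefer the explicit macros
 * above.
 */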

/*
 * The preempt_count offset after preempt_disable();
 */
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET 0
#endif

/*
 * The preempt_count offset after spin_lock()
 */
#if !defined(CONFIG_PREEMPT_RT)
#define PREEMPT_LOCK_OFFSET     PREEMPT_DISABLE_OFFSET
#else
#define PREEMPT_LOCK_OFFSET     0
#endif

/*
 * The preempt_count offset needed for things like:
 *
 *  spin_lock_bh()
 *
 * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
 * softirqs, such that unlock sequences of:
 *
 *  spin_unlock();
 *  local_bh_enable();
 *
 * Work as expected.
 */
#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
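/*
 * Rough example (not part of the original header; 'lock' is a made-up
 * spinlock_t), assuming CONFIG_PREEMPT_COUNT and !CONFIG_PREEMPT_RT: a
 * spin_lock_bh() section raises preempt_count() by SOFTIRQ_LOCK_OFFSET,
 * i.e. by the BH-disable offset plus the lock's preempt-disable offset:
 *
 *    unsigned int before = preempt_count();
 *    spin_lock_bh(&lock);
 *    // preempt_count() == before + SOFTIRQ_LOCK_OFFSET
 *    spin_unlock_bh(&lock);
 */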

/*
 * Are we running in atomic context?  WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels.  Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()     (preempt_count() != 0)
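/*
 * Illustration of the warning above (not part of the original header; 'lock'
 * is a made-up spinlock_t): on a kernel built without CONFIG_PREEMPT_COUNT,
 * spin_lock() does not touch preempt_count(), so in_atomic() can report
 * "not atomic" even while a spinlock is held and sleeping would be a bug:
 *
 *    spin_lock(&lock);
 *    WARN_ON(!in_atomic());   // may fire on !CONFIG_PREEMPT_COUNT
 *    spin_unlock(&lock);
 */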

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 */
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
        ({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val)  __preempt_count_add(val)
#define preempt_count_sub(val)  __preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
        preempt_count_inc(); \
        barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
        barrier(); \
        preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#define preemptible()   (preempt_count() == 0 && !irqs_disabled())

#ifdef CONFIG_PREEMPTION
#define preempt_enable() \
do { \
        barrier(); \
        if (unlikely(preempt_count_dec_and_test())) \
                __preempt_schedule(); \
} while (0)

#define preempt_enable_notrace() \
do { \
        barrier(); \
        if (unlikely(__preempt_count_dec_and_test())) \
                __preempt_schedule_notrace(); \
} while (0)

#define preempt_check_resched() \
do { \
        if (should_resched(0)) \
                __preempt_schedule(); \
} while (0)

#else /* !CONFIG_PREEMPTION */
#define preempt_enable() \
do { \
        barrier(); \
        preempt_count_dec(); \
} while (0)

#define preempt_enable_notrace() \
do { \
        barrier(); \
        __preempt_count_dec(); \
} while (0)

#define preempt_check_resched() do { } while (0)
#endif /* CONFIG_PREEMPTION */
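/*
 * Typical usage (illustrative, not part of the original header;
 * 'my_counter' is a made-up per-CPU variable): keep the task on its
 * current CPU while it manipulates per-CPU data, at the price of delaying
 * preemption for the length of the critical section:
 *
 *    preempt_disable();
 *    __this_cpu_inc(my_counter);
 *    preempt_enable();
 *
 * On CONFIG_PREEMPTION kernels preempt_enable() also calls into the
 * scheduler when the count drops to zero and a reschedule is pending.
 */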

#define preempt_disable_notrace() \
do { \
        __preempt_count_inc(); \
        barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
        barrier(); \
        __preempt_count_dec(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that operations like get_user()/put_user(), which can
 * cause faults and scheduling, are not reordered by the compiler into our
 * preempt-protected region.
 */
#define preempt_disable()                       barrier()
#define sched_preempt_enable_no_resched()       barrier()
#define preempt_enable_no_resched()             barrier()
#define preempt_enable()                        barrier()
#define preempt_check_resched()                 do { } while (0)

#define preempt_disable_notrace()               barrier()
#define preempt_enable_no_resched_notrace()     barrier()
#define preempt_enable_notrace()                barrier()
#define preemptible()                           0

#endif /* CONFIG_PREEMPT_COUNT */

#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

#define preempt_set_need_resched() \
do { \
        set_preempt_need_resched(); \
} while (0)
#define preempt_fold_need_resched() \
do { \
        if (tif_need_resched()) \
                set_preempt_need_resched(); \
} while (0)

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and sched_out are called in different
 * contexts.  sched_out is called with the rq lock held and irqs disabled,
 * while sched_in is called without the rq lock and with irqs enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
        void (*sched_in)(struct preempt_notifier *notifier, int cpu);
        void (*sched_out)(struct preempt_notifier *notifier,
                          struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
        struct hlist_node link;
        struct preempt_ops *ops;
};

void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
                                     struct preempt_ops *ops)
{
        INIT_HLIST_NODE(&notifier->link);
        notifier->ops = ops;
}
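
/*
 * Usage sketch (illustrative, not part of the original header; 'my_ctx',
 * 'my_sched_in', 'my_sched_out' and 'my_ops' are made-up names).  The
 * notifier is typically embedded in a larger object and recovered with
 * container_of() in the callbacks:
 *
 *    struct my_ctx {
 *            struct preempt_notifier notifier;
 *            int saved_state;
 *    };
 *
 *    static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *    {
 *            struct my_ctx *ctx = container_of(pn, struct my_ctx, notifier);
 *            // we are about to run again on 'cpu': restore ctx state
 *    }
 *
 *    static void my_sched_out(struct preempt_notifier *pn,
 *                             struct task_struct *next)
 *    {
 *            struct my_ctx *ctx = container_of(pn, struct my_ctx, notifier);
 *            // we were just preempted by 'next': save ctx state
 *    }
 *
 *    static struct preempt_ops my_ops = {
 *            .sched_in  = my_sched_in,
 *            .sched_out = my_sched_out,
 *    };
 *
 *    preempt_notifier_inc();
 *    preempt_notifier_init(&ctx->notifier, &my_ops);
 *    preempt_notifier_register(&ctx->notifier);
 */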

#endif

#ifdef CONFIG_SMP

/*
 * Migrate-Disable and why it is undesired.
 *
 * When a preempted task becomes eligible to run under the ideal model (IOW it
 * becomes one of the M highest priority tasks), it might still have to wait
 * for the preemptee's migrate_disable() section to complete. Thereby suffering
 * a reduction in bandwidth in the exact duration of the migrate_disable()
 * section.
 *
 * Per this argument, the change from preempt_disable() to migrate_disable()
 * gets us:
 *
 * - a higher priority task gains reduced wake-up latency; with preempt_disable()
 *   it would have had to wait for the lower priority task.
 *
 * - a lower priority task, which under preempt_disable() could've instantly
 *   migrated away when another CPU becomes available, is now constrained
 *   by the ability to push the higher priority task away, which might itself be
 *   in a migrate_disable() section, reducing its available bandwidth.
 *
 * IOW it trades latency / moves the interference term, but it stays in the
 * system, and as long as it remains unbounded, the system is not fully
 * deterministic.
 *
 *
 * The reason we have it anyway.
 *
 * PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a
 * number of primitives into becoming preemptible, they would also allow
 * migration. This turns out to break a bunch of per-cpu usage. To this end,
 * all these primitives employ migrate_disable() to restore this implicit
 * assumption.
 *
 * This is a 'temporary' work-around at best. The correct solution is getting
 * rid of the above assumptions and reworking the code to employ explicit
 * per-cpu locking or short preempt-disable regions.
 *
 * The end goal must be to get rid of migrate_disable(), alternatively we need
 * a schedulability theory that does not depend on arbitrary migration.
 *
 *
 * Notes on the implementation.
 *
 * The implementation is particularly tricky since existing code patterns
 * dictate neither migrate_disable() nor migrate_enable() is allowed to block.
 * This means that it cannot use cpus_read_lock() to serialize against hotplug,
 * nor can it easily migrate itself into a pending affinity mask change on
 * migrate_enable().
 *
 *
 * Note: even non-work-conserving schedulers like semi-partitioned ones depend
 *       on migration, so migrate_disable() is not only a problem for
 *       work-conserving schedulers.
 *
 */
extern void migrate_disable(void);
extern void migrate_enable(void);

#else

static inline void migrate_disable(void) { }
static inline void migrate_enable(void) { }

#endif /* CONFIG_SMP */
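
/*
 * Usage sketch (illustrative, not part of the original header;
 * 'my_percpu_data' and 'struct my_data' are made-up names).
 * migrate_disable() only pins the task to its current CPU -- it provides
 * no exclusion, so concurrent access still needs a lock -- but it keeps a
 * this_cpu_ptr() stable across a preemptible section, which is what
 * PREEMPT_RT relies on when formerly preempt-disabled primitives become
 * sleepable:
 *
 *    struct my_data *p;
 *
 *    migrate_disable();
 *    p = this_cpu_ptr(&my_percpu_data);
 *    // we may be preempted here, but we resume on the same CPU,
 *    // so 'p' still points at this CPU's instance
 *    migrate_enable();
 */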

#endif /* __LINUX_PREEMPT_H */