linux/include/asm-generic/preempt.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

/* preempt_count value while preemption is fully enabled (no nesting) */
#define PREEMPT_ENABLED (0)
/* The generic preempt_count lives in the current task's thread_info */
static __always_inline int preempt_count(void)
{
        return READ_ONCE(current_thread_info()->preempt_count);
}

static __always_inline volatile int *preempt_count_ptr(void)
{
        return &current_thread_info()->preempt_count;
}

static __always_inline void preempt_count_set(int pc)
{
        *preempt_count_ptr() = pc;
}
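
/*
 * Illustration only (a sketch, not part of this header): with
 * CONFIG_DEBUG_PREEMPT off, the preempt_disable() wrapper in
 * <linux/preempt.h> boils down to a plain nesting-count increment on the
 * value accessed above, roughly:
 *
 *	#define preempt_disable() \
 *	do { \
 *		__preempt_count_add(1); \
 *		barrier(); \
 *	} while (0)
 *
 * Exact macro spellings vary across kernel versions.
 */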

/*
 * must be macros to avoid header recursion hell
 */
#define init_task_preempt_count(p) do { \
        task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
        task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)

/*
 * The generic implementation keeps the need-resched state solely in
 * TIF_NEED_RESCHED and does not fold it into the preempt_count, so the
 * folding hooks below are no-ops here.
 */
static __always_inline void set_preempt_need_resched(void)
{
}

static __always_inline void clear_preempt_need_resched(void)
{
}

static __always_inline bool test_preempt_need_resched(void)
{
        return false;
}
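
/*
 * Illustration only (a sketch of the folded scheme some architectures,
 * e.g. x86, implement in their own asm/preempt.h; accessors simplified
 * here): the need-resched state is kept as an inverted high bit of the
 * preempt_count, so one decrement-and-test-for-zero covers both "count
 * reached zero" and "reschedule needed":
 *
 *	#define PREEMPT_NEED_RESCHED	0x80000000
 *
 *	static __always_inline void set_preempt_need_resched(void)
 *	{
 *		*preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
 *	}
 */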

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
        *preempt_count_ptr() += val;
}

static __always_inline void __preempt_count_sub(int val)
{
        *preempt_count_ptr() -= val;
}

static __always_inline bool __preempt_count_dec_and_test(void)
{
        /*
         * Because load-store architectures cannot do per-cpu atomic
         * operations, a folded PREEMPT_NEED_RESCHED bit could get lost
         * across the non-atomic update; test tif_need_resched()
         * explicitly instead.
         */
        return !--*preempt_count_ptr() && tif_need_resched();
}
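
/*
 * Illustration only (a sketch, not part of this header): on a
 * CONFIG_PREEMPTION kernel with CONFIG_DEBUG_PREEMPT off, the
 * preempt_enable() wrapper in <linux/preempt.h> uses this helper roughly
 * as below, so dropping the last preempt_count reference immediately
 * checks for a pending reschedule. Exact macros vary across versions:
 *
 *	#define preempt_enable() \
 *	do { \
 *		barrier(); \
 *		if (unlikely(__preempt_count_dec_and_test())) \
 *			__preempt_schedule(); \
 *	} while (0)
 */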

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(int preempt_offset)
{
        return unlikely(preempt_count() == preempt_offset &&
                        tif_need_resched());
}
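
/*
 * Illustration only (a sketch, not part of this header): callers pass the
 * preempt_count value they expect to currently hold. For example, the
 * cond_resched_lock() path in the scheduler uses roughly the pattern below
 * to decide whether to drop a held spinlock and reschedule
 * (PREEMPT_LOCK_OFFSET comes from <linux/preempt.h>; the real code path
 * differs in detail):
 *
 *	if (should_resched(PREEMPT_LOCK_OFFSET)) {
 *		spin_unlock(lock);
 *		schedule();
 *		spin_lock(lock);
 *	}
 */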

#ifdef CONFIG_PREEMPTION
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPTION */

#endif /* __ASM_PREEMPT_H */