linux/arch/arm64/include/asm/preempt.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/jump_label.h>
#include <linux/thread_info.h>

#define PREEMPT_NEED_RESCHED    BIT(32)
#define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED)
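
/*
 * arm64 folds the preempt count and the need_resched flag into the
 * single 64-bit thread_info::preempt_count: the low 32 bits hold the
 * count and bit 32 holds an inverted need_resched flag (see the union
 * in <asm/thread_info.h>). PREEMPT_ENABLED is therefore BIT(32):
 * a count of zero with no reschedule pending.
 */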

static inline int preempt_count(void)
{
        return READ_ONCE(current_thread_info()->preempt.count);
}

static inline void preempt_count_set(u64 pc)
{
        /* Preserve existing value of PREEMPT_NEED_RESCHED */
        WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

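/*
 * Both initialisers assign the full 64-bit preempt_count, so the count
 * and the need_resched flag are (re)set together.
 */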
#define init_task_preempt_count(p) do { \
        task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
        task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)

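/*
 * The need_resched flag is stored inverted (0 means a reschedule is
 * pending) so that a task is preemptible and in need of a reschedule
 * exactly when the whole 64-bit preempt_count reads as zero, letting
 * the preemption fast path test both with a single comparison.
 */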
static inline void set_preempt_need_resched(void)
{
        current_thread_info()->preempt.need_resched = 0;
}

static inline void clear_preempt_need_resched(void)
{
        current_thread_info()->preempt.need_resched = 1;
}

static inline bool test_preempt_need_resched(void)
{
        return !current_thread_info()->preempt.need_resched;
}

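/*
 * Add/sub only touch the 32-bit count subfield, leaving the
 * need_resched flag in the upper word intact. The non-atomic RMW is
 * safe against interrupts because any interrupt restores the count it
 * found on entry before returning.
 */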
static inline void __preempt_count_add(int val)
{
        u32 pc = READ_ONCE(current_thread_info()->preempt.count);
        pc += val;
        WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

static inline void __preempt_count_sub(int val)
{
        u32 pc = READ_ONCE(current_thread_info()->preempt.count);
        pc -= val;
        WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

static inline bool __preempt_count_dec_and_test(void)
{
        struct thread_info *ti = current_thread_info();
        u64 pc = READ_ONCE(ti->preempt_count);

        /* Update only the count field, leaving need_resched unchanged */
        WRITE_ONCE(ti->preempt.count, --pc);

        /*
         * If we wrote back all zeroes, then we're preemptible and in
         * need of a reschedule. Otherwise, we need to reload the
         * preempt_count in case the need_resched flag was cleared by an
         * interrupt occurring between the non-atomic READ_ONCE/WRITE_ONCE
         * pair.
         */
        return !pc || !READ_ONCE(ti->preempt_count);
}

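/*
 * With the inverted need_resched flag, the 64-bit preempt_count equals
 * preempt_offset only when the count matches and a reschedule is
 * pending, so both conditions are tested with one comparison.
 */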
static inline bool should_resched(int preempt_offset)
{
        u64 pc = READ_ONCE(current_thread_info()->preempt_count);
        return pc == preempt_offset;
}

#ifdef CONFIG_PREEMPTION

void preempt_schedule(void);
void preempt_schedule_notrace(void);

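/*
 * With CONFIG_PREEMPT_DYNAMIC the preemption model is selected at boot
 * time and the preemption entry points are routed through static-key
 * controlled wrappers.
 */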
#ifdef CONFIG_PREEMPT_DYNAMIC

DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_preempt_schedule(void);
#define __preempt_schedule()            dynamic_preempt_schedule()
void dynamic_preempt_schedule_notrace(void);
#define __preempt_schedule_notrace()    dynamic_preempt_schedule_notrace()

#else /* CONFIG_PREEMPT_DYNAMIC */

#define __preempt_schedule()            preempt_schedule()
#define __preempt_schedule_notrace()    preempt_schedule_notrace()

#endif /* CONFIG_PREEMPT_DYNAMIC */
#endif /* CONFIG_PREEMPTION */

#endif /* __ASM_PREEMPT_H */