linux/arch/arm64/include/asm/preempt.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

#define PREEMPT_NEED_RESCHED    BIT(32)
#define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED)
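
/*
 * The count and the PREEMPT_NEED_RESCHED flag share a single 64-bit word
 * in struct thread_info so that both can be examined with one load. For
 * reference, the layout in asm/thread_info.h is roughly the following
 * (little-endian shown; the two u32 halves swap on big-endian):
 *
 *      union {
 *              u64     preempt_count;          // 0 => preemptible, resched due
 *              struct {
 *                      u32     count;          // nesting depth
 *                      u32     need_resched;   // stored inverted
 *              } preempt;
 *      };
 *
 * The flag is stored inverted: bit 32 set (PREEMPT_ENABLED) means *no*
 * reschedule is pending, so the whole word reads zero exactly when the
 * task is preemptible and a reschedule is due.
 */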
static inline int preempt_count(void)
{
        return READ_ONCE(current_thread_info()->preempt.count);
}

static inline void preempt_count_set(u64 pc)
{
        /* Preserve existing value of PREEMPT_NEED_RESCHED */
        WRITE_ONCE(current_thread_info()->preempt.count, pc);
}
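
/*
 * The init macros below assign the full 64-bit preempt_count member,
 * initialising the count and the inverted need_resched flag in a single
 * store.
 */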
#define init_task_preempt_count(p) do { \
        task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
        task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
} while (0)
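
/*
 * Because the flag is stored inverted, the helpers below are intentionally
 * "backwards": set writes 0, clear writes 1, and test negates the load.
 */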
static inline void set_preempt_need_resched(void)
{
        current_thread_info()->preempt.need_resched = 0;
}

static inline void clear_preempt_need_resched(void)
{
        current_thread_info()->preempt.need_resched = 1;
}

static inline bool test_preempt_need_resched(void)
{
        return !current_thread_info()->preempt.need_resched;
}
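
/*
 * Plain, non-atomic accesses to the 32-bit count half are sufficient
 * here: only the owning task updates its count, and any interrupt that
 * nests on top restores it before returning. Operating on the u32 half
 * alone also means a concurrent interrupt flipping need_resched cannot
 * be clobbered by the write-back.
 */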
static inline void __preempt_count_add(int val)
{
        u32 pc = READ_ONCE(current_thread_info()->preempt.count);
        pc += val;
        WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

static inline void __preempt_count_sub(int val)
{
        u32 pc = READ_ONCE(current_thread_info()->preempt.count);
        pc -= val;
        WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

static inline bool __preempt_count_dec_and_test(void)
{
        struct thread_info *ti = current_thread_info();
        u64 pc = READ_ONCE(ti->preempt_count);

        /* Update only the count field, leaving need_resched unchanged */
        WRITE_ONCE(ti->preempt.count, --pc);

        /*
         * If we wrote back all zeroes, then we're preemptible and in
         * need of a reschedule. Otherwise, we need to reload the
         * preempt_count in case the need_resched flag was cleared by an
         * interrupt occurring between the non-atomic READ_ONCE/WRITE_ONCE
         * pair.
         */
        return !pc || !READ_ONCE(ti->preempt_count);
}
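
/*
 * For context, the generic preempt_enable() in <linux/preempt.h> ends up
 * using this helper roughly as follows (a sketch, not the exact macro):
 *
 *      barrier();
 *      if (unlikely(preempt_count_dec_and_test()))
 *              __preempt_schedule();
 */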

static inline bool should_resched(int preempt_offset)
{
        u64 pc = READ_ONCE(current_thread_info()->preempt_count);
        return pc == preempt_offset;
}
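
/*
 * Note the single comparison above: pc == preempt_offset can only hold
 * when bit 32 (the inverted need_resched flag) is clear, i.e. a
 * reschedule is pending, and the low-word count equals preempt_offset,
 * so one 64-bit compare tests both conditions.
 */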
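
/*
 * With kernel preemption configured, these are the entry points that the
 * generic preempt_enable() machinery reaches via __preempt_schedule()
 * once the count drops to zero with a reschedule pending.
 */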
#ifdef CONFIG_PREEMPTION
void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPTION */

#endif /* __ASM_PREEMPT_H */