linux/arch/s390/include/asm/preempt.h
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/current.h>
#include <linux/thread_info.h>
#include <asm/atomic_ops.h>

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

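/*
 * With the z196 interlocked-access instructions the preempt count can
 * live in the per-CPU lowcore and be updated with single atomic
 * operations. PREEMPT_NEED_RESCHED is folded into the count as an
 * inverted bit: the bit is *set* while no reschedule is needed, so
 * "preemption enabled and reschedule pending" is the unique value zero
 * and can be detected by a plain decrement (see
 * __preempt_count_dec_and_test() below).
 */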
#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)

static inline int preempt_count(void)
{
        return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
}

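/*
 * Set the count while preserving the PREEMPT_NEED_RESCHED bit, which
 * can change underneath us (e.g. from interrupt context); hence the
 * cmpxchg retry loop rather than a plain store.
 */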
static inline void preempt_count_set(int pc)
{
        int old, new;

        do {
                old = READ_ONCE(S390_lowcore.preempt_count);
                new = (old & PREEMPT_NEED_RESCHED) |
                        (pc & ~PREEMPT_NEED_RESCHED);
        } while (__atomic_cmpxchg(&S390_lowcore.preempt_count,
                                  old, new) != old);
}

#define init_task_preempt_count(p)      do { } while (0)

#define init_idle_preempt_count(p, cpu) do { \
        S390_lowcore.preempt_count = PREEMPT_ENABLED; \
} while (0)

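/*
 * Note the inversion: "set need resched" clears the lowcore bit and
 * "clear need resched" sets it, so that a pending reschedule drives
 * the preempt count towards zero.
 */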
static inline void set_preempt_need_resched(void)
{
        __atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
}

static inline void clear_preempt_need_resched(void)
{
        __atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
}

static inline bool test_preempt_need_resched(void)
{
        return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
}

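/*
 * Compile-time constants in the signed 8-bit range can take the
 * __atomic_add_const() path, presumably an add-immediate type
 * instruction (asi/agsi), hence the -128..127 check; anything else
 * goes through the generic atomic add.
 */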
static inline void __preempt_count_add(int val)
{
        if (__builtin_constant_p(val) && (val >= -128) && (val <= 127))
                __atomic_add_const(val, &S390_lowcore.preempt_count);
        else
                __atomic_add(val, &S390_lowcore.preempt_count);
}

static inline void __preempt_count_sub(int val)
{
        __preempt_count_add(-val);
}

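/*
 * __atomic_add() returns the old value. An old value of 1 means the
 * count just dropped to zero with the inverted NEED_RESCHED bit clear,
 * i.e. preemption is enabled again and a reschedule is due.
 */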
static inline bool __preempt_count_dec_and_test(void)
{
        return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
}

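/*
 * Comparing the raw value against the offset also tests the inverted
 * NEED_RESCHED bit for free: equality is only possible when the bit is
 * clear, i.e. a reschedule is pending.
 */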
static inline bool should_resched(int preempt_offset)
{
        return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
                        preempt_offset);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

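/*
 * Fallback for pre-z196 machines: the lowcore preempt count is a plain
 * counter and the need-resched state stays in the TIF_NEED_RESCHED
 * thread flag, which __preempt_count_dec_and_test() and
 * should_resched() have to check explicitly.
 */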
#define PREEMPT_ENABLED (0)

static inline int preempt_count(void)
{
        return READ_ONCE(S390_lowcore.preempt_count);
}

static inline void preempt_count_set(int pc)
{
        S390_lowcore.preempt_count = pc;
}

#define init_task_preempt_count(p)      do { } while (0)

#define init_idle_preempt_count(p, cpu) do { \
        S390_lowcore.preempt_count = PREEMPT_ENABLED; \
} while (0)

static inline void set_preempt_need_resched(void)
{
}

static inline void clear_preempt_need_resched(void)
{
}

static inline bool test_preempt_need_resched(void)
{
        return false;
}

static inline void __preempt_count_add(int val)
{
        S390_lowcore.preempt_count += val;
}

static inline void __preempt_count_sub(int val)
{
        S390_lowcore.preempt_count -= val;
}

static inline bool __preempt_count_dec_and_test(void)
{
        return !--S390_lowcore.preempt_count && tif_need_resched();
}

static inline bool should_resched(int preempt_offset)
{
        return unlikely(preempt_count() == preempt_offset &&
                        tif_need_resched());
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

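/*
 * Entry points invoked by the preempt_enable() family once the count
 * drops to zero with a reschedule pending.
 */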
#ifdef CONFIG_PREEMPT
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPT */

#endif /* __ASM_PREEMPT_H */