linux/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h
#ifndef LOCKS_H
#define LOCKS_H

#include <errno.h>
#include <limits.h>
#include <pthread.h>
#include <stdbool.h>

#include "assume.h"
#include "bug_on.h"
#include "preempt.h"

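/* CBMC leaves nondet_int() undefined, so each call returns an arbitrary int. */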
int nondet_int(void);

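/* The kernel's sparse locking annotations are no-ops in this model. */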
#define __acquire(x)
#define __acquires(x)
#define __release(x)
#define __releases(x)

/* Only use one lock mechanism. Select which one. */
#ifdef PTHREAD_LOCK
struct lock_impl {
        pthread_mutex_t mutex;
};

static inline void lock_impl_lock(struct lock_impl *lock)
{
        BUG_ON(pthread_mutex_lock(&lock->mutex));
}

static inline void lock_impl_unlock(struct lock_impl *lock)
{
        BUG_ON(pthread_mutex_unlock(&lock->mutex));
}

static inline bool lock_impl_trylock(struct lock_impl *lock)
{
        int err = pthread_mutex_trylock(&lock->mutex);

        if (!err)
                return true;
        else if (err == EBUSY)
                return false;
        BUG();
}

static inline void lock_impl_init(struct lock_impl *lock)
{
        pthread_mutex_init(&lock->mutex, NULL);
}

#define LOCK_IMPL_INITIALIZER {.mutex = PTHREAD_MUTEX_INITIALIZER}

#else /* !defined(PTHREAD_LOCK) */
/* Spinlock that assumes that it always gets the lock immediately. */

struct lock_impl {
        bool locked;
};

static inline bool lock_impl_trylock(struct lock_impl *lock)
{
#ifdef RUN
        /* TODO: Should this be a test and set? */
        return __sync_bool_compare_and_swap(&lock->locked, false, true);
#else
        __CPROVER_atomic_begin();
        bool old_locked = lock->locked;
        lock->locked = true;
        __CPROVER_atomic_end();

        /* Minimal barrier to prevent accesses leaking out of lock. */
        __CPROVER_fence("RRfence", "RWfence");

        return !old_locked;
#endif
}

static inline void lock_impl_lock(struct lock_impl *lock)
{
        /*
         * CBMC doesn't support busy waiting, so just assume that the
         * lock is available.
         */
        assume(lock_impl_trylock(lock));

        /*
         * If the lock was already held by this thread then the assumption
         * is unsatisfiable (deadlock).
         */
}

static inline void lock_impl_unlock(struct lock_impl *lock)
{
#ifdef RUN
        BUG_ON(!__sync_bool_compare_and_swap(&lock->locked, true, false));
#else
        /* Minimal barrier to prevent accesses leaking out of lock. */
        __CPROVER_fence("RWfence", "WWfence");

        __CPROVER_atomic_begin();
        bool old_locked = lock->locked;
        lock->locked = false;
        __CPROVER_atomic_end();

        BUG_ON(!old_locked);
#endif
}

static inline void lock_impl_init(struct lock_impl *lock)
{
        lock->locked = false;
}

#define LOCK_IMPL_INITIALIZER {.locked = false}

#endif /* !defined(PTHREAD_LOCK) */

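/*
 * A minimal usage sketch; my_lock is a hypothetical example, not part of
 * this header. Both implementations above expose the same interface:
 *
 *      static struct lock_impl my_lock = LOCK_IMPL_INITIALIZER;
 *
 *      lock_impl_lock(&my_lock);
 *      ... critical section ...
 *      lock_impl_unlock(&my_lock);
 */
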
/*
 * Implement spinlocks using the lock mechanism. Wrap the lock to prevent mixing
 * locks of different types.
 */
typedef struct {
        struct lock_impl internal_lock;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED {.internal_lock = LOCK_IMPL_INITIALIZER}
#define __SPIN_LOCK_UNLOCKED(x) SPIN_LOCK_UNLOCKED
#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED

static inline void spin_lock_init(spinlock_t *lock)
{
        lock_impl_init(&lock->internal_lock);
}

static inline void spin_lock(spinlock_t *lock)
{
        /*
         * Spin locks also need to be removed in order to eliminate all
         * memory barriers. They are only used by the write side anyway.
         */
#ifndef NO_SYNC_SMP_MB
        preempt_disable();
        lock_impl_lock(&lock->internal_lock);
#endif
}

static inline void spin_unlock(spinlock_t *lock)
{
#ifndef NO_SYNC_SMP_MB
        lock_impl_unlock(&lock->internal_lock);
        preempt_enable();
#endif
}

/* Don't bother with interrupts */
#define spin_lock_irq(lock) spin_lock(lock)
#define spin_unlock_irq(lock) spin_unlock(lock)
#define spin_lock_irqsave(lock, flags) spin_lock(lock)
#define spin_unlock_irqrestore(lock, flags) spin_unlock(lock)

/*
 * The kernel version returns an int, but a bool works just as well here.
 */
static inline bool spin_trylock(spinlock_t *lock)
{
#ifndef NO_SYNC_SMP_MB
        preempt_disable();
        return lock_impl_trylock(&lock->internal_lock);
#else
        return true;
#endif
}

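/*
 * Usage sketch; my_spinlock is a hypothetical example, not part of this
 * header:
 *
 *      DEFINE_SPINLOCK(my_spinlock);
 *
 *      spin_lock(&my_spinlock);
 *      ... critical section ...
 *      spin_unlock(&my_spinlock);
 */
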
struct completion {
        /* Hopefully this won't overflow. */
        unsigned int count;
};

#define COMPLETION_INITIALIZER(x) {.count = 0}
#define DECLARE_COMPLETION(x) struct completion x = COMPLETION_INITIALIZER(x)
#define DECLARE_COMPLETION_ONSTACK(x) DECLARE_COMPLETION(x)

static inline void init_completion(struct completion *c)
{
        c->count = 0;
}

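/*
 * wait_for_completion() cannot block under CBMC; instead it decrements
 * the count and assumes that the old value was nonzero, so only
 * executions in which a matching complete() already ran are explored.
 */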
static inline void wait_for_completion(struct completion *c)
{
        unsigned int prev_count = __sync_fetch_and_sub(&c->count, 1);

        assume(prev_count);
}

static inline void complete(struct completion *c)
{
        unsigned int prev_count = __sync_fetch_and_add(&c->count, 1);

        BUG_ON(prev_count == UINT_MAX);
}

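/*
 * Usage sketch; my_done is a hypothetical example, not part of this
 * header. One thread signals and another waits:
 *
 *      DECLARE_COMPLETION(my_done);
 *
 *      waiter:         wait_for_completion(&my_done);
 *      signaler:       complete(&my_done);
 */
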
/* This function probably isn't very useful for CBMC. */
static inline bool try_wait_for_completion(struct completion *c)
{
        BUG();
}

static inline bool completion_done(struct completion *c)
{
        return c->count;
}

/* TODO: Implement complete_all */
static inline void complete_all(struct completion *c)
{
        BUG();
}

#endif /* LOCKS_H */