linux/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LOCKS_H
#define LOCKS_H

#include <errno.h>	/* for EBUSY, used by lock_impl_trylock() below */
#include <limits.h>
#include <pthread.h>
#include <stdbool.h>

#include "assume.h"
#include "bug_on.h"
#include "preempt.h"

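/*
 * CBMC treats a function that is declared but never defined, such as this
 * one, as returning an arbitrary (nondeterministic) value.
 */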
int nondet_int(void);

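/* Sparse locking annotations, stubbed out for this model. */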
#define __acquire(x)
#define __acquires(x)
#define __release(x)
#define __releases(x)

/* Only use one lock mechanism. Select which one. */
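/*
 * PTHREAD_LOCK is expected to come from the build, e.g. -DPTHREAD_LOCK on
 * the compiler command line; when it is unset, the CBMC-friendly model
 * below is used instead.
 */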
#ifdef PTHREAD_LOCK
struct lock_impl {
        pthread_mutex_t mutex;
};

static inline void lock_impl_lock(struct lock_impl *lock)
{
        BUG_ON(pthread_mutex_lock(&lock->mutex));
}

static inline void lock_impl_unlock(struct lock_impl *lock)
{
        BUG_ON(pthread_mutex_unlock(&lock->mutex));
}

static inline bool lock_impl_trylock(struct lock_impl *lock)
{
        int err = pthread_mutex_trylock(&lock->mutex);

        if (!err)
                return true;
        else if (err == EBUSY)
                return false;
        /* Any error other than EBUSY means the mutex was misused. */
        BUG();
}

static inline void lock_impl_init(struct lock_impl *lock)
{
        pthread_mutex_init(&lock->mutex, NULL);
}

#define LOCK_IMPL_INITIALIZER {.mutex = PTHREAD_MUTEX_INITIALIZER}

#else /* !defined(PTHREAD_LOCK) */
/* Spinlock that assumes that it always gets the lock immediately. */

struct lock_impl {
        bool locked;
};

static inline bool lock_impl_trylock(struct lock_impl *lock)
{
#ifdef RUN
        /* TODO: Should this be a test and set? */
        return __sync_bool_compare_and_swap(&lock->locked, false, true);
#else
        /* Atomic test-and-set: take the lock and report whether it was free. */
        __CPROVER_atomic_begin();
        bool old_locked = lock->locked;
        lock->locked = true;
        __CPROVER_atomic_end();

        /* Minimal barrier to prevent accesses leaking out of lock. */
        __CPROVER_fence("RRfence", "RWfence");

        return !old_locked;
#endif
}

static inline void lock_impl_lock(struct lock_impl *lock)
{
        /*
         * CBMC doesn't support busy waiting, so just assume that the
         * lock is available.
         */
        assume(lock_impl_trylock(lock));

        /*
         * If the lock was already held by this thread then the assumption
         * is unsatisfiable (deadlock).
         */
}

static inline void lock_impl_unlock(struct lock_impl *lock)
{
#ifdef RUN
        BUG_ON(!__sync_bool_compare_and_swap(&lock->locked, true, false));
#else
        /* Minimal barrier to prevent accesses leaking out of lock. */
        __CPROVER_fence("RWfence", "WWfence");

        __CPROVER_atomic_begin();
        bool old_locked = lock->locked;
        lock->locked = false;
        __CPROVER_atomic_end();

        BUG_ON(!old_locked);
#endif
}

static inline void lock_impl_init(struct lock_impl *lock)
{
        lock->locked = false;
}

#define LOCK_IMPL_INITIALIZER {.locked = false}

#endif /* !defined(PTHREAD_LOCK) */

/*
 * Implement spinlocks using the lock mechanism. Wrap the lock to prevent
 * mixing locks of different types.
 */
typedef struct {
        struct lock_impl internal_lock;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED {.internal_lock = LOCK_IMPL_INITIALIZER}
#define __SPIN_LOCK_UNLOCKED(x) SPIN_LOCK_UNLOCKED
#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED

static inline void spin_lock_init(spinlock_t *lock)
{
        lock_impl_init(&lock->internal_lock);
}

static inline void spin_lock(spinlock_t *lock)
{
        /*
         * Spin locks also need to be removed in order to eliminate all
         * memory barriers. They are only used by the write side anyway.
         */
#ifndef NO_SYNC_SMP_MB
        preempt_disable();
        lock_impl_lock(&lock->internal_lock);
#endif
}

static inline void spin_unlock(spinlock_t *lock)
{
#ifndef NO_SYNC_SMP_MB
        lock_impl_unlock(&lock->internal_lock);
        preempt_enable();
#endif
}

/* Don't bother with interrupts. */
#define spin_lock_irq(lock) spin_lock(lock)
#define spin_unlock_irq(lock) spin_unlock(lock)
#define spin_lock_irqsave(lock, flags) spin_lock(lock)
#define spin_unlock_irqrestore(lock, flags) spin_unlock(lock)

/*
 * The kernel's version returns an int, but a bool should work just as well
 * here.
 */
static inline bool spin_trylock(spinlock_t *lock)
{
#ifndef NO_SYNC_SMP_MB
        preempt_disable();
        return lock_impl_trylock(&lock->internal_lock);
#else
        return true;
#endif
}
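
/*
 * Example (hypothetical harness code, not part of the original header): the
 * wrapper is used like the kernel API it models, e.g.:
 *
 *	static DEFINE_SPINLOCK(mylock);
 *	static int shared_count;
 *
 *	static void *writer_thread(void *arg)
 *	{
 *		spin_lock(&mylock);
 *		shared_count++;
 *		spin_unlock(&mylock);
 *		return NULL;
 *	}
 */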

struct completion {
        /* Hopefully this won't overflow. */
        unsigned int count;
};

#define COMPLETION_INITIALIZER(x) {.count = 0}
#define DECLARE_COMPLETION(x) struct completion x = COMPLETION_INITIALIZER(x)
#define DECLARE_COMPLETION_ONSTACK(x) DECLARE_COMPLETION(x)

static inline void init_completion(struct completion *c)
{
        c->count = 0;
}

static inline void wait_for_completion(struct completion *c)
{
        unsigned int prev_count = __sync_fetch_and_sub(&c->count, 1);

        /*
         * As in lock_impl_lock(), blocking is modeled by assuming that a
         * matching complete() has already happened.
         */
        assume(prev_count);
}

static inline void complete(struct completion *c)
{
        unsigned int prev_count = __sync_fetch_and_add(&c->count, 1);

        BUG_ON(prev_count == UINT_MAX);
}
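
/*
 * Example (hypothetical harness code, not part of the original header): one
 * thread blocks until another signals it, mirroring the kernel's completion
 * API:
 *
 *	static DECLARE_COMPLETION(done);
 *
 *	static void *waiter(void *arg)
 *	{
 *		wait_for_completion(&done);
 *		return NULL;
 *	}
 *
 *	static void *signaler(void *arg)
 *	{
 *		complete(&done);
 *		return NULL;
 *	}
 */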

/* This function probably isn't very useful for CBMC. */
static inline bool try_wait_for_completion(struct completion *c)
{
        BUG();
}

static inline bool completion_done(struct completion *c)
{
        return c->count;
}

/* TODO: Implement complete_all(). */
static inline void complete_all(struct completion *c)
{
        BUG();
}

#endif /* LOCKS_H */