1
2
3#ifndef __ASM_CSKY_SPINLOCK_H
4#define __ASM_CSKY_SPINLOCK_H
5
6#include <linux/spinlock_types.h>
7#include <asm/barrier.h>
8
9
10
11
/*
 * Ticket spinlock acquire: atomically grab the next ticket number, then
 * spin until the owner field advances to match our ticket.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval;
	/* +1 in the "next" half of the combined lock word (assumes
	 * TICKET_NEXT is the bit offset of the next-ticket field —
	 * presumably 16; confirm against spinlock_types.h). */
	u32 ticket_next = 1 << TICKET_NEXT;
	u32 *p = &lock->lock;
	u32 tmp;

	/*
	 * LL/SC loop: ldex.w loads the combined owner/next word, the old
	 * value (our ticket) is kept in lockval, the next-ticket field is
	 * bumped, and stex.w attempts the conditional store.  stex.w
	 * writes its success flag back into %0, so bez retries from 1:
	 * when the reservation was lost.
	 */
	asm volatile (
		"1: ldex.w %0, (%2) \n"
		" mov %1, %0 \n"
		" add %0, %3 \n"
		" stex.w %0, (%2) \n"
		" bez %0, 1b \n"
		: "=&r" (tmp), "=&r" (lockval)
		: "r"(p), "r"(ticket_next)
		: "cc");

	/* Spin, re-reading only the owner field, until it reaches our ticket. */
	while (lockval.tickets.next != lockval.tickets.owner)
		lockval.tickets.owner = READ_ONCE(lock->tickets.owner);

	/* Acquire ordering: keep the critical section from moving above the lock. */
	smp_mb();
}
34
/*
 * Single-shot lock attempt: succeeds only when owner == next (no other
 * ticket holders or waiters).  Returns 1 on success, 0 if the lock is held.
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	u32 tmp, contended, res;
	u32 ticket_next = 1 << TICKET_NEXT;
	u32 *p = &lock->lock;

	do {
		/*
		 * rotli by 16 swaps the two 16-bit halves of the loaded
		 * word; cmpne against the original sets C iff the halves
		 * (owner vs next) differ, i.e. the lock is contended, in
		 * which case we bail to 1: leaving contended = 1.
		 * Otherwise we take the ticket with stex.w, whose success
		 * flag lands in %0 (res); a failed store-conditional
		 * retries the whole sequence.  On the contended path res
		 * holds the loaded word, which is non-zero (halves differ),
		 * so the loop still terminates.
		 */
		asm volatile (
			" ldex.w %0, (%3) \n"
			" movi %2, 1 \n"
			" rotli %1, %0, 16 \n"
			" cmpne %1, %0 \n"
			" bt 1f \n"
			" movi %2, 0 \n"
			" add %0, %0, %4 \n"
			" stex.w %0, (%3) \n"
			"1: \n"
			: "=&r" (res), "=&r" (tmp), "=&r" (contended)
			: "r"(p), "r"(ticket_next)
			: "cc");
	} while (!res);

	/* Acquire barrier only if we actually took the lock. */
	if (!contended)
		smp_mb();

	return !contended;
}
62
/*
 * Release the lock: full barrier first so all critical-section accesses
 * complete before the next ticket holder can proceed, then advance the
 * owner field.  WRITE_ONCE suffices here — presumably only the current
 * lock holder stores to owner, and the LL/SC loops in lock/trylock
 * retry if this store races with their reservation; confirm against
 * the C-SKY ldex/stex semantics.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	WRITE_ONCE(lock->tickets.owner, lock->tickets.owner + 1);
}
68
69static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
70{
71 return lock.tickets.owner == lock.tickets.next;
72}
73
74static inline int arch_spin_is_locked(arch_spinlock_t *lock)
75{
76 return !arch_spin_value_unlocked(READ_ONCE(*lock));
77}
78
79static inline int arch_spin_is_contended(arch_spinlock_t *lock)
80{
81 struct __raw_tickets tickets = READ_ONCE(lock->tickets);
82
83 return (tickets.next - tickets.owner) > 1;
84}
85#define arch_spin_is_contended arch_spin_is_contended
86
87#include <asm/qrwlock.h>
88
89#endif
90