1#include <linux/export.h>
2#include <linux/lockref.h>
3
#if USE_CMPXCHG_LOCKREF

/*
 * Lockless update fast path: as long as the embedded spinlock is observed
 * to be unlocked, try to update "count" with a 64-bit cmpxchg covering the
 * whole { lock, count } pair.
 *
 * CODE is run with "old" holding the observed value and "new" a copy it may
 * modify; SUCCESS is run if the cmpxchg committed "new".  Note that the
 * cmpxchg() reloads the "old" value for the failure case, so a retry
 * re-validates against the freshly observed state.
 *
 * If the spinlock is ever seen held, the loop exits and the caller falls
 * back to its locked slow path.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { \
	struct lockref old; \
	BUILD_BUG_ON(sizeof(old) != 8); \
	old.lock_count = READ_ONCE(lockref->lock_count); \
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
		struct lockref new = old, prev = old; \
		CODE \
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count, \
						   old.lock_count, \
						   new.lock_count); \
		if (likely(old.lock_count == prev.lock_count)) { \
			SUCCESS; \
		} \
		cpu_relax_lowlatency(); \
	} \
} while (0)

#else

/* No cmpxchg fast path: expands to nothing, callers use the spinlock path. */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
32
33
34
35
36
37
38
39
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	/* Fast path: bump count locklessly if the spinlock is free. */
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	/* Slow path: take the lock and increment under it. */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
53
54
55
56
57
58
/**
 * lockref_get_not_zero - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 *
 * Return: 1 if count updated successfully or 0 if count was zero
 * (or negative, i.e. dead).
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	/* Fast path: lockless increment, refused when count <= 0. */
	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			return 0;
	,
		return 1;
	);

	/* Slow path: re-check and increment under the spinlock. */
	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count > 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
81
82
83
84
85
86
87
/**
 * lockref_get_or_lock - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 *
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 *
 * NOTE: on the 0 return the spinlock is deliberately left HELD for the
 * caller — there is no missing spin_unlock() here.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	/* Fast path: lockless increment; bail to the lock if count <= 0. */
	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 0)
		return 0;	/* lock stays held — by design */
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
106
107
108
109
110
111
112
113
/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Lockless-only: decrement the count via the cmpxchg fast path and
 * return the new count value, or -1 if the count was already <= 0,
 * or if the fast path is unavailable (spinlock held, or no cmpxchg
 * support) — callers must then fall back to a locked put themselves.
 */
int lockref_put_return(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;
	,
		return new.count;
	);
	/* Fast path failed or unavailable: report failure, take no lock. */
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);
126
127
128
129
130
131
/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 *
 * Return: 1 if count updated successfully or 0 if count would become zero
 * and we got the lock instead.
 *
 * NOTE: on the 0 return the spinlock is deliberately left HELD for the
 * caller (typically to finish destroying the object) — there is no
 * missing spin_unlock() here.
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	/* Fast path: lockless decrement; bail to the lock if count <= 1. */
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;	/* lock stays held — by design */
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
150
151
152
153
154
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 *
 * Caller must hold lockref->lock.  Stores a large negative sentinel in
 * count so the *_not_dead / *_not_zero fast paths refuse further gets.
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;	/* dead sentinel, clearly below any valid count */
}
EXPORT_SYMBOL(lockref_mark_dead);
161
162
163
164
165
166
/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 *
 * Unlike lockref_get_not_zero(), a count of 0 is acceptable here;
 * only a negative (dead) count refuses the increment.
 *
 * Return: 1 if count updated successfully or 0 if lockref was dead.
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	/* Fast path: lockless increment, refused only when count < 0. */
	CMPXCHG_LOOP(
		new.count++;
		if (old.count < 0)
			return 0;
	,
		return 1;
	);

	/* Slow path: re-check and increment under the spinlock. */
	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
189