1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27#include <linux/compiler.h>
28#include <linux/kernel.h>
29#include <linux/export.h>
30#include <linux/sched.h>
31#include <linux/sched/debug.h>
32#include <linux/semaphore.h>
33#include <linux/spinlock.h>
34#include <linux/ftrace.h>
35
36static noinline void __down(struct semaphore *sem);
37static noinline int __down_interruptible(struct semaphore *sem);
38static noinline int __down_killable(struct semaphore *sem);
39static noinline int __down_timeout(struct semaphore *sem, long timeout);
40static noinline void __up(struct semaphore *sem);
41
42
43
44
45
46
47
48
49
50
51
52
53void down(struct semaphore *sem)
54{
55 unsigned long flags;
56
57 might_sleep();
58 raw_spin_lock_irqsave(&sem->lock, flags);
59 if (likely(sem->count > 0))
60 sem->count--;
61 else
62 __down(sem);
63 raw_spin_unlock_irqrestore(&sem->lock, flags);
64}
65EXPORT_SYMBOL(down);
66
67
68
69
70
71
72
73
74
75
76int down_interruptible(struct semaphore *sem)
77{
78 unsigned long flags;
79 int result = 0;
80
81 might_sleep();
82 raw_spin_lock_irqsave(&sem->lock, flags);
83 if (likely(sem->count > 0))
84 sem->count--;
85 else
86 result = __down_interruptible(sem);
87 raw_spin_unlock_irqrestore(&sem->lock, flags);
88
89 return result;
90}
91EXPORT_SYMBOL(down_interruptible);
92
93
94
95
96
97
98
99
100
101
102
103int down_killable(struct semaphore *sem)
104{
105 unsigned long flags;
106 int result = 0;
107
108 might_sleep();
109 raw_spin_lock_irqsave(&sem->lock, flags);
110 if (likely(sem->count > 0))
111 sem->count--;
112 else
113 result = __down_killable(sem);
114 raw_spin_unlock_irqrestore(&sem->lock, flags);
115
116 return result;
117}
118EXPORT_SYMBOL(down_killable);
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133int down_trylock(struct semaphore *sem)
134{
135 unsigned long flags;
136 int count;
137
138 raw_spin_lock_irqsave(&sem->lock, flags);
139 count = sem->count - 1;
140 if (likely(count >= 0))
141 sem->count = count;
142 raw_spin_unlock_irqrestore(&sem->lock, flags);
143
144 return (count < 0);
145}
146EXPORT_SYMBOL(down_trylock);
147
148
149
150
151
152
153
154
155
156
157
158int down_timeout(struct semaphore *sem, long timeout)
159{
160 unsigned long flags;
161 int result = 0;
162
163 might_sleep();
164 raw_spin_lock_irqsave(&sem->lock, flags);
165 if (likely(sem->count > 0))
166 sem->count--;
167 else
168 result = __down_timeout(sem, timeout);
169 raw_spin_unlock_irqrestore(&sem->lock, flags);
170
171 return result;
172}
173EXPORT_SYMBOL(down_timeout);
174
175
176
177
178
179
180
181
182void up(struct semaphore *sem)
183{
184 unsigned long flags;
185
186 raw_spin_lock_irqsave(&sem->lock, flags);
187 if (likely(list_empty(&sem->wait_list)))
188 sem->count++;
189 else
190 __up(sem);
191 raw_spin_unlock_irqrestore(&sem->lock, flags);
192}
193EXPORT_SYMBOL(up);
194
195
196
/*
 * Per-sleeper record for the contended case.  Lives on the blocked
 * task's kernel stack inside __down_common() and is linked into
 * sem->wait_list; all fields are read and written under sem->lock.
 */
struct semaphore_waiter {
	struct list_head list;		/* link in sem->wait_list */
	struct task_struct *task;	/* the sleeping task, woken by __up() */
	bool up;			/* set by __up() when the semaphore is handed over */
};
202
203
204
205
206
207
/*
 * __down_common - contended-path core shared by all the down_*() variants.
 * @sem:     semaphore to wait on
 * @state:   task state to sleep in (TASK_UNINTERRUPTIBLE, TASK_INTERRUPTIBLE
 *           or TASK_KILLABLE); since this function is inlined into the
 *           noinline wrappers below, @state is a compile-time constant there
 * @timeout: remaining sleep budget in jiffies (MAX_SCHEDULE_TIMEOUT for the
 *           untimed variants)
 *
 * Called with sem->lock held and interrupts disabled.  The lock is dropped
 * around schedule_timeout() and reacquired afterwards, and is held again on
 * return.  Returns 0 if the semaphore was handed to us by __up(), -EINTR if
 * the sleep was ended by a (state-appropriate) signal, -ETIME on timeout.
 */
static inline int __sched __down_common(struct semaphore *sem, long state,
								long timeout)
{
	struct semaphore_waiter waiter;

	/*
	 * Publishing the waiter before initializing its fields is fine:
	 * nobody can walk wait_list until sem->lock is dropped below.
	 */
	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = current;
	waiter.up = false;

	for (;;) {
		/* Signal check first, so a pending signal beats a zero timeout. */
		if (signal_pending_state(state, current))
			goto interrupted;
		if (unlikely(timeout <= 0))
			goto timed_out;
		__set_current_state(state);
		raw_spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->lock);
		/* __up() already removed us from the list and woke us. */
		if (waiter.up)
			return 0;
	}

 timed_out:
	list_del(&waiter.list);
	return -ETIME;

 interrupted:
	list_del(&waiter.list);
	return -EINTR;
}
238
/* Contended path for down(): sleep uninterruptibly with no time limit. */
static noinline void __sched __down(struct semaphore *sem)
{
	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
243
/* Contended path for down_interruptible(): any signal ends the wait. */
static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
248
/* Contended path for down_killable(): only a fatal signal ends the wait. */
static noinline int __sched __down_killable(struct semaphore *sem)
{
	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}
253
/* Contended path for down_timeout(): uninterruptible, bounded by @timeout jiffies. */
static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
{
	return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
}
258
259static noinline void __sched __up(struct semaphore *sem)
260{
261 struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
262 struct semaphore_waiter, list);
263 list_del(&waiter->list);
264 waiter->up = true;
265 wake_up_process(waiter->task);
266}
267