// SPDX-License-Identifier: GPL-2.0
/* kernel/locking/rwsem.c: R/W semaphores, public implementation
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr.bueso@hp.com>. Based on mutexes.
 *
 * Rwsem count bit fields re-definition and rwsem rearchitecture by
 * Waiman Long <longman@redhat.com>.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/export.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>

#include "rwsem.h"
#include "lock_events.h"

/*
 * The least significant 3 bits of the owner value have the following
 * meanings when set:
 *  - Bit 0: RWSEM_READER_OWNED - the rwsem is owned by readers
 *  - Bit 1: RWSEM_RD_NONSPINNABLE - readers cannot spin on this lock
 *  - Bit 2: RWSEM_WR_NONSPINNABLE - writers cannot spin on this lock
 *
 * When the rwsem is writer-owned, the owner field holds the owning task's
 * task_struct pointer with no flag bits set.  When it is reader-owned, it
 * holds the task_struct pointer of one of the owning readers (normally the
 * most recent one to acquire it) together with RWSEM_READER_OWNED.  Since
 * individual readers are not tracked, that pointer is only a heuristic and
 * may be stale by the time it is examined.
 *
 * The nonspinnable bits are set when an optimistic spinner decides that
 * further spinning is pointless (e.g. the lock has been reader-owned for
 * too long); they cause later lock attempts to skip optimistic spinning
 * and are cleared again on selected unlock paths to re-enable it.
 */

#define RWSEM_READER_OWNED	(1UL << 0)
#define RWSEM_RD_NONSPINNABLE	(1UL << 1)
#define RWSEM_WR_NONSPINNABLE	(1UL << 2)
#define RWSEM_NONSPINNABLE	(RWSEM_RD_NONSPINNABLE | RWSEM_WR_NONSPINNABLE)
#define RWSEM_OWNER_FLAGS_MASK	(RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)
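/*
 * Example owner values (illustrative, not an exhaustive list):
 *   writer-owned:  owner == (unsigned long)owning_task, no flag bits set
 *   reader-owned:  owner == (unsigned long)some_reader | RWSEM_READER_OWNED,
 *                  possibly with one or both nonspinnable bits also set
 */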

#ifdef CONFIG_DEBUG_RWSEMS
# define DEBUG_RWSEMS_WARN_ON(c, sem)	do {			\
	if (!debug_locks_silent &&				\
	    WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
		#c, atomic_long_read(&(sem)->count),		\
		(unsigned long) sem->magic,			\
		atomic_long_read(&(sem)->owner), (long)current,	\
		list_empty(&(sem)->wait_list) ? "" : "not "))	\
			debug_locks_off();			\
	} while (0)
#else
# define DEBUG_RWSEMS_WARN_ON(c, sem)
#endif


/*
 * On 64-bit architectures, the bit definitions of the count are:
 *
 * Bit  0    - writer locked bit
 * Bit  1    - waiters present bit
 * Bit  2    - lock handoff bit
 * Bits 3-7  - reserved
 * Bits 8-62 - 55-bit reader count
 * Bit  63   - read fail bit
 *
 * On 32-bit architectures, the reader count occupies bits 8-30 and the
 * read fail bit is bit 31.
 *
 * It is not likely that the most significant bit (read fail bit) will ever
 * be set.  It is still checked in the down_read() fastpath as a guard in
 * case more of the reader bits are ever needed for another purpose.
 *
 * atomic_long_fetch_add() is used to obtain the reader lock, whereas
 * atomic_long_cmpxchg() is used to obtain the writer lock.
 *
 * The lock handoff bit may be set or cleared in three places, all with
 * wait_lock held:
 * 1) rwsem_mark_wake() for readers,
 * 2) rwsem_try_write_lock() for writers,
 * 3) the error path of rwsem_down_write_slowpath().
 */

#define RWSEM_WRITER_LOCKED	(1UL << 0)
#define RWSEM_FLAG_WAITERS	(1UL << 1)
#define RWSEM_FLAG_HANDOFF	(1UL << 2)
#define RWSEM_FLAG_READFAIL	(1UL << (BITS_PER_LONG - 1))

#define RWSEM_READER_SHIFT	8
#define RWSEM_READER_BIAS	(1UL << RWSEM_READER_SHIFT)
#define RWSEM_READER_MASK	(~(RWSEM_READER_BIAS - 1))
#define RWSEM_WRITER_MASK	RWSEM_WRITER_LOCKED
#define RWSEM_LOCK_MASK		(RWSEM_WRITER_MASK|RWSEM_READER_MASK)
#define RWSEM_READ_FAILED_MASK	(RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
				 RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL)

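/*
 * Worked example (64-bit): a count of 0x300 means three readers
 * (3 << RWSEM_READER_SHIFT) hold the lock, with no writer, no waiters
 * and no handoff pending.
 */
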
/*
 * Writer acquisitions record the owning task in sem->owner.  The value is
 * also read locklessly by optimistic spinners, so it is stored and read
 * with the atomic_long accessors and only dereferenced under RCU by the
 * spinning code.
 */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
	atomic_long_set(&sem->owner, (long)current);
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
	atomic_long_set(&sem->owner, 0);
}

/*
 * Test the flags in the owner field.
 */
static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
{
	return atomic_long_read(&sem->owner) & flags;
}

/*
 * The task_struct pointer of the last owning reader will be left in
 * the owner field.
 *
 * Note that the owner value just indicates the task has owned the rwsem
 * previously, it may not be the real owner or one of the real owners
 * anymore when that field is examined, so take it with a grain of salt.
 *
 * The reader non-spinnable bit is preserved.
 */
static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
					    struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |
		(atomic_long_read(&sem->owner) & RWSEM_RD_NONSPINNABLE);

	atomic_long_set(&sem->owner, val);
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
	__rwsem_set_reader_owned(sem, current);
}

/*
 * Return true if the rwsem is owned by a reader.
 */
static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
{
#ifdef CONFIG_DEBUG_RWSEMS
	/*
	 * Check the count to see if it is write-locked.
	 */
	long count = atomic_long_read(&sem->count);

	if (count & RWSEM_WRITER_MASK)
		return false;
#endif
	return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
}

#ifdef CONFIG_DEBUG_RWSEMS
/*
 * With CONFIG_DEBUG_RWSEMS configured, a reader that finds its own task
 * pointer left in a reader-owned rwsem clears it on unlock so that a stale
 * pointer is not left behind; readers whose task pointer is not the one
 * recorded leave the owner field alone.
 */
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
	unsigned long val = atomic_long_read(&sem->owner);

	while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
		if (atomic_long_try_cmpxchg(&sem->owner, &val,
					    val & RWSEM_OWNER_FLAGS_MASK))
			return;
	}
}
#else
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
}
#endif

/*
 * Set the RWSEM_NONSPINNABLE bits if the RWSEM_READER_OWNED flag
 * remains set. Otherwise, the operation will be aborted.
 */
static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	do {
		if (!(owner & RWSEM_READER_OWNED))
			break;
		if (owner & RWSEM_NONSPINNABLE)
			break;
	} while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
					  owner | RWSEM_NONSPINNABLE));
}

static inline bool rwsem_read_trylock(struct rw_semaphore *sem)
{
	long cnt = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
	if (WARN_ON_ONCE(cnt < 0))
		rwsem_set_nonspinnable(sem);
	return !(cnt & RWSEM_READ_FAILED_MASK);
}

/*
 * Return just the real task structure pointer of the owner
 */
static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
{
	return (struct task_struct *)
		(atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
}

/*
 * Return the real task structure pointer of the owner and the embedded
 * flags in the owner. pflags must be non-NULL.
 */
static inline struct task_struct *
rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	*pflags = owner & RWSEM_OWNER_FLAGS_MASK;
	return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
}

/*
 * Guide to the rw_semaphore's count field.
 *
 * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
 * by a writer.
 *
 * The lock is owned by readers when
 * (1) the RWSEM_WRITER_LOCKED bit isn't set in count,
 * (2) some of the reader bits are set in count, and
 * (3) the owner field has the RWSEM_READER_OWNED bit set.
 *
 * Having some reader bits set is not enough to guarantee a reader-owned
 * lock, as the readers may be in the process of backing out from the count
 * and a writer may have just released the lock; another writer can then
 * steal the lock immediately after that release.
 */

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
#ifdef CONFIG_DEBUG_RWSEMS
	sem->magic = sem;
#endif
	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
	atomic_long_set(&sem->owner, 0L);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	osq_lock_init(&sem->osq);
#endif
}
EXPORT_SYMBOL(__init_rwsem);

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

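/*
 * One rwsem_waiter lives on the stack of each task sleeping in a slowpath.
 * It is linked on sem->wait_list in FIFO order; a woken reader has its
 * ->task pointer cleared (with release semantics) by the waker, while a
 * woken writer dequeues itself after it has acquired the lock.
 */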
struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
	unsigned long timeout;
	unsigned long last_rowner;
};
#define rwsem_first_waiter(sem) \
	list_first_entry(&sem->wait_list, struct rwsem_waiter, list)

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

enum writer_wait_state {
	WRITER_NOT_FIRST,	/* Writer is not first in wait list */
	WRITER_FIRST,		/* Writer is first in wait list     */
	WRITER_HANDOFF		/* Writer is first & handoff needed */
};

/*
 * The typical HZ value is either 250 or 1000. So set the minimum waiting
 * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
 * queue before initiating the handoff protocol.
 */
#define RWSEM_WAIT_TIMEOUT	DIV_ROUND_UP(HZ, 250)

/*
 * Magic number to batch-wakeup waiting readers, even when writers are
 * also present in the queue. This both limits the amount of work the
 * waking thread must do and also prevents any potential counter overflow,
 * however unlikely.
 */
#define MAX_READERS_WAKEUP	0x100

/*
 * Handle the lock release when processes blocked on it can now run.
 * - the caller must hold sem->wait_lock and there must be waiters queued;
 * - tasks are only marked for wakeup here, the caller must invoke
 *   wake_up_q() afterwards (preferably after dropping wait_lock);
 * - woken readers are removed from the wait list and have waiter->task
 *   zeroed, while a woken writer stays queued and retries the lock itself;
 * - wake_type selects whether a writer at the head may be woken
 *   (RWSEM_WAKE_ANY) or only readers (RWSEM_WAKE_READERS, or
 *   RWSEM_WAKE_READ_OWNED when the caller already holds the lock for read).
 */
static void rwsem_mark_wake(struct rw_semaphore *sem,
			    enum rwsem_wake_type wake_type,
			    struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter, *tmp;
	long oldcount, woken = 0, adjustment = 0;
	struct list_head wlist;

	lockdep_assert_held(&sem->wait_lock);

	/*
	 * Take a peek at the queue head waiter such that we can determine
	 * the wakeup(s) to perform.
	 */
	waiter = rwsem_first_waiter(sem);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by the
			 * caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
			lockevent_inc(rwsem_wake_writer);
		}

		return;
	}

	/*
	 * No reader wakeup if there are too many of them already.
	 */
	if (unlikely(atomic_long_read(&sem->count) < 0))
		return;

	/*
	 * Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		struct task_struct *owner;

		adjustment = RWSEM_READER_BIAS;
		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
		if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
			/*
			 * When we've been waiting "too" long (for writers
			 * to give up the lock), request a HANDOFF to
			 * force the issue.
			 */
			if (!(oldcount & RWSEM_FLAG_HANDOFF) &&
			    time_after(jiffies, waiter->timeout)) {
				adjustment -= RWSEM_FLAG_HANDOFF;
				lockevent_inc(rwsem_rlock_handoff);
			}

			atomic_long_add(-adjustment, &sem->count);
			return;
		}
		/*
		 * Set it to reader-owned to give spinners an early
		 * indication that readers now have the lock.
		 * The reader nonspinnable bit seen at slowpath entry of
		 * the reader is copied over.
		 */
		owner = waiter->task;
		if (waiter->last_rowner & RWSEM_RD_NONSPINNABLE) {
			owner = (void *)((unsigned long)owner | RWSEM_RD_NONSPINNABLE);
			lockevent_inc(rwsem_opt_norspin);
		}
		__rwsem_set_reader_owned(sem, owner);
	}

	/*
	 * Grant up to MAX_READERS_WAKEUP read locks to the readers at the
	 * front of the queue, even if writers are queued behind them.  Once
	 * the first reader has been granted the lock, all queued readers up
	 * to the limit are eligible (phase-fair behaviour).
	 *
	 * The wakeup is done in two passes.  The readers are first moved to
	 * a private list and the reader count in sem->count is adjusted for
	 * all of them before any task is actually woken.  Doing the count
	 * adjustment first prevents a woken reader from running, unlocking
	 * and decrementing the reader count before it has been accounted
	 * for here, which could transiently drop the count to zero and
	 * cause a spurious writer wakeup.
	 */
	INIT_LIST_HEAD(&wlist);
	list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
			continue;

		woken++;
		list_move_tail(&waiter->list, &wlist);

		/*
		 * Limit # of readers that can be woken up per wakeup call.
		 */
		if (woken >= MAX_READERS_WAKEUP)
			break;
	}

	adjustment = woken * RWSEM_READER_BIAS - adjustment;
	lockevent_cond_inc(rwsem_wake_reader, woken);
	if (list_empty(&sem->wait_list)) {
		/* hit end of list above */
		adjustment -= RWSEM_FLAG_WAITERS;
	}

	/*
	 * When we've woken a reader, we no longer need to force writers
	 * to give up the lock and we can clear HANDOFF.
	 */
	if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF))
		adjustment -= RWSEM_FLAG_HANDOFF;

	if (adjustment)
		atomic_long_add(adjustment, &sem->count);

	/* 2nd pass: actually mark the moved readers for wakeup. */
	list_for_each_entry_safe(waiter, tmp, &wlist, list) {
		struct task_struct *tsk;

		tsk = waiter->task;
		get_task_struct(tsk);

		/*
		 * Ensure calling get_task_struct() before setting the reader
		 * waiter to nil such that rwsem_down_read_slowpath() cannot
		 * race with do_exit() by always holding a reference count
		 * to the task to wakeup.
		 */
		smp_store_release(&waiter->task, NULL);

		/*
		 * Ensure issuing the wakeup (either by us or someone else)
		 * after setting the reader waiter to nil.
		 */
		wake_q_add_safe(wake_q, tsk);
	}
}

/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 *
 * If wstate is WRITER_HANDOFF, it will make sure that either the handoff
 * bit is set or the lock is acquired with handoff bit cleared.
 */
static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
					enum writer_wait_state wstate)
{
	long count, new;

	lockdep_assert_held(&sem->wait_lock);

	count = atomic_long_read(&sem->count);
	do {
		bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);

		if (has_handoff && wstate == WRITER_NOT_FIRST)
			return false;

		new = count;

		if (count & RWSEM_LOCK_MASK) {
			if (has_handoff || (wstate != WRITER_HANDOFF))
				return false;

			new |= RWSEM_FLAG_HANDOFF;
		} else {
			new |= RWSEM_WRITER_LOCKED;
			new &= ~RWSEM_FLAG_HANDOFF;

			if (list_is_singular(&sem->wait_list))
				new &= ~RWSEM_FLAG_WAITERS;
		}
	} while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));

	/*
	 * We have either acquired the lock with handoff bit cleared or
	 * set the handoff bit.
	 */
	if (new & RWSEM_FLAG_HANDOFF)
		return false;

	rwsem_set_owner(sem);
	return true;
}

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire read lock before the reader is put on wait queue.
 * Lock acquisition isn't allowed if the rwsem is locked or a writer handoff
 * is ongoing.
 */
static inline bool rwsem_try_read_lock_unqueued(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	if (count & (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))
		return false;

	count = atomic_long_fetch_add_acquire(RWSEM_READER_BIAS, &sem->count);
	if (!(count & (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
		rwsem_set_reader_owned(sem);
		lockevent_inc(rwsem_opt_rlock);
		return true;
	}

	/* Back out the change after the unsuccessful attempt. */
	atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
	return false;
}

/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
					count | RWSEM_WRITER_LOCKED)) {
			rwsem_set_owner(sem);
			lockevent_inc(rwsem_opt_wlock);
			return true;
		}
	}
	return false;
}

static inline bool owner_on_cpu(struct task_struct *owner)
{
	/*
	 * As lock holder preemption issue, we both skip spinning if
	 * task is not on cpu or its cpu is preempted
	 */
	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
					   unsigned long nonspinnable)
{
	struct task_struct *owner;
	unsigned long flags;
	bool ret = true;

	BUILD_BUG_ON(!(RWSEM_OWNER_UNKNOWN & RWSEM_NONSPINNABLE));

	if (need_resched()) {
		lockevent_inc(rwsem_opt_fail);
		return false;
	}

	preempt_disable();
	rcu_read_lock();
	owner = rwsem_owner_flags(sem, &flags);
	/*
	 * Don't check the read-owner as the entry may be stale.
	 */
	if ((flags & nonspinnable) ||
	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
		ret = false;
	rcu_read_unlock();
	preempt_enable();

	lockevent_cond_inc(rwsem_opt_fail, !ret);
	return ret;
}

/*
 * The rwsem_spin_on_owner() function returns the following four values
 * depending on the lock owner state.
 *   OWNER_NULL  : owner is currently NULL
 *   OWNER_WRITER: when owner changes and is a writer
 *   OWNER_READER: when owner changes and the new owner may be a reader.
 *   OWNER_NONSPINNABLE:
 *		   when optimistic spinning has to stop because either the
 *		   owner stops running, is unknown, or its timeslice has
 *		   been used up.
 */
enum owner_state {
	OWNER_NULL		= 1 << 0,
	OWNER_WRITER		= 1 << 1,
	OWNER_READER		= 1 << 2,
	OWNER_NONSPINNABLE	= 1 << 3,
};
#define OWNER_SPINNABLE		(OWNER_NULL | OWNER_WRITER | OWNER_READER)

static inline enum owner_state
rwsem_owner_state(struct task_struct *owner, unsigned long flags, unsigned long nonspinnable)
{
	if (flags & nonspinnable)
		return OWNER_NONSPINNABLE;

	if (flags & RWSEM_READER_OWNED)
		return OWNER_READER;

	return owner ? OWNER_WRITER : OWNER_NULL;
}

static noinline enum owner_state
rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
{
	struct task_struct *new, *owner;
	unsigned long flags, new_flags;
	enum owner_state state;

	owner = rwsem_owner_flags(sem, &flags);
	state = rwsem_owner_state(owner, flags, nonspinnable);
	if (state != OWNER_WRITER)
		return state;

	rcu_read_lock();
	for (;;) {
		/*
		 * When a waiting writer set the handoff flag, it may spin
		 * on the owner as well. Once that writer acquires the lock,
		 * we can spin on it. So we don't need to quit even when the
		 * handoff bit is set.
		 */
		new = rwsem_owner_flags(sem, &new_flags);
		if ((new != owner) || (new_flags != flags)) {
			state = rwsem_owner_state(new, new_flags, nonspinnable);
			break;
		}

		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking sem->owner still matches owner. If that fails,
		 * owner might point to freed memory; if it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		if (need_resched() || !owner_on_cpu(owner)) {
			state = OWNER_NONSPINNABLE;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return state;
}

/*
 * Calculate reader-owned rwsem spinning threshold for writer
 *
 * The more readers own the rwsem, the longer it will take for them to
 * wind down and free the rwsem. So the empirical formula used to
 * determine the actual spinning time limit here is:
 *
 *   Spinning threshold = (10 + nr_readers/2)us
 *
 * The limit is capped to a maximum of 25us (30 readers). This is just
 * a heuristic and is not an exact representation of the time needed.
 */
static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);
	int readers = count >> RWSEM_READER_SHIFT;
	u64 delta;

	if (readers > 30)
		readers = 30;
	delta = (20 + readers) * NSEC_PER_USEC / 2;

	return sched_clock() + delta;
}
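
/*
 * Worked example: with five readers holding the lock, a writer may spin
 * for roughly (20 + 5) * NSEC_PER_USEC / 2 = 12.5 usecs past the current
 * sched_clock() value before giving up; the 30-reader cap bounds the
 * window at 25 usecs.
 */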

static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
{
	bool taken = false;
	int prev_owner_state = OWNER_NULL;
	int loop = 0;
	u64 rspin_threshold = 0;
	unsigned long nonspinnable = wlock ? RWSEM_WR_NONSPINNABLE
					   : RWSEM_RD_NONSPINNABLE;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!osq_lock(&sem->osq))
		goto done;

	/*
	 * Optimistically spin on the owner field and attempt to acquire the
	 * lock whenever the owner changes. Spinning will be stopped when:
	 *  1) the owning writer isn't running; or
	 *  2) readers own the lock and spinning time has exceeded limit.
	 */
	for (;;) {
		enum owner_state owner_state;

		owner_state = rwsem_spin_on_owner(sem, nonspinnable);
		if (!(owner_state & OWNER_SPINNABLE))
			break;

		/*
		 * Try to acquire the lock
		 */
		taken = wlock ? rwsem_try_write_lock_unqueued(sem)
			      : rwsem_try_read_lock_unqueued(sem);

		if (taken)
			break;

		/*
		 * Time-based reader-owned rwsem optimistic spinning
		 */
		if (wlock && (owner_state == OWNER_READER)) {
			/*
			 * Re-initialize rspin_threshold every time when
			 * the owner state changes from non-reader to reader.
			 * This allows a writer to steal the lock in between
			 * 2 reader phases and have the threshold reset at
			 * the beginning of the 2nd reader phase.
			 */
			if (prev_owner_state != OWNER_READER) {
				if (rwsem_test_oflags(sem, nonspinnable))
					break;
				rspin_threshold = rwsem_rspin_threshold(sem);
				loop = 0;
			}

			/*
			 * Check the time threshold only once every 16
			 * iterations to avoid calling sched_clock() too
			 * frequently; the actual spinning time will be a
			 * bit longer than the threshold indicates, which
			 * is acceptable for a heuristic.
			 */
			else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) {
				rwsem_set_nonspinnable(sem);
				lockevent_inc(rwsem_opt_nospin);
				break;
			}
		}

		/*
		 * An RT task cannot do optimistic spinning if it cannot be
		 * sure the lock holder is running; otherwise a live-lock may
		 * happen if the holder is preempted on the same CPU.  When
		 * the owner goes away (OWNER_NULL) or changes, however, the
		 * lock may be about to be released or may already have been
		 * taken by a new, spinnable writer.  An RT task is therefore
		 * allowed one more iteration after such a transition before
		 * giving up and going to sleep.
		 *
		 * If the owner is a writer, the need_resched() check is done
		 * inside rwsem_spin_on_owner(); otherwise it has to be done
		 * here.
		 */
		if (owner_state != OWNER_WRITER) {
			if (need_resched())
				break;
			if (rt_task(current) &&
			   (prev_owner_state != OWNER_WRITER))
				break;
		}
		prev_owner_state = owner_state;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	lockevent_cond_inc(rwsem_opt_fail, !taken);
	return taken;
}

/*
 * Clear the owner's RWSEM_WR_NONSPINNABLE bit if it is set. This should
 * only be called when the reader count reaches 0.
 *
 * This gives writers a better chance to acquire the rwsem first before
 * readers when the rwsem was being held by readers for a relatively long
 * period of time. A race can happen in that an optimistic spinner may have
 * just stolen the rwsem and set the owner, but just clearing the
 * RWSEM_WR_NONSPINNABLE bit will do no harm anyway.
 */
static inline void clear_wr_nonspinnable(struct rw_semaphore *sem)
{
	if (rwsem_test_oflags(sem, RWSEM_WR_NONSPINNABLE))
		atomic_long_andnot(RWSEM_WR_NONSPINNABLE, &sem->owner);
}

/*
 * This function is called when the reader fails to acquire the lock via
 * optimistic spinning. In this case we will still attempt a trylock if
 * the owner has changed since the last_rowner value recorded at slowpath
 * entry. An owner change while the lock is still reader-owned suggests
 * that a new reader phase has begun, and joining the new readers is
 * cheaper than queuing up.
 *
 * On success, the lockevent counters are adjusted so that this path is
 * counted as an optimistic read lock (rwsem_opt_rlock2) instead of an
 * optimistic-spin failure.
 */
static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
					      unsigned long last_rowner)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	if (!(owner & RWSEM_READER_OWNED))
		return false;

	if (((owner ^ last_rowner) & ~RWSEM_OWNER_FLAGS_MASK) &&
	    rwsem_try_read_lock_unqueued(sem)) {
		lockevent_inc(rwsem_opt_rlock2);
		lockevent_add(rwsem_opt_fail, -1);
		return true;
	}
	return false;
}
#else
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
					   unsigned long nonspinnable)
{
	return false;
}

static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
{
	return false;
}

static inline void clear_wr_nonspinnable(struct rw_semaphore *sem) { }

static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
					      unsigned long last_rowner)
{
	return false;
}

static inline int
rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
{
	return 0;
}
#define OWNER_NULL	1
#endif

/*
 * Wait for the read lock to be granted
 */
static struct rw_semaphore __sched *
rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
{
	long count, adjustment = -RWSEM_READER_BIAS;
	struct rwsem_waiter waiter;
	DEFINE_WAKE_Q(wake_q);
	bool wake = false;

	/*
	 * Save the current read-owner of the rwsem, if available, and the
	 * reader nonspinnable bit.
	 */
	waiter.last_rowner = atomic_long_read(&sem->owner);
	if (!(waiter.last_rowner & RWSEM_READER_OWNED))
		waiter.last_rowner &= RWSEM_RD_NONSPINNABLE;

	if (!rwsem_can_spin_on_owner(sem, RWSEM_RD_NONSPINNABLE))
		goto queue;

	/*
	 * Undo the read bias from the down_read() fastpath, stop active locking.
	 */
	atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
	adjustment = 0;
	if (rwsem_optimistic_spin(sem, false)) {
		/*
		 * Wake up other readers in the wait list if the front
		 * waiter is a reader.
		 */
		if ((atomic_long_read(&sem->count) & RWSEM_FLAG_WAITERS)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (!list_empty(&sem->wait_list))
				rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
						&wake_q);
			raw_spin_unlock_irq(&sem->wait_lock);
			wake_up_q(&wake_q);
		}
		return sem;
	} else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
		/* rwsem_reader_phase_trylock() implies ACQUIRE on success */
		return sem;
	}

queue:
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_READ;
	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list)) {
		/*
		 * In case the wait queue is empty and the lock isn't owned
		 * by a writer or has the handoff bit set, this reader can
		 * exit the slowpath and return immediately as its
		 * RWSEM_READER_BIAS has already been set in the count.
		 */
		if (adjustment && !(atomic_long_read(&sem->count) &
		     (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
			/* Provide lock ACQUIRE */
			smp_acquire__after_ctrl_dep();
			raw_spin_unlock_irq(&sem->wait_lock);
			rwsem_set_reader_owned(sem);
			lockevent_inc(rwsem_rlock_fast);
			return sem;
		}
		adjustment += RWSEM_FLAG_WAITERS;
	}
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	if (adjustment)
		count = atomic_long_add_return(adjustment, &sem->count);
	else
		count = atomic_long_read(&sem->count);

	/*
	 * If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers !
	 */
	if (!(count & RWSEM_LOCK_MASK)) {
		clear_wr_nonspinnable(sem);
		wake = true;
	}
	if (wake || (!(count & RWSEM_WRITER_MASK) &&
		    (adjustment & RWSEM_FLAG_WAITERS)))
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	/* wait to be given the lock */
	for (;;) {
		set_current_state(state);
		if (!smp_load_acquire(&waiter.task)) {
			/* Matches rwsem_mark_wake()'s smp_store_release(). */
			break;
		}
		if (signal_pending_state(state, current)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (waiter.task)
				goto out_nolock;
			raw_spin_unlock_irq(&sem->wait_lock);
			/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
			break;
		}
		schedule();
		lockevent_inc(rwsem_sleep_reader);
	}

	__set_current_state(TASK_RUNNING);
	lockevent_inc(rwsem_rlock);
	return sem;

out_nolock:
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list)) {
		atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF,
				   &sem->count);
	}
	raw_spin_unlock_irq(&sem->wait_lock);
	__set_current_state(TASK_RUNNING);
	lockevent_inc(rwsem_rlock_fail);
	return ERR_PTR(-EINTR);
}

/*
 * This function is called by the write lock owner, so the owner value
 * won't get changed by others.
 */
static inline void rwsem_disable_reader_optspin(struct rw_semaphore *sem,
						bool disable)
{
	if (unlikely(disable)) {
		atomic_long_or(RWSEM_RD_NONSPINNABLE, &sem->owner);
		lockevent_inc(rwsem_opt_norspin);
	}
}

/*
 * Wait until we successfully acquire the write lock
 */
static struct rw_semaphore *
rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
{
	long count;
	bool disable_rspin;
	enum writer_wait_state wstate;
	struct rwsem_waiter waiter;
	struct rw_semaphore *ret = sem;
	DEFINE_WAKE_Q(wake_q);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
	    rwsem_optimistic_spin(sem, true)) {
		/* rwsem_optimistic_spin() implies ACQUIRE on success */
		return sem;
	}

	/*
	 * Disable reader optimistic spinning for this rwsem after
	 * acquiring the write lock when the setting of the nonspinnable
	 * bits are observed.
	 */
	disable_rspin = atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE;

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;
	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock */
	if (wstate == WRITER_NOT_FIRST) {
		count = atomic_long_read(&sem->count);

		/*
		 * If there were already threads queued before us and:
		 *  1) there are no active locks, wake the front
		 *     queued process(es) as the handoff bit might be set.
		 *  2) there are no active writers and some readers, the lock
		 *     must be read owned; so we try to wake any read lock
		 *     waiters that were queued ahead of us.
		 */
		if (count & RWSEM_WRITER_MASK)
			goto wait;

		rwsem_mark_wake(sem, (count & RWSEM_READER_MASK)
					? RWSEM_WAKE_READERS
					: RWSEM_WAKE_ANY, &wake_q);

		if (!wake_q_empty(&wake_q)) {
			/*
			 * We want to minimize wait_lock hold time especially
			 * when a large number of readers are to be woken up.
			 */
			raw_spin_unlock_irq(&sem->wait_lock);
			wake_up_q(&wake_q);
			wake_q_init(&wake_q);	/* Used again, reinit */
			raw_spin_lock_irq(&sem->wait_lock);
		}
	} else {
		atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
	}

wait:
	/* wait until we successfully acquire the lock */
	set_current_state(state);
	for (;;) {
		if (rwsem_try_write_lock(sem, wstate)) {
			/* rwsem_try_write_lock() implies ACQUIRE on success */
			break;
		}

		raw_spin_unlock_irq(&sem->wait_lock);

		/*
		 * After setting the handoff bit and failing to acquire
		 * the lock, attempt to spin on owner to accelerate lock
		 * transfer. If the previous owner is an on-cpu writer and it
		 * has just released the lock, OWNER_NULL will be returned.
		 * In this case, we attempt to acquire the lock again
		 * without sleeping.
		 */
		if (wstate == WRITER_HANDOFF &&
		    rwsem_spin_on_owner(sem, RWSEM_NONSPINNABLE) == OWNER_NULL)
			goto trylock_again;

		/* Block until there are no active lockers. */
		for (;;) {
			if (signal_pending_state(state, current))
				goto out_nolock;

			schedule();
			lockevent_inc(rwsem_sleep_writer);
			set_current_state(state);
			/*
			 * If HANDOFF bit is set, unconditionally do a trylock.
			 */
			if (wstate == WRITER_HANDOFF)
				break;

			if ((wstate == WRITER_NOT_FIRST) &&
			    (rwsem_first_waiter(sem) == &waiter))
				wstate = WRITER_FIRST;

			count = atomic_long_read(&sem->count);
			if (!(count & RWSEM_LOCK_MASK))
				break;

			/*
			 * The setting of the handoff bit is deferred
			 * until rwsem_try_write_lock() is called.
			 */
			if ((wstate == WRITER_FIRST) && (rt_task(current) ||
			    time_after(jiffies, waiter.timeout))) {
				wstate = WRITER_HANDOFF;
				lockevent_inc(rwsem_wlock_handoff);
				break;
			}
		}
trylock_again:
		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	list_del(&waiter.list);
	rwsem_disable_reader_optspin(sem, disable_rspin);
	raw_spin_unlock_irq(&sem->wait_lock);
	lockevent_inc(rwsem_wlock);

	return ret;

out_nolock:
	__set_current_state(TASK_RUNNING);
	raw_spin_lock_irq(&sem->wait_lock);
	list_del(&waiter.list);

	if (unlikely(wstate == WRITER_HANDOFF))
		atomic_long_add(-RWSEM_FLAG_HANDOFF, &sem->count);

	if (list_empty(&sem->wait_list))
		atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
	else
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);
	lockevent_inc(rwsem_wlock_fail);

	return ERR_PTR(-EINTR);
}

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem, long count)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}

/*
 * downgrade a write lock into a read lock
 * - caller has already converted the count from writer-held to one reader
 * - just wake up any readers at the front of the queue
 */
static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}

/*
 * lock for reading
 */
inline void __down_read(struct rw_semaphore *sem)
{
	if (!rwsem_read_trylock(sem)) {
		rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	} else {
		rwsem_set_reader_owned(sem);
	}
}

static inline int __down_read_killable(struct rw_semaphore *sem)
{
	if (!rwsem_read_trylock(sem)) {
		if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_KILLABLE)))
			return -EINTR;
		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	} else {
		rwsem_set_reader_owned(sem);
	}
	return 0;
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);

	/*
	 * Optimize for the case when the rwsem is not locked at all.
	 */
	tmp = RWSEM_UNLOCKED_VALUE;
	do {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
					tmp + RWSEM_READER_BIAS)) {
			rwsem_set_reader_owned(sem);
			return 1;
		}
	} while (!(tmp & RWSEM_READ_FAILED_MASK));
	return 0;
}

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	long tmp = RWSEM_UNLOCKED_VALUE;

	if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
						      RWSEM_WRITER_LOCKED)))
		rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE);
	else
		rwsem_set_owner(sem);
}

static inline int __down_write_killable(struct rw_semaphore *sem)
{
	long tmp = RWSEM_UNLOCKED_VALUE;

	if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
						      RWSEM_WRITER_LOCKED))) {
		if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE)))
			return -EINTR;
	} else {
		rwsem_set_owner(sem);
	}
	return 0;
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);

	tmp = RWSEM_UNLOCKED_VALUE;
	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
					    RWSEM_WRITER_LOCKED)) {
		rwsem_set_owner(sem);
		return true;
	}
	return false;
}

/*
 * unlock after reading
 */
inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);

	rwsem_clear_reader_owned(sem);
	tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
	DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
	if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
		      RWSEM_FLAG_WAITERS)) {
		clear_wr_nonspinnable(sem);
		rwsem_wake(sem, tmp);
	}
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
	/*
	 * sem->owner may differ from current if the ownership is transferred
	 * to an anonymous writer by setting the RWSEM_NONSPINNABLE bits.
	 */
	DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
			    !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);

	rwsem_clear_owner(sem);
	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
		rwsem_wake(sem, tmp);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * When downgrading from exclusive to shared ownership,
	 * anything inside the write-locked region cannot leak
	 * into the read side. In contrast, anything in the
	 * read-locked region is ok to be re-ordered into the
	 * write side. As such, rely on RELEASE semantics.
	 */
	DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
	tmp = atomic_long_fetch_add_release(
		-RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
	rwsem_set_reader_owned(sem);
	if (tmp & RWSEM_FLAG_WAITERS)
		rwsem_downgrade_wake(sem);
}

/*
 * lock for reading
 */
void __sched down_read(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read);

int __sched down_read_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_read_killable);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int down_read_trylock(struct rw_semaphore *sem)
{
	int ret = __down_read_trylock(sem);

	if (ret == 1)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
	return ret;
}
EXPORT_SYMBOL(down_read_trylock);

/*
 * lock for writing
 */
void __sched down_write(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write);

/*
 * lock for writing, interruptible by a fatal signal
 */
int __sched down_write_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
				  __down_write_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_write_killable);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int down_write_trylock(struct rw_semaphore *sem)
{
	int ret = __down_write_trylock(sem);

	if (ret == 1)
		rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL(down_write_trylock);

/*
 * release a read lock
 */
void up_read(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);
	__up_read(sem);
}
EXPORT_SYMBOL(up_read);

/*
 * release a write lock
 */
void up_write(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);
	__up_write(sem);
}
EXPORT_SYMBOL(up_write);

/*
 * downgrade write lock to read lock
 */
void downgrade_write(struct rw_semaphore *sem)
{
	lock_downgrade(&sem->dep_map, _RET_IP_);
	__downgrade_write(sem);
}
EXPORT_SYMBOL(downgrade_write);

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void down_read_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read_nested);

void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
{
	might_sleep();
	rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(_down_write_nest_lock);

void down_read_non_owner(struct rw_semaphore *sem)
{
	might_sleep();
	__down_read(sem);
	__rwsem_set_reader_owned(sem, NULL);
}
EXPORT_SYMBOL(down_read_non_owner);

void down_write_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write_nested);

int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
				  __down_write_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_write_killable_nested);

void up_read_non_owner(struct rw_semaphore *sem)
{
	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	__up_read(sem);
}
EXPORT_SYMBOL(up_read_non_owner);

#endif