#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/errno.h>

int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
			const char *name, struct lock_class_key *rwsem_key)
{
	brw->fast_read_ctr = alloc_percpu(int);
	if (unlikely(!brw->fast_read_ctr))
		return -ENOMEM;
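
	/* ->rw_sem represents the whole "struct percpu_rw_semaphore" for lockdep */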
	__init_rwsem(&brw->rw_sem, name, rwsem_key);
	rcu_sync_init(&brw->rss, RCU_SCHED_SYNC);
	atomic_set(&brw->slow_read_ctr, 0);
	init_waitqueue_head(&brw->write_waitq);
	return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);

void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
{
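	/*
	 * XXX: temporary kludge. The error path in alloc_super()
	 * assumes that percpu_free_rwsem() is safe after kzalloc().
	 */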
	if (!brw->fast_read_ctr)
		return;

	rcu_sync_dtor(&brw->rss);
	free_percpu(brw->fast_read_ctr);
	brw->fast_read_ctr = NULL; /* catch use after free bugs */
}
EXPORT_SYMBOL_GPL(percpu_free_rwsem);
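
/*
 * This is the fast-path for percpu_down_read()/percpu_up_read(). If it
 * succeeds we rely on the barriers provided by rcu_sync_enter/exit;
 * see the comments in percpu_down_write() and percpu_up_write().
 *
 * If this helper fails the callers rely on the normal rw_semaphore and
 * atomic_dec_and_test(), so in that case we have the necessary barriers.
 */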
static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
{
	bool success;

	preempt_disable();
	success = rcu_sync_is_idle(&brw->rss);
	if (likely(success))
		__this_cpu_add(*brw->fast_read_ctr, val);
	preempt_enable();

	return success;
}
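
/*
 * Like the normal down_read() this is not recursive: the writer can
 * come after the first percpu_down_read() and create the deadlock.
 *
 * Note: this returns with lock_is_held(brw->rw_sem) == T for lockdep,
 * and percpu_up_read() does the matching rwsem_release(). This pairs
 * with the use of ->rw_sem in percpu_down_write()/percpu_up_write().
 */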
void percpu_down_read(struct percpu_rw_semaphore *brw)
{
	might_sleep();
	rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);

	if (likely(update_fast_ctr(brw, +1)))
		return;
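
	/* Avoid rwsem_acquire_read() and rwsem_release() */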
	__down_read(&brw->rw_sem);
	atomic_inc(&brw->slow_read_ctr);
	__up_read(&brw->rw_sem);
}
EXPORT_SYMBOL_GPL(percpu_down_read);

int percpu_down_read_trylock(struct percpu_rw_semaphore *brw)
{
	if (unlikely(!update_fast_ctr(brw, +1))) {
		if (!__down_read_trylock(&brw->rw_sem))
			return 0;
		atomic_inc(&brw->slow_read_ctr);
		__up_read(&brw->rw_sem);
	}

	rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 1, _RET_IP_);
	return 1;
}

void percpu_up_read(struct percpu_rw_semaphore *brw)
{
	rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);

	if (likely(update_fast_ctr(brw, -1)))
		return;
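
	/* false-positive is possible but harmless */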
	if (atomic_dec_and_test(&brw->slow_read_ctr))
		wake_up_all(&brw->write_waitq);
}
EXPORT_SYMBOL_GPL(percpu_up_read);

static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		sum += per_cpu(*brw->fast_read_ctr, cpu);
		per_cpu(*brw->fast_read_ctr, cpu) = 0;
	}

	return sum;
}

void percpu_down_write(struct percpu_rw_semaphore *brw)
{
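	/*
	 * Make rcu_sync_is_idle() == F and thus disable the fast-path in
	 * percpu_down_read() and percpu_up_read(), and wait for a GP pass.
	 *
	 * The latter synchronises us with the preceding readers which used
	 * the fast-path, so we cannot miss the result of __this_cpu_add()
	 * or anything else inside their critical sections.
	 */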
	rcu_sync_enter(&brw->rss);
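
	/* exclude other writers, and block the new readers completely */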
	down_write(&brw->rw_sem);
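
	/* nobody can use fast_read_ctr, move its sum into slow_read_ctr */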
	atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);
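
	/* wait for all readers to complete their percpu_up_read() */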
	wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
}
EXPORT_SYMBOL_GPL(percpu_down_write);

void percpu_up_write(struct percpu_rw_semaphore *brw)
{
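	/* release the lock, but the readers can't use the fast-path */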
	up_write(&brw->rw_sem);
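	/*
	 * Enable the fast-path in percpu_down_read() and percpu_up_read(),
	 * but only after another GP pass; this adds the necessary barrier
	 * to ensure the readers can't miss the changes done by us.
	 */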
	rcu_sync_exit(&brw->rss);
}
EXPORT_SYMBOL_GPL(percpu_up_write);
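
/*
 * Illustrative usage sketch, assuming the percpu_init_rwsem() wrapper
 * declared in <linux/percpu-rwsem.h>; the calls pair up like those of
 * an ordinary rw_semaphore:
 *
 *	static struct percpu_rw_semaphore sem;
 *
 *	percpu_init_rwsem(&sem);
 *
 *	percpu_down_read(&sem);
 *	... read-side critical section (cheap per-cpu add) ...
 *	percpu_up_read(&sem);
 *
 *	percpu_down_write(&sem);
 *	... exclusive section (readers forced onto the slow path) ...
 *	percpu_up_write(&sem);
 */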