1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#include <linux/kernel.h>
35#include <linux/gfp.h>
36#include <linux/slab.h>
37#include <linux/radix-tree.h>
38#include <linux/rcupdate.h>
39#include <stdlib.h>
40#include <pthread.h>
41#include <stdio.h>
42#include <assert.h>
43
44#include "regression.h"
45
/* Tree under test; mt_lock serialises writers (lookups run locklessly under RCU). */
static RADIX_TREE(mt_tree, GFP_KERNEL);
static pthread_mutex_t mt_lock = PTHREAD_MUTEX_INITIALIZER;
48
/*
 * Minimal stand-in for the kernel's struct page — just enough state to
 * exercise the radix-tree lookup vs. delete race in this test.
 */
struct page {
	pthread_mutex_t lock;	/* protects count */
	struct rcu_head rcu;	/* for deferred free via call_rcu() */
	int count;		/* refcount; dropped to 0 when removed from the tree */
	unsigned long index;
};
55
56static struct page *page_alloc(void)
57{
58 struct page *p;
59 p = malloc(sizeof(struct page));
60 p->count = 1;
61 p->index = 1;
62 pthread_mutex_init(&p->lock, NULL);
63
64 return p;
65}
66
67static void page_rcu_free(struct rcu_head *rcu)
68{
69 struct page *p = container_of(rcu, struct page, rcu);
70 assert(!p->count);
71 pthread_mutex_destroy(&p->lock);
72 free(p);
73}
74
/*
 * Release a page.  Actual freeing is deferred via call_rcu() so that
 * concurrent readers inside rcu_read_lock() sections may still safely
 * inspect it until the grace period ends.
 */
static void page_free(struct page *p)
{
	call_rcu(&p->rcu, page_rcu_free);
}
79
/*
 * Lockless, pagecache-style gang lookup: gather up to @nr_pages page
 * pointers starting at index @start into @pages, holding only
 * rcu_read_lock().  Each candidate slot is re-validated against
 * concurrent delete/insert by the writer thread.  Returns the number
 * of pages stored.
 *
 * NOTE(review): this mirrors the kernel's find_get_pages(); the exact
 * deref/retry/recheck ordering below is the protocol under test — do
 * not reorder.
 */
static unsigned find_get_pages(unsigned long start,
			    unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_slot(&mt_tree,
				(void ***)pages, NULL, start, nr_pages);
	ret = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		/* Slot may have been emptied by a concurrent delete. */
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only happen
				 * when the entry at index 0 moves out of or
				 * back into the root: no pages gotten yet,
				 * so it is safe to restart the whole scan.
				 */
				assert((start | i) == 0);
				goto restart;
			}
			/*
			 * This test never inserts exceptional (non-page)
			 * entries, so reaching here is a bug.
			 */
			assert(0);
		}

		pthread_mutex_lock(&page->lock);
		if (!page->count) {
			/* Page is being freed; re-read the slot. */
			pthread_mutex_unlock(&page->lock);
			goto repeat;
		}

		/* Don't actually take a reference — just probe the count. */
		pthread_mutex_unlock(&page->lock);

		/* Has the page moved (slot reused for another page)? */
		if (unlikely(page != *((void **)pages[i]))) {
			goto repeat;
		}

		pages[ret] = page;
		ret++;
	}
	rcu_read_unlock();
	return ret;
}
134
135static pthread_barrier_t worker_barrier;
136
137static void *regression1_fn(void *arg)
138{
139 rcu_register_thread();
140
141 if (pthread_barrier_wait(&worker_barrier) ==
142 PTHREAD_BARRIER_SERIAL_THREAD) {
143 int j;
144
145 for (j = 0; j < 1000000; j++) {
146 struct page *p;
147
148 p = page_alloc();
149 pthread_mutex_lock(&mt_lock);
150 radix_tree_insert(&mt_tree, 0, p);
151 pthread_mutex_unlock(&mt_lock);
152
153 p = page_alloc();
154 pthread_mutex_lock(&mt_lock);
155 radix_tree_insert(&mt_tree, 1, p);
156 pthread_mutex_unlock(&mt_lock);
157
158 pthread_mutex_lock(&mt_lock);
159 p = radix_tree_delete(&mt_tree, 1);
160 pthread_mutex_lock(&p->lock);
161 p->count--;
162 pthread_mutex_unlock(&p->lock);
163 pthread_mutex_unlock(&mt_lock);
164 page_free(p);
165
166 pthread_mutex_lock(&mt_lock);
167 p = radix_tree_delete(&mt_tree, 0);
168 pthread_mutex_lock(&p->lock);
169 p->count--;
170 pthread_mutex_unlock(&p->lock);
171 pthread_mutex_unlock(&mt_lock);
172 page_free(p);
173 }
174 } else {
175 int j;
176
177 for (j = 0; j < 100000000; j++) {
178 struct page *pages[10];
179
180 find_get_pages(0, 10, pages);
181 }
182 }
183
184 rcu_unregister_thread();
185
186 return NULL;
187}
188
189static pthread_t *threads;
190void regression1_test(void)
191{
192 int nr_threads;
193 int i;
194 long arg;
195
196
197 printv(1, "running regression test 1, should finish in under a minute\n");
198 nr_threads = 2;
199 pthread_barrier_init(&worker_barrier, NULL, nr_threads);
200
201 threads = malloc(nr_threads * sizeof(pthread_t *));
202
203 for (i = 0; i < nr_threads; i++) {
204 arg = i;
205 if (pthread_create(&threads[i], NULL, regression1_fn, (void *)arg)) {
206 perror("pthread_create");
207 exit(1);
208 }
209 }
210
211 for (i = 0; i < nr_threads; i++) {
212 if (pthread_join(threads[i], NULL)) {
213 perror("pthread_join");
214 exit(1);
215 }
216 }
217
218 free(threads);
219
220 printv(1, "regression test 1, done\n");
221}
222