#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>
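
/*
 * Mbcache is a simple key<->value store. Keys need not be unique, however
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete_block()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute
 * blocks. They use the hash of a block's contents as the key and the block
 * number as the value; keys therefore need not be unique (different xattr
 * blocks may hash to the same value), but a block number always identifies
 * exactly one cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. A fixed
 * size hash table is used for fast key lookups.
 */
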
struct mb_cache {
        /* Hash table of entries */
        struct hlist_bl_head    *c_hash;
        /* log2 of hash table size */
        int                     c_bucket_bits;
        /* Maximum entries in cache to avoid degrading hash too much */
        unsigned long           c_max_entries;
        /* Protects c_list, c_entry_count */
        spinlock_t              c_list_lock;
        struct list_head        c_list;
        /* Number of entries in cache */
        unsigned long           c_entry_count;
        struct shrinker         c_shrink;
        /* Work for shrinking when the cache has too many entries */
        struct work_struct      c_shrink_work;
};

static struct kmem_cache *mb_entry_cache;

static unsigned long mb_cache_shrink(struct mb_cache *cache,
                                     unsigned long nr_to_scan);

static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
                                                         u32 key)
{
        return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in the cache
 */
#define SYNC_SHRINK_BATCH 64
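
/*
 * mb_cache_entry_create - create entry in cache
 * @cache: cache where the entry should be created
 * @mask: gfp mask with which the entry should be allocated
 * @key: key of the entry
 * @block: block that contains data
 * @reusable: is the block reusable by other inodes?
 *
 * Creates an entry in @cache with key @key and records that data is stored
 * in block @block. Returns -EBUSY if an entry with the same key and block
 * already exists, -ENOMEM on allocation failure, and 0 once the new entry
 * has been added to the hash table and the LRU list.
 */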
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
                          sector_t block, bool reusable)
{
        struct mb_cache_entry *entry, *dup;
        struct hlist_bl_node *dup_node;
        struct hlist_bl_head *head;

        /* Schedule background reclaim if there are too many entries */
        if (cache->c_entry_count >= cache->c_max_entries)
                schedule_work(&cache->c_shrink_work);
        /* Do some sync reclaim if background reclaim cannot keep up */
        if (cache->c_entry_count >= 2*cache->c_max_entries)
                mb_cache_shrink(cache, SYNC_SHRINK_BATCH);

        entry = kmem_cache_alloc(mb_entry_cache, mask);
        if (!entry)
                return -ENOMEM;

        INIT_LIST_HEAD(&entry->e_list);
        /* One ref for the hash table, one more grabbed below for the LRU list */
        atomic_set(&entry->e_refcnt, 1);
        entry->e_key = key;
        entry->e_block = block;
        entry->e_reusable = reusable;
        head = mb_cache_entry_head(cache, key);
        hlist_bl_lock(head);
        hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
                if (dup->e_key == key && dup->e_block == block) {
                        hlist_bl_unlock(head);
                        kmem_cache_free(mb_entry_cache, entry);
                        return -EBUSY;
                }
        }
        hlist_bl_add_head(&entry->e_hash_list, head);
        hlist_bl_unlock(head);

        spin_lock(&cache->c_list_lock);
        list_add_tail(&entry->e_list, &cache->c_list);
        /* Grab ref for the LRU list */
        atomic_inc(&entry->e_refcnt);
        cache->c_entry_count++;
        spin_unlock(&cache->c_list_lock);

        return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);

/*
 * Free an entry once its last reference has been dropped (see
 * mb_cache_entry_put()).
 */
void __mb_cache_entry_free(struct mb_cache_entry *entry)
{
        kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);
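
/*
 * Find the next reusable entry with the given key, starting either from the
 * head of the hash chain (@entry == NULL) or from the node following @entry.
 * A reference is grabbed on the returned entry and the reference on @entry,
 * if any, is dropped.
 */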
static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
                                           struct mb_cache_entry *entry,
                                           u32 key)
{
        struct mb_cache_entry *old_entry = entry;
        struct hlist_bl_node *node;
        struct hlist_bl_head *head;

        head = mb_cache_entry_head(cache, key);
        hlist_bl_lock(head);
        if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
                node = entry->e_hash_list.next;
        else
                node = hlist_bl_first(head);
        while (node) {
                entry = hlist_bl_entry(node, struct mb_cache_entry,
                                       e_hash_list);
                if (entry->e_key == key && entry->e_reusable) {
                        atomic_inc(&entry->e_refcnt);
                        goto out;
                }
                node = node->next;
        }
        entry = NULL;
out:
        hlist_bl_unlock(head);
        if (old_entry)
                mb_cache_entry_put(cache, old_entry);

        return entry;
}
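
/*
 * mb_cache_entry_find_first - find the first reusable entry with the given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for a reusable entry with key @key. Grabs a reference to
 * the first reusable entry found and returns the entry.
 */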
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
                                                 u32 key)
{
        return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);
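
/*
 * mb_cache_entry_find_next - find the next reusable entry with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds the next reusable entry in the hash chain which has the same key as
 * @entry. If @entry is unhashed (which can happen when deletion of the entry
 * races with the search), finds the first reusable entry in the hash chain.
 * The function drops the reference to @entry and returns with a reference to
 * the found entry.
 */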
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
                                                struct mb_cache_entry *entry)
{
        return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);
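
/*
 * mb_cache_entry_get - get a cache entry by block number (and key)
 * @cache: cache we work with
 * @key: key of block number @block
 * @block: block number
 *
 * Returns the entry for (@key, @block) with an extra reference grabbed, or
 * NULL if no such entry is cached.
 */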
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
                                          sector_t block)
{
        struct hlist_bl_node *node;
        struct hlist_bl_head *head;
        struct mb_cache_entry *entry;

        head = mb_cache_entry_head(cache, key);
        hlist_bl_lock(head);
        hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
                if (entry->e_key == key && entry->e_block == block) {
                        atomic_inc(&entry->e_refcnt);
                        goto out;
                }
        }
        entry = NULL;
out:
        hlist_bl_unlock(head);
        return entry;
}
EXPORT_SYMBOL(mb_cache_entry_get);
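
/*
 * mb_cache_entry_delete_block - remove information about a block from cache
 * @cache: cache we work with
 * @key: key of the block @block
 * @block: block number
 *
 * Remove the entry (if any) from @cache that has key @key and records data
 * stored in block @block.
 */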
void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
                                 sector_t block)
{
        struct hlist_bl_node *node;
        struct hlist_bl_head *head;
        struct mb_cache_entry *entry;

        head = mb_cache_entry_head(cache, key);
        hlist_bl_lock(head);
        hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
                if (entry->e_key == key && entry->e_block == block) {
                        /* We keep the hash list ref to keep entry alive */
                        hlist_bl_del_init(&entry->e_hash_list);
                        hlist_bl_unlock(head);
                        spin_lock(&cache->c_list_lock);
                        if (!list_empty(&entry->e_list)) {
                                list_del_init(&entry->e_list);
                                cache->c_entry_count--;
                                atomic_dec(&entry->e_refcnt);
                        }
                        spin_unlock(&cache->c_list_lock);
                        mb_cache_entry_put(cache, entry);
                        return;
                }
        }
        hlist_bl_unlock(head);
}
EXPORT_SYMBOL(mb_cache_entry_delete_block);
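
/*
 * mb_cache_entry_touch - cache entry got used
 * @cache: cache the entry belongs to
 * @entry: entry that got used
 *
 * Marks the entry as used to give it a higher chance of surviving in the
 * cache.
 */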
void mb_cache_entry_touch(struct mb_cache *cache,
                          struct mb_cache_entry *entry)
{
        entry->e_referenced = 1;
}
EXPORT_SYMBOL(mb_cache_entry_touch);

static unsigned long mb_cache_count(struct shrinker *shrink,
                                    struct shrink_control *sc)
{
        struct mb_cache *cache = container_of(shrink, struct mb_cache,
                                              c_shrink);

        return cache->c_entry_count;
}

/* Shrink number of entries in cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
                                     unsigned long nr_to_scan)
{
        struct mb_cache_entry *entry;
        struct hlist_bl_head *head;
        unsigned long shrunk = 0;

        spin_lock(&cache->c_list_lock);
        while (nr_to_scan-- && !list_empty(&cache->c_list)) {
                entry = list_first_entry(&cache->c_list,
                                         struct mb_cache_entry, e_list);
                if (entry->e_referenced) {
                        entry->e_referenced = 0;
                        list_move_tail(&entry->e_list, &cache->c_list);
                        continue;
                }
                list_del_init(&entry->e_list);
                cache->c_entry_count--;
                /*
                 * We keep the LRU list reference so that the entry doesn't
                 * go away from under us.
                 */
                spin_unlock(&cache->c_list_lock);
                head = mb_cache_entry_head(cache, entry->e_key);
                hlist_bl_lock(head);
                if (!hlist_bl_unhashed(&entry->e_hash_list)) {
                        hlist_bl_del_init(&entry->e_hash_list);
                        atomic_dec(&entry->e_refcnt);
                }
                hlist_bl_unlock(head);
                if (mb_cache_entry_put(cache, entry))
                        shrunk++;
                cond_resched();
                spin_lock(&cache->c_list_lock);
        }
        spin_unlock(&cache->c_list_lock);

        return shrunk;
}

static unsigned long mb_cache_scan(struct shrinker *shrink,
                                   struct shrink_control *sc)
{
        struct mb_cache *cache = container_of(shrink, struct mb_cache,
                                              c_shrink);
        return mb_cache_shrink(cache, sc->nr_to_scan);
}

/* We shrink 1/X of the cache when it has too many entries in it */
#define SHRINK_DIVISOR 16

static void mb_cache_shrink_worker(struct work_struct *work)
{
        struct mb_cache *cache = container_of(work, struct mb_cache,
                                              c_shrink_work);
        mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}
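
/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create a cache for keys with 2^bucket_bits hash entries. Returns the new
 * cache or NULL if allocation or shrinker registration fails.
 */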
struct mb_cache *mb_cache_create(int bucket_bits)
{
        struct mb_cache *cache;
        unsigned long bucket_count = 1UL << bucket_bits;
        unsigned long i;

        cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
        if (!cache)
                goto err_out;
        cache->c_bucket_bits = bucket_bits;
        cache->c_max_entries = bucket_count << 4;
        INIT_LIST_HEAD(&cache->c_list);
        spin_lock_init(&cache->c_list_lock);
        cache->c_hash = kmalloc(bucket_count * sizeof(struct hlist_bl_head),
                                GFP_KERNEL);
        if (!cache->c_hash) {
                kfree(cache);
                goto err_out;
        }
        for (i = 0; i < bucket_count; i++)
                INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

        cache->c_shrink.count_objects = mb_cache_count;
        cache->c_shrink.scan_objects = mb_cache_scan;
        cache->c_shrink.seeks = DEFAULT_SEEKS;
        if (register_shrinker(&cache->c_shrink)) {
                kfree(cache->c_hash);
                kfree(cache);
                goto err_out;
        }

        INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

        return cache;

err_out:
        return NULL;
}
EXPORT_SYMBOL(mb_cache_create);
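
/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in cache and cache itself. Caller must make sure nobody
 * (except shrinker) can reach @cache when calling this.
 */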
void mb_cache_destroy(struct mb_cache *cache)
{
        struct mb_cache_entry *entry, *next;

        unregister_shrinker(&cache->c_shrink);

        /*
         * We don't bother with any locking. The cache must not be used at
         * this point.
         */
        list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
                if (!hlist_bl_unhashed(&entry->e_hash_list)) {
                        hlist_bl_del_init(&entry->e_hash_list);
                        atomic_dec(&entry->e_refcnt);
                } else
                        WARN_ON(1);
                list_del(&entry->e_list);
                WARN_ON(atomic_read(&entry->e_refcnt) != 1);
                mb_cache_entry_put(cache, entry);
        }
        kfree(cache->c_hash);
        kfree(cache);
}
EXPORT_SYMBOL(mb_cache_destroy);

static int __init mbcache_init(void)
{
        mb_entry_cache = kmem_cache_create("mbcache",
                                sizeof(struct mb_cache_entry), 0,
                                SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
        if (!mb_entry_cache)
                return -ENOMEM;
        return 0;
}

static void __exit mbcache_exit(void)
{
        kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");