#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>

/*
 * Mbcache is a simple key-value store. Keys need not be unique, however
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute
 * blocks. They use a hash of the block contents as the key and a block (or
 * inode) number as the value. Keys therefore need not be unique (hashes of
 * different blocks may collide), but they are expected to be unique enough
 * that hash chains stay short.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. A fixed
 * size hash table is used for fast key lookups.
 */
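
/*
 * Minimal usage sketch (illustrative only; "xattr_hash" and "block_nr" stand
 * for values a filesystem caller would supply):
 *
 *	struct mb_cache *cache = mb_cache_create(10);
 *	struct mb_cache_entry *entry;
 *
 *	mb_cache_entry_create(cache, GFP_NOFS, xattr_hash, block_nr, true);
 *	entry = mb_cache_entry_get(cache, xattr_hash, block_nr);
 *	if (entry) {
 *		...
 *		mb_cache_entry_put(cache, entry);
 *	}
 *	mb_cache_entry_delete(cache, xattr_hash, block_nr);
 *	mb_cache_destroy(cache);
 */
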
struct mb_cache {
	/* Hash table of entries */
	struct hlist_bl_head	*c_hash;
	/* log2 of hash table size */
	int			c_bucket_bits;
	/* Maximum entries in cache to avoid degrading hash too much */
	unsigned long		c_max_entries;
	/* Protects c_list, c_entry_count */
	spinlock_t		c_list_lock;
	struct list_head	c_list;
	/* Number of entries in cache */
	unsigned long		c_entry_count;
	struct shrinker		c_shrink;
	/* Work for shrinking when the cache has too many entries */
	struct work_struct	c_shrink_work;
};

static struct kmem_cache *mb_entry_cache;

static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan);

static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
							u32 key)
{
	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in cache
 */
#define SYNC_SHRINK_BATCH 64

/*
 * mb_cache_entry_create - create a cache entry
 * @cache: cache where the entry should be created
 * @mask: gfp mask with which the entry should be allocated
 * @key: key of the entry
 * @value: value of the entry
 * @reusable: is the entry shareable by other users of the same key?
 *
 * Creates an entry in @cache with key @key and value @value. Returns -EBUSY
 * if an entry with the same key and value already exists in the cache,
 * -ENOMEM on allocation failure, and 0 on success.
 */
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
			  u64 value, bool reusable)
{
	struct mb_cache_entry *entry, *dup;
	struct hlist_bl_node *dup_node;
	struct hlist_bl_head *head;

	/* Schedule background reclaim if there are too many entries */
	if (cache->c_entry_count >= cache->c_max_entries)
		schedule_work(&cache->c_shrink_work);
	/* Do some sync reclaim if background reclaim cannot keep up */
	if (cache->c_entry_count >= 2 * cache->c_max_entries)
		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);

	entry = kmem_cache_alloc(mb_entry_cache, mask);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->e_list);
	/* Initial reference is for the hash list */
	atomic_set(&entry->e_refcnt, 1);
	entry->e_key = key;
	entry->e_value = value;
	entry->e_reusable = reusable;
	entry->e_referenced = 0;
	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
		if (dup->e_key == key && dup->e_value == value) {
			hlist_bl_unlock(head);
			kmem_cache_free(mb_entry_cache, entry);
			return -EBUSY;
		}
	}
	hlist_bl_add_head(&entry->e_hash_list, head);
	hlist_bl_unlock(head);

	spin_lock(&cache->c_list_lock);
	list_add_tail(&entry->e_list, &cache->c_list);
	/* Grab ref for LRU list */
	atomic_inc(&entry->e_refcnt);
	cache->c_entry_count++;
	spin_unlock(&cache->c_list_lock);

	return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);

void __mb_cache_entry_free(struct mb_cache_entry *entry)
{
	kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);

static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
					   struct mb_cache_entry *entry,
					   u32 key)
{
	struct mb_cache_entry *old_entry = entry;
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	/* Resume after @entry if it is still hashed, else start from the head */
	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
		node = entry->e_hash_list.next;
	else
		node = hlist_bl_first(head);
	while (node) {
		entry = hlist_bl_entry(node, struct mb_cache_entry,
				       e_hash_list);
		if (entry->e_key == key && entry->e_reusable) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
		node = node->next;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	if (old_entry)
		mb_cache_entry_put(cache, old_entry);

	return entry;
}

/*
 * mb_cache_entry_find_first - find the first reusable entry with the given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for a reusable entry with key @key. Grabs a reference to
 * the first reusable entry found and returns the entry.
 */
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
						 u32 key)
{
	return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);

/*
 * mb_cache_entry_find_next - find next reusable entry with the same key
 * @cache: cache where we should search
 * @entry: entry to start the search from
 *
 * Finds the next reusable entry in the hash chain which has the same key as
 * @entry. If @entry is unhashed (which can happen when deletion of the entry
 * races with the search), finds the first reusable entry in the hash chain.
 * The function drops the reference to @entry and returns with a reference to
 * the found entry.
 */
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
						struct mb_cache_entry *entry)
{
	return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);
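
/*
 * Typical iteration over all reusable entries with one key (illustrative;
 * "key" stands for a caller-supplied value):
 *
 *	entry = mb_cache_entry_find_first(cache, key);
 *	while (entry) {
 *		... use entry->e_value ...
 *		entry = mb_cache_entry_find_next(cache, entry);
 *	}
 *
 * Each step drops the reference to the previous entry, so the caller only
 * needs mb_cache_entry_put() when breaking out of the loop early.
 */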

/*
 * mb_cache_entry_get - get a cache entry by value (and key)
 * @cache: cache we work with
 * @key: key
 * @value: value
 *
 * Returns the entry with the given key and value, with a reference grabbed,
 * or NULL if no such entry exists.
 */
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
					  u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	return entry;
}
EXPORT_SYMBOL(mb_cache_entry_get);

/*
 * mb_cache_entry_delete - remove a cache entry
 * @cache: cache we work with
 * @key: key
 * @value: value
 *
 * Remove the entry with key @key and value @value from @cache, if it exists.
 */
void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value) {
			/* We keep hash list reference to keep entry alive */
			hlist_bl_del_init(&entry->e_hash_list);
			hlist_bl_unlock(head);
			spin_lock(&cache->c_list_lock);
			if (!list_empty(&entry->e_list)) {
				list_del_init(&entry->e_list);
				if (!WARN_ONCE(cache->c_entry_count == 0,
		"mbcache: attempt to decrement c_entry_count past zero"))
					cache->c_entry_count--;
				atomic_dec(&entry->e_refcnt);
			}
			spin_unlock(&cache->c_list_lock);
			mb_cache_entry_put(cache, entry);
			return;
		}
	}
	hlist_bl_unlock(head);
}
EXPORT_SYMBOL(mb_cache_entry_delete);

/*
 * mb_cache_entry_touch - cache entry got used
 * @cache: cache the entry belongs to
 * @entry: entry that got used
 *
 * Marks the entry as used to give it a higher chance of surviving in cache.
 */
void mb_cache_entry_touch(struct mb_cache *cache,
			  struct mb_cache_entry *entry)
{
	entry->e_referenced = 1;
}
EXPORT_SYMBOL(mb_cache_entry_touch);

static unsigned long mb_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);

	return cache->c_entry_count;
}

/* Shrink number of entries in cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan)
{
	struct mb_cache_entry *entry;
	struct hlist_bl_head *head;
	unsigned long shrunk = 0;

	spin_lock(&cache->c_list_lock);
	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
		entry = list_first_entry(&cache->c_list,
					 struct mb_cache_entry, e_list);
		/* Give recently used entries a second chance */
		if (entry->e_referenced) {
			entry->e_referenced = 0;
			list_move_tail(&entry->e_list, &cache->c_list);
			continue;
		}
		list_del_init(&entry->e_list);
		cache->c_entry_count--;
		/*
		 * We keep LRU list reference so that entry doesn't go away
		 * from under us.
		 */
		spin_unlock(&cache->c_list_lock);
		head = mb_cache_entry_head(cache, entry->e_key);
		hlist_bl_lock(head);
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		}
		hlist_bl_unlock(head);
		if (mb_cache_entry_put(cache, entry))
			shrunk++;
		cond_resched();
		spin_lock(&cache->c_list_lock);
	}
	spin_unlock(&cache->c_list_lock);

	return shrunk;
}

static unsigned long mb_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);
	return mb_cache_shrink(cache, sc->nr_to_scan);
}

/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

static void mb_cache_shrink_worker(struct work_struct *work)
{
	struct mb_cache *cache = container_of(work, struct mb_cache,
					      c_shrink_work);
	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}

/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create a cache for keys with 2^bucket_bits hash entries.
 */
struct mb_cache *mb_cache_create(int bucket_bits)
{
	struct mb_cache *cache;
	unsigned long bucket_count = 1UL << bucket_bits;
	unsigned long i;

	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		goto err_out;
	cache->c_bucket_bits = bucket_bits;
	cache->c_max_entries = bucket_count << 4;
	INIT_LIST_HEAD(&cache->c_list);
	spin_lock_init(&cache->c_list_lock);
	cache->c_hash = kmalloc_array(bucket_count,
				      sizeof(struct hlist_bl_head),
				      GFP_KERNEL);
	if (!cache->c_hash) {
		kfree(cache);
		goto err_out;
	}
	for (i = 0; i < bucket_count; i++)
		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

	cache->c_shrink.count_objects = mb_cache_count;
	cache->c_shrink.scan_objects = mb_cache_scan;
	cache->c_shrink.seeks = DEFAULT_SEEKS;
	if (register_shrinker(&cache->c_shrink)) {
		kfree(cache->c_hash);
		kfree(cache);
		goto err_out;
	}

	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

	return cache;

err_out:
	return NULL;
}
EXPORT_SYMBOL(mb_cache_create);
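
/*
 * Sizing example for mb_cache_create() above (the bucket_bits value is
 * illustrative): mb_cache_create(10) allocates 1 << 10 = 1024 hash buckets
 * and sets c_max_entries to 1024 << 4 = 16384, i.e. reclaim kicks in once
 * the hash chains average 16 entries.
 */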

/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in the cache and the cache itself. The caller must make
 * sure that nobody (except the shrinker) can reach @cache when calling this.
 */
void mb_cache_destroy(struct mb_cache *cache)
{
	struct mb_cache_entry *entry, *next;

	unregister_shrinker(&cache->c_shrink);

	/*
	 * We don't bother with any locking. Cache must not be used at this
	 * point.
	 */
	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		} else
			WARN_ON(1);
		list_del(&entry->e_list);
		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
		mb_cache_entry_put(cache, entry);
	}
	kfree(cache->c_hash);
	kfree(cache);
}
EXPORT_SYMBOL(mb_cache_destroy);

static int __init mbcache_init(void)
{
	mb_entry_cache = kmem_cache_create("mbcache",
				sizeof(struct mb_cache_entry), 0,
				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!mb_entry_cache)
		return -ENOMEM;
	return 0;
}

static void __exit mbcache_exit(void)
{
	kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");