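/*
 * zbud.c - buddy allocator for storing compressed pages
 *
 * Author: Seth Jennings <sjennings@variantweb.net>
 *
 * zbud is a special purpose allocator for storing compressed pages.  It
 * "buddies" two compressed objects together in a single memory page: the
 * first buddy is placed just after the page header, the last buddy is
 * placed flush with the end of the page.  Limiting each page to two
 * buddies bounds internal fragmentation and keeps reclaim simple, since
 * at most two eviction callbacks are needed to free a whole page.
 *
 * Illustrative API usage (error handling elided; my_ops stands in for a
 * caller-defined struct zbud_ops with an .evict handler):
 *
 *	unsigned long handle;
 *	struct zbud_pool *pool = zbud_create_pool(GFP_KERNEL, &my_ops);
 *
 *	zbud_alloc(pool, size, GFP_KERNEL, &handle);
 *	memcpy(zbud_map(pool, handle), data, size);
 *	zbud_unmap(pool, handle);
 *	...
 *	zbud_free(pool, handle);
 *	zbud_destroy_pool(pool);
 */
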
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zbud.h>
#include <linux/zpool.h>
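
/*****************
 * Structures
 *****************/
/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool.  NCHUNKS_ORDER of 6 means the
 * allocation granularity is PAGE_SIZE/64 chunks.  One chunk of each zbud
 * page holds the zbud header, so NCHUNKS works out to 63 free chunks per
 * page, and each pool keeps 63 unbuddied freelists.
 */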
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED CHUNK_SIZE
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
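
/**
 * struct zbud_pool - stores metadata for each zbud pool
 * @lock:	protects all pool fields and all zbud page fields in the
 *		pages that belong to this pool
 * @unbuddied:	lists tracking zbud pages that contain exactly one buddy,
 *		indexed by the number of free chunks left in the page
 * @buddied:	list tracking the zbud pages that contain two buddies;
 *		these pages are full
 * @lru:	list tracking the zbud pages in LRU order, most recently
 *		used buddy first
 * @pages_nr:	number of zbud pages in the pool
 * @ops:	user-defined operations (eviction callback) supplied at
 *		pool creation time
 */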
struct zbud_pool {
	spinlock_t lock;
	struct list_head unbuddied[NCHUNKS];
	struct list_head buddied;
	struct list_head lru;
	u64 pages_nr;
	struct zbud_ops *ops;
};
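
/**
 * struct zbud_header - zbud page metadata, stored in the first chunk of
 *			each zbud page
 * @buddy:	links the page into the unbuddied/buddied lists in the pool
 * @lru:	links the page into the pool's LRU list
 * @first_chunks:	size of the first buddy in chunks, 0 if free
 * @last_chunks:	size of the last buddy in chunks, 0 if free
 * @under_reclaim:	set while reclaim is evicting this page so that
 *			zbud_free() does not free it underneath reclaim
 */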
struct zbud_header {
	struct list_head buddy;
	struct list_head lru;
	unsigned int first_chunks;
	unsigned int last_chunks;
	bool under_reclaim;
};
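
/*****************
 * zpool
 ****************/

/* Glue that exposes zbud through the generic zpool allocation API. */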
#ifdef CONFIG_ZPOOL

static int zbud_zpool_evict(struct zbud_pool *pool, unsigned long handle)
{
	return zpool_evict(pool, handle);
}

static struct zbud_ops zbud_zpool_ops = {
	.evict = zbud_zpool_evict
};

static void *zbud_zpool_create(char *name, gfp_t gfp,
			struct zpool_ops *zpool_ops)
{
	return zbud_create_pool(gfp, zpool_ops ? &zbud_zpool_ops : NULL);
}

static void zbud_zpool_destroy(void *pool)
{
	zbud_destroy_pool(pool);
}

static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return zbud_alloc(pool, size, gfp, handle);
}

static void zbud_zpool_free(void *pool, unsigned long handle)
{
	zbud_free(pool, handle);
}

static int zbud_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		/* Try up to 8 LRU pages per reclaim attempt */
		ret = zbud_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *zbud_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return zbud_map(pool, handle);
}

static void zbud_zpool_unmap(void *pool, unsigned long handle)
{
	zbud_unmap(pool, handle);
}

static u64 zbud_zpool_total_size(void *pool)
{
	return zbud_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver zbud_zpool_driver = {
	.type =		"zbud",
	.owner =	THIS_MODULE,
	.create =	zbud_zpool_create,
	.destroy =	zbud_zpool_destroy,
	.malloc =	zbud_zpool_malloc,
	.free =		zbud_zpool_free,
	.shrink =	zbud_zpool_shrink,
	.map =		zbud_zpool_map,
	.unmap =	zbud_zpool_unmap,
	.total_size =	zbud_zpool_total_size,
};

MODULE_ALIAS("zpool-zbud");
#endif /* CONFIG_ZPOOL */
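
/*****************
 * Helpers
 *****************/
/* Just to make the code easier to read */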
enum buddy {
	FIRST,
	LAST
};

/* Converts an allocation size in bytes to size in zbud chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

/* Initializes the zbud header of a newly allocated zbud page */
static struct zbud_header *init_zbud_page(struct page *page)
{
	struct zbud_header *zhdr = page_address(page);

	zhdr->first_chunks = 0;
	zhdr->last_chunks = 0;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_LIST_HEAD(&zhdr->lru);
	zhdr->under_reclaim = false;
	return zhdr;
}

/* Frees the zbud page backing the given header */
static void free_zbud_page(struct zbud_header *zhdr)
{
	__free_page(virt_to_page(zhdr));
}

/*
 * Encodes the handle of a particular buddy within a zbud page.
 * The pool lock should be held, as this function reads last_chunks.
 */
static unsigned long encode_handle(struct zbud_header *zhdr, enum buddy bud)
{
	unsigned long handle;

	/*
	 * For now, the encoded handle indicates exactly where the data
	 * resides within the page: the first buddy starts just after the
	 * page header, and the last buddy ends flush with the end of the
	 * page, so its start is found by backing up last_chunks chunks.
	 */
	handle = (unsigned long)zhdr;
	if (bud == FIRST)
		/* skip over zbud header */
		handle += ZHDR_SIZE_ALIGNED;
	else /* bud == LAST */
		handle += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
	return handle;
}

/* Returns the zbud page where a given handle is stored */
static struct zbud_header *handle_to_zbud_header(unsigned long handle)
{
	return (struct zbud_header *)(handle & PAGE_MASK);
}

/* Returns the number of free chunks in a zbud page */
static int num_free_chunks(struct zbud_header *zhdr)
{
	/*
	 * Rather than branch for different situations, just use the fact
	 * that free buddies have a length of zero to simplify everything.
	 * NCHUNKS already excludes the header chunk.
	 */
	return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
}
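
/*****************
 * API Functions
 *****************/
/**
 * zbud_create_pool() - create a new zbud pool
 * @gfp:	gfp flags when allocating the zbud pool structure
 * @ops:	user-defined operations for the zbud pool
 *
 * Return: pointer to the new zbud pool or NULL if the metadata allocation
 * failed.
 */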
struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops)
{
	struct zbud_pool *pool;
	int i;

	pool = kmalloc(sizeof(struct zbud_pool), gfp);
	if (!pool)
		return NULL;
	spin_lock_init(&pool->lock);
	for_each_unbuddied_list(i, 0)
		INIT_LIST_HEAD(&pool->unbuddied[i]);
	INIT_LIST_HEAD(&pool->buddied);
	INIT_LIST_HEAD(&pool->lru);
	pool->pages_nr = 0;
	pool->ops = ops;
	return pool;
}
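
/**
 * zbud_destroy_pool() - destroys an existing zbud pool
 * @pool:	the zbud pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */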
void zbud_destroy_pool(struct zbud_pool *pool)
{
	kfree(pool);
}
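
/**
 * zbud_alloc() - allocates a region of a given size
 * @pool:	zbud pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough
 * to satisfy the allocation request.  A search of the unbuddied lists is
 * performed first.  If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as zbud pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the size cannot fit in a zbud page,
 * or -ENOMEM if the pool was unable to allocate a new page.
 */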
int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks, i, freechunks;
	struct zbud_header *zhdr = NULL;
	enum buddy bud;
	struct page *page;

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;
	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		return -ENOSPC;
	chunks = size_to_chunks(size);
	spin_lock(&pool->lock);

	/* First, try to find an unbuddied zbud page. */
	zhdr = NULL;
	for_each_unbuddied_list(i, chunks) {
		if (!list_empty(&pool->unbuddied[i])) {
			zhdr = list_first_entry(&pool->unbuddied[i],
					struct zbud_header, buddy);
			list_del(&zhdr->buddy);
			if (zhdr->first_chunks == 0)
				bud = FIRST;
			else
				bud = LAST;
			goto found;
		}
	}

	/* Couldn't find an unbuddied zbud page, create a new one */
	spin_unlock(&pool->lock);
	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;
	spin_lock(&pool->lock);
	pool->pages_nr++;
	zhdr = init_zbud_page(page);
	bud = FIRST;

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else
		zhdr->last_chunks = chunks;

	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0) {
		/* Still unbuddied, add to the unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	} else {
		/* Page now holds two buddies, add to the buddied list */
		list_add(&zhdr->buddy, &pool->buddied);
	}

	/* Add/move zbud page to beginning of LRU */
	if (!list_empty(&zhdr->lru))
		list_del(&zhdr->lru);
	list_add(&zhdr->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);

	return 0;
}
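
/**
 * zbud_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by zbud_alloc()
 *
 * In the case that the zbud page in which the allocation resides is under
 * reclaim, as indicated by the under_reclaim field being set, this function
 * only sets the first|last_chunks to 0.  The page is actually freed once
 * both buddies are evicted (see zbud_reclaim_page() below).
 */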
void zbud_free(struct zbud_pool *pool, unsigned long handle)
{
	struct zbud_header *zhdr;
	int freechunks;

	spin_lock(&pool->lock);
	zhdr = handle_to_zbud_header(handle);

	/* If first buddy, handle will be page aligned */
	if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK)
		zhdr->last_chunks = 0;
	else
		zhdr->first_chunks = 0;

	if (zhdr->under_reclaim) {
		/* zbud page is under reclaim, reclaim will free */
		spin_unlock(&pool->lock);
		return;
	}

	/* Remove from existing buddy list */
	list_del(&zhdr->buddy);

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* zbud page is empty, free */
		list_del(&zhdr->lru);
		free_zbud_page(zhdr);
		pool->pages_nr--;
	} else {
		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	}

	spin_unlock(&pool->lock);
}

#define list_tail_entry(ptr, type, member) \
	list_entry((ptr)->prev, type, member)
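
/**
 * zbud_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * zbud reclaim is different from normal system reclaim in that it is done
 * from the bottom, up.  This is because only the bottom layer, zbud, has
 * information on how the allocations are organized within each zbud page.
 * This has the potential to create interesting locking situations between
 * zbud and the user, however.
 *
 * To avoid these, this is how zbud_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls zbud_reclaim_page().
 * zbud_reclaim_page() will remove a zbud page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero.  zbud_reclaim_page() will add the zbud page back to the
 * appropriate list and try the next zbud page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called zbud_free() on the handle.  zbud_free()
 * will only set the first|last_chunks to 0 since the page is under reclaim;
 * once both buddies are evicted, the zbud page is freed here.
 *
 * Return: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */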
int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
{
	int i, ret, freechunks;
	struct zbud_header *zhdr;
	unsigned long first_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||
			retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		zhdr = list_tail_entry(&pool->lru, struct zbud_header, lru);
		list_del(&zhdr->lru);
		list_del(&zhdr->buddy);
		/* Protect zbud page against free */
		zhdr->under_reclaim = true;
		/*
		 * We need to encode the handles before unlocking, since we
		 * can race with free that will set (first|last)_chunks to 0
		 */
		first_handle = 0;
		last_handle = 0;
		if (zhdr->first_chunks)
			first_handle = encode_handle(zhdr, FIRST);
		if (zhdr->last_chunks)
			last_handle = encode_handle(zhdr, LAST);
		spin_unlock(&pool->lock);

		/* Issue the eviction callback(s) */
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		spin_lock(&pool->lock);
		zhdr->under_reclaim = false;
		if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
			/*
			 * Both buddies are now free, free the zbud page and
			 * return success.
			 */
			free_zbud_page(zhdr);
			pool->pages_nr--;
			spin_unlock(&pool->lock);
			return 0;
		} else if (zhdr->first_chunks == 0 ||
				zhdr->last_chunks == 0) {
			/* add to unbuddied list */
			freechunks = num_free_chunks(zhdr);
			list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
		} else {
			/* add to buddied list */
			list_add(&zhdr->buddy, &pool->buddied);
		}

		/* add to beginning of LRU */
		list_add(&zhdr->lru, &pool->lru);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
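
/**
 * zbud_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * While mapping is trivial for zbud (the handle is the kernel virtual
 * address of the data), other allocators implementing this API may have
 * more complex information encoded in the handle and may need to create
 * temporary mappings to make the data accessible to the user.
 *
 * Return: a pointer to the mapped allocation
 */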
void *zbud_map(struct zbud_pool *pool, unsigned long handle)
{
	return (void *)(handle);
}
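
/**
 * zbud_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 *
 * A no-op for zbud, since zbud_map() creates no temporary mapping.
 */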
void zbud_unmap(struct zbud_pool *pool, unsigned long handle)
{
}
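
/**
 * zbud_get_pool_size() - gets the zbud pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Return: size in pages of the given pool.
 */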
u64 zbud_get_pool_size(struct zbud_pool *pool)
{
	return pool->pages_nr;
}

static int __init init_zbud(void)
{
	/* Make sure the zbud header will fit in one chunk */
	BUILD_BUG_ON(sizeof(struct zbud_header) > ZHDR_SIZE_ALIGNED);
	pr_info("loaded\n");

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zbud_zpool_driver);
#endif

	return 0;
}

static void __exit exit_zbud(void)
{
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zbud_zpool_driver);
#endif

	pr_info("unloaded\n");
}

module_init(init_zbud);
module_exit(exit_zbud);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages");