1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45
46#include <linux/atomic.h>
47#include <linux/list.h>
48#include <linux/mm.h>
49#include <linux/module.h>
50#include <linux/preempt.h>
51#include <linux/slab.h>
52#include <linux/spinlock.h>
53#include <linux/zbud.h>
54#include <linux/zpool.h>
55
56
57
58
59
60
61
62
63
64
65
66
67
/*
 * zbud pages are divided into "chunks"; allocations are sized and tracked
 * in chunk multiples.  A page is split into 2^NCHUNKS_ORDER chunks, and the
 * first chunk is reserved for the struct zbud_header.
 */
#define NCHUNKS_ORDER 6

/* shift converting bytes to chunks (depends on the arch page size) */
#define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE (1 << CHUNK_SHIFT)
/* the header occupies exactly one chunk at the start of each zbud page */
#define ZHDR_SIZE_ALIGNED CHUNK_SIZE
/* number of chunks per page actually usable for buddy data */
#define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
/**
 * struct zbud_pool - stores metadata for each zbud pool
 * @lock:	protects all pool fields and the zbud_header fields of pages
 *		belonging to this pool (every accessor below takes it)
 * @unbuddied:	lists of zbud pages containing exactly one buddy, indexed by
 *		the number of free chunks remaining in the page
 * @buddied:	list of zbud pages holding two buddies
 * @lru:	zbud pages in LRU order; reclaim evicts from the tail
 * @pages_nr:	number of zbud pages currently in the pool
 * @ops:	user-supplied operations (eviction callback); may be NULL
 */
struct zbud_pool {
	spinlock_t lock;
	struct list_head unbuddied[NCHUNKS];
	struct list_head buddied;
	struct list_head lru;
	u64 pages_nr;
	const struct zbud_ops *ops;
#ifdef CONFIG_ZPOOL
	/* back-pointer and callbacks for the zpool integration layer */
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
#endif
};
105
106
107
108
109
110
111
112
113
/**
 * struct zbud_header - zbud page metadata, stored at the start of the page
 * @buddy:		links the page into an unbuddied or the buddied list
 * @lru:		links the page into the pool's LRU list
 * @first_chunks:	size of the first buddy in chunks, 0 if free
 * @last_chunks:	size of the last buddy in chunks, 0 if free
 * @under_reclaim:	set while reclaim owns the page so that zbud_free()
 *			defers the actual page free to zbud_reclaim_page()
 */
struct zbud_header {
	struct list_head buddy;
	struct list_head lru;
	unsigned int first_chunks;
	unsigned int last_chunks;
	bool under_reclaim;
};
121
122
123
124
125
126#ifdef CONFIG_ZPOOL
127
128static int zbud_zpool_evict(struct zbud_pool *pool, unsigned long handle)
129{
130 if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
131 return pool->zpool_ops->evict(pool->zpool, handle);
132 else
133 return -ENOENT;
134}
135
/* ops table used when zbud is driven through the generic zpool API */
static const struct zbud_ops zbud_zpool_ops = {
	.evict = zbud_zpool_evict
};
139
140static void *zbud_zpool_create(char *name, gfp_t gfp,
141 const struct zpool_ops *zpool_ops,
142 struct zpool *zpool)
143{
144 struct zbud_pool *pool;
145
146 pool = zbud_create_pool(gfp, zpool_ops ? &zbud_zpool_ops : NULL);
147 if (pool) {
148 pool->zpool = zpool;
149 pool->zpool_ops = zpool_ops;
150 }
151 return pool;
152}
153
/* zpool "destroy" adapter; the opaque pointer is really a zbud_pool. */
static void zbud_zpool_destroy(void *pool)
{
	struct zbud_pool *zbud = pool;

	zbud_destroy_pool(zbud);
}
158
159static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp,
160 unsigned long *handle)
161{
162 return zbud_alloc(pool, size, gfp, handle);
163}
/* zpool "free" adapter: delegate straight to zbud_free(). */
static void zbud_zpool_free(void *pool, unsigned long handle)
{
	struct zbud_pool *zbud = pool;

	zbud_free(zbud, handle);
}
168
169static int zbud_zpool_shrink(void *pool, unsigned int pages,
170 unsigned int *reclaimed)
171{
172 unsigned int total = 0;
173 int ret = -EINVAL;
174
175 while (total < pages) {
176 ret = zbud_reclaim_page(pool, 8);
177 if (ret < 0)
178 break;
179 total++;
180 }
181
182 if (reclaimed)
183 *reclaimed = total;
184
185 return ret;
186}
187
188static void *zbud_zpool_map(void *pool, unsigned long handle,
189 enum zpool_mapmode mm)
190{
191 return zbud_map(pool, handle);
192}
/* zpool "unmap" adapter; zbud_unmap() is a no-op. */
static void zbud_zpool_unmap(void *pool, unsigned long handle)
{
	struct zbud_pool *zbud = pool;

	zbud_unmap(zbud, handle);
}
197
198static u64 zbud_zpool_total_size(void *pool)
199{
200 return zbud_get_pool_size(pool) * PAGE_SIZE;
201}
202
/* registration table wiring the zbud adapters into the zpool framework */
static struct zpool_driver zbud_zpool_driver = {
	.type = "zbud",
	.owner = THIS_MODULE,
	.create = zbud_zpool_create,
	.destroy = zbud_zpool_destroy,
	.malloc = zbud_zpool_malloc,
	.free = zbud_zpool_free,
	.shrink = zbud_zpool_shrink,
	.map = zbud_zpool_map,
	.unmap = zbud_zpool_unmap,
	.total_size = zbud_zpool_total_size,
};
215
216MODULE_ALIAS("zpool-zbud");
217#endif
218
219
220
221
222
/* which buddy within a zbud page a handle refers to */
enum buddy {
	FIRST,	/* buddy placed right after the header */
	LAST	/* buddy packed against the end of the page */
};
227
228
229static int size_to_chunks(size_t size)
230{
231 return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
232}
233
/* iterate _iter over the unbuddied list indices from _begin to NCHUNKS-1 */
#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
236
237
238static struct zbud_header *init_zbud_page(struct page *page)
239{
240 struct zbud_header *zhdr = page_address(page);
241 zhdr->first_chunks = 0;
242 zhdr->last_chunks = 0;
243 INIT_LIST_HEAD(&zhdr->buddy);
244 INIT_LIST_HEAD(&zhdr->lru);
245 zhdr->under_reclaim = 0;
246 return zhdr;
247}
248
249
/* Return the physical page backing @zhdr to the page allocator. */
static void free_zbud_page(struct zbud_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	__free_page(page);
}
254
255
256
257
258
259static unsigned long encode_handle(struct zbud_header *zhdr, enum buddy bud)
260{
261 unsigned long handle;
262
263
264
265
266
267
268
269 handle = (unsigned long)zhdr;
270 if (bud == FIRST)
271
272 handle += ZHDR_SIZE_ALIGNED;
273 else
274 handle += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
275 return handle;
276}
277
278
279static struct zbud_header *handle_to_zbud_header(unsigned long handle)
280{
281 return (struct zbud_header *)(handle & PAGE_MASK);
282}
283
284
285static int num_free_chunks(struct zbud_header *zhdr)
286{
287
288
289
290
291 return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
292}
293
294
295
296
297
298
299
300
301
302
303
304
305struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops)
306{
307 struct zbud_pool *pool;
308 int i;
309
310 pool = kzalloc(sizeof(struct zbud_pool), gfp);
311 if (!pool)
312 return NULL;
313 spin_lock_init(&pool->lock);
314 for_each_unbuddied_list(i, 0)
315 INIT_LIST_HEAD(&pool->unbuddied[i]);
316 INIT_LIST_HEAD(&pool->buddied);
317 INIT_LIST_HEAD(&pool->lru);
318 pool->pages_nr = 0;
319 pool->ops = ops;
320 return pool;
321}
322
323
324
325
326
327
328
/**
 * zbud_destroy_pool() - destroys an existing zbud pool
 * @pool:	the pool to destroy
 *
 * Only the pool metadata is released here; the caller is responsible for
 * having freed all allocations beforehand.
 */
void zbud_destroy_pool(struct zbud_pool *pool)
{
	kfree(pool);
}
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
/**
 * zbud_alloc() - allocates a region of a given size
 * @pool:	zbud pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation (output)
 *
 * First searches the unbuddied lists for a page with a large-enough free
 * buddy slot; if none exists, a new page is allocated (with the pool lock
 * dropped around alloc_page()) and the request placed in its first slot.
 * The page then moves to the head of the LRU.
 *
 * Return: 0 on success; -EINVAL for a zero size or __GFP_HIGHMEM;
 * -ENOSPC if @size cannot fit in a zbud page; -ENOMEM if a needed page
 * allocation failed.
 */
int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks, i, freechunks;
	struct zbud_header *zhdr = NULL;
	enum buddy bud;
	struct page *page;

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;
	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		return -ENOSPC;
	chunks = size_to_chunks(size);
	spin_lock(&pool->lock);

	/* First, try to find an unbuddied zbud page. */
	zhdr = NULL;
	for_each_unbuddied_list(i, chunks) {
		if (!list_empty(&pool->unbuddied[i])) {
			zhdr = list_first_entry(&pool->unbuddied[i],
					struct zbud_header, buddy);
			list_del(&zhdr->buddy);
			if (zhdr->first_chunks == 0)
				bud = FIRST;
			else
				bud = LAST;
			goto found;
		}
	}

	/* Couldn't find an unbuddied page; allocate a new one (unlocked). */
	spin_unlock(&pool->lock);
	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;
	spin_lock(&pool->lock);
	pool->pages_nr++;
	zhdr = init_zbud_page(page);
	bud = FIRST;

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else
		zhdr->last_chunks = chunks;

	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0) {
		/* Still has one free buddy: add to the unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	} else {
		/* Both buddies occupied: add to the buddied list */
		list_add(&zhdr->buddy, &pool->buddied);
	}

	/* Add/move zbud page to beginning of LRU */
	if (!list_empty(&zhdr->lru))
		list_del(&zhdr->lru);
	list_add(&zhdr->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);

	return 0;
}
418
419
420
421
422
423
424
425
426
427
428
/**
 * zbud_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle returned by zbud_alloc() for this allocation
 *
 * If the containing page is currently under reclaim, the buddy is only
 * marked free here; zbud_reclaim_page() owns the page and will free it
 * once eviction completes.
 */
void zbud_free(struct zbud_pool *pool, unsigned long handle)
{
	struct zbud_header *zhdr;
	int freechunks;

	spin_lock(&pool->lock);
	zhdr = handle_to_zbud_header(handle);

	/* If first buddy, handle is page-aligned + ZHDR_SIZE_ALIGNED */
	if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK)
		zhdr->last_chunks = 0;
	else
		zhdr->first_chunks = 0;

	if (zhdr->under_reclaim) {
		/* zbud page is under reclaim, reclaim will free */
		spin_unlock(&pool->lock);
		return;
	}

	/* Remove from existing buddy list */
	list_del(&zhdr->buddy);

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* zbud page is empty, free */
		list_del(&zhdr->lru);
		free_zbud_page(zhdr);
		pool->pages_nr--;
	} else {
		/* One buddy remains: re-file on the unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	}

	spin_unlock(&pool->lock);
}
465
/* grab the last entry of a list — used to take the tail of the LRU */
#define list_tail_entry(ptr, type, member) \
	list_entry((ptr)->prev, type, member)
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
/**
 * zbud_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of LRU pages for which eviction is attempted before
 *		giving up
 *
 * For each LRU-tail page, the page is detached from all lists, marked
 * under_reclaim, and the user's eviction handler is invoked (with the pool
 * lock dropped) for each occupied buddy.  If both buddies end up free the
 * page is released; otherwise it is re-filed on the appropriate list and
 * the next LRU page is tried.
 *
 * Return: 0 if a page was successfully freed; -EINVAL if there are no
 * pages to evict, no eviction handler is registered, or @retries is 0;
 * -EAGAIN if the retry limit was hit without freeing a page.
 */
int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
{
	int i, ret, freechunks;
	struct zbud_header *zhdr;
	unsigned long first_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||
			retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		zhdr = list_tail_entry(&pool->lru, struct zbud_header, lru);
		list_del(&zhdr->lru);
		list_del(&zhdr->buddy);
		/* Protect zbud page against free */
		zhdr->under_reclaim = true;
		/*
		 * Encode the handles before unlocking: a racing zbud_free()
		 * may zero first_chunks/last_chunks once the lock is dropped.
		 */
		first_handle = 0;
		last_handle = 0;
		if (zhdr->first_chunks)
			first_handle = encode_handle(zhdr, FIRST);
		if (zhdr->last_chunks)
			last_handle = encode_handle(zhdr, LAST);
		spin_unlock(&pool->lock);

		/* Issue the eviction callback(s), unlocked */
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		spin_lock(&pool->lock);
		zhdr->under_reclaim = false;
		if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
			/*
			 * Both buddies are now free (evicted, or freed by a
			 * racing zbud_free): release the page and report
			 * success.
			 */
			free_zbud_page(zhdr);
			pool->pages_nr--;
			spin_unlock(&pool->lock);
			return 0;
		} else if (zhdr->first_chunks == 0 ||
				zhdr->last_chunks == 0) {
			/* add to unbuddied list */
			freechunks = num_free_chunks(zhdr);
			list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
		} else {
			/* add to buddied list */
			list_add(&zhdr->buddy, &pool->buddied);
		}

		/* add to beginning of LRU */
		list_add(&zhdr->lru, &pool->lru);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
573
574
575
576
577
578
579
580
581
582
583
584
585
/**
 * zbud_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides (unused)
 * @handle:	handle returned by zbud_alloc()
 *
 * zbud handles are direct kernel virtual addresses of the buddy data, so
 * "mapping" is just a cast; nothing is pinned or remapped.
 *
 * Return: pointer to the mapped allocation.
 */
void *zbud_map(struct zbud_pool *pool, unsigned long handle)
{
	return (void *)handle;
}
590
591
592
593
594
595
/**
 * zbud_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides (unused)
 * @handle:	handle returned by zbud_alloc() (unused)
 *
 * Intentionally empty: zbud_map() performs no real mapping, so there is
 * nothing to undo.
 */
void zbud_unmap(struct zbud_pool *pool, unsigned long handle)
{
	/* no-op by design */
}
599
600
601
602
603
604
605
606
/**
 * zbud_get_pool_size() - gets the zbud pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns pages_nr, the count maintained under pool->lock by
 * zbud_alloc()/zbud_free()/zbud_reclaim_page(); read here without the lock.
 */
u64 zbud_get_pool_size(struct zbud_pool *pool)
{
	return pool->pages_nr;
}
611
/* Module init: sanity-check the header size and register with zpool. */
static int __init init_zbud(void)
{
	/* the header must fit in the single chunk reserved for it */
	BUILD_BUG_ON(sizeof(struct zbud_header) > ZHDR_SIZE_ALIGNED);
	pr_info("loaded\n");

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zbud_zpool_driver);
#endif

	return 0;
}
624
/* Module exit: unregister the zpool driver, mirroring init_zbud(). */
static void __exit exit_zbud(void)
{
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zbud_zpool_driver);
#endif

	pr_info("unloaded\n");
}
633
634module_init(init_zbud);
635module_exit(exit_zbud);
636
637MODULE_LICENSE("GPL");
638MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
639MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages");
640