/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011  Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

/*
 * Following is how we use various fields and flags of underlying
 * struct page(s) to form a zspage.
 *
 * Usage of struct page fields:
 *	page->private: points to zspage
 *	page->freelist(index): links together all component pages of a zspage
 *		For the huge page, this is always 0, so we use this field
 *		to store handle.
 *	page->units: first object offset in a subpage of zspage
 *
 * Usage of struct page flags:
 *	PG_private: identifies the first component page
 *	PG_owner_priv_1: identifies the huge component page
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/magic.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>
#include <linux/mount.h>
#include <linux/migrate.h>
#include <linux/pagemap.h>

#define ZSPAGE_MAGIC	0x58

/*
 * This must be power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page which avoids complex case of mapping 2 pages simply
 * to restore link_free pointer values.
 */
#define ZS_ALIGN	8

/*
 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
 * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
 */
#define ZS_MAX_ZSPAGE_ORDER 2
#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)

#define ZS_HANDLE_SIZE (sizeof(unsigned long))

/*
 * Object location (<PFN, obj_idx>) is encoded as
 * a single (unsigned long) handle value.
 *
 * Note that object index <obj_idx> starts from 0.
 *
 * This is made more complicated by various memory models and PAE.
 */

#ifndef MAX_PHYSMEM_BITS
#ifdef CONFIG_HIGHMEM64G
#define MAX_PHYSMEM_BITS 36
#else /* !CONFIG_HIGHMEM64G */
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT
 */
#define MAX_PHYSMEM_BITS BITS_PER_LONG
#endif
#endif
#define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)

/*
 * Memory for allocating for handle keeps object position by
 * encoding <page, obj_idx> and the encoded value has a room
 * in the least significant bit (see obj_to_location()).
 * We use the bit to synchronize between object access by
 * user and migration.
 */
#define HANDLE_PIN_BIT	0

/*
 * Head in allocated object should have OBJ_ALLOCATED_TAG
 * to identify the object was allocated or not.
 * It's okay to add the status bit in the least bit because
 * header keeps handle which is 4byte-aligned address so we
 * have room for two bit at least.
 */
#define OBJ_ALLOCATED_TAG 1
#define OBJ_TAG_BITS 1
#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
#define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
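
/*
 * Illustrative layout (worked example, not from the original source):
 * on a 64-bit build with 4K pages and MAX_PHYSMEM_BITS == 64,
 * _PFN_BITS is 52 and OBJ_INDEX_BITS is 64 - 52 - 1 = 11, so an
 * encoded object value looks like:
 *
 *	63                   12 11          1  0
 *	+----------------------+-------------+---+
 *	|         PFN          |   obj_idx   |tag|
 *	+----------------------+-------------+---+
 *
 * location_to_obj() packs it as ((pfn << OBJ_INDEX_BITS) | obj_idx)
 * << OBJ_TAG_BITS and obj_to_location() reverses the same steps.
 */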

#define FULLNESS_BITS	2
#define CLASS_BITS	8
#define ISOLATED_BITS	3
#define MAGIC_VAL_BITS	8

#define MAX(a, b) ((a) >= (b) ? (a) : (b))

/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
/* each chunk includes extra space to keep handle */
#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE

/*
 * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - Large number of size classes is potentially wasteful as free pages are
 *    spread across these classes
 *  - Small number of size classes causes large internal fragmentation
 *  - Probably its better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as multiple of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
 * (reason above)
 */
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> CLASS_BITS)
#define ZS_SIZE_CLASSES	(DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
				      ZS_SIZE_CLASS_DELTA) + 1)
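
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096 and
 * OBJ_INDEX_BITS == 11 on 64-bit): ZS_SIZE_CLASS_DELTA = 4096 >> 8 = 16,
 * ZS_MIN_ALLOC_SIZE = MAX(32, 4 * 4096 >> 11) = MAX(32, 8) = 32, so
 * ZS_SIZE_CLASSES = (4096 - 32) / 16 + 1 = 255 classes with chunk sizes
 * 32, 48, 64, ..., 4096.
 */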

enum fullness_group {
	ZS_EMPTY,
	ZS_ALMOST_EMPTY,
	ZS_ALMOST_FULL,
	ZS_FULL,
	NR_ZS_FULLNESS,
};

enum zs_stat_type {
	CLASS_EMPTY,
	CLASS_ALMOST_EMPTY,
	CLASS_ALMOST_FULL,
	CLASS_FULL,
	OBJ_ALLOCATED,
	OBJ_USED,
	NR_ZS_STAT_TYPE,
};

struct zs_size_stat {
	unsigned long objs[NR_ZS_STAT_TYPE];
};

#ifdef CONFIG_ZSMALLOC_STAT
static struct dentry *zs_stat_root;
#endif

#ifdef CONFIG_COMPACTION
static struct vfsmount *zsmalloc_mnt;
#endif

/*
 * We assign a zspage to ZS_ALMOST_EMPTY fullness group when:
 *	n <= N / f, where
 * n = number of allocated objects
 * N = total number of objects zspage can store
 * f = fullness_threshold_frac
 *
 * Similarly, we assign zspage to:
 *	ZS_ALMOST_FULL	when n > N / f
 *	ZS_EMPTY	when n == 0
 *	ZS_FULL		when n == N
 *
 * (see: fix_fullness_group())
 */
static const int fullness_threshold_frac = 4;

struct size_class {
	spinlock_t lock;
	struct list_head fullness_list[NR_ZS_FULLNESS];
	/*
	 * Size of objects stored in this class. Must be multiple
	 * of ZS_ALIGN.
	 */
	int size;
	int objs_per_zspage;
	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;

	unsigned int index;
	struct zs_size_stat stats;
};

/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
static void SetPageHugeObject(struct page *page)
{
	SetPageOwnerPriv1(page);
}

static void ClearPageHugeObject(struct page *page)
{
	ClearPageOwnerPriv1(page);
}

static int PageHugeObject(struct page *page)
{
	return PageOwnerPriv1(page);
}

/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, zspage->freeobj gives head of this list.
 *
 * This must be power of 2 and less than or equal to ZS_ALIGN
 */
struct link_free {
	union {
		/*
		 * Free object index;
		 * It's valid for non-allocated object
		 */
		unsigned long next;
		/*
		 * Handle of allocated object.
		 */
		unsigned long handle;
	};
};

struct zs_pool {
	const char *name;

	struct size_class *size_class[ZS_SIZE_CLASSES];
	struct kmem_cache *handle_cachep;
	struct kmem_cache *zspage_cachep;

	atomic_long_t pages_allocated;

	struct zs_pool_stats stats;

	/* Compact classes */
	struct shrinker shrinker;
	/*
	 * To signify that register_shrinker() was successful
	 * and unregister_shrinker() will not Oops.
	 */
	bool shrinker_enabled;
#ifdef CONFIG_ZSMALLOC_STAT
	struct dentry *stat_dentry;
#endif
#ifdef CONFIG_COMPACTION
	struct inode *inode;
	struct work_struct free_work;
#endif
};

struct zspage {
	struct {
		unsigned int fullness:FULLNESS_BITS;
		unsigned int class:CLASS_BITS + 1;
		unsigned int isolated:ISOLATED_BITS;
		unsigned int magic:MAGIC_VAL_BITS;
	};
	unsigned int inuse;
	unsigned int freeobj;
	struct page *first_page;
	struct list_head list; /* fullness list */
#ifdef CONFIG_COMPACTION
	rwlock_t lock;
#endif
};

struct mapping_area {
#ifdef CONFIG_PGTABLE_MAPPING
	struct vm_struct *vm; /* vm area for mapping object that span pages */
#else
	char *vm_buf; /* copy buffer for objects that span pages */
#endif
	char *vm_addr; /* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
};

#ifdef CONFIG_COMPACTION
static int zsmalloc_mount(void);
static void zsmalloc_unmount(void);
static int zs_register_migration(struct zs_pool *pool);
static void zs_unregister_migration(struct zs_pool *pool);
static void migrate_lock_init(struct zspage *zspage);
static void migrate_read_lock(struct zspage *zspage);
static void migrate_read_unlock(struct zspage *zspage);
static void kick_deferred_free(struct zs_pool *pool);
static void init_deferred_free(struct zs_pool *pool);
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
#else
static int zsmalloc_mount(void) { return 0; }
static void zsmalloc_unmount(void) {}
static int zs_register_migration(struct zs_pool *pool) { return 0; }
static void zs_unregister_migration(struct zs_pool *pool) {}
static void migrate_lock_init(struct zspage *zspage) {}
static void migrate_read_lock(struct zspage *zspage) {}
static void migrate_read_unlock(struct zspage *zspage) {}
static void kick_deferred_free(struct zs_pool *pool) {}
static void init_deferred_free(struct zs_pool *pool) {}
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
#endif

static int create_cache(struct zs_pool *pool)
{
	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
					0, 0, NULL);
	if (!pool->handle_cachep)
		return 1;

	pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
					0, 0, NULL);
	if (!pool->zspage_cachep) {
		kmem_cache_destroy(pool->handle_cachep);
		pool->handle_cachep = NULL;
		return 1;
	}

	return 0;
}

static void destroy_cache(struct zs_pool *pool)
{
	kmem_cache_destroy(pool->handle_cachep);
	kmem_cache_destroy(pool->zspage_cachep);
}

static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
{
	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
}

static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
{
	kmem_cache_free(pool->handle_cachep, (void *)handle);
}

static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
{
	return kmem_cache_alloc(pool->zspage_cachep,
			flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
}

static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
{
	kmem_cache_free(pool->zspage_cachep, zspage);
}

static void record_obj(unsigned long handle, unsigned long obj)
{
	/*
	 * lsb of @obj represents handle lock while other bits
	 * represent object value the handle is pointing so
	 * updating shouldn't do store tearing.
	 */
	WRITE_ONCE(*(unsigned long *)handle, obj);
}

/* zpool driver */

#ifdef CONFIG_ZPOOL

static void *zs_zpool_create(const char *name, gfp_t gfp,
			     const struct zpool_ops *zpool_ops,
			     struct zpool *zpool)
{
	/*
	 * Ignore global gfp flags: zs_malloc() may be invoked from
	 * different contexts and its caller must provide a valid
	 * gfp mask.
	 */
	return zs_create_pool(name);
}

static void zs_zpool_destroy(void *pool)
{
	zs_destroy_pool(pool);
}

static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	*handle = zs_malloc(pool, size, gfp);
	return *handle ? 0 : -1;
}

static void zs_zpool_free(void *pool, unsigned long handle)
{
	zs_free(pool, handle);
}

static int zs_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	return -EINVAL;
}

static void *zs_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	enum zs_mapmode zs_mm;

	switch (mm) {
	case ZPOOL_MM_RO:
		zs_mm = ZS_MM_RO;
		break;
	case ZPOOL_MM_WO:
		zs_mm = ZS_MM_WO;
		break;
	case ZPOOL_MM_RW:
	default:
		zs_mm = ZS_MM_RW;
		break;
	}

	return zs_map_object(pool, handle, zs_mm);
}

static void zs_zpool_unmap(void *pool, unsigned long handle)
{
	zs_unmap_object(pool, handle);
}

static u64 zs_zpool_total_size(void *pool)
{
	return zs_get_total_pages(pool) << PAGE_SHIFT;
}

static struct zpool_driver zs_zpool_driver = {
	.type =		"zsmalloc",
	.owner =	THIS_MODULE,
	.create =	zs_zpool_create,
	.destroy =	zs_zpool_destroy,
	.malloc =	zs_zpool_malloc,
	.free =		zs_zpool_free,
	.shrink =	zs_zpool_shrink,
	.map =		zs_zpool_map,
	.unmap =	zs_zpool_unmap,
	.total_size =	zs_zpool_total_size,
};
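
/*
 * Illustrative use via the zpool API (sketch only; zswap is the usual
 * real client). A caller selects this backend by type name and the
 * calls are then routed through zs_zpool_driver above:
 *
 *	struct zpool *zp = zpool_create_pool("zsmalloc", "mypool",
 *					     GFP_KERNEL, &ops);
 *	unsigned long h;
 *
 *	zpool_malloc(zp, 100, GFP_KERNEL, &h);
 */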

MODULE_ALIAS("zpool-zsmalloc");
#endif /* CONFIG_ZPOOL */

/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);

static bool is_zspage_isolated(struct zspage *zspage)
{
	return zspage->isolated;
}

static __maybe_unused int is_first_page(struct page *page)
{
	return PagePrivate(page);
}

/* Protected by class->lock */
static inline int get_zspage_inuse(struct zspage *zspage)
{
	return zspage->inuse;
}

static inline void set_zspage_inuse(struct zspage *zspage, int val)
{
	zspage->inuse = val;
}

static inline void mod_zspage_inuse(struct zspage *zspage, int val)
{
	zspage->inuse += val;
}

static inline struct page *get_first_page(struct zspage *zspage)
{
	struct page *first_page = zspage->first_page;

	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
	return first_page;
}

static inline int get_first_obj_offset(struct page *page)
{
	return page->units;
}

static inline void set_first_obj_offset(struct page *page, int offset)
{
	page->units = offset;
}

static inline unsigned int get_freeobj(struct zspage *zspage)
{
	return zspage->freeobj;
}

static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
{
	zspage->freeobj = obj;
}

static void get_zspage_mapping(struct zspage *zspage,
				unsigned int *class_idx,
				enum fullness_group *fullness)
{
	BUG_ON(zspage->magic != ZSPAGE_MAGIC);

	*fullness = zspage->fullness;
	*class_idx = zspage->class;
}

static void set_zspage_mapping(struct zspage *zspage,
				unsigned int class_idx,
				enum fullness_group fullness)
{
	zspage->class = class_idx;
	zspage->fullness = fullness;
}

/*
 * zsmalloc divides the pool into various size classes where each
 * class maintains a list of zspages where each zspage is divided
 * into equal sized chunks. Each allocation falls into one of these
 * classes depending on its size. This function returns index of the
 * size class which has chunk size big enough to hold the given size.
 */
static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);

	return min_t(int, ZS_SIZE_CLASSES - 1, idx);
}
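
/*
 * Example (illustrative, 4K pages): for size == 300,
 * idx = DIV_ROUND_UP(300 - 32, 16) = 17, i.e. the class whose chunk
 * size is 32 + 17 * 16 = 304 bytes, wasting 4 bytes per object.
 */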

static inline void zs_stat_inc(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
	class->stats.objs[type] += cnt;
}

static inline void zs_stat_dec(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
	class->stats.objs[type] -= cnt;
}

static inline unsigned long zs_stat_get(struct size_class *class,
				enum zs_stat_type type)
{
	return class->stats.objs[type];
}
#ifdef CONFIG_ZSMALLOC_STAT

static void __init zs_stat_init(void)
{
	if (!debugfs_initialized()) {
		pr_warn("debugfs not available, stat dir not created\n");
		return;
	}

	zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
	if (!zs_stat_root)
		pr_warn("debugfs 'zsmalloc' stat dir creation failed\n");
}

static void __exit zs_stat_exit(void)
{
	debugfs_remove_recursive(zs_stat_root);
}

static unsigned long zs_can_compact(struct size_class *class);

static int zs_stats_size_show(struct seq_file *s, void *v)
{
	int i;
	struct zs_pool *pool = s->private;
	struct size_class *class;
	int objs_per_zspage;
	unsigned long class_almost_full, class_almost_empty;
	unsigned long obj_allocated, obj_used, pages_used, freeable;
	unsigned long total_class_almost_full = 0, total_class_almost_empty = 0;
	unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
	unsigned long total_freeable = 0;

	seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s\n",
			"class", "size", "almost_full", "almost_empty",
			"obj_allocated", "obj_used", "pages_used",
			"pages_per_zspage", "freeable");

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		class = pool->size_class[i];

		if (class->index != i)
			continue;

		spin_lock(&class->lock);
		class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL);
		class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
		obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
		obj_used = zs_stat_get(class, OBJ_USED);
		freeable = zs_can_compact(class);
		spin_unlock(&class->lock);

		objs_per_zspage = class->objs_per_zspage;
		pages_used = obj_allocated / objs_per_zspage *
				class->pages_per_zspage;

		seq_printf(s, " %5u %5u %11lu %12lu %13lu"
				" %10lu %10lu %16d %8lu\n",
			i, class->size, class_almost_full, class_almost_empty,
			obj_allocated, obj_used, pages_used,
			class->pages_per_zspage, freeable);

		total_class_almost_full += class_almost_full;
		total_class_almost_empty += class_almost_empty;
		total_objs += obj_allocated;
		total_used_objs += obj_used;
		total_pages += pages_used;
		total_freeable += freeable;
	}

	seq_puts(s, "\n");
	seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu\n",
			"Total", "", total_class_almost_full,
			total_class_almost_empty, total_objs,
			total_used_objs, total_pages, "", total_freeable);

	return 0;
}

static int zs_stats_size_open(struct inode *inode, struct file *file)
{
	return single_open(file, zs_stats_size_show, inode->i_private);
}

static const struct file_operations zs_stat_size_ops = {
	.open		= zs_stats_size_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
	struct dentry *entry;

	if (!zs_stat_root) {
		pr_warn("no root stat dir, not creating <%s> stat dir\n", name);
		return;
	}

	entry = debugfs_create_dir(name, zs_stat_root);
	if (!entry) {
		pr_warn("debugfs dir <%s> creation failed\n", name);
		return;
	}
	pool->stat_dentry = entry;

	entry = debugfs_create_file("classes", S_IFREG | S_IRUGO,
			pool->stat_dentry, pool, &zs_stat_size_ops);
	if (!entry) {
		pr_warn("%s: debugfs file entry <%s> creation failed\n",
				name, "classes");
		debugfs_remove_recursive(pool->stat_dentry);
		pool->stat_dentry = NULL;
	}
}

static void zs_pool_stat_destroy(struct zs_pool *pool)
{
	debugfs_remove_recursive(pool->stat_dentry);
}

#else /* CONFIG_ZSMALLOC_STAT */
static void __init zs_stat_init(void)
{
}

static void __exit zs_stat_exit(void)
{
}

static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
}

static inline void zs_pool_stat_destroy(struct zs_pool *pool)
{
}
#endif

/*
 * For each size class, zspages are divided into different groups
 * depending on how "full" they are. This was done so that we could
 * easily find empty or nearly empty zspages when we try to shrink
 * the pool (based on suggestion from Seth Jennings)
 */
static enum fullness_group get_fullness_group(struct size_class *class,
						struct zspage *zspage)
{
	int inuse, objs_per_zspage;
	enum fullness_group fg;

	inuse = get_zspage_inuse(zspage);
	objs_per_zspage = class->objs_per_zspage;

	if (inuse == 0)
		fg = ZS_EMPTY;
	else if (inuse == objs_per_zspage)
		fg = ZS_FULL;
	else if (inuse <= 3 * objs_per_zspage / fullness_threshold_frac)
		fg = ZS_ALMOST_EMPTY;
	else
		fg = ZS_ALMOST_FULL;

	return fg;
}
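
/*
 * Example (illustrative): with objs_per_zspage == 16 and
 * fullness_threshold_frac == 4, a zspage with 1..12 live objects is
 * ZS_ALMOST_EMPTY (3 * 16 / 4 == 12) and one with 13..15 is
 * ZS_ALMOST_FULL.
 */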

/*
 * Each size class maintains various freelists and zspages are assigned
 * to one of these freelists based on the number of live objects they
 * have. This function inserts the given zspage into the freelist
 * identified by <class, fullness_group>.
 */
static void insert_zspage(struct size_class *class,
				struct zspage *zspage,
				enum fullness_group fullness)
{
	struct zspage *head;

	zs_stat_inc(class, fullness, 1);
	head = list_first_entry_or_null(&class->fullness_list[fullness],
					struct zspage, list);
	/*
	 * We want to see more ZS_FULL pages and less almost empty/full.
	 * Put pages with higher ->inuse first.
	 */
	if (head) {
		if (get_zspage_inuse(zspage) < get_zspage_inuse(head)) {
			list_add(&zspage->list, &head->list);
			return;
		}
	}
	list_add(&zspage->list, &class->fullness_list[fullness]);
}

/*
 * This function removes the given zspage from the freelist identified
 * by <class, fullness_group>.
 */
static void remove_zspage(struct size_class *class,
				struct zspage *zspage,
				enum fullness_group fullness)
{
	VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
	VM_BUG_ON(is_zspage_isolated(zspage));

	list_del_init(&zspage->list);
	zs_stat_dec(class, fullness, 1);
}

/*
 * Each size class maintains zspages in different fullness groups depending
 * on the number of live objects they contain. When allocating or freeing
 * objects, the fullness status of the page can change, say, from ALMOST_FULL
 * to ALMOST_EMPTY when freeing an object. This function checks if such
 * a status change has occurred for the given page and accordingly moves the
 * page from the freelist of the old fullness group to that of the new
 * fullness group.
 */
static enum fullness_group fix_fullness_group(struct size_class *class,
						struct zspage *zspage)
{
	unsigned int class_idx;
	enum fullness_group currfg, newfg;

	get_zspage_mapping(zspage, &class_idx, &currfg);
	newfg = get_fullness_group(class, zspage);
	if (newfg == currfg)
		goto out;

	if (!is_zspage_isolated(zspage)) {
		remove_zspage(class, zspage, currfg);
		insert_zspage(class, zspage, newfg);
	}

	set_zspage_mapping(zspage, class_idx, newfg);
out:
	return newfg;
}

/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at end of
 * each zspage which is given as:
 *	wastage = Zp % class_size
 *	usage = Zp - wastage
 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
 *
 * For example, for size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 objects completely
 * utilizing 3 PAGE_SIZE sized pages.
 */
static int get_pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0;
	/* zspage order which gives maximum used size per KB */
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size;
		int waste, usedpc;

		zspage_size = i * PAGE_SIZE;
		waste = zspage_size % class_size;
		usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}
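
/*
 * Worked example (illustrative, 4K pages): for class_size == 720,
 * usage is 87% with 1 page (4096 % 720 == 496 wasted), 96% with 2
 * pages, 99% with 3 pages (12288 % 720 == 48) and 96% with 4, so
 * 3 pages per zspage is chosen.
 */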

static struct zspage *get_zspage(struct page *page)
{
	struct zspage *zspage = (struct zspage *)page->private;

	BUG_ON(zspage->magic != ZSPAGE_MAGIC);
	return zspage;
}

static struct page *get_next_page(struct page *page)
{
	if (unlikely(PageHugeObject(page)))
		return NULL;

	return page->freelist;
}

/**
 * obj_to_location - get (<page>, <obj_idx>) from encoded object value
 * @obj: the encoded object value
 * @page: page object resides in zspage
 * @obj_idx: object index
 */
static void obj_to_location(unsigned long obj, struct page **page,
				unsigned int *obj_idx)
{
	obj >>= OBJ_TAG_BITS;
	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
	*obj_idx = (obj & OBJ_INDEX_MASK);
}

/**
 * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
 * @page: page object resides in zspage
 * @obj_idx: object index
 */
static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
{
	unsigned long obj;

	obj = page_to_pfn(page) << OBJ_INDEX_BITS;
	obj |= obj_idx & OBJ_INDEX_MASK;
	obj <<= OBJ_TAG_BITS;

	return obj;
}

static unsigned long handle_to_obj(unsigned long handle)
{
	return *(unsigned long *)handle;
}

static unsigned long obj_to_head(struct page *page, void *obj)
{
	if (unlikely(PageHugeObject(page))) {
		VM_BUG_ON_PAGE(!is_first_page(page), page);
		return page->index;
	} else
		return *(unsigned long *)obj;
}

static inline int testpin_tag(unsigned long handle)
{
	return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
}

static inline int trypin_tag(unsigned long handle)
{
	return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
}

static void pin_tag(unsigned long handle)
{
	bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
}

static void unpin_tag(unsigned long handle)
{
	bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
}

static void reset_page(struct page *page)
{
	__ClearPageMovable(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_mapcount_reset(page);
	ClearPageHugeObject(page);
	page->freelist = NULL;
}

/*
 * To prevent zspage destroy during migration, zspage freeing should
 * hold locks of all pages in the zspage.
 */
void lock_zspage(struct zspage *zspage)
{
	struct page *page = get_first_page(zspage);

	do {
		lock_page(page);
	} while ((page = get_next_page(page)) != NULL);
}

int trylock_zspage(struct zspage *zspage)
{
	struct page *cursor, *fail;

	for (cursor = get_first_page(zspage); cursor != NULL; cursor =
					get_next_page(cursor)) {
		if (!trylock_page(cursor)) {
			fail = cursor;
			goto unlock;
		}
	}

	return 1;
unlock:
	for (cursor = get_first_page(zspage); cursor != fail; cursor =
					get_next_page(cursor))
		unlock_page(cursor);

	return 0;
}

static void __free_zspage(struct zs_pool *pool, struct size_class *class,
				struct zspage *zspage)
{
	struct page *page, *next;
	enum fullness_group fg;
	unsigned int class_idx;

	get_zspage_mapping(zspage, &class_idx, &fg);

	assert_spin_locked(&class->lock);

	VM_BUG_ON(get_zspage_inuse(zspage));
	VM_BUG_ON(fg != ZS_EMPTY);

	next = page = get_first_page(zspage);
	do {
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		next = get_next_page(page);
		reset_page(page);
		unlock_page(page);
		dec_zone_page_state(page, NR_ZSPAGES);
		put_page(page);
		page = next;
	} while (page != NULL);

	cache_free_zspage(pool, zspage);

	zs_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage);
	atomic_long_sub(class->pages_per_zspage,
					&pool->pages_allocated);
}

static void free_zspage(struct zs_pool *pool, struct size_class *class,
				struct zspage *zspage)
{
	VM_BUG_ON(get_zspage_inuse(zspage));
	VM_BUG_ON(list_empty(&zspage->list));

	if (!trylock_zspage(zspage)) {
		kick_deferred_free(pool);
		return;
	}

	remove_zspage(class, zspage, ZS_EMPTY);
	__free_zspage(pool, class, zspage);
}

/* Initialize a newly allocated zspage */
static void init_zspage(struct size_class *class, struct zspage *zspage)
{
	unsigned int freeobj = 1;
	unsigned long off = 0;
	struct page *page = get_first_page(zspage);

	while (page) {
		struct page *next_page;
		struct link_free *link;
		void *vaddr;

		set_first_obj_offset(page, off);

		vaddr = kmap_atomic(page);
		link = (struct link_free *)vaddr + off / sizeof(*link);

		while ((off += class->size) < PAGE_SIZE) {
			link->next = freeobj++ << OBJ_TAG_BITS;
			link += class->size / sizeof(*link);
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present)
		 */
		next_page = get_next_page(page);
		if (next_page) {
			link->next = freeobj++ << OBJ_TAG_BITS;
		} else {
			/*
			 * Reset OBJ_TAG_BITS bit to last link to tell
			 * whether it's allocated object or not.
			 * (-1UL avoids shifting a negative value, which
			 * is undefined behavior.)
			 */
			link->next = -1UL << OBJ_TAG_BITS;
		}
		kunmap_atomic(vaddr);
		page = next_page;
		off %= PAGE_SIZE;
	}

	set_freeobj(zspage, 0);
}

static void create_page_chain(struct size_class *class, struct zspage *zspage,
				struct page *pages[])
{
	int i;
	struct page *page;
	struct page *prev_page = NULL;
	int nr_pages = class->pages_per_zspage;

	/*
	 * Link the component pages together as:
	 * 1. all pages are linked together using page->freelist
	 * 2. each sub-page points to zspage using page->private
	 *
	 * we set PG_private to identify the first page (i.e. no other sub-page
	 * has this flag set).
	 */
	for (i = 0; i < nr_pages; i++) {
		page = pages[i];
		set_page_private(page, (unsigned long)zspage);
		page->freelist = NULL;
		if (i == 0) {
			zspage->first_page = page;
			SetPagePrivate(page);
			if (unlikely(class->objs_per_zspage == 1 &&
					class->pages_per_zspage == 1))
				SetPageHugeObject(page);
		} else {
			prev_page->freelist = page;
		}
		prev_page = page;
	}
}

/*
 * Allocate a zspage for the given size class
 */
static struct zspage *alloc_zspage(struct zs_pool *pool,
					struct size_class *class,
					gfp_t gfp)
{
	int i;
	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
	struct zspage *zspage = cache_alloc_zspage(pool, gfp);

	if (!zspage)
		return NULL;

	memset(zspage, 0, sizeof(struct zspage));
	zspage->magic = ZSPAGE_MAGIC;
	migrate_lock_init(zspage);

	for (i = 0; i < class->pages_per_zspage; i++) {
		struct page *page;

		page = alloc_page(gfp);
		if (!page) {
			while (--i >= 0) {
				dec_zone_page_state(pages[i], NR_ZSPAGES);
				__free_page(pages[i]);
			}
			cache_free_zspage(pool, zspage);
			return NULL;
		}

		inc_zone_page_state(page, NR_ZSPAGES);
		pages[i] = page;
	}

	create_page_chain(class, zspage, pages);
	init_zspage(class, zspage);

	return zspage;
}

static struct zspage *find_get_zspage(struct size_class *class)
{
	int i;
	struct zspage *zspage;

	for (i = ZS_ALMOST_FULL; i >= ZS_EMPTY; i--) {
		zspage = list_first_entry_or_null(&class->fullness_list[i],
				struct zspage, list);
		if (zspage)
			break;
	}

	return zspage;
}

#ifdef CONFIG_PGTABLE_MAPPING
static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm)
		return 0;
	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
	if (!area->vm)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm)
		free_vm_area(area->vm);
	area->vm = NULL;
}

static inline void *__zs_map_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
	area->vm_addr = area->vm->addr;
	return area->vm_addr + off;
}

static inline void __zs_unmap_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	unsigned long addr = (unsigned long)area->vm_addr;

	unmap_kernel_range(addr, PAGE_SIZE * 2);
}

#else /* CONFIG_PGTABLE_MAPPING */

static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm_buf)
		return 0;
	area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
	if (!area->vm_buf)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	kfree(area->vm_buf);
	area->vm_buf = NULL;
}

static void *__zs_map_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* disable page faults to match kmap_atomic() return conditions */
	pagefault_disable();

	/* no read fastpath */
	if (area->vm_mm == ZS_MM_WO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy object to per-cpu buffer */
	addr = kmap_atomic(pages[0]);
	memcpy(buf, addr + off, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(buf + sizes[0], addr, sizes[1]);
	kunmap_atomic(addr);
out:
	return area->vm_buf;
}

static void __zs_unmap_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf;

	/* no write fastpath */
	if (area->vm_mm == ZS_MM_RO)
		goto out;

	buf = area->vm_buf + ZS_HANDLE_SIZE;
	size -= ZS_HANDLE_SIZE;
	off += ZS_HANDLE_SIZE;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy per-cpu buffer to object */
	addr = kmap_atomic(pages[0]);
	memcpy(addr + off, buf, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(addr, buf + sizes[0], sizes[1]);
	kunmap_atomic(addr);

out:
	/* enable page faults to match kunmap_atomic() return conditions */
	pagefault_enable();
}

#endif /* CONFIG_PGTABLE_MAPPING */

static int zs_cpu_prepare(unsigned int cpu)
{
	struct mapping_area *area;

	area = &per_cpu(zs_map_area, cpu);
	return __zs_cpu_up(area);
}

static int zs_cpu_dead(unsigned int cpu)
{
	struct mapping_area *area;

	area = &per_cpu(zs_map_area, cpu);
	__zs_cpu_down(area);
	return 0;
}

static bool can_merge(struct size_class *prev, int pages_per_zspage,
					int objs_per_zspage)
{
	if (prev->pages_per_zspage == pages_per_zspage &&
		prev->objs_per_zspage == objs_per_zspage)
		return true;

	return false;
}

static bool zspage_full(struct size_class *class, struct zspage *zspage)
{
	return get_zspage_inuse(zspage) == class->objs_per_zspage;
}

unsigned long zs_get_total_pages(struct zs_pool *pool)
{
	return atomic_long_read(&pool->pages_allocated);
}
EXPORT_SYMBOL_GPL(zs_get_total_pages);

/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 * @mm: mapping mode to use
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 *
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
{
	struct zspage *zspage;
	struct page *page;
	unsigned long obj, off;
	unsigned int obj_idx;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];
	void *ret;

	/*
	 * Because we use per-cpu mapping areas shared among the
	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another users mappings.
	 */
	WARN_ON_ONCE(in_interrupt());

	/* From now on, migration cannot move the object */
	pin_tag(handle);

	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	zspage = get_zspage(page);

	/* migration cannot move any subpage in this zspage */
	migrate_read_lock(zspage);

	get_zspage_mapping(zspage, &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = (class->size * obj_idx) & ~PAGE_MASK;

	area = &get_cpu_var(zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
		ret = area->vm_addr + off;
		goto out;
	}

	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);

	ret = __zs_map_object(area, pages, off, class->size);
out:
	if (likely(!PageHugeObject(page)))
		ret += ZS_HANDLE_SIZE;

	return ret;
}
EXPORT_SYMBOL_GPL(zs_map_object);

void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
	struct zspage *zspage;
	struct page *page;
	unsigned long obj, off;
	unsigned int obj_idx;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	zspage = get_zspage(page);
	get_zspage_mapping(zspage, &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = (class->size * obj_idx) & ~PAGE_MASK;

	area = this_cpu_ptr(&zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_atomic(area->vm_addr);
	else {
		struct page *pages[2];

		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		__zs_unmap_object(area, pages, off, class->size);
	}
	put_cpu_var(zs_map_area);

	migrate_read_unlock(zspage);
	unpin_tag(handle);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);

static unsigned long obj_malloc(struct size_class *class,
				struct zspage *zspage, unsigned long handle)
{
	int i, nr_page, offset;
	unsigned long obj;
	struct link_free *link;

	struct page *m_page;
	unsigned long m_offset;
	void *vaddr;

	handle |= OBJ_ALLOCATED_TAG;
	obj = get_freeobj(zspage);

	offset = obj * class->size;
	nr_page = offset >> PAGE_SHIFT;
	m_offset = offset & ~PAGE_MASK;
	m_page = get_first_page(zspage);

	for (i = 0; i < nr_page; i++)
		m_page = get_next_page(m_page);

	vaddr = kmap_atomic(m_page);
	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
	set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
	if (likely(!PageHugeObject(m_page)))
		/* record handle in the header of allocated chunk */
		link->handle = handle;
	else
		/* record handle to page->index */
		zspage->first_page->index = handle;

	kunmap_atomic(vaddr);
	mod_zspage_inuse(zspage, 1);
	zs_stat_inc(class, OBJ_USED, 1);

	obj = location_to_obj(m_page, obj);

	return obj;
}

/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 * @gfp: gfp flags when allocating object
 *
 * On success, handle to the allocated object is returned,
 * otherwise 0.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
{
	unsigned long handle, obj;
	struct size_class *class;
	enum fullness_group newfg;
	struct zspage *zspage;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return 0;

	handle = cache_alloc_handle(pool, gfp);
	if (!handle)
		return 0;

	/* extra space in chunk to keep the handle */
	size += ZS_HANDLE_SIZE;
	class = pool->size_class[get_size_class_index(size)];

	spin_lock(&class->lock);
	zspage = find_get_zspage(class);
	if (likely(zspage)) {
		obj = obj_malloc(class, zspage, handle);
		/* Now move the zspage to another fullness group, if required */
		fix_fullness_group(class, zspage);
		record_obj(handle, obj);
		spin_unlock(&class->lock);

		return handle;
	}

	spin_unlock(&class->lock);

	zspage = alloc_zspage(pool, class, gfp);
	if (!zspage) {
		cache_free_handle(pool, handle);
		return 0;
	}

	spin_lock(&class->lock);
	obj = obj_malloc(class, zspage, handle);
	newfg = get_fullness_group(class, zspage);
	insert_zspage(class, zspage, newfg);
	set_zspage_mapping(zspage, class->index, newfg);
	record_obj(handle, obj);
	atomic_long_add(class->pages_per_zspage,
				&pool->pages_allocated);
	zs_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage);

	/* We completely set up zspage so mark them as movable */
	SetZsPageMovable(pool, zspage);
	spin_unlock(&class->lock);

	return handle;
}
EXPORT_SYMBOL_GPL(zs_malloc);
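
/*
 * Typical caller pattern (illustrative sketch only; zram and zswap are
 * the real users):
 *
 *	unsigned long handle = zs_malloc(pool, len, GFP_KERNEL);
 *	void *dst;
 *
 *	if (!handle)
 *		return -ENOMEM;
 *	dst = zs_map_object(pool, handle, ZS_MM_WO);
 *	memcpy(dst, src, len);
 *	zs_unmap_object(pool, handle);
 *	...
 *	zs_free(pool, handle);
 */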

static void obj_free(struct size_class *class, unsigned long obj)
{
	struct link_free *link;
	struct zspage *zspage;
	struct page *f_page;
	unsigned long f_offset;
	unsigned int f_objidx;
	void *vaddr;

	obj &= ~OBJ_ALLOCATED_TAG;
	obj_to_location(obj, &f_page, &f_objidx);
	f_offset = (class->size * f_objidx) & ~PAGE_MASK;
	zspage = get_zspage(f_page);

	vaddr = kmap_atomic(f_page);

	/* Insert this object in containing zspage's freelist */
	link = (struct link_free *)(vaddr + f_offset);
	link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
	kunmap_atomic(vaddr);
	set_freeobj(zspage, f_objidx);
	mod_zspage_inuse(zspage, -1);
	zs_stat_dec(class, OBJ_USED, 1);
}

void zs_free(struct zs_pool *pool, unsigned long handle)
{
	struct zspage *zspage;
	struct page *f_page;
	unsigned long obj;
	unsigned int f_objidx;
	unsigned int class_idx;
	struct size_class *class;
	enum fullness_group fullness;
	bool isolated;

	if (unlikely(!handle))
		return;

	pin_tag(handle);
	obj = handle_to_obj(handle);
	obj_to_location(obj, &f_page, &f_objidx);
	zspage = get_zspage(f_page);

	migrate_read_lock(zspage);

	get_zspage_mapping(zspage, &class_idx, &fullness);
	class = pool->size_class[class_idx];

	spin_lock(&class->lock);
	obj_free(class, obj);
	fullness = fix_fullness_group(class, zspage);
	if (fullness != ZS_EMPTY) {
		migrate_read_unlock(zspage);
		goto out;
	}

	isolated = is_zspage_isolated(zspage);
	migrate_read_unlock(zspage);
	/*
	 * If the zspage is isolated for page migration, the migration
	 * path frees it once migration completes, so don't free it here.
	 */
	if (likely(!isolated))
		free_zspage(pool, class, zspage);
out:
	spin_unlock(&class->lock);
	unpin_tag(handle);
	cache_free_handle(pool, handle);
}
EXPORT_SYMBOL_GPL(zs_free);

static void zs_object_copy(struct size_class *class, unsigned long dst,
				unsigned long src)
{
	struct page *s_page, *d_page;
	unsigned int s_objidx, d_objidx;
	unsigned long s_off, d_off;
	void *s_addr, *d_addr;
	int s_size, d_size, size;
	int written = 0;

	s_size = d_size = class->size;

	obj_to_location(src, &s_page, &s_objidx);
	obj_to_location(dst, &d_page, &d_objidx);

	s_off = (class->size * s_objidx) & ~PAGE_MASK;
	d_off = (class->size * d_objidx) & ~PAGE_MASK;

	if (s_off + class->size > PAGE_SIZE)
		s_size = PAGE_SIZE - s_off;

	if (d_off + class->size > PAGE_SIZE)
		d_size = PAGE_SIZE - d_off;

	s_addr = kmap_atomic(s_page);
	d_addr = kmap_atomic(d_page);

	while (1) {
		size = min(s_size, d_size);
		memcpy(d_addr + d_off, s_addr + s_off, size);
		written += size;

		if (written == class->size)
			break;

		s_off += size;
		s_size -= size;
		d_off += size;
		d_size -= size;

		if (s_off >= PAGE_SIZE) {
			kunmap_atomic(d_addr);
			kunmap_atomic(s_addr);
			s_page = get_next_page(s_page);
			s_addr = kmap_atomic(s_page);
			d_addr = kmap_atomic(d_page);
			s_size = class->size - written;
			s_off = 0;
		}

		if (d_off >= PAGE_SIZE) {
			kunmap_atomic(d_addr);
			d_page = get_next_page(d_page);
			d_addr = kmap_atomic(d_page);
			d_size = class->size - written;
			d_off = 0;
		}
	}

	kunmap_atomic(d_addr);
	kunmap_atomic(s_addr);
}

/*
 * Find alloced object in zspage from index object and
 * return handle.
 */
static unsigned long find_alloced_obj(struct size_class *class,
					struct page *page, int *obj_idx)
{
	unsigned long head;
	int offset = 0;
	int index = *obj_idx;
	unsigned long handle = 0;
	void *addr = kmap_atomic(page);

	offset = get_first_obj_offset(page);
	offset += class->size * index;

	while (offset < PAGE_SIZE) {
		head = obj_to_head(page, addr + offset);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (trypin_tag(handle))
				break;
			handle = 0;
		}

		offset += class->size;
		index++;
	}

	kunmap_atomic(addr);

	*obj_idx = index;

	return handle;
}

struct zs_compact_control {
	/* Source spage for migration which could be a subpage of zspage */
	struct page *s_page;
	/*
	 * Destination page for migration which should be a first page
	 * of zspage.
	 */
	struct page *d_page;
	/*
	 * Starting object index within @s_page which used for live object
	 * in the subpage.
	 */
	int obj_idx;
};

static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
				struct zs_compact_control *cc)
{
	unsigned long used_obj, free_obj;
	unsigned long handle;
	struct page *s_page = cc->s_page;
	struct page *d_page = cc->d_page;
	int obj_idx = cc->obj_idx;
	int ret = 0;

	while (1) {
		handle = find_alloced_obj(class, s_page, &obj_idx);
		if (!handle) {
			s_page = get_next_page(s_page);
			if (!s_page)
				break;
			obj_idx = 0;
			continue;
		}

		/* Stop if there is no more space */
		if (zspage_full(class, get_zspage(d_page))) {
			unpin_tag(handle);
			ret = -ENOMEM;
			break;
		}

		used_obj = handle_to_obj(handle);
		free_obj = obj_malloc(class, get_zspage(d_page), handle);
		zs_object_copy(class, free_obj, used_obj);
		obj_idx++;
		/*
		 * record_obj updates handle's value to free_obj and it will
		 * invalidate lock bit (ie, HANDLE_PIN_BIT) of handle, which
		 * breaks synchronization using pin_tag (e.g., zs_free) so
		 * let's keep the lock bit.
		 */
		free_obj |= BIT(HANDLE_PIN_BIT);
		record_obj(handle, free_obj);
		unpin_tag(handle);
		obj_free(class, used_obj);
	}

	/* Remember last position in this iteration */
	cc->s_page = s_page;
	cc->obj_idx = obj_idx;

	return ret;
}

static struct zspage *isolate_zspage(struct size_class *class, bool source)
{
	int i;
	struct zspage *zspage;
	enum fullness_group fg[2] = {ZS_ALMOST_EMPTY, ZS_ALMOST_FULL};

	if (!source) {
		fg[0] = ZS_ALMOST_FULL;
		fg[1] = ZS_ALMOST_EMPTY;
	}

	for (i = 0; i < 2; i++) {
		zspage = list_first_entry_or_null(&class->fullness_list[fg[i]],
							struct zspage, list);
		if (zspage) {
			VM_BUG_ON(is_zspage_isolated(zspage));
			remove_zspage(class, zspage, fg[i]);
			return zspage;
		}
	}

	return zspage;
}

/*
 * putback_zspage - add @zspage into right class's fullness list
 * @class: destination class
 * @zspage: target page
 *
 * Return @zspage's fullness_group
 */
static enum fullness_group putback_zspage(struct size_class *class,
			struct zspage *zspage)
{
	enum fullness_group fullness;

	VM_BUG_ON(is_zspage_isolated(zspage));

	fullness = get_fullness_group(class, zspage);
	insert_zspage(class, zspage, fullness);
	set_zspage_mapping(zspage, class->index, fullness);

	return fullness;
}

#ifdef CONFIG_COMPACTION
static struct dentry *zs_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	static const struct dentry_operations ops = {
		.d_dname = simple_dname,
	};

	return mount_pseudo(fs_type, "zsmalloc:", NULL, &ops, ZSMALLOC_MAGIC);
}

static struct file_system_type zsmalloc_fs = {
	.name		= "zsmalloc",
	.mount		= zs_mount,
	.kill_sb	= kill_anon_super,
};

static int zsmalloc_mount(void)
{
	int ret = 0;

	zsmalloc_mnt = kern_mount(&zsmalloc_fs);
	if (IS_ERR(zsmalloc_mnt))
		ret = PTR_ERR(zsmalloc_mnt);

	return ret;
}

static void zsmalloc_unmount(void)
{
	kern_unmount(zsmalloc_mnt);
}

static void migrate_lock_init(struct zspage *zspage)
{
	rwlock_init(&zspage->lock);
}

static void migrate_read_lock(struct zspage *zspage)
{
	read_lock(&zspage->lock);
}

static void migrate_read_unlock(struct zspage *zspage)
{
	read_unlock(&zspage->lock);
}

static void migrate_write_lock(struct zspage *zspage)
{
	write_lock(&zspage->lock);
}

static void migrate_write_unlock(struct zspage *zspage)
{
	write_unlock(&zspage->lock);
}

/* Number of isolated subpage for *page migration* in this zspage */
static void inc_zspage_isolation(struct zspage *zspage)
{
	zspage->isolated++;
}

static void dec_zspage_isolation(struct zspage *zspage)
{
	zspage->isolated--;
}

static void replace_sub_page(struct size_class *class, struct zspage *zspage,
				struct page *newpage, struct page *oldpage)
{
	struct page *page;
	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
	int idx = 0;

	page = get_first_page(zspage);
	do {
		if (page == oldpage)
			pages[idx] = newpage;
		else
			pages[idx] = page;
		idx++;
	} while ((page = get_next_page(page)) != NULL);

	create_page_chain(class, zspage, pages);
	set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
	if (unlikely(PageHugeObject(oldpage)))
		newpage->index = oldpage->index;
	__SetPageMovable(newpage, page_mapping(oldpage));
}

bool zs_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct zs_pool *pool;
	struct size_class *class;
	unsigned int class_idx;
	enum fullness_group fullness;
	struct zspage *zspage;
	struct address_space *mapping;

	/*
	 * Page is locked so zspage couldn't be destroyed. For detail, look at
	 * lock_zspage in free_zspage.
	 */
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	zspage = get_zspage(page);

	/*
	 * Without class lock, fullness could be stale while class_idx is okay
	 * because class_idx is constant unless page is freed so we should get
	 * fullness again under class lock
	 */
	get_zspage_mapping(zspage, &class_idx, &fullness);
	mapping = page_mapping(page);
	pool = mapping->private_data;
	class = pool->size_class[class_idx];

	spin_lock(&class->lock);
	if (get_zspage_inuse(zspage) == 0) {
		spin_unlock(&class->lock);
		return false;
	}

	/* zspage is isolated for object migration */
	if (list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
		spin_unlock(&class->lock);
		return false;
	}

	/*
	 * If this is first time isolation for the zspage, isolate zspage from
	 * the zspage freelist. Later, we can free the zspage when isolation
	 * is done.
	 */
	if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
		get_zspage_mapping(zspage, &class_idx, &fullness);
		remove_zspage(class, zspage, fullness);
	}

	inc_zspage_isolation(zspage);
	spin_unlock(&class->lock);

	return true;
}

int zs_page_migrate(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	struct zs_pool *pool;
	struct size_class *class;
	unsigned int class_idx;
	enum fullness_group fullness;
	struct zspage *zspage;
	struct page *dummy;
	void *s_addr, *d_addr, *addr;
	int offset, pos;
	unsigned long handle, head;
	unsigned long old_obj, new_obj;
	unsigned int obj_idx;
	int ret = -EAGAIN;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	zspage = get_zspage(page);

	/* Concurrent compactor cannot migrate any subpage in zspage */
	migrate_write_lock(zspage);
	get_zspage_mapping(zspage, &class_idx, &fullness);
	pool = mapping->private_data;
	class = pool->size_class[class_idx];
	offset = get_first_obj_offset(page);

	spin_lock(&class->lock);
	if (!get_zspage_inuse(zspage)) {
		ret = -EBUSY;
		goto unlock_class;
	}

	pos = offset;
	s_addr = kmap_atomic(page);
	while (pos < PAGE_SIZE) {
		head = obj_to_head(page, s_addr + pos);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (!trypin_tag(handle))
				goto unpin_objects;
		}
		pos += class->size;
	}

	/*
	 * Here, any user cannot access all objects in the zspage so let's move.
	 */
	d_addr = kmap_atomic(newpage);
	memcpy(d_addr, s_addr, PAGE_SIZE);
	kunmap_atomic(d_addr);

	for (addr = s_addr + offset; addr < s_addr + pos;
					addr += class->size) {
		head = obj_to_head(page, addr);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (!testpin_tag(handle))
				BUG();

			old_obj = handle_to_obj(handle);
			obj_to_location(old_obj, &dummy, &obj_idx);
			new_obj = (unsigned long)location_to_obj(newpage,
								obj_idx);
			new_obj |= BIT(HANDLE_PIN_BIT);
			record_obj(handle, new_obj);
		}
	}

	replace_sub_page(class, zspage, newpage, page);
	get_page(newpage);

	dec_zspage_isolation(zspage);

	/*
	 * Page migration is done so let's putback isolated zspage to
	 * the list if @page is final isolated subpage in the zspage.
	 */
	if (!is_zspage_isolated(zspage))
		putback_zspage(class, zspage);

	reset_page(page);
	put_page(page);
	page = newpage;

	ret = MIGRATEPAGE_SUCCESS;
unpin_objects:
	for (addr = s_addr + offset; addr < s_addr + pos;
						addr += class->size) {
		head = obj_to_head(page, addr);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (!testpin_tag(handle))
				BUG();
			unpin_tag(handle);
		}
	}
	kunmap_atomic(s_addr);
unlock_class:
	spin_unlock(&class->lock);
	migrate_write_unlock(zspage);

	return ret;
}

void zs_page_putback(struct page *page)
{
	struct zs_pool *pool;
	struct size_class *class;
	unsigned int class_idx;
	enum fullness_group fg;
	struct address_space *mapping;
	struct zspage *zspage;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	zspage = get_zspage(page);
	get_zspage_mapping(zspage, &class_idx, &fg);
	mapping = page_mapping(page);
	pool = mapping->private_data;
	class = pool->size_class[class_idx];

	spin_lock(&class->lock);
	dec_zspage_isolation(zspage);
	if (!is_zspage_isolated(zspage)) {
		fg = putback_zspage(class, zspage);
		/*
		 * Due to page_lock, we cannot free zspage immediately
		 * so let's defer.
		 */
		if (fg == ZS_EMPTY)
			schedule_work(&pool->free_work);
	}
	spin_unlock(&class->lock);
}

const struct address_space_operations zsmalloc_aops = {
	.isolate_page = zs_page_isolate,
	.migratepage = zs_page_migrate,
	.putback_page = zs_page_putback,
};

static int zs_register_migration(struct zs_pool *pool)
{
	pool->inode = alloc_anon_inode(zsmalloc_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &zsmalloc_aops;
	return 0;
}

static void zs_unregister_migration(struct zs_pool *pool)
{
	flush_work(&pool->free_work);
	iput(pool->inode);
}

/*
 * Deferred-free worker: collect all empty zspages and free them with
 * their page locks held (see free_zspage()).
 */
static void async_free_zspage(struct work_struct *work)
{
	int i;
	struct size_class *class;
	unsigned int class_idx;
	enum fullness_group fullness;
	struct zspage *zspage, *tmp;
	LIST_HEAD(free_pages);
	struct zs_pool *pool = container_of(work, struct zs_pool,
					free_work);

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		class = pool->size_class[i];
		if (class->index != i)
			continue;

		spin_lock(&class->lock);
		list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages);
		spin_unlock(&class->lock);
	}

	list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
		list_del(&zspage->list);
		lock_zspage(zspage);

		get_zspage_mapping(zspage, &class_idx, &fullness);
		VM_BUG_ON(fullness != ZS_EMPTY);
		class = pool->size_class[class_idx];
		spin_lock(&class->lock);
		__free_zspage(pool, class, zspage);
		spin_unlock(&class->lock);
	}
}

static void kick_deferred_free(struct zs_pool *pool)
{
	schedule_work(&pool->free_work);
}

static void init_deferred_free(struct zs_pool *pool)
{
	INIT_WORK(&pool->free_work, async_free_zspage);
}

static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
{
	struct page *page = get_first_page(zspage);

	do {
		WARN_ON(!trylock_page(page));
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	} while ((page = get_next_page(page)) != NULL);
}
#endif /* CONFIG_COMPACTION */

/*
 * Based on the number of unused allocated objects, calculate
 * and return the number of pages that we can free.
 */
static unsigned long zs_can_compact(struct size_class *class)
{
	unsigned long obj_wasted;
	unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
	unsigned long obj_used = zs_stat_get(class, OBJ_USED);

	if (obj_allocated <= obj_used)
		return 0;

	obj_wasted = obj_allocated - obj_used;
	obj_wasted /= class->objs_per_zspage;

	return obj_wasted * class->pages_per_zspage;
}
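
/*
 * Example (illustrative): with obj_allocated == 100, obj_used == 60,
 * objs_per_zspage == 10 and pages_per_zspage == 2, compaction can pack
 * the 60 live objects into 6 zspages and release the remaining
 * (100 - 60) / 10 == 4 zspages, i.e. 8 pages.
 */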

static void __zs_compact(struct zs_pool *pool, struct size_class *class)
{
	struct zs_compact_control cc;
	struct zspage *src_zspage;
	struct zspage *dst_zspage = NULL;

	spin_lock(&class->lock);
	while ((src_zspage = isolate_zspage(class, true))) {

		if (!zs_can_compact(class))
			break;

		cc.obj_idx = 0;
		cc.s_page = get_first_page(src_zspage);

		while ((dst_zspage = isolate_zspage(class, false))) {
			cc.d_page = get_first_page(dst_zspage);
			/*
			 * If there is no more space in dst_page, resched
			 * and see if anyone had allocated another zspage.
			 */
			if (!migrate_zspage(pool, class, &cc))
				break;

			putback_zspage(class, dst_zspage);
		}

		/* Stop if we couldn't find slot */
		if (dst_zspage == NULL)
			break;

		putback_zspage(class, dst_zspage);
		if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
			free_zspage(pool, class, src_zspage);
			pool->stats.pages_compacted += class->pages_per_zspage;
		}
		spin_unlock(&class->lock);
		cond_resched();
		spin_lock(&class->lock);
	}

	if (src_zspage)
		putback_zspage(class, src_zspage);

	spin_unlock(&class->lock);
}

unsigned long zs_compact(struct zs_pool *pool)
{
	int i;
	struct size_class *class;

	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (!class)
			continue;
		if (class->index != i)
			continue;
		__zs_compact(pool, class);
	}

	return pool->stats.pages_compacted;
}
EXPORT_SYMBOL_GPL(zs_compact);

void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
{
	memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
}
EXPORT_SYMBOL_GPL(zs_pool_stats);

static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	unsigned long pages_freed;
	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
			shrinker);

	pages_freed = pool->stats.pages_compacted;
	/*
	 * Compact classes and calculate compaction delta.
	 * Can run concurrently with a manually triggered
	 * (by user) compaction.
	 */
	pages_freed = zs_compact(pool) - pages_freed;

	return pages_freed ? pages_freed : SHRINK_STOP;
}

static unsigned long zs_shrinker_count(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	int i;
	struct size_class *class;
	unsigned long pages_to_free = 0;
	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
			shrinker);

	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (!class)
			continue;
		if (class->index != i)
			continue;

		pages_to_free += zs_can_compact(class);
	}

	return pages_to_free;
}

static void zs_unregister_shrinker(struct zs_pool *pool)
{
	if (pool->shrinker_enabled) {
		unregister_shrinker(&pool->shrinker);
		pool->shrinker_enabled = false;
	}
}

static int zs_register_shrinker(struct zs_pool *pool)
{
	pool->shrinker.scan_objects = zs_shrinker_scan;
	pool->shrinker.count_objects = zs_shrinker_count;
	pool->shrinker.batch = 0;
	pool->shrinker.seeks = DEFAULT_SEEKS;

	return register_shrinker(&pool->shrinker);
}

/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @name: pool name to be created
 *
 * This function must be called before anything when using
 * the zsmalloc allocator.
 *
 * On success, a pointer to the newly created pool is returned,
 * otherwise NULL.
 */
struct zs_pool *zs_create_pool(const char *name)
{
	int i;
	struct zs_pool *pool;
	struct size_class *prev_class = NULL;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	init_deferred_free(pool);

	pool->name = kstrdup(name, GFP_KERNEL);
	if (!pool->name)
		goto err;

	if (create_cache(pool))
		goto err;

	/*
	 * Iterate reversely, because, size of size_class that we want to use
	 * for merging should be larger or equal to current size.
	 */
	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		int size;
		int pages_per_zspage;
		int objs_per_zspage;
		struct size_class *class;
		int fullness = 0;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;
		pages_per_zspage = get_pages_per_zspage(size);
		objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;

		/*
		 * size_class is used for normal zsmalloc operation such
		 * as alloc/free for that size. Although it is natural that we
		 * have one size_class for each size, there is a chance that we
		 * can get more memory utilization if we use one size_class for
		 * many different sizes whose size_class have same
		 * characteristics. So, we make size_class point to
		 * previous size_class if possible.
		 */
		if (prev_class) {
			if (can_merge(prev_class, pages_per_zspage, objs_per_zspage)) {
				pool->size_class[i] = prev_class;
				continue;
			}
		}

		class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
		if (!class)
			goto err;

		class->size = size;
		class->index = i;
		class->pages_per_zspage = pages_per_zspage;
		class->objs_per_zspage = objs_per_zspage;
		spin_lock_init(&class->lock);
		pool->size_class[i] = class;
		for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;
							fullness++)
			INIT_LIST_HEAD(&class->fullness_list[fullness]);

		prev_class = class;
	}

	/* debug only, don't abort if it fails */
	zs_pool_stat_create(pool, name);

	if (zs_register_migration(pool))
		goto err;

	/*
	 * Not critical since shrinker is only used to trigger internal
	 * defragmentation of the pool which is pretty optional thing. If
	 * registration fails we still can use the pool normally and user can
	 * trigger compaction manually. Thus, ignore return code.
	 */
	if (zs_register_shrinker(pool) == 0)
		pool->shrinker_enabled = true;
	return pool;

err:
	zs_destroy_pool(pool);
	return NULL;
}
EXPORT_SYMBOL_GPL(zs_create_pool);

void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	zs_unregister_shrinker(pool);
	zs_unregister_migration(pool);
	zs_pool_stat_destroy(pool);

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int fg;
		struct size_class *class = pool->size_class[i];

		if (!class)
			continue;

		if (class->index != i)
			continue;

		for (fg = ZS_EMPTY; fg < NR_ZS_FULLNESS; fg++) {
			if (!list_empty(&class->fullness_list[fg])) {
				pr_info("Freeing non-empty class with size %db, fullness group %d\n",
					class->size, fg);
			}
		}
		kfree(class);
	}

	destroy_cache(pool);
	kfree(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);

static int __init zs_init(void)
{
	int ret;

	ret = zsmalloc_mount();
	if (ret)
		goto out;

	ret = cpuhp_setup_state(CPUHP_MM_ZS_PREPARE, "mm/zsmalloc:prepare",
				zs_cpu_prepare, zs_cpu_dead);
	if (ret)
		goto hp_setup_fail;

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zs_zpool_driver);
#endif

	zs_stat_init();

	return 0;

hp_setup_fail:
	zsmalloc_unmount();
out:
	return ret;
}

static void __exit zs_exit(void)
{
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif
	zsmalloc_unmount();
	cpuhp_remove_state(CPUHP_MM_ZS_PREPARE);

	zs_stat_exit();
}

module_init(zs_init);
module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");