/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 *		 2002-2011 Hugh Dickins.
 *		 2011 Google Inc.
 *		 2002-2005 VERITAS Software Corporation.
 *		 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>
#include <linux/hugetlb.h>
#include <linux/frontswap.h>
#include <linux/fs_parser.h>
#include <linux/swapfile.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
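/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096 and
 * PAGE_SHIFT == 12): BLOCKS_PER_PAGE is 8, since inode->i_blocks counts
 * 512-byte units; and VM_ACCT(5000) rounds 5000 bytes up to a page
 * boundary, 8192 >> 12 == 2, so two pages are charged against the
 * overcommit limit.
 */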

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_rwsem making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

struct shmem_options {
	unsigned long long blocks;
	unsigned long long inodes;
	struct mempolicy *mpol;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	bool full_inums;
	int huge;
	int seen;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	unsigned long nr_pages = totalram_pages();

	return min(nr_pages - totalhigh_pages(), nr_pages / 2);
}
#endif
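/*
 * For illustration: these defaults (half of RAM for blocks, a RAM-derived
 * cap for inodes) apply only when a tmpfs mount specifies no limits; both
 * can be overridden from userspace, e.g. (mount point hypothetical):
 *
 *	mount("tmpfs", "/mnt/tmp", "tmpfs", 0,
 *	      "size=512m,nr_inodes=8192,inode64");
 */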

static int shmem_swapin_page(struct inode *inode, pgoff_t index,
			     struct page **pagep, enum sgp_type sgp,
			     gfp_t gfp, struct vm_area_struct *vma,
			     vm_fault_t *fault_type);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp,
		gfp_t gfp, struct vm_area_struct *vma,
		struct vm_fault *vmf, vm_fault_t *fault_type);

int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
}
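/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * shmem_getpage() returns with the page locked and a reference held, so
 * a caller must unlock and release it when done:
 *
 *	struct page *page;
 *	int err = shmem_getpage(inode, index, &page, SGP_CACHE);
 *
 *	if (!err && page) {
 *		// ... use page contents ...
 *		unlock_page(page);
 *		put_page(page);
 *	}
 */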

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE), since the object's size is known up front.
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_acct_block(info->flags, pages))
		return false;

	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}

	return true;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return false;
}

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

static const struct super_operations shmem_ops;
const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

/*
 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 * produces a novel ino for the newly allocated inode.
 *
 * It may also be called when making a hard link to permit the space needed by
 * each dentry. However, in that case, no new inode number is needed since that
 * internally draws from another pool of inode numbers (currently global
 * get_next_ino()). This case is indicated by passing NULL as inop.
 */
#define SHMEM_INO_BATCH 1024
static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;

	if (!(sb->s_flags & SB_KERNMOUNT)) {
		raw_spin_lock(&sbinfo->stat_lock);
		if (sbinfo->max_inodes) {
			if (!sbinfo->free_inodes) {
				raw_spin_unlock(&sbinfo->stat_lock);
				return -ENOSPC;
			}
			sbinfo->free_inodes--;
		}
		if (inop) {
			ino = sbinfo->next_ino++;
			if (unlikely(is_zero_ino(ino)))
				ino = sbinfo->next_ino++;
			if (unlikely(!sbinfo->full_inums &&
				     ino > UINT_MAX)) {
				/*
				 * Emulate get_next_ino uint wraparound for
				 * compatibility
				 */
				if (IS_ENABLED(CONFIG_64BIT))
					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
						__func__, MINOR(sb->s_dev));
				sbinfo->next_ino = 1;
				ino = sbinfo->next_ino++;
			}
			*inop = ino;
		}
		raw_spin_unlock(&sbinfo->stat_lock);
	} else if (inop) {
		/*
		 * __shmem_file_setup, one of our callers, is lock-free: it
		 * doesn't hold stat_lock in shmem_reserve_inode since
		 * max_inodes is always 0, and is called from potentially
		 * unknown contexts. As such, use a per-cpu batched allocator
		 * which doesn't require the per-sb stat_lock unless we are at
		 * the batch boundary.
		 *
		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
		 * shouldn't be possible to change on a kernel mount.
		 */
		ino_t *next_ino;

		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
		ino = *next_ino;
		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
			raw_spin_lock(&sbinfo->stat_lock);
			ino = sbinfo->next_ino;
			sbinfo->next_ino += SHMEM_INO_BATCH;
			raw_spin_unlock(&sbinfo->stat_lock);
			if (unlikely(is_zero_ino(ino)))
				ino++;
		}
		*inop = ino;
		*next_ino = ++ino;
		put_cpu();
	}

	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		raw_spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		raw_spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_inode_unacct_blocks(inode, freed);
	}
}

bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	if (!shmem_inode_acct_block(inode, pages))
		return false;

	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
	inode->i_mapping->nrpages += pages;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	/* nrpages adjustment done by __delete_from_page_cache() or caller */

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	shmem_inode_unacct_blocks(inode, pages);
}

/*
 * Replace item expected at index with replacement, with the i_pages lock held.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused after its precursor is freed, so we cannot rely on the
 * page alone and must confirm the swap entry is still in the page cache.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 */

#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;

bool shmem_is_huge(struct vm_area_struct *vma,
		   struct inode *inode, pgoff_t index)
{
	loff_t i_size;

	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;
	if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
		return false;
	if (shmem_huge == SHMEM_HUGE_FORCE)
		return true;

	switch (SHMEM_SB(inode->i_sb)->huge) {
	case SHMEM_HUGE_ALWAYS:
		return true;
	case SHMEM_HUGE_WITHIN_SIZE:
		index = round_up(index + 1, HPAGE_PMD_NR);
		i_size = round_up(i_size_read(inode), PAGE_SIZE);
		if (i_size >> PAGE_SHIFT >= index)
			return true;
		fallthrough;
	case SHMEM_HUGE_ADVISE:
		if (vma && (vma->vm_flags & VM_HUGEPAGE))
			return true;
		fallthrough;
	default:
		return false;
	}
}
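/*
 * Userspace sketch (illustrative): on a mount with huge=advise
 * (SHMEM_HUGE_ADVISE), a mapping opts in via madvise(), which sets the
 * VM_HUGEPAGE flag tested above:
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, tmpfs_fd, 0);
 *	madvise(p, len, MADV_HUGEPAGE);
 *
 * tmpfs_fd and len are the caller's own.
 */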

#if defined(CONFIG_SYSFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}
#endif
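/*
 * For illustration: shmem_parse_huge() backs the global sysfs knob, so
 * the system-wide policy can be changed from userspace, e.g.:
 *
 *	int fd = open("/sys/kernel/mm/transparent_hugepage/shmem_enabled",
 *		      O_WRONLY);
 *	write(fd, "within_size", 11);
 *	close(fd);
 */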

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct page *page;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int removed = 0, split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			removed++;
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			removed++;
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		int ret;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split)
			goto leave;

		page = find_get_page(inode->i_mapping,
				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
		if (!page)
			goto drop;

		/* No huge page at the end of the file: nothing to split */
		if (!PageTransHuge(page)) {
			put_page(page);
			goto drop;
		}

		/*
		 * Leave the inode on the list if we failed to lock
		 * the page at this time.
		 *
		 * Waiting for the lock may lead to deadlock in the
		 * reclaim path.
		 */
		if (!trylock_page(page)) {
			put_page(page);
			goto leave;
		}

		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);

		/* If split failed leave the inode on the list */
		if (ret)
			goto leave;

		split++;
drop:
		list_del_init(&info->shrinklist);
		removed++;
leave:
		iput(inode);
	}

	spin_lock(&sbinfo->shrinklist_lock);
	list_splice_tail(&list, &sbinfo->shrinklist);
	sbinfo->shrinklist_len -= removed;
	spin_unlock(&sbinfo->shrinklist_lock);

	return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define shmem_huge SHMEM_HUGE_DENY

bool shmem_is_huge(struct vm_area_struct *vma,
		   struct inode *inode, pgoff_t index)
{
	return false;
}

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, void *expected, gfp_t gfp,
				   struct mm_struct *charge_mm)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
	unsigned long i = 0;
	unsigned long nr = compound_nr(page);
	int error;

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
	VM_BUG_ON(expected && PageTransHuge(page));

	page_ref_add(page, nr);
	page->mapping = mapping;
	page->index = index;

	if (!PageSwapCache(page)) {
		error = mem_cgroup_charge(page, charge_mm, gfp);
		if (error) {
			if (PageTransHuge(page)) {
				count_vm_event(THP_FILE_FALLBACK);
				count_vm_event(THP_FILE_FALLBACK_CHARGE);
			}
			goto error;
		}
	}
	cgroup_throttle_swaprate(page, gfp);

	do {
		void *entry;
		xas_lock_irq(&xas);
		entry = xas_find_conflict(&xas);
		if (entry != expected)
			xas_set_err(&xas, -EEXIST);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
next:
		xas_store(&xas, page);
		if (++i < nr) {
			xas_next(&xas);
			goto next;
		}
		if (PageTransHuge(page)) {
			count_vm_event(THP_FILE_ALLOC);
			__mod_lruvec_page_state(page, NR_SHMEM_THPS, nr);
		}
		mapping->nrpages += nr;
		__mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
		__mod_lruvec_page_state(page, NR_SHMEM, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		error = xas_error(&xas);
		goto error;
	}

	return 0;
error:
	page->mapping = NULL;
	page_ref_sub(page, nr);
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	VM_BUG_ON_PAGE(PageCompound(page), page);

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_lruvec_page_state(page, NR_FILE_PAGES);
	__dec_lruvec_page_state(page, NR_SHMEM);
	xa_unlock_irq(&mapping->i_pages);
	put_page(page);
	BUG_ON(error);
}

/*
 * Remove swap entry from page cache, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();
	xas_for_each(&xas, page, end - 1) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			swapped++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma is swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping,
			linear_page_index(vma, vma->vm_start),
			linear_page_index(vma, vma->vm_end));
}
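/*
 * Illustrative note: this accounting surfaces to userspace per mapping,
 * e.g. as the "Swap:" field of /proc/<pid>/smaps for a shmem mapping, so
 * it can be read without special tooling (sketch, prints every mapping's
 * Swap line):
 *
 *	FILE *f = fopen("/proc/self/smaps", "r");
 *	char line[256];
 *	while (f && fgets(line, sizeof(line), f))
 *		if (!strncmp(line, "Swap:", 5))
 *			printf("%s", line);
 */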

/*
 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;

	pagevec_init(&pvec);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		if (!pagevec_lookup(&pvec, mapping, &index))
			break;
		check_move_unevictable_pages(&pvec);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Check whether a hole-punch or truncation needs to split a huge page,
 * returning true if no split was required, or the split has been successful.
 *
 * Eviction (or truncation to 0 size) removes whole huge pages and should
 * not need a split; hole-punches that only partially cover a huge page do.
 */
static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
{
	if (!PageTransCompound(page))
		return true;

	/* Just proceed to delete a huge page wholly within the range punched */
	if (PageHead(page) &&
	    page->index >= start && page->index + HPAGE_PMD_NR <= end)
		return true;

	/* Try to split huge page, so we can truly punch the hole or truncate */
	return split_huge_page(page) >= 0;
}

/*
 * Remove range of pages and swap entries from page cache, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	if (info->fallocend > start && info->fallocend <= end && !unfalloc)
		info->fallocend = start;

	pagevec_init(&pvec);
	index = start;
	while (index < end && find_lock_entries(mapping, index, end - 1,
			&pvec, indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];

			if (xa_is_value(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}
			index += thp_nr_pages(page) - 1;

			if (!unfalloc || !PageUptodate(page))
				truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	while (index < end) {
		cond_resched();

		if (!find_get_entries(mapping, index, end - 1, &pvec,
				indices)) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (xa_is_value(page)) {
				if (unfalloc)
					continue;
				if (shmem_free_swap(mapping, index, page)) {
					/* Swap was replaced by page: retry */
					index--;
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			lock_page(page);

			if (!unfalloc || !PageUptodate(page)) {
				if (page_mapping(page) != mapping) {
					/* Page was replaced by swap: retry */
					unlock_page(page);
					index--;
					break;
				}
				VM_BUG_ON_PAGE(PageWriteback(page), page);
				if (shmem_punch_compound(page, start, end))
					truncate_inode_page(mapping, page);
				else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
					/* Wipe the page and don't get stuck */
					clear_highpage(page);
					flush_dcache_page(page);
					set_page_dirty(page);
					if (index <
					    round_up(start, HPAGE_PMD_NR))
						start = index + 1;
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}

	spin_lock_irq(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = current_time(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
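/*
 * For illustration, hole-punching a tmpfs file from userspace ends up
 * here via shmem_fallocate():
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, length);
 *
 * and ftruncate(fd, newsize) reaches it through shmem_setattr().
 */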

static int shmem_getattr(struct user_namespace *mnt_userns,
			 const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = path->dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock_irq(&info->lock);
	}
	generic_fillattr(&init_user_ns, inode, stat);

	if (shmem_is_huge(NULL, inode, 0))
		stat->blksize = HPAGE_PMD_SIZE;

	return 0;
}

static int shmem_setattr(struct user_namespace *mnt_userns,
			 struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct shmem_inode_info *info = SHMEM_I(inode);
	int error;

	error = setattr_prepare(&init_user_ns, dentry, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;

		if (newsize != oldsize) {
			error = shmem_reacct_size(SHMEM_I(inode)->flags,
					oldsize, newsize);
			if (error)
				return error;
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = current_time(inode);
		}
		if (newsize <= oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);
			if (info->alloced)
				shmem_truncate_range(inode,
							newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);
		}
	}

	setattr_copy(&init_user_ns, inode, attr);
	if (attr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_mapping(inode->i_mapping)) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->shrinklist)) {
			spin_lock(&sbinfo->shrinklist_lock);
			if (!list_empty(&info->shrinklist)) {
				list_del_init(&info->shrinklist);
				sbinfo->shrinklist_len--;
			}
			spin_unlock(&sbinfo->shrinklist_lock);
		}
		while (!list_empty(&info->swaplist)) {
			/* Wait while shmem_unuse() is scanning this inode... */
			wait_var_event(&info->stop_eviction,
				       !atomic_read(&info->stop_eviction));
			mutex_lock(&shmem_swaplist_mutex);
			/* ...but beware of the race if we peeked too early */
			if (!atomic_read(&info->stop_eviction))
				list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}

	simple_xattrs_free(&info->xattrs);
	WARN_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

static int shmem_find_swap_entries(struct address_space *mapping,
				   pgoff_t start, unsigned int nr_entries,
				   struct page **entries, pgoff_t *indices,
				   unsigned int type, bool frontswap)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	swp_entry_t entry;
	unsigned int ret = 0;

	if (!nr_entries)
		return 0;

	rcu_read_lock();
	xas_for_each(&xas, page, ULONG_MAX) {
		if (xas_retry(&xas, page))
			continue;

		if (!xa_is_value(page))
			continue;

		entry = radix_to_swp_entry(page);
		if (swp_type(entry) != type)
			continue;
		if (frontswap &&
		    !frontswap_test(swap_info[type], swp_offset(entry)))
			continue;

		indices[ret] = xas.xa_index;
		entries[ret] = page;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
		if (++ret == nr_entries)
			break;
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Move the swapped pages for an inode to page cache, from all the swap
 * entries found by the caller in @pvec, at the indices in @indices.
 */
static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
				    pgoff_t *indices)
{
	int i = 0;
	int ret = 0;
	int error = 0;
	struct address_space *mapping = inode->i_mapping;

	for (i = 0; i < pvec.nr; i++) {
		struct page *page = pvec.pages[i];

		if (!xa_is_value(page))
			continue;
		error = shmem_swapin_page(inode, indices[i],
					  &page, SGP_CACHE,
					  mapping_gfp_mask(mapping),
					  NULL, NULL);
		if (error == 0) {
			unlock_page(page);
			put_page(page);
			ret++;
		}
		if (error == -ENOMEM)
			break;
		error = 0;
	}
	return error ? error : ret;
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct inode *inode, unsigned int type,
			     bool frontswap, unsigned long *fs_pages_to_unuse)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t start = 0;
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0);
	int ret = 0;

	pagevec_init(&pvec);
	do {
		unsigned int nr_entries = PAGEVEC_SIZE;

		if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE)
			nr_entries = *fs_pages_to_unuse;

		pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
						  pvec.pages, indices,
						  type, frontswap);
		if (pvec.nr == 0) {
			ret = 0;
			break;
		}

		ret = shmem_unuse_swap_entries(inode, pvec, indices);
		if (ret < 0)
			break;

		if (frontswap_partial) {
			*fs_pages_to_unuse -= ret;
			if (*fs_pages_to_unuse == 0) {
				ret = FRONTSWAP_PAGES_UNUSED;
				break;
			}
		}

		start = indices[pvec.nr - 1];
	} while (true);

	return ret;
}

/*
 * Read all the shared memory data that resides in the swap
 * device 'type' back into memory, so the swap device can be
 * unused.
 */
int shmem_unuse(unsigned int type, bool frontswap,
		unsigned long *fs_pages_to_unuse)
{
	struct shmem_inode_info *info, *next;
	int error = 0;

	if (list_empty(&shmem_swaplist))
		return 0;

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
		if (!info->swapped) {
			list_del_init(&info->swaplist);
			continue;
		}
		/*
		 * Drop the swaplist mutex while searching the inode for swap;
		 * but before doing so, make sure shmem_evict_inode() will not
		 * remove placeholder inode from swaplist, nor let it be freed
		 * (igrab() would protect from unlink, but not from unmount).
		 */
		atomic_inc(&info->stop_eviction);
		mutex_unlock(&shmem_swaplist_mutex);

		error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
					  fs_pages_to_unuse);
		cond_resched();

		mutex_lock(&shmem_swaplist_mutex);
		next = list_next_entry(info, swaplist);
		if (!info->swapped)
			list_del_init(&info->swaplist);
		if (atomic_dec_and_test(&info->stop_eviction))
			wake_up_var(&info->stop_eviction);
		if (error)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	return error;
}
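/*
 * Illustrative path: disabling a swap device from userspace, e.g.
 *
 *	swapoff("/dev/sdb2");	// device path hypothetical
 *
 * reaches try_to_unuse(), which calls shmem_unuse() so that shmem pages
 * still resident on that device are read back into the page cache first.
 */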

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;
	pgoff_t index;

	/*
	 * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or
	 * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages,
	 * and its shmem_writeback() needs them to be split when swapping.
	 */
	if (PageTransCompound(page)) {
		/* Ensure the subpages are still dirty */
		SetPageDirty(page);
		if (split_huge_page(page) < 0)
			goto redirty;
		ClearPageDirty(page);
	}

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * Our capabilities prevent regular writeback or sync from ever calling
	 * shmem_writepage; but a stacking filesystem might use ->writepage of
	 * its underlying filesystem, in which case tmpfs should write out to
	 * swap only in response to memory pressure, and not for the writeback
	 * threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}

	/*
	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
	 * value into swapfile.c, the only way we can correctly account for a
	 * fallocated page arriving here is now to initialize it and write it.
	 *
	 * That's okay for a page already fallocated earlier, but if we have
	 * not yet completed the fallocation, then (a) we want to keep track
	 * of this page in case we have to undo it, and (b) it may not be a
	 * good idea to continue anyway, once we're pushing into swap.  So
	 * simply redirty the page, and let shmem_fallocate() know about it.
	 */
	if (!PageUptodate(page)) {
		if (inode->i_private) {
			struct shmem_falloc *shmem_falloc;
			spin_lock(&inode->i_lock);
			shmem_falloc = inode->i_private;
			if (shmem_falloc &&
			    !shmem_falloc->waitq &&
			    index >= shmem_falloc->start &&
			    index < shmem_falloc->next)
				shmem_falloc->nr_unswapped++;
			else
				shmem_falloc = NULL;
			spin_unlock(&inode->i_lock);
			if (shmem_falloc)
				goto redirty;
		}
		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}

	swap = get_swap_page(page);
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the page is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(page, swap,
			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
			NULL) == 0) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		info->swapped++;
		spin_unlock_irq(&info->lock);

		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	put_swap_page(page, swap);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

#if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		raw_spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		raw_spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#else /* !CONFIG_NUMA || !CONFIG_TMPFS */
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif /* CONFIG_NUMA && CONFIG_TMPFS */
#ifndef CONFIG_NUMA
#define vm_policy vm_private_data
#endif

static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
		struct shmem_inode_info *info, pgoff_t index)
{
	/* Create a pseudo vma that just contains the policy */
	vma_init(vma, NULL);
	/* Bias interleave by inode number to distribute better across nodes */
	vma->vm_pgoff = index + info->vfs_inode.i_ino;
	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
}

static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
{
	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(vma->vm_policy);
}

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;
	struct vm_fault vmf = {
		.vma = &pvma,
	};

	shmem_pseudo_vma_init(&pvma, info, index);
	page = swap_cluster_readahead(swap, gfp, &vmf);
	shmem_pseudo_vma_destroy(&pvma);

	return page;
}

/*
 * Make sure huge_gfp is always more limited than limit_gfp.
 * Some of the flags set permissions, while others set limitations.
 */
static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
{
	gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
	gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
	gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
	gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);

	/* Allow allocations only from the originally specified zones. */
	result |= zoneflags;

	/*
	 * Minimize the result gfp by taking the union with the deny flags,
	 * and the intersection of the allow flags.
	 */
	result |= (limit_gfp & denyflags);
	result |= (huge_gfp & limit_gfp) & allowflags;

	return result;
}

static struct page *shmem_alloc_hugepage(gfp_t gfp,
		struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct address_space *mapping = info->vfs_inode.i_mapping;
	pgoff_t hindex;
	struct page *page;

	hindex = round_down(index, HPAGE_PMD_NR);
	if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
								XA_PRESENT))
		return NULL;

	shmem_pseudo_vma_init(&pvma, info, hindex);
	page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(),
			       true);
	shmem_pseudo_vma_destroy(&pvma);
	if (page)
		prep_transhuge_page(page);
	else
		count_vm_event(THP_FILE_FALLBACK);
	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	shmem_pseudo_vma_init(&pvma, info, index);
	page = alloc_page_vma(gfp, &pvma, 0);
	shmem_pseudo_vma_destroy(&pvma);

	return page;
}

static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
		struct inode *inode,
		pgoff_t index, bool huge)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct page *page;
	int nr;
	int err = -ENOSPC;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		huge = false;
	nr = huge ? HPAGE_PMD_NR : 1;

	if (!shmem_inode_acct_block(inode, nr))
		goto failed;

	if (huge)
		page = shmem_alloc_hugepage(gfp, info, index);
	else
		page = shmem_alloc_page(gfp, info, index);
	if (page) {
		__SetPageLocked(page);
		__SetPageSwapBacked(page);
		return page;
	}

	err = -ENOMEM;
	shmem_inode_unacct_blocks(inode, nr);
failed:
	return ERR_PTR(err);
}

/*
 * When a page is moved from swapcache to shmem filecache (either by the
 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
 * shmem_unuse_inode()), it may have been read in earlier from swap, in
 * ignorance of the mapping it belongs to.  If that mapping has special
 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
 * we may need to copy to a suitable page before moving to filecache.
 *
 * In a future release, this may well be extended to respect cpuset and
 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
 * but for now it is a simple matter of zone.
 */
static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
{
	return page_zonenum(page) > gfp_zone(gfp);
}

static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index)
{
	struct page *oldpage, *newpage;
	struct address_space *swap_mapping;
	swp_entry_t entry;
	pgoff_t swap_index;
	int error;

	oldpage = *pagep;
	entry.val = page_private(oldpage);
	swap_index = swp_offset(entry);
	swap_mapping = page_mapping(oldpage);

	/*
	 * We have arrived here because our zones are constrained, so don't
	 * limit chance of success by further cpuset and node constraints.
	 */
	gfp &= ~GFP_CONSTRAINT_MASK;
	newpage = shmem_alloc_page(gfp, info, index);
	if (!newpage)
		return -ENOMEM;

	get_page(newpage);
	copy_highpage(newpage, oldpage);
	flush_dcache_page(newpage);

	__SetPageLocked(newpage);
	__SetPageSwapBacked(newpage);
	SetPageUptodate(newpage);
	set_page_private(newpage, entry.val);
	SetPageSwapCache(newpage);

	/*
	 * Our caller will very soon move newpage out of swapcache, but it's
	 * a nice clean interface for us to replace oldpage by newpage there.
	 */
	xa_lock_irq(&swap_mapping->i_pages);
	error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
	if (!error) {
		mem_cgroup_migrate(oldpage, newpage);
		__inc_lruvec_page_state(newpage, NR_FILE_PAGES);
		__dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
	}
	xa_unlock_irq(&swap_mapping->i_pages);

	if (unlikely(error)) {
		/*
		 * Is this possible?  I think not, now that our callers check
		 * both PageSwapCache and page_private after getting page lock;
		 * but be defensive.  Reverse old to newpage for clear and free.
		 */
		oldpage = newpage;
	} else {
		lru_cache_add(newpage);
		*pagep = newpage;
	}

	ClearPageSwapCache(oldpage);
	set_page_private(oldpage, 0);

	unlock_page(oldpage);
	put_page(oldpage);
	put_page(oldpage);
	return error;
}

/*
 * Swap in the page pointed to by *pagep.
 * Caller has to make sure that *pagep contains a valid swapped page.
 * Returns 0 and the page in pagep if success. On failure, returns the
 * error code and NULL in *pagep.
 */
static int shmem_swapin_page(struct inode *inode, pgoff_t index,
			     struct page **pagep, enum sgp_type sgp,
			     gfp_t gfp, struct vm_area_struct *vma,
			     vm_fault_t *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
	struct page *page;
	swp_entry_t swap;
	int error;

	VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
	swap = radix_to_swp_entry(*pagep);
	*pagep = NULL;

	/* Look it up and read it in.. */
	page = lookup_swap_cache(swap, NULL, 0);
	if (!page) {
		/* Or update major stats only when swapin succeeds?? */
		if (fault_type) {
			*fault_type |= VM_FAULT_MAJOR;
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(charge_mm, PGMAJFAULT);
		}
		/* Here we actually start the io */
		page = shmem_swapin(swap, gfp, info, index);
		if (!page) {
			error = -ENOMEM;
			goto failed;
		}
	}

	/* We have to do this with page locked to prevent races */
	lock_page(page);
	if (!PageSwapCache(page) || page_private(page) != swap.val ||
	    !shmem_confirm_swap(mapping, index, swap)) {
		error = -EEXIST;	/* try again */
		goto unlock;
	}
	if (!PageUptodate(page)) {
		error = -EIO;
		goto failed;
	}
	wait_on_page_writeback(page);

	/*
	 * Some architectures may have to restore extra metadata to the
	 * physical page after reading from swap.
	 */
	arch_swap_restore(swap, page);

	if (shmem_should_replace_page(page, gfp)) {
		error = shmem_replace_page(&page, gfp, info, index);
		if (error)
			goto failed;
	}

	error = shmem_add_to_page_cache(page, mapping, index,
					swp_to_radix_entry(swap), gfp,
					charge_mm);
	if (error)
		goto failed;

	spin_lock_irq(&info->lock);
	info->swapped--;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);

	if (sgp == SGP_WRITE)
		mark_page_accessed(page);

	delete_from_swap_cache(page);
	set_page_dirty(page);
	swap_free(swap);

	*pagep = page;
	return 0;
failed:
	if (!shmem_confirm_swap(mapping, index, swap))
		error = -EEXIST;
unlock:
	if (page) {
		unlock_page(page);
		put_page(page);
	}

	return error;
}

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache.
 *
 * vma, vmf, and fault_type are only supplied by shmem_fault:
 * otherwise they are NULL.
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp,
	struct vm_area_struct *vma, struct vm_fault *vmf,
	vm_fault_t *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct mm_struct *charge_mm;
	struct page *page;
	pgoff_t hindex = index;
	gfp_t huge_gfp;
	int error;
	int once = 0;
	int alloced = 0;

	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
		return -EFBIG;
repeat:
	if (sgp <= SGP_CACHE &&
	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode))
		return -EINVAL;

	sbinfo = SHMEM_SB(inode->i_sb);
	charge_mm = vma ? vma->vm_mm : NULL;

	page = pagecache_get_page(mapping, index,
					FGP_ENTRY | FGP_HEAD | FGP_LOCK, 0);

	if (page && vma && userfaultfd_minor(vma)) {
		if (!xa_is_value(page)) {
			unlock_page(page);
			put_page(page);
		}
		*fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
		return 0;
	}

	if (xa_is_value(page)) {
		error = shmem_swapin_page(inode, index, &page,
					  sgp, gfp, vma, fault_type);
		if (error == -EEXIST)
			goto repeat;

		*pagep = page;
		return error;
	}

	if (page) {
		hindex = page->index;
		if (sgp == SGP_WRITE)
			mark_page_accessed(page);
		if (PageUptodate(page))
			goto out;
		/* fallocated page */
		if (sgp != SGP_READ)
			goto clear;
		unlock_page(page);
		put_page(page);
	}

	/*
	 * SGP_READ: succeed on hole, with NULL page, letting caller zero.
	 * SGP_NOALLOC: fail on hole, with NULL page, letting caller fail.
	 */
	*pagep = NULL;
	if (sgp == SGP_READ)
		return 0;
	if (sgp == SGP_NOALLOC)
		return -ENOENT;

	/*
	 * Fast cache lookup and swap lookup did not find it: allocate.
	 */

	if (vma && userfaultfd_missing(vma)) {
		*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
		return 0;
	}

	/* Never use a huge page for shmem_symlink() */
	if (S_ISLNK(inode->i_mode))
		goto alloc_nohuge;
	if (!shmem_is_huge(vma, inode, index))
		goto alloc_nohuge;

	huge_gfp = vma_thp_gfp_mask(vma);
	huge_gfp = limit_gfp_mask(huge_gfp, gfp);
	page = shmem_alloc_and_acct_page(huge_gfp, inode, index, true);
	if (IS_ERR(page)) {
alloc_nohuge:
		page = shmem_alloc_and_acct_page(gfp, inode,
						 index, false);
	}
	if (IS_ERR(page)) {
		int retry = 5;

		error = PTR_ERR(page);
		page = NULL;
		if (error != -ENOSPC)
			goto unlock;
		/*
		 * Try to reclaim some space by splitting a huge page
		 * beyond i_size on the filesystem.
		 */
		while (retry--) {
			int ret;

			ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
			if (ret == SHRINK_STOP)
				break;
			if (ret)
				goto alloc_nohuge;
		}
		goto unlock;
	}

	if (PageTransHuge(page))
		hindex = round_down(index, HPAGE_PMD_NR);
	else
		hindex = index;

	if (sgp == SGP_WRITE)
		__SetPageReferenced(page);

	error = shmem_add_to_page_cache(page, mapping, hindex,
					NULL, gfp & GFP_RECLAIM_MASK,
					charge_mm);
	if (error)
		goto unacct;
	lru_cache_add(page);

	spin_lock_irq(&info->lock);
	info->alloced += compound_nr(page);
	inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);
	alloced = true;

	if (PageTransHuge(page) &&
	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
			hindex + HPAGE_PMD_NR - 1) {
		/*
		 * Part of the huge page is beyond i_size: subject
		 * to shrink under memory pressure.
		 */
		spin_lock(&sbinfo->shrinklist_lock);
		/*
		 * _careful to defend against unlocked access to
		 * ->shrink_list in shmem_unused_huge_shrink()
		 */
		if (list_empty_careful(&info->shrinklist)) {
			list_add_tail(&info->shrinklist,
				      &sbinfo->shrinklist);
			sbinfo->shrinklist_len++;
		}
		spin_unlock(&sbinfo->shrinklist_lock);
	}

	/*
	 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
	 */
	if (sgp == SGP_FALLOC)
		sgp = SGP_WRITE;
clear:
	/*
	 * Let SGP_WRITE caller clear ends if write does not fill page;
	 * but SGP_FALLOC on a page fallocated earlier must initialize
	 * it now, lest undo on failure cancel our earlier guarantee.
	 */
	if (sgp != SGP_WRITE && !PageUptodate(page)) {
		int i;

		for (i = 0; i < compound_nr(page); i++) {
			clear_highpage(page + i);
			flush_dcache_page(page + i);
		}
		SetPageUptodate(page);
	}

	/* Perhaps the file has been truncated since we checked */
	if (sgp <= SGP_CACHE &&
	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
		if (alloced) {
			ClearPageDirty(page);
			delete_from_page_cache(page);
			spin_lock_irq(&info->lock);
			shmem_recalc_inode(inode);
			spin_unlock_irq(&info->lock);
		}
		error = -EINVAL;
		goto unlock;
	}
out:
	*pagep = page + index - hindex;
	return 0;

	/*
	 * Error recovery.
	 */
unacct:
	shmem_inode_unacct_blocks(inode, compound_nr(page));

	if (PageTransHuge(page)) {
		unlock_page(page);
		put_page(page);
		goto alloc_nohuge;
	}
unlock:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	if (error == -ENOSPC && !once++) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock_irq(&info->lock);
		goto repeat;
	}
	if (error == -EEXIST)
		goto repeat;
	return error;
}

/*
 * This is like autoremove_wake_function, but it removes the wait queue
 * entry unconditionally - even if something else had already woken the
 * target.
 */
static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wait, mode, sync, key);
	list_del_init(&wait->entry);
	return ret;
}

static vm_fault_t shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
	int err;
	vm_fault_t ret = VM_FAULT_LOCKED;

	/*
	 * Trinity finds that probing a hole which tmpfs is punching can
	 * prevent the hole-punch from ever completing: which in turn
	 * locks writers out with its hold on i_rwsem.  So refrain from
	 * faulting pages into the hole while it's being punched.  Although
	 * shmem_undo_range() does remove the additions, it may be unable to
	 * keep up, as each new page needs its own unmap_mapping_range() call,
	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
	 *
	 * It does not matter if we sometimes reach this check just before the
	 * hole-punch begins, so that one fault then races with the punch:
	 * we just need to make racing faults a rare case.
	 *
	 * The implementation below would be much simpler if we just used a
	 * standard mutex or completion: but we cannot take i_rwsem in fault,
	 * and bloating every shmem inode for this unlikely case would be sad.
	 */
	if (unlikely(inode->i_private)) {
		struct shmem_falloc *shmem_falloc;

		spin_lock(&inode->i_lock);
		shmem_falloc = inode->i_private;
		if (shmem_falloc &&
		    shmem_falloc->waitq &&
		    vmf->pgoff >= shmem_falloc->start &&
		    vmf->pgoff < shmem_falloc->next) {
			struct file *fpin;
			wait_queue_head_t *shmem_falloc_waitq;
			DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);

			ret = VM_FAULT_NOPAGE;
			fpin = maybe_unlock_mmap_for_io(vmf, NULL);
			if (fpin)
				ret = VM_FAULT_RETRY;

			shmem_falloc_waitq = shmem_falloc->waitq;
			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock(&inode->i_lock);
			schedule();

			/*
			 * shmem_falloc_waitq points into the shmem_fallocate()
			 * stack of the hole-punching task: shmem_falloc_waitq
			 * is usually invalid by the time we reach here, but
			 * finish_wait() does not dereference it in that case;
			 * though i_lock needed lest racing with wake_up_all().
			 */
			spin_lock(&inode->i_lock);
			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
			spin_unlock(&inode->i_lock);

			if (fpin)
				fput(fpin);
			return ret;
		}
		spin_unlock(&inode->i_lock);
	}

	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, SGP_CACHE,
				  gfp, vma, vmf, &ret);
	if (err)
		return vmf_error(err);
	return ret;
}
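/*
 * Behavioural note, for illustration: faulting a shared shmem mapping
 * beyond i_size raises SIGBUS, because shmem_getpage_gfp() returns
 * -EINVAL and vmf_error() maps that to VM_FAULT_SIGBUS:
 *
 *	int fd = memfd_create("demo", 0);	// name is arbitrary
 *	ftruncate(fd, 4096);
 *	char *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	p[0] = 1;	// fine: within i_size
 *	p[4096] = 1;	// beyond EOF: delivers SIGBUS
 */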

unsigned long shmem_get_unmapped_area(struct file *file,
				      unsigned long uaddr, unsigned long len,
				      unsigned long pgoff, unsigned long flags)
{
	unsigned long (*get_area)(struct file *,
		unsigned long, unsigned long, unsigned long, unsigned long);
	unsigned long addr;
	unsigned long offset;
	unsigned long inflated_len;
	unsigned long inflated_addr;
	unsigned long inflated_offset;

	if (len > TASK_SIZE)
		return -ENOMEM;

	get_area = current->mm->get_unmapped_area;
	addr = get_area(file, uaddr, len, pgoff, flags);

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return addr;
	if (IS_ERR_VALUE(addr))
		return addr;
	if (addr & ~PAGE_MASK)
		return addr;
	if (addr > TASK_SIZE - len)
		return addr;

	if (shmem_huge == SHMEM_HUGE_DENY)
		return addr;
	if (len < HPAGE_PMD_SIZE)
		return addr;
	if (flags & MAP_FIXED)
		return addr;
	/*
	 * Our priority is to support MAP_SHARED mapped hugely;
	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
	 * But if caller specified an address hint and we allocated area there
	 * successfully, respect that as before.
	 */
	if (uaddr == addr)
		return addr;

	if (shmem_huge != SHMEM_HUGE_FORCE) {
		struct super_block *sb;

		if (file) {
			VM_BUG_ON(file->f_op != &shmem_file_operations);
			sb = file_inode(file)->i_sb;
		} else {
			/*
			 * Called directly from mm/mmap.c, or drivers/char/mem.c
			 * for "/dev/zero", to create a shared anonymous object.
			 */
			if (IS_ERR(shm_mnt))
				return addr;
			sb = shm_mnt->mnt_sb;
		}
		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
			return addr;
	}

	offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
	if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
		return addr;
	if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
		return addr;

	inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
	if (inflated_len > TASK_SIZE)
		return addr;
	if (inflated_len < len)
		return addr;

	inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
	if (IS_ERR_VALUE(inflated_addr))
		return addr;
	if (inflated_addr & ~PAGE_MASK)
		return addr;

	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
	inflated_addr += offset - inflated_offset;
	if (inflated_offset > offset)
		inflated_addr += HPAGE_PMD_SIZE;

	if (inflated_addr > TASK_SIZE - len)
		return addr;
	return inflated_addr;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	struct inode *inode = file_inode(vma->vm_file);
	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *inode = file_inode(vma->vm_file);
	pgoff_t index;

	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif

int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
{
	struct inode *inode = file_inode(file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	/*
	 * What serializes the accesses to info->flags?
	 * ipc_lock_object() when called from shmctl_do_lock(),
	 * no serialization needed when called from shm_destroy().
	 */
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, ucounts))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && ucounts) {
		user_shm_unlock(inode->i_size, ucounts);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
	}
	retval = 0;

out_nomem:
	return retval;
}
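/*
 * Userspace sketch (illustrative): SysV shared memory reaches
 * shmem_lock() through shmctl(2):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	shmctl(id, SHM_LOCK, NULL);	// pin: pages become unevictable
 *	shmctl(id, SHM_UNLOCK, NULL);	// undo: back to evictable lists
 */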

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shmem_inode_info *info = SHMEM_I(file_inode(file));
	int ret;

	ret = seal_check_future_write(info->seals, vma);
	if (ret)
		return ret;

	/* arm64 - allow memory tagging on RAM-based files */
	vma->vm_flags |= VM_MTE_ALLOWED;

	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
			(vma->vm_end & HPAGE_PMD_MASK)) {
		khugepaged_enter(vma, vma->vm_flags);
	}
	return 0;
}

static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     umode_t mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;

	if (shmem_reserve_inode(sb, &ino))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = ino;
		inode_init_owner(&init_user_ns, inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_generation = prandom_u32();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		atomic_set(&info->stop_eviction, 0);
		info->seals = F_SEAL_SEAL;
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->shrinklist);
		INIT_LIST_HEAD(&info->swaplist);
		simple_xattrs_init(&info->xattrs);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}

		lockdep_annotate_inode_mutex_key(inode);
	} else
		shmem_free_inode(sb);
	return inode;
}

#ifdef CONFIG_USERFAULTFD
int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
			   pmd_t *dst_pmd,
			   struct vm_area_struct *dst_vma,
			   unsigned long dst_addr,
			   unsigned long src_addr,
			   bool zeropage,
			   struct page **pagep)
{
	struct inode *inode = file_inode(dst_vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	gfp_t gfp = mapping_gfp_mask(mapping);
	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
	void *page_kaddr;
	struct page *page;
	int ret;
	pgoff_t max_off;

	if (!shmem_inode_acct_block(inode, 1)) {
		/*
		 * We may have got a page, returned -ENOENT triggering a retry,
		 * and now we find ourselves with -ENOMEM. Release the page, to
		 * avoid a BUG_ON in our caller.
		 */
		if (unlikely(*pagep)) {
			put_page(*pagep);
			*pagep = NULL;
		}
		return -ENOMEM;
	}

	if (!*pagep) {
		ret = -ENOMEM;
		page = shmem_alloc_page(gfp, info, pgoff);
		if (!page)
			goto out_unacct_blocks;

		if (!zeropage) {	/* COPY */
			page_kaddr = kmap_atomic(page);
			ret = copy_from_user(page_kaddr,
					     (const void __user *)src_addr,
					     PAGE_SIZE);
			kunmap_atomic(page_kaddr);

			/* fallback to copy_from_user outside mmap_lock */
			if (unlikely(ret)) {
				*pagep = page;
				ret = -ENOENT;
				/* don't free the page */
				goto out_unacct_blocks;
			}
		} else {		/* ZEROPAGE */
			clear_highpage(page);
		}
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	VM_BUG_ON(PageLocked(page));
	VM_BUG_ON(PageSwapBacked(page));
	__SetPageLocked(page);
	__SetPageSwapBacked(page);
	__SetPageUptodate(page);

	ret = -EFAULT;
	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	if (unlikely(pgoff >= max_off))
		goto out_release;

	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
				      gfp & GFP_RECLAIM_MASK, dst_mm);
	if (ret)
		goto out_release;

	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       page, true, false);
	if (ret)
		goto out_delete_from_cache;

	spin_lock_irq(&info->lock);
	info->alloced++;
	inode->i_blocks += BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);

	SetPageDirty(page);
	unlock_page(page);
	return 0;
out_delete_from_cache:
	delete_from_page_cache(page);
out_release:
	unlock_page(page);
	put_page(page);
out_unacct_blocks:
	shmem_inode_unacct_blocks(inode, 1);
	return ret;
}
#endif /* CONFIG_USERFAULTFD */
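/*
 * Userspace sketch (illustrative): a userfaultfd monitor resolves a
 * missing-page fault on a shmem-backed vma with UFFDIO_COPY, which is
 * what ultimately drives shmem_mfill_atomic_pte():
 *
 *	struct uffdio_copy copy = {
 *		.dst = fault_addr & ~(page_size - 1),
 *		.src = (unsigned long)local_buf,
 *		.len = page_size,
 *		.mode = 0,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 *
 * fault_addr, local_buf, page_size and uffd are the monitor's own state.
 */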

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;

#ifdef CONFIG_TMPFS_XATTR
static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
#else
#define shmem_initxattrs NULL
#endif

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t index = pos >> PAGE_SHIFT;

	/* i_rwsem is held by caller */
	if (unlikely(info->seals & (F_SEAL_GROW |
				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
			return -EPERM;
		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
			return -EPERM;
	}

	return shmem_getpage(inode, index, pagep, SGP_WRITE);
}
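/*
 * Userspace sketch (illustrative): the seal checks above are what make
 * memfd sealing effective against write(2):
 *
 *	int fd = memfd_create("buf", MFD_ALLOW_SEALING);
 *	ftruncate(fd, 4096);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_WRITE);
 *	write(fd, "x", 1);	// now fails with EPERM
 */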

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	if (!PageUptodate(page)) {
		struct page *head = compound_head(page);
		if (PageTransCompound(page)) {
			int i;

			for (i = 0; i < HPAGE_PMD_NR; i++) {
				if (head + i == page)
					continue;
				clear_highpage(head + i);
				flush_dcache_page(head + i);
			}
		}
		if (copied < PAGE_SIZE) {
			unsigned from = pos & (PAGE_SIZE - 1);
			zero_user_segments(page, 0, from,
					from + copied, PAGE_SIZE);
		}
		SetPageUptodate(head);
	}
	set_page_dirty(page);
	unlock_page(page);
	put_page(page);

	return copied;
}
2507
2508static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
2509{
2510 struct file *file = iocb->ki_filp;
2511 struct inode *inode = file_inode(file);
2512 struct address_space *mapping = inode->i_mapping;
2513 pgoff_t index;
2514 unsigned long offset;
2515 enum sgp_type sgp = SGP_READ;
2516 int error = 0;
2517 ssize_t retval = 0;
2518 loff_t *ppos = &iocb->ki_pos;
2519
/*
 * Might this read be for a stacking filesystem? Then when reading
 * holes of a sparse file, we actually need stable zeroed pages to be
 * allocated (SGP_CACHE, dirtied below), rather than copying from the
 * global ZERO_PAGE(0) as the ordinary iovec read path does.
 */
2525 if (!iter_is_iovec(to))
2526 sgp = SGP_CACHE;
2527
2528 index = *ppos >> PAGE_SHIFT;
2529 offset = *ppos & ~PAGE_MASK;
2530
2531 for (;;) {
2532 struct page *page = NULL;
2533 pgoff_t end_index;
2534 unsigned long nr, ret;
2535 loff_t i_size = i_size_read(inode);
2536
2537 end_index = i_size >> PAGE_SHIFT;
2538 if (index > end_index)
2539 break;
2540 if (index == end_index) {
2541 nr = i_size & ~PAGE_MASK;
2542 if (nr <= offset)
2543 break;
2544 }
2545
2546 error = shmem_getpage(inode, index, &page, sgp);
2547 if (error) {
2548 if (error == -EINVAL)
2549 error = 0;
2550 break;
2551 }
2552 if (page) {
2553 if (sgp == SGP_CACHE)
2554 set_page_dirty(page);
2555 unlock_page(page);
2556 }
2557
/*
 * We must evaluate after, since reads (unlike writes)
 * are called without i_rwsem protection against truncate
 */
2562 nr = PAGE_SIZE;
2563 i_size = i_size_read(inode);
2564 end_index = i_size >> PAGE_SHIFT;
2565 if (index == end_index) {
2566 nr = i_size & ~PAGE_MASK;
2567 if (nr <= offset) {
2568 if (page)
2569 put_page(page);
2570 break;
2571 }
2572 }
2573 nr -= offset;
2574
2575 if (page) {
/*
 * If users can be writing to this page using arbitrary
 * virtual addresses, take care about potential aliasing
 * before reading the page on the kernel side.
 */
2581 if (mapping_writably_mapped(mapping))
2582 flush_dcache_page(page);
2583
/*
 * Mark the page accessed if we read the beginning.
 */
2586 if (!offset)
2587 mark_page_accessed(page);
2588 } else {
2589 page = ZERO_PAGE(0);
2590 get_page(page);
2591 }
2592
/*
 * Ok, we have the page, and it's up-to-date, so
 * now we can copy it to user space...
 */
2597 ret = copy_page_to_iter(page, offset, nr, to);
2598 retval += ret;
2599 offset += ret;
2600 index += offset >> PAGE_SHIFT;
2601 offset &= ~PAGE_MASK;
2602
2603 put_page(page);
2604 if (!iov_iter_count(to))
2605 break;
2606 if (ret < nr) {
2607 error = -EFAULT;
2608 break;
2609 }
2610 cond_resched();
2611 }
2612
2613 *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
2614 file_accessed(file);
2615 return retval ? retval : error;
2616}
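/*
 * Illustrative userspace sketch: reading a hole through the loop
 * above copies from ZERO_PAGE(0) and leaves the file sparse, with
 * no tmpfs page allocated.  Assumes glibc with memfd_create().
 */
#if 0
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = memfd_create("sparse", 0);

	if (fd < 0 || ftruncate(fd, 1 << 20) < 0)
		return 1;
	if (pread(fd, buf, sizeof(buf), 4096) != (ssize_t)sizeof(buf))
		return 1;
	printf("hole reads back as %d\n", buf[0]);	/* prints 0 */
	return 0;
}
#endif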
2617
2618static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2619{
2620 struct address_space *mapping = file->f_mapping;
2621 struct inode *inode = mapping->host;
2622
2623 if (whence != SEEK_DATA && whence != SEEK_HOLE)
2624 return generic_file_llseek_size(file, offset, whence,
2625 MAX_LFS_FILESIZE, i_size_read(inode));
2626 if (offset < 0)
2627 return -ENXIO;
2628
2629 inode_lock(inode);
2630
2631 offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
2632 if (offset >= 0)
2633 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
2634 inode_unlock(inode);
2635 return offset;
2636}
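/*
 * Illustrative userspace sketch of SEEK_DATA/SEEK_HOLE, which the
 * function above services via mapping_seek_hole_data().  Assumes a
 * glibc exposing memfd_create() and the SEEK_* flags.
 */
#if 0
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = memfd_create("seek", 0);
	off_t data, hole;

	if (fd < 0 || ftruncate(fd, 1 << 20) < 0)
		return 1;
	if (pwrite(fd, "x", 1, 512 * 1024) != 1)	/* one page of data */
		return 1;
	data = lseek(fd, 0, SEEK_DATA);		/* start of the written page */
	hole = lseek(fd, data, SEEK_HOLE);	/* end of that data run */
	printf("data at %lld, hole at %lld\n",
	       (long long)data, (long long)hole);
	return 0;
}
#endif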
2637
2638static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2639 loff_t len)
2640{
2641 struct inode *inode = file_inode(file);
2642 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2643 struct shmem_inode_info *info = SHMEM_I(inode);
2644 struct shmem_falloc shmem_falloc;
2645 pgoff_t start, index, end, undo_fallocend;
2646 int error;
2647
2648 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2649 return -EOPNOTSUPP;
2650
2651 inode_lock(inode);
2652
2653 if (mode & FALLOC_FL_PUNCH_HOLE) {
2654 struct address_space *mapping = file->f_mapping;
2655 loff_t unmap_start = round_up(offset, PAGE_SIZE);
2656 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
2657 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
2658
/* protected by i_rwsem */
2660 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
2661 error = -EPERM;
2662 goto out;
2663 }
2664
2665 shmem_falloc.waitq = &shmem_falloc_waitq;
2666 shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
2667 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2668 spin_lock(&inode->i_lock);
2669 inode->i_private = &shmem_falloc;
2670 spin_unlock(&inode->i_lock);
2671
2672 if ((u64)unmap_end > (u64)unmap_start)
2673 unmap_mapping_range(mapping, unmap_start,
2674 1 + unmap_end - unmap_start, 0);
2675 shmem_truncate_range(inode, offset, offset + len - 1);
/* No need to unmap again: hole-punching leaves COWed pages */

2678 spin_lock(&inode->i_lock);
2679 inode->i_private = NULL;
2680 wake_up_all(&shmem_falloc_waitq);
2681 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
2682 spin_unlock(&inode->i_lock);
2683 error = 0;
2684 goto out;
2685 }

/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2688 error = inode_newsize_ok(inode, offset + len);
2689 if (error)
2690 goto out;
2691
2692 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2693 error = -EPERM;
2694 goto out;
2695 }
2696
2697 start = offset >> PAGE_SHIFT;
2698 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2699
2700 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2701 error = -ENOSPC;
2702 goto out;
2703 }
2704
2705 shmem_falloc.waitq = NULL;
2706 shmem_falloc.start = start;
2707 shmem_falloc.next = start;
2708 shmem_falloc.nr_falloced = 0;
2709 shmem_falloc.nr_unswapped = 0;
2710 spin_lock(&inode->i_lock);
2711 inode->i_private = &shmem_falloc;
2712 spin_unlock(&inode->i_lock);
2713
/*
 * info->fallocend is only relevant when huge pages might be
 * involved: to prevent split_huge_page() freeing fallocated
 * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size.
 */
2719 undo_fallocend = info->fallocend;
2720 if (info->fallocend < end)
2721 info->fallocend = end;
2722
2723 for (index = start; index < end; ) {
2724 struct page *page;

/*
 * Good, the fallocate(2) manpage permits EINTR: we may have
 * been interrupted when we started, so we may as well bail out.
 */
2730 if (signal_pending(current))
2731 error = -EINTR;
2732 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2733 error = -ENOMEM;
2734 else
2735 error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2736 if (error) {
2737 info->fallocend = undo_fallocend;
/* Remove the !PageUptodate pages we added */
2739 if (index > start) {
2740 shmem_undo_range(inode,
2741 (loff_t)start << PAGE_SHIFT,
2742 ((loff_t)index << PAGE_SHIFT) - 1, true);
2743 }
2744 goto undone;
2745 }
2746
2747 index++;
/*
 * Here is a more important optimization than it appears:
 * a second SGP_FALLOC on the same huge page will clear it,
 * making it PageUptodate and un-undoable if we fail later.
 */
2753 if (PageTransCompound(page)) {
2754 index = round_up(index, HPAGE_PMD_NR);
/* Beware 32-bit wraparound */
2756 if (!index)
2757 index--;
2758 }
2759
/*
 * Inform shmem_writepage() how far we have reached.
 * No need for lock or barrier: we have the page lock.
 */
2764 if (!PageUptodate(page))
2765 shmem_falloc.nr_falloced += index - shmem_falloc.next;
2766 shmem_falloc.next = index;
2767
/*
 * If !PageUptodate, leave it that way so that freeable pages
 * can be recognized if we need to rollback on error later.
 * But set_page_dirty so that memory pressure will swap rather
 * than free the pages we are allocating (and SGP_CACHE pages
 * might still be clean: we now need to mark those dirty too).
 */
2775 set_page_dirty(page);
2776 unlock_page(page);
2777 put_page(page);
2778 cond_resched();
2779 }
2780
2781 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2782 i_size_write(inode, offset + len);
2783 inode->i_ctime = current_time(inode);
2784undone:
2785 spin_lock(&inode->i_lock);
2786 inode->i_private = NULL;
2787 spin_unlock(&inode->i_lock);
2788out:
2789 inode_unlock(inode);
2790 return error;
2791}
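/*
 * Illustrative userspace sketch of both shmem_fallocate() modes:
 * preallocation with FALLOC_FL_KEEP_SIZE, then punching part of it
 * back out.  Assumes glibc with memfd_create() and fallocate().
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>		/* fallocate(), FALLOC_FL_* */
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = memfd_create("falloc", 0);

	if (fd < 0)
		return 1;
	/* Reserve 1MiB; pages stay !PageUptodate until first use. */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
		return 1;
	/* Punch the middle back out; it reads back as zeroes. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      256 * 1024, 512 * 1024) < 0)
		return 1;
	return 0;
}
#endif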
2792
2793static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2794{
2795 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2796
2797 buf->f_type = TMPFS_MAGIC;
2798 buf->f_bsize = PAGE_SIZE;
2799 buf->f_namelen = NAME_MAX;
2800 if (sbinfo->max_blocks) {
2801 buf->f_blocks = sbinfo->max_blocks;
2802 buf->f_bavail =
2803 buf->f_bfree = sbinfo->max_blocks -
2804 percpu_counter_sum(&sbinfo->used_blocks);
2805 }
2806 if (sbinfo->max_inodes) {
2807 buf->f_files = sbinfo->max_inodes;
2808 buf->f_ffree = sbinfo->free_inodes;
2809 }
/* else leave those fields 0 like simple_statfs */

2812 buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
2813
2814 return 0;
2815}
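/*
 * Illustrative userspace sketch: statfs(2) on a tmpfs mount reports
 * the values filled in above.  The /dev/shm path is an assumption
 * (a size-limited tmpfs on most distributions).
 */
#if 0
#include <stdio.h>
#include <sys/vfs.h>		/* statfs(2) */

int main(void)
{
	struct statfs st;

	if (statfs("/dev/shm", &st) < 0)
		return 1;
	printf("bsize=%ld blocks=%llu bfree=%llu files=%llu\n",
	       (long)st.f_bsize,
	       (unsigned long long)st.f_blocks,
	       (unsigned long long)st.f_bfree,
	       (unsigned long long)st.f_files);
	return 0;
}
#endif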
2816
/*
 * File creation. Allocate an inode, and we're done..
 */
2820static int
2821shmem_mknod(struct user_namespace *mnt_userns, struct inode *dir,
2822 struct dentry *dentry, umode_t mode, dev_t dev)
2823{
2824 struct inode *inode;
2825 int error = -ENOSPC;
2826
2827 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
2828 if (inode) {
2829 error = simple_acl_create(dir, inode);
2830 if (error)
2831 goto out_iput;
2832 error = security_inode_init_security(inode, dir,
2833 &dentry->d_name,
2834 shmem_initxattrs, NULL);
2835 if (error && error != -EOPNOTSUPP)
2836 goto out_iput;
2837
2838 error = 0;
2839 dir->i_size += BOGO_DIRENT_SIZE;
2840 dir->i_ctime = dir->i_mtime = current_time(dir);
2841 d_instantiate(dentry, inode);
2842 dget(dentry);
2843 }
2844 return error;
2845out_iput:
2846 iput(inode);
2847 return error;
2848}
2849
2850static int
2851shmem_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
2852 struct dentry *dentry, umode_t mode)
2853{
2854 struct inode *inode;
2855 int error = -ENOSPC;
2856
2857 inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
2858 if (inode) {
2859 error = security_inode_init_security(inode, dir,
2860 NULL,
2861 shmem_initxattrs, NULL);
2862 if (error && error != -EOPNOTSUPP)
2863 goto out_iput;
2864 error = simple_acl_create(dir, inode);
2865 if (error)
2866 goto out_iput;
2867 d_tmpfile(dentry, inode);
2868 }
2869 return error;
2870out_iput:
2871 iput(inode);
2872 return error;
2873}
2874
2875static int shmem_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
2876 struct dentry *dentry, umode_t mode)
2877{
2878 int error;
2879
2880 if ((error = shmem_mknod(&init_user_ns, dir, dentry,
2881 mode | S_IFDIR, 0)))
2882 return error;
2883 inc_nlink(dir);
2884 return 0;
2885}
2886
2887static int shmem_create(struct user_namespace *mnt_userns, struct inode *dir,
2888 struct dentry *dentry, umode_t mode, bool excl)
2889{
2890 return shmem_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
2891}
2892
/*
 * Link a file..
 */
2896static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2897{
2898 struct inode *inode = d_inode(old_dentry);
2899 int ret = 0;
2900
/*
 * No ordinary (disk based) filesystem counts links as inodes;
 * but each new link needs a new dentry, pinning lowmem, and
 * tmpfs dentries cannot be pruned until they are unlinked.
 * But if an O_TMPFILE file is linked into the tmpfs, the
 * first link must skip that, to get the accounting right.
 */
2908 if (inode->i_nlink) {
2909 ret = shmem_reserve_inode(inode->i_sb, NULL);
2910 if (ret)
2911 goto out;
2912 }
2913
2914 dir->i_size += BOGO_DIRENT_SIZE;
2915 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2916 inc_nlink(inode);
2917 ihold(inode);
2918 dget(dentry);
2919 d_instantiate(dentry, inode);
2920out:
2921 return ret;
2922}
2923
2924static int shmem_unlink(struct inode *dir, struct dentry *dentry)
2925{
2926 struct inode *inode = d_inode(dentry);
2927
2928 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
2929 shmem_free_inode(inode->i_sb);
2930
2931 dir->i_size -= BOGO_DIRENT_SIZE;
2932 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2933 drop_nlink(inode);
2934 dput(dentry);
2935 return 0;
2936}
2937
2938static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
2939{
2940 if (!simple_empty(dentry))
2941 return -ENOTEMPTY;
2942
2943 drop_nlink(d_inode(dentry));
2944 drop_nlink(dir);
2945 return shmem_unlink(dir, dentry);
2946}
2947
2948static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
2949{
2950 bool old_is_dir = d_is_dir(old_dentry);
2951 bool new_is_dir = d_is_dir(new_dentry);
2952
2953 if (old_dir != new_dir && old_is_dir != new_is_dir) {
2954 if (old_is_dir) {
2955 drop_nlink(old_dir);
2956 inc_nlink(new_dir);
2957 } else {
2958 drop_nlink(new_dir);
2959 inc_nlink(old_dir);
2960 }
2961 }
2962 old_dir->i_ctime = old_dir->i_mtime =
2963 new_dir->i_ctime = new_dir->i_mtime =
2964 d_inode(old_dentry)->i_ctime =
2965 d_inode(new_dentry)->i_ctime = current_time(old_dir);
2966
2967 return 0;
2968}
2969
2970static int shmem_whiteout(struct user_namespace *mnt_userns,
2971 struct inode *old_dir, struct dentry *old_dentry)
2972{
2973 struct dentry *whiteout;
2974 int error;
2975
2976 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
2977 if (!whiteout)
2978 return -ENOMEM;
2979
2980 error = shmem_mknod(&init_user_ns, old_dir, whiteout,
2981 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
2982 dput(whiteout);
2983 if (error)
2984 return error;
2985
/*
 * Cheat and hash the whiteout while the old dentry is still in
 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
 *
 * d_lookup() will consistently find one of them at this point,
 * not sure which one, but that isn't even important.
 */
2993 d_rehash(whiteout);
2994 return 0;
2995}
2996
/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly free's it when it
 * gets overwritten.
 */
3003static int shmem_rename2(struct user_namespace *mnt_userns,
3004 struct inode *old_dir, struct dentry *old_dentry,
3005 struct inode *new_dir, struct dentry *new_dentry,
3006 unsigned int flags)
3007{
3008 struct inode *inode = d_inode(old_dentry);
3009 int they_are_dirs = S_ISDIR(inode->i_mode);
3010
3011 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3012 return -EINVAL;
3013
3014 if (flags & RENAME_EXCHANGE)
3015 return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
3016
3017 if (!simple_empty(new_dentry))
3018 return -ENOTEMPTY;
3019
3020 if (flags & RENAME_WHITEOUT) {
3021 int error;
3022
3023 error = shmem_whiteout(&init_user_ns, old_dir, old_dentry);
3024 if (error)
3025 return error;
3026 }
3027
3028 if (d_really_is_positive(new_dentry)) {
3029 (void) shmem_unlink(new_dir, new_dentry);
3030 if (they_are_dirs) {
3031 drop_nlink(d_inode(new_dentry));
3032 drop_nlink(old_dir);
3033 }
3034 } else if (they_are_dirs) {
3035 drop_nlink(old_dir);
3036 inc_nlink(new_dir);
3037 }
3038
3039 old_dir->i_size -= BOGO_DIRENT_SIZE;
3040 new_dir->i_size += BOGO_DIRENT_SIZE;
3041 old_dir->i_ctime = old_dir->i_mtime =
3042 new_dir->i_ctime = new_dir->i_mtime =
3043 inode->i_ctime = current_time(old_dir);
3044 return 0;
3045}
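/*
 * Illustrative userspace sketch of the RENAME_EXCHANGE path handled
 * by shmem_exchange() above.  Assumes /dev/shm is a tmpfs and a
 * glibc (>= 2.28) that exposes renameat2() and the RENAME_* flags.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int a = open("/dev/shm/a", O_CREAT | O_WRONLY, 0600);
	int b = open("/dev/shm/b", O_CREAT | O_WRONLY, 0600);

	if (a < 0 || b < 0)
		return 1;
	close(a);
	close(b);
	/* Atomically swap the two names. */
	if (renameat2(AT_FDCWD, "/dev/shm/a", AT_FDCWD, "/dev/shm/b",
		      RENAME_EXCHANGE) < 0) {
		perror("renameat2");
		return 1;
	}
	return 0;
}
#endif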
3046
3047static int shmem_symlink(struct user_namespace *mnt_userns, struct inode *dir,
3048 struct dentry *dentry, const char *symname)
3049{
3050 int error;
3051 int len;
3052 struct inode *inode;
3053 struct page *page;
3054
3055 len = strlen(symname) + 1;
3056 if (len > PAGE_SIZE)
3057 return -ENAMETOOLONG;
3058
3059 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
3060 VM_NORESERVE);
3061 if (!inode)
3062 return -ENOSPC;
3063
3064 error = security_inode_init_security(inode, dir, &dentry->d_name,
3065 shmem_initxattrs, NULL);
3066 if (error && error != -EOPNOTSUPP) {
3067 iput(inode);
3068 return error;
3069 }
3070
3071 inode->i_size = len-1;
3072 if (len <= SHORT_SYMLINK_LEN) {
3073 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3074 if (!inode->i_link) {
3075 iput(inode);
3076 return -ENOMEM;
3077 }
3078 inode->i_op = &shmem_short_symlink_operations;
3079 } else {
3080 inode_nohighmem(inode);
3081 error = shmem_getpage(inode, 0, &page, SGP_WRITE);
3082 if (error) {
3083 iput(inode);
3084 return error;
3085 }
3086 inode->i_mapping->a_ops = &shmem_aops;
3087 inode->i_op = &shmem_symlink_inode_operations;
3088 memcpy(page_address(page), symname, len);
3089 SetPageUptodate(page);
3090 set_page_dirty(page);
3091 unlock_page(page);
3092 put_page(page);
3093 }
3094 dir->i_size += BOGO_DIRENT_SIZE;
3095 dir->i_ctime = dir->i_mtime = current_time(dir);
3096 d_instantiate(dentry, inode);
3097 dget(dentry);
3098 return 0;
3099}
3100
3101static void shmem_put_link(void *arg)
3102{
3103 mark_page_accessed(arg);
3104 put_page(arg);
3105}
3106
3107static const char *shmem_get_link(struct dentry *dentry,
3108 struct inode *inode,
3109 struct delayed_call *done)
3110{
3111 struct page *page = NULL;
3112 int error;
3113 if (!dentry) {
3114 page = find_get_page(inode->i_mapping, 0);
3115 if (!page)
3116 return ERR_PTR(-ECHILD);
3117 if (!PageUptodate(page)) {
3118 put_page(page);
3119 return ERR_PTR(-ECHILD);
3120 }
3121 } else {
3122 error = shmem_getpage(inode, 0, &page, SGP_READ);
3123 if (error)
3124 return ERR_PTR(error);
3125 unlock_page(page);
3126 }
3127 set_delayed_call(done, shmem_put_link, page);
3128 return page_address(page);
3129}
3130
3131#ifdef CONFIG_TMPFS_XATTR
/*
 * Superblocks without xattr inode operations may get some security.* xattr
 * support from the LSM "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

/*
 * Callback for security_inode_init_security() for acquiring xattrs.
 */
3142static int shmem_initxattrs(struct inode *inode,
3143 const struct xattr *xattr_array,
3144 void *fs_info)
3145{
3146 struct shmem_inode_info *info = SHMEM_I(inode);
3147 const struct xattr *xattr;
3148 struct simple_xattr *new_xattr;
3149 size_t len;
3150
3151 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3152 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3153 if (!new_xattr)
3154 return -ENOMEM;
3155
3156 len = strlen(xattr->name) + 1;
3157 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3158 GFP_KERNEL);
3159 if (!new_xattr->name) {
3160 kvfree(new_xattr);
3161 return -ENOMEM;
3162 }
3163
3164 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3165 XATTR_SECURITY_PREFIX_LEN);
3166 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3167 xattr->name, len);
3168
3169 simple_xattr_list_add(&info->xattrs, new_xattr);
3170 }
3171
3172 return 0;
3173}
3174
3175static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3176 struct dentry *unused, struct inode *inode,
3177 const char *name, void *buffer, size_t size)
3178{
3179 struct shmem_inode_info *info = SHMEM_I(inode);
3180
3181 name = xattr_full_name(handler, name);
3182 return simple_xattr_get(&info->xattrs, name, buffer, size);
3183}
3184
3185static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3186 struct user_namespace *mnt_userns,
3187 struct dentry *unused, struct inode *inode,
3188 const char *name, const void *value,
3189 size_t size, int flags)
3190{
3191 struct shmem_inode_info *info = SHMEM_I(inode);
3192
3193 name = xattr_full_name(handler, name);
3194 return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
3195}
3196
3197static const struct xattr_handler shmem_security_xattr_handler = {
3198 .prefix = XATTR_SECURITY_PREFIX,
3199 .get = shmem_xattr_handler_get,
3200 .set = shmem_xattr_handler_set,
3201};
3202
3203static const struct xattr_handler shmem_trusted_xattr_handler = {
3204 .prefix = XATTR_TRUSTED_PREFIX,
3205 .get = shmem_xattr_handler_get,
3206 .set = shmem_xattr_handler_set,
3207};
3208
3209static const struct xattr_handler *shmem_xattr_handlers[] = {
3210#ifdef CONFIG_TMPFS_POSIX_ACL
3211 &posix_acl_access_xattr_handler,
3212 &posix_acl_default_xattr_handler,
3213#endif
3214 &shmem_security_xattr_handler,
3215 &shmem_trusted_xattr_handler,
3216 NULL
3217};
3218
3219static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3220{
3221 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3222 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3223}
#endif /* CONFIG_TMPFS_XATTR */
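/*
 * Illustrative userspace sketch: only the trusted.* and security.*
 * handlers (plus POSIX ACLs) are registered above, so user.* xattrs
 * get EOPNOTSUPP on this tmpfs.  Assumes /dev/shm is a tmpfs;
 * trusted.* requires CAP_SYS_ADMIN.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/xattr.h>
#include <unistd.h>

int main(void)
{
	char list[256];
	ssize_t n;
	int fd = open("/dev/shm/x", O_CREAT | O_RDWR, 0600);

	if (fd < 0)
		return 1;
	/* Served by shmem_xattr_handler_set(). */
	if (fsetxattr(fd, "trusted.example", "v", 1, 0) < 0)
		perror("fsetxattr");
	n = flistxattr(fd, list, sizeof(list));	/* shmem_listxattr() */
	if (n > 0)
		fwrite(list, 1, n, stdout);	/* NUL-separated names */
	close(fd);
	return 0;
}
#endif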
3225
3226static const struct inode_operations shmem_short_symlink_operations = {
3227 .get_link = simple_get_link,
3228#ifdef CONFIG_TMPFS_XATTR
3229 .listxattr = shmem_listxattr,
3230#endif
3231};
3232
3233static const struct inode_operations shmem_symlink_inode_operations = {
3234 .get_link = shmem_get_link,
3235#ifdef CONFIG_TMPFS_XATTR
3236 .listxattr = shmem_listxattr,
3237#endif
3238};
3239
3240static struct dentry *shmem_get_parent(struct dentry *child)
3241{
3242 return ERR_PTR(-ESTALE);
3243}
3244
3245static int shmem_match(struct inode *ino, void *vfh)
3246{
3247 __u32 *fh = vfh;
3248 __u64 inum = fh[2];
3249 inum = (inum << 32) | fh[1];
3250 return ino->i_ino == inum && fh[0] == ino->i_generation;
3251}
3252
/* Find any alias of inode, but prefer a hashed alias */
3254static struct dentry *shmem_find_alias(struct inode *inode)
3255{
3256 struct dentry *alias = d_find_alias(inode);
3257
3258 return alias ?: d_find_any_alias(inode);
3259}
3260
3261
3262static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3263 struct fid *fid, int fh_len, int fh_type)
3264{
3265 struct inode *inode;
3266 struct dentry *dentry = NULL;
3267 u64 inum;
3268
3269 if (fh_len < 3)
3270 return NULL;
3271
3272 inum = fid->raw[2];
3273 inum = (inum << 32) | fid->raw[1];
3274
3275 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3276 shmem_match, fid->raw);
3277 if (inode) {
3278 dentry = shmem_find_alias(inode);
3279 iput(inode);
3280 }
3281
3282 return dentry;
3283}
3284
3285static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3286 struct inode *parent)
3287{
3288 if (*len < 3) {
3289 *len = 3;
3290 return FILEID_INVALID;
3291 }
3292
3293 if (inode_unhashed(inode)) {
/* Unfortunately insert_inode_hash is not idempotent,
 * so as we hash inodes here rather than at creation
 * time, we need a lock to ensure we only try
 * to do it once
 */
3299 static DEFINE_SPINLOCK(lock);
3300 spin_lock(&lock);
3301 if (inode_unhashed(inode))
3302 __insert_inode_hash(inode,
3303 inode->i_ino + inode->i_generation);
3304 spin_unlock(&lock);
3305 }
3306
3307 fh[0] = inode->i_generation;
3308 fh[1] = inode->i_ino;
3309 fh[2] = ((__u64)inode->i_ino) >> 32;
3310
3311 *len = 3;
3312 return 1;
3313}
3314
3315static const struct export_operations shmem_export_ops = {
3316 .get_parent = shmem_get_parent,
3317 .encode_fh = shmem_encode_fh,
3318 .fh_to_dentry = shmem_fh_to_dentry,
3319};
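/*
 * Illustrative userspace sketch of the export operations above:
 * name_to_handle_at() returns the fh[] that shmem_encode_fh()
 * packed, and open_by_handle_at() resolves it back through
 * shmem_fh_to_dentry().  Assumes /dev/shm is a tmpfs holding a
 * file "x", and that the caller has CAP_DAC_READ_SEARCH.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	struct file_handle *fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	int mount_id, dirfd, fd;

	if (!fh)
		return 1;
	fh->handle_bytes = MAX_HANDLE_SZ;
	if (name_to_handle_at(AT_FDCWD, "/dev/shm/x", fh, &mount_id, 0) < 0)
		return 1;
	dirfd = open("/dev/shm", O_DIRECTORY | O_RDONLY);
	fd = open_by_handle_at(dirfd, fh, O_RDONLY);
	return fd < 0;
}
#endif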
3320
3321enum shmem_param {
3322 Opt_gid,
3323 Opt_huge,
3324 Opt_mode,
3325 Opt_mpol,
3326 Opt_nr_blocks,
3327 Opt_nr_inodes,
3328 Opt_size,
3329 Opt_uid,
3330 Opt_inode32,
3331 Opt_inode64,
3332};
3333
3334static const struct constant_table shmem_param_enums_huge[] = {
3335 {"never", SHMEM_HUGE_NEVER },
3336 {"always", SHMEM_HUGE_ALWAYS },
3337 {"within_size", SHMEM_HUGE_WITHIN_SIZE },
3338 {"advise", SHMEM_HUGE_ADVISE },
3339 {}
3340};
3341
3342const struct fs_parameter_spec shmem_fs_parameters[] = {
3343 fsparam_u32 ("gid", Opt_gid),
3344 fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge),
3345 fsparam_u32oct("mode", Opt_mode),
3346 fsparam_string("mpol", Opt_mpol),
3347 fsparam_string("nr_blocks", Opt_nr_blocks),
3348 fsparam_string("nr_inodes", Opt_nr_inodes),
3349 fsparam_string("size", Opt_size),
3350 fsparam_u32 ("uid", Opt_uid),
3351 fsparam_flag ("inode32", Opt_inode32),
3352 fsparam_flag ("inode64", Opt_inode64),
3353 {}
3354};
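/*
 * Illustrative userspace sketch of feeding these parameters in via
 * the new mount API; every string below ends up in shmem_parse_one().
 * Raw syscall(2) invocations are used because the glibc wrappers for
 * fsopen()/fsconfig() are recent (2.36+); needs CAP_SYS_ADMIN.
 */
#if 0
#define _GNU_SOURCE
#include <linux/mount.h>	/* FSCONFIG_* commands */
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int fsfd = syscall(SYS_fsopen, "tmpfs", 0);

	if (fsfd < 0)
		return 1;
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "size", "50%", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "huge", "within_size", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_FLAG, "inode64", NULL, 0);
	/* FSCONFIG_CMD_CREATE runs shmem_fill_super(). */
	if (syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0) < 0)
		return 1;
	/* The resulting mount fd can be attached with move_mount(2). */
	return syscall(SYS_fsmount, fsfd, 0, 0) < 0;
}
#endif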
3355
3356static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3357{
3358 struct shmem_options *ctx = fc->fs_private;
3359 struct fs_parse_result result;
3360 unsigned long long size;
3361 char *rest;
3362 int opt;
3363
3364 opt = fs_parse(fc, shmem_fs_parameters, param, &result);
3365 if (opt < 0)
3366 return opt;
3367
3368 switch (opt) {
3369 case Opt_size:
3370 size = memparse(param->string, &rest);
3371 if (*rest == '%') {
3372 size <<= PAGE_SHIFT;
3373 size *= totalram_pages();
3374 do_div(size, 100);
3375 rest++;
3376 }
3377 if (*rest)
3378 goto bad_value;
3379 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3380 ctx->seen |= SHMEM_SEEN_BLOCKS;
3381 break;
3382 case Opt_nr_blocks:
3383 ctx->blocks = memparse(param->string, &rest);
3384 if (*rest)
3385 goto bad_value;
3386 ctx->seen |= SHMEM_SEEN_BLOCKS;
3387 break;
3388 case Opt_nr_inodes:
3389 ctx->inodes = memparse(param->string, &rest);
3390 if (*rest)
3391 goto bad_value;
3392 ctx->seen |= SHMEM_SEEN_INODES;
3393 break;
3394 case Opt_mode:
3395 ctx->mode = result.uint_32 & 07777;
3396 break;
3397 case Opt_uid:
3398 ctx->uid = make_kuid(current_user_ns(), result.uint_32);
3399 if (!uid_valid(ctx->uid))
3400 goto bad_value;
3401 break;
3402 case Opt_gid:
3403 ctx->gid = make_kgid(current_user_ns(), result.uint_32);
3404 if (!gid_valid(ctx->gid))
3405 goto bad_value;
3406 break;
3407 case Opt_huge:
3408 ctx->huge = result.uint_32;
3409 if (ctx->huge != SHMEM_HUGE_NEVER &&
3410 !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
3411 has_transparent_hugepage()))
3412 goto unsupported_parameter;
3413 ctx->seen |= SHMEM_SEEN_HUGE;
3414 break;
3415 case Opt_mpol:
3416 if (IS_ENABLED(CONFIG_NUMA)) {
3417 mpol_put(ctx->mpol);
3418 ctx->mpol = NULL;
3419 if (mpol_parse_str(param->string, &ctx->mpol))
3420 goto bad_value;
3421 break;
3422 }
3423 goto unsupported_parameter;
3424 case Opt_inode32:
3425 ctx->full_inums = false;
3426 ctx->seen |= SHMEM_SEEN_INUMS;
3427 break;
3428 case Opt_inode64:
3429 if (sizeof(ino_t) < 8) {
3430 return invalfc(fc,
3431 "Cannot use inode64 with <64bit inums in kernel\n");
3432 }
3433 ctx->full_inums = true;
3434 ctx->seen |= SHMEM_SEEN_INUMS;
3435 break;
3436 }
3437 return 0;
3438
3439unsupported_parameter:
3440 return invalfc(fc, "Unsupported parameter '%s'", param->key);
3441bad_value:
3442 return invalfc(fc, "Bad value for '%s'", param->key);
3443}
3444
3445static int shmem_parse_options(struct fs_context *fc, void *data)
3446{
3447 char *options = data;
3448
3449 if (options) {
3450 int err = security_sb_eat_lsm_opts(options, &fc->security);
3451 if (err)
3452 return err;
3453 }
3454
3455 while (options != NULL) {
3456 char *this_char = options;
3457 for (;;) {
/*
 * NUL-terminate this option: unfortunately,
 * mount options form a comma-separated list,
 * but mpol's nodelist may also contain commas.
 */
3463 options = strchr(options, ',');
3464 if (options == NULL)
3465 break;
3466 options++;
3467 if (!isdigit(*options)) {
3468 options[-1] = '\0';
3469 break;
3470 }
3471 }
3472 if (*this_char) {
3473 char *value = strchr(this_char, '=');
3474 size_t len = 0;
3475 int err;
3476
3477 if (value) {
3478 *value++ = '\0';
3479 len = strlen(value);
3480 }
3481 err = vfs_parse_fs_string(fc, this_char, value, len);
3482 if (err < 0)
3483 return err;
3484 }
3485 }
3486 return 0;
3487}
3488
/*
 * Reconfigure a shmem filesystem.
 *
 * Note that we disallow change from limited->unlimited blocks/inodes while any
 * are in use; but we must separately disallow unlimited->limited, because in
 * that case we have no record of how much is currently in use.
 */
3496static int shmem_reconfigure(struct fs_context *fc)
3497{
3498 struct shmem_options *ctx = fc->fs_private;
3499 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
3500 unsigned long inodes;
3501 struct mempolicy *mpol = NULL;
3502 const char *err;
3503
3504 raw_spin_lock(&sbinfo->stat_lock);
3505 inodes = sbinfo->max_inodes - sbinfo->free_inodes;
3506 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
3507 if (!sbinfo->max_blocks) {
3508 err = "Cannot retroactively limit size";
3509 goto out;
3510 }
3511 if (percpu_counter_compare(&sbinfo->used_blocks,
3512 ctx->blocks) > 0) {
3513 err = "Too small a size for current use";
3514 goto out;
3515 }
3516 }
3517 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
3518 if (!sbinfo->max_inodes) {
3519 err = "Cannot retroactively limit inodes";
3520 goto out;
3521 }
3522 if (ctx->inodes < inodes) {
3523 err = "Too few inodes for current use";
3524 goto out;
3525 }
3526 }
3527
3528 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
3529 sbinfo->next_ino > UINT_MAX) {
3530 err = "Current inum too high to switch to 32-bit inums";
3531 goto out;
3532 }
3533
3534 if (ctx->seen & SHMEM_SEEN_HUGE)
3535 sbinfo->huge = ctx->huge;
3536 if (ctx->seen & SHMEM_SEEN_INUMS)
3537 sbinfo->full_inums = ctx->full_inums;
3538 if (ctx->seen & SHMEM_SEEN_BLOCKS)
3539 sbinfo->max_blocks = ctx->blocks;
3540 if (ctx->seen & SHMEM_SEEN_INODES) {
3541 sbinfo->max_inodes = ctx->inodes;
3542 sbinfo->free_inodes = ctx->inodes - inodes;
3543 }
3544
/*
 * Preserve previous mempolicy unless mpol remount option was specified.
 */
3548 if (ctx->mpol) {
3549 mpol = sbinfo->mpol;
3550 sbinfo->mpol = ctx->mpol;
3551 ctx->mpol = NULL;
3552 }
3553 raw_spin_unlock(&sbinfo->stat_lock);
3554 mpol_put(mpol);
3555 return 0;
3556out:
3557 raw_spin_unlock(&sbinfo->stat_lock);
3558 return invalfc(fc, "%s", err);
3559}
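/*
 * Illustrative userspace sketch: a legacy remount feeds its option
 * string through shmem_parse_options() and then this function.
 * Assumes /dev/shm is a tmpfs and the caller has CAP_SYS_ADMIN;
 * shrinking below current usage fails as coded above.
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount(NULL, "/dev/shm", NULL, MS_REMOUNT,
		  "size=2g,nr_inodes=100000") < 0) {
		perror("remount");
		return 1;
	}
	return 0;
}
#endif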
3560
3561static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3562{
3563 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3564
3565 if (sbinfo->max_blocks != shmem_default_max_blocks())
3566 seq_printf(seq, ",size=%luk",
3567 sbinfo->max_blocks << (PAGE_SHIFT - 10));
3568 if (sbinfo->max_inodes != shmem_default_max_inodes())
3569 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
3570 if (sbinfo->mode != (0777 | S_ISVTX))
3571 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
3572 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
3573 seq_printf(seq, ",uid=%u",
3574 from_kuid_munged(&init_user_ns, sbinfo->uid));
3575 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
3576 seq_printf(seq, ",gid=%u",
3577 from_kgid_munged(&init_user_ns, sbinfo->gid));

/*
 * Showing inode{64,32} might be useful even if it's the system default,
 * since then people don't have to resort to checking both here and
 * /proc/config.gz to confirm 64-bit inums were successfully applied
 * (which may not even exist if IKCONFIG_PROC isn't enabled).
 *
 * We hide it when inode64 isn't the default and we are using 32-bit
 * inums, since that probably just means the feature isn't even under
 * consideration.
 *
 * As such:
 *
 *                     +-----------------+-----------------+
 *                     | TMPFS_INODE64=y | TMPFS_INODE64=n |
 *  +------------------+-----------------+-----------------+
 *  | full_inums=true  | show            | show            |
 *  | full_inums=false | show            | hide            |
 *  +------------------+-----------------+-----------------+
 */
3599 if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
3600 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
3601#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
3603 if (sbinfo->huge)
3604 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
3605#endif
3606 shmem_show_mpol(seq, sbinfo->mpol);
3607 return 0;
3608}
3609
#endif /* CONFIG_TMPFS */
3611
3612static void shmem_put_super(struct super_block *sb)
3613{
3614 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3615
3616 free_percpu(sbinfo->ino_batch);
3617 percpu_counter_destroy(&sbinfo->used_blocks);
3618 mpol_put(sbinfo->mpol);
3619 kfree(sbinfo);
3620 sb->s_fs_info = NULL;
3621}
3622
3623static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
3624{
3625 struct shmem_options *ctx = fc->fs_private;
3626 struct inode *inode;
3627 struct shmem_sb_info *sbinfo;
3628
/* Round up to L1_CACHE_BYTES to resist false sharing */
3630 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3631 L1_CACHE_BYTES), GFP_KERNEL);
3632 if (!sbinfo)
3633 return -ENOMEM;
3634
3635 sb->s_fs_info = sbinfo;
3636
3637#ifdef CONFIG_TMPFS
/*
 * Per default we only allow half of the physical ram per
 * tmpfs instance, limiting inodes to one per page of lowmem;
 * but the internal instance is left unlimited.
 */
3643 if (!(sb->s_flags & SB_KERNMOUNT)) {
3644 if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
3645 ctx->blocks = shmem_default_max_blocks();
3646 if (!(ctx->seen & SHMEM_SEEN_INODES))
3647 ctx->inodes = shmem_default_max_inodes();
3648 if (!(ctx->seen & SHMEM_SEEN_INUMS))
3649 ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
3650 } else {
3651 sb->s_flags |= SB_NOUSER;
3652 }
3653 sb->s_export_op = &shmem_export_ops;
3654 sb->s_flags |= SB_NOSEC;
3655#else
3656 sb->s_flags |= SB_NOUSER;
3657#endif
3658 sbinfo->max_blocks = ctx->blocks;
3659 sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
3660 if (sb->s_flags & SB_KERNMOUNT) {
3661 sbinfo->ino_batch = alloc_percpu(ino_t);
3662 if (!sbinfo->ino_batch)
3663 goto failed;
3664 }
3665 sbinfo->uid = ctx->uid;
3666 sbinfo->gid = ctx->gid;
3667 sbinfo->full_inums = ctx->full_inums;
3668 sbinfo->mode = ctx->mode;
3669 sbinfo->huge = ctx->huge;
3670 sbinfo->mpol = ctx->mpol;
3671 ctx->mpol = NULL;
3672
3673 raw_spin_lock_init(&sbinfo->stat_lock);
3674 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3675 goto failed;
3676 spin_lock_init(&sbinfo->shrinklist_lock);
3677 INIT_LIST_HEAD(&sbinfo->shrinklist);
3678
3679 sb->s_maxbytes = MAX_LFS_FILESIZE;
3680 sb->s_blocksize = PAGE_SIZE;
3681 sb->s_blocksize_bits = PAGE_SHIFT;
3682 sb->s_magic = TMPFS_MAGIC;
3683 sb->s_op = &shmem_ops;
3684 sb->s_time_gran = 1;
3685#ifdef CONFIG_TMPFS_XATTR
3686 sb->s_xattr = shmem_xattr_handlers;
3687#endif
3688#ifdef CONFIG_TMPFS_POSIX_ACL
3689 sb->s_flags |= SB_POSIXACL;
3690#endif
3691 uuid_gen(&sb->s_uuid);
3692
3693 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
3694 if (!inode)
3695 goto failed;
3696 inode->i_uid = sbinfo->uid;
3697 inode->i_gid = sbinfo->gid;
3698 sb->s_root = d_make_root(inode);
3699 if (!sb->s_root)
3700 goto failed;
3701 return 0;
3702
3703failed:
3704 shmem_put_super(sb);
3705 return -ENOMEM;
3706}
3707
3708static int shmem_get_tree(struct fs_context *fc)
3709{
3710 return get_tree_nodev(fc, shmem_fill_super);
3711}
3712
3713static void shmem_free_fc(struct fs_context *fc)
3714{
3715 struct shmem_options *ctx = fc->fs_private;
3716
3717 if (ctx) {
3718 mpol_put(ctx->mpol);
3719 kfree(ctx);
3720 }
3721}
3722
3723static const struct fs_context_operations shmem_fs_context_ops = {
3724 .free = shmem_free_fc,
3725 .get_tree = shmem_get_tree,
3726#ifdef CONFIG_TMPFS
3727 .parse_monolithic = shmem_parse_options,
3728 .parse_param = shmem_parse_one,
3729 .reconfigure = shmem_reconfigure,
3730#endif
3731};
3732
3733static struct kmem_cache *shmem_inode_cachep;
3734
3735static struct inode *shmem_alloc_inode(struct super_block *sb)
3736{
3737 struct shmem_inode_info *info;
3738 info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
3739 if (!info)
3740 return NULL;
3741 return &info->vfs_inode;
3742}
3743
3744static void shmem_free_in_core_inode(struct inode *inode)
3745{
3746 if (S_ISLNK(inode->i_mode))
3747 kfree(inode->i_link);
3748 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3749}
3750
3751static void shmem_destroy_inode(struct inode *inode)
3752{
3753 if (S_ISREG(inode->i_mode))
3754 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
3755}
3756
3757static void shmem_init_inode(void *foo)
3758{
3759 struct shmem_inode_info *info = foo;
3760 inode_init_once(&info->vfs_inode);
3761}
3762
3763static void shmem_init_inodecache(void)
3764{
3765 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
3766 sizeof(struct shmem_inode_info),
3767 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
3768}
3769
3770static void shmem_destroy_inodecache(void)
3771{
3772 kmem_cache_destroy(shmem_inode_cachep);
3773}
3774
3775const struct address_space_operations shmem_aops = {
3776 .writepage = shmem_writepage,
3777 .set_page_dirty = __set_page_dirty_no_writeback,
3778#ifdef CONFIG_TMPFS
3779 .write_begin = shmem_write_begin,
3780 .write_end = shmem_write_end,
3781#endif
3782#ifdef CONFIG_MIGRATION
3783 .migratepage = migrate_page,
3784#endif
3785 .error_remove_page = generic_error_remove_page,
3786};
3787EXPORT_SYMBOL(shmem_aops);
3788
3789static const struct file_operations shmem_file_operations = {
3790 .mmap = shmem_mmap,
3791 .get_unmapped_area = shmem_get_unmapped_area,
3792#ifdef CONFIG_TMPFS
3793 .llseek = shmem_file_llseek,
3794 .read_iter = shmem_file_read_iter,
3795 .write_iter = generic_file_write_iter,
3796 .fsync = noop_fsync,
3797 .splice_read = generic_file_splice_read,
3798 .splice_write = iter_file_splice_write,
3799 .fallocate = shmem_fallocate,
3800#endif
3801};
3802
3803static const struct inode_operations shmem_inode_operations = {
3804 .getattr = shmem_getattr,
3805 .setattr = shmem_setattr,
3806#ifdef CONFIG_TMPFS_XATTR
3807 .listxattr = shmem_listxattr,
3808 .set_acl = simple_set_acl,
3809#endif
3810};
3811
3812static const struct inode_operations shmem_dir_inode_operations = {
3813#ifdef CONFIG_TMPFS
3814 .create = shmem_create,
3815 .lookup = simple_lookup,
3816 .link = shmem_link,
3817 .unlink = shmem_unlink,
3818 .symlink = shmem_symlink,
3819 .mkdir = shmem_mkdir,
3820 .rmdir = shmem_rmdir,
3821 .mknod = shmem_mknod,
3822 .rename = shmem_rename2,
3823 .tmpfile = shmem_tmpfile,
3824#endif
3825#ifdef CONFIG_TMPFS_XATTR
3826 .listxattr = shmem_listxattr,
3827#endif
3828#ifdef CONFIG_TMPFS_POSIX_ACL
3829 .setattr = shmem_setattr,
3830 .set_acl = simple_set_acl,
3831#endif
3832};
3833
3834static const struct inode_operations shmem_special_inode_operations = {
3835#ifdef CONFIG_TMPFS_XATTR
3836 .listxattr = shmem_listxattr,
3837#endif
3838#ifdef CONFIG_TMPFS_POSIX_ACL
3839 .setattr = shmem_setattr,
3840 .set_acl = simple_set_acl,
3841#endif
3842};
3843
3844static const struct super_operations shmem_ops = {
3845 .alloc_inode = shmem_alloc_inode,
3846 .free_inode = shmem_free_in_core_inode,
3847 .destroy_inode = shmem_destroy_inode,
3848#ifdef CONFIG_TMPFS
3849 .statfs = shmem_statfs,
3850 .show_options = shmem_show_options,
3851#endif
3852 .evict_inode = shmem_evict_inode,
3853 .drop_inode = generic_delete_inode,
3854 .put_super = shmem_put_super,
3855#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3856 .nr_cached_objects = shmem_unused_huge_count,
3857 .free_cached_objects = shmem_unused_huge_scan,
3858#endif
3859};
3860
3861static const struct vm_operations_struct shmem_vm_ops = {
3862 .fault = shmem_fault,
3863 .map_pages = filemap_map_pages,
3864#ifdef CONFIG_NUMA
3865 .set_policy = shmem_set_policy,
3866 .get_policy = shmem_get_policy,
3867#endif
3868};
3869
3870int shmem_init_fs_context(struct fs_context *fc)
3871{
3872 struct shmem_options *ctx;
3873
3874 ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
3875 if (!ctx)
3876 return -ENOMEM;
3877
3878 ctx->mode = 0777 | S_ISVTX;
3879 ctx->uid = current_fsuid();
3880 ctx->gid = current_fsgid();
3881
3882 fc->fs_private = ctx;
3883 fc->ops = &shmem_fs_context_ops;
3884 return 0;
3885}
3886
3887static struct file_system_type shmem_fs_type = {
3888 .owner = THIS_MODULE,
3889 .name = "tmpfs",
3890 .init_fs_context = shmem_init_fs_context,
3891#ifdef CONFIG_TMPFS
3892 .parameters = shmem_fs_parameters,
3893#endif
3894 .kill_sb = kill_litter_super,
3895 .fs_flags = FS_USERNS_MOUNT | FS_THP_SUPPORT,
3896};
3897
3898int __init shmem_init(void)
3899{
3900 int error;
3901
3902 shmem_init_inodecache();
3903
3904 error = register_filesystem(&shmem_fs_type);
3905 if (error) {
3906 pr_err("Could not register tmpfs\n");
3907 goto out2;
3908 }
3909
3910 shm_mnt = kern_mount(&shmem_fs_type);
3911 if (IS_ERR(shm_mnt)) {
3912 error = PTR_ERR(shm_mnt);
3913 pr_err("Could not kern_mount tmpfs\n");
3914 goto out1;
3915 }
3916
3917#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3918 if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
3919 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3920 else
3921 shmem_huge = SHMEM_HUGE_NEVER;
3922#endif
3923 return 0;
3924
3925out1:
3926 unregister_filesystem(&shmem_fs_type);
3927out2:
3928 shmem_destroy_inodecache();
3929 shm_mnt = ERR_PTR(error);
3930 return error;
3931}
3932
3933#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
3934static ssize_t shmem_enabled_show(struct kobject *kobj,
3935 struct kobj_attribute *attr, char *buf)
3936{
3937 static const int values[] = {
3938 SHMEM_HUGE_ALWAYS,
3939 SHMEM_HUGE_WITHIN_SIZE,
3940 SHMEM_HUGE_ADVISE,
3941 SHMEM_HUGE_NEVER,
3942 SHMEM_HUGE_DENY,
3943 SHMEM_HUGE_FORCE,
3944 };
3945 int len = 0;
3946 int i;
3947
3948 for (i = 0; i < ARRAY_SIZE(values); i++) {
3949 len += sysfs_emit_at(buf, len,
3950 shmem_huge == values[i] ? "%s[%s]" : "%s%s",
3951 i ? " " : "",
3952 shmem_format_huge(values[i]));
3953 }
3954
3955 len += sysfs_emit_at(buf, len, "\n");
3956
3957 return len;
3958}
3959
3960static ssize_t shmem_enabled_store(struct kobject *kobj,
3961 struct kobj_attribute *attr, const char *buf, size_t count)
3962{
3963 char tmp[16];
3964 int huge;
3965
3966 if (count + 1 > sizeof(tmp))
3967 return -EINVAL;
3968 memcpy(tmp, buf, count);
3969 tmp[count] = '\0';
3970 if (count && tmp[count - 1] == '\n')
3971 tmp[count - 1] = '\0';
3972
3973 huge = shmem_parse_huge(tmp);
3974 if (huge == -EINVAL)
3975 return -EINVAL;
3976 if (!has_transparent_hugepage() &&
3977 huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
3978 return -EINVAL;
3979
3980 shmem_huge = huge;
3981 if (shmem_huge > SHMEM_HUGE_DENY)
3982 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3983 return count;
3984}
3985
3986struct kobj_attribute shmem_enabled_attr =
3987 __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
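/*
 * Illustrative userspace sketch: writing this sysfs file invokes
 * shmem_enabled_store() above, the same as
 * "echo advise > /sys/kernel/mm/transparent_hugepage/shmem_enabled".
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/shmem_enabled", "w");

	if (!f)
		return 1;
	fputs("advise\n", f);	/* parsed by shmem_parse_huge() */
	return fclose(f) ? 1 : 0;
}
#endif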
3989
3990#else
3991
/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small system where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

4001static struct file_system_type shmem_fs_type = {
4002 .name = "tmpfs",
4003 .init_fs_context = ramfs_init_fs_context,
4004 .parameters = ramfs_fs_parameters,
4005 .kill_sb = kill_litter_super,
4006 .fs_flags = FS_USERNS_MOUNT,
4007};
4008
4009int __init shmem_init(void)
4010{
4011 BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4012
4013 shm_mnt = kern_mount(&shmem_fs_type);
4014 BUG_ON(IS_ERR(shm_mnt));
4015
4016 return 0;
4017}
4018
4019int shmem_unuse(unsigned int type, bool frontswap,
4020 unsigned long *fs_pages_to_unuse)
4021{
4022 return 0;
4023}
4024
4025int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
4026{
4027 return 0;
4028}
4029
4030void shmem_unlock_mapping(struct address_space *mapping)
4031{
4032}
4033
4034#ifdef CONFIG_MMU
4035unsigned long shmem_get_unmapped_area(struct file *file,
4036 unsigned long addr, unsigned long len,
4037 unsigned long pgoff, unsigned long flags)
4038{
4039 return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4040}
4041#endif
4042
4043void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
4044{
4045 truncate_inode_pages_range(inode->i_mapping, lstart, lend);
4046}
4047EXPORT_SYMBOL_GPL(shmem_truncate_range);
4048
4049#define shmem_vm_ops generic_file_vm_ops
4050#define shmem_file_operations ramfs_file_operations
4051#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
4052#define shmem_acct_size(flags, size) 0
4053#define shmem_unacct_size(flags, size) do {} while (0)
4054
#endif /* CONFIG_SHMEM */

/* common code */

4059static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4060 unsigned long flags, unsigned int i_flags)
4061{
4062 struct inode *inode;
4063 struct file *res;
4064
4065 if (IS_ERR(mnt))
4066 return ERR_CAST(mnt);
4067
4068 if (size < 0 || size > MAX_LFS_FILESIZE)
4069 return ERR_PTR(-EINVAL);
4070
4071 if (shmem_acct_size(flags, size))
4072 return ERR_PTR(-ENOMEM);
4073
4074 inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
4075 flags);
4076 if (unlikely(!inode)) {
4077 shmem_unacct_size(flags, size);
4078 return ERR_PTR(-ENOSPC);
4079 }
4080 inode->i_flags |= i_flags;
4081 inode->i_size = size;
4082 clear_nlink(inode);
4083 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
4084 if (!IS_ERR(res))
4085 res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
4086 &shmem_file_operations);
4087 if (IS_ERR(res))
4088 iput(inode);
4089 return res;
4090}
4091
/**
 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
 *	kernel internal.  There will be NO LSM permission checks against the
 *	underlying inode.  So users of this interface must, by themselves, check
 *	permissions of inode contents.  As with shmem_file_setup(), the file is
 *	unlinked, and its pages can be swapped out like any other tmpfs pages.
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
4102struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4103{
4104 return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4105}
4106
/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
4113struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4114{
4115 return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4116}
4117EXPORT_SYMBOL_GPL(shmem_file_setup);
4118
/**
 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
 * @mnt: the tmpfs mount where the file will be created
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
4126struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4127 loff_t size, unsigned long flags)
4128{
4129 return __shmem_file_setup(mnt, name, size, flags, 0);
4130}
4131EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
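/*
 * Illustrative in-kernel sketch (not used anywhere in this file):
 * how a driver might create and fill a private tmpfs-backed buffer.
 * example_shmem_buffer() is a hypothetical helper name.
 */
#if 0
static int example_shmem_buffer(void)
{
	struct file *filp;
	loff_t pos = 0;
	ssize_t n;

	filp = shmem_file_setup("example-buf", 1UL << 20, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);
	n = kernel_write(filp, "hello", 5, &pos);	/* lands in page cache */
	fput(filp);		/* unlinked inode is freed on last reference */
	return n < 0 ? (int)n : 0;
}
#endif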
4132
/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap
 */
4137int shmem_zero_setup(struct vm_area_struct *vma)
4138{
4139 struct file *file;
4140 loff_t size = vma->vm_end - vma->vm_start;
4141
/*
 * Cloning a new file under mmap_lock leads to a lock ordering conflict
 * between XFS directory reading and selinux: since this file is only
 * accessible to the user through its mapping, use S_PRIVATE flag to
 * skip checking ACLs by the LSM.
 */
4148 file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
4149 if (IS_ERR(file))
4150 return PTR_ERR(file);
4151
4152 if (vma->vm_file)
4153 fput(vma->vm_file);
4154 vma->vm_file = file;
4155 vma->vm_ops = &shmem_vm_ops;
4156
4157 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
4158 ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
4159 (vma->vm_end & HPAGE_PMD_MASK)) {
4160 khugepaged_enter(vma, vma->vm_flags);
4161 }
4162
4163 return 0;
4164}
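/*
 * Illustrative userspace sketch: MAP_SHARED|MAP_ANONYMOUS mappings
 * are backed by the unlinked "dev/zero" shmem file set up above, so
 * stores are shared across fork().
 */
#if 0
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		      MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	if (fork() == 0) {
		*p = 42;
		_exit(0);
	}
	wait(NULL);
	printf("child wrote %d\n", *p);	/* prints 42 */
	return 0;
}
#endif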
4165
/**
 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->readpage() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
4181struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4182 pgoff_t index, gfp_t gfp)
4183{
4184#ifdef CONFIG_SHMEM
4185 struct inode *inode = mapping->host;
4186 struct page *page;
4187 int error;
4188
4189 BUG_ON(!shmem_mapping(mapping));
4190 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4191 gfp, NULL, NULL, NULL);
4192 if (error)
4193 page = ERR_PTR(error);
4194 else
4195 unlock_page(page);
4196 return page;
4197#else
/*
 * The tiny !SHMEM case uses ramfs without swap
 */
4201 return read_cache_page_gfp(mapping, index, gfp);
4202#endif
4203}
4204EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
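/*
 * Illustrative in-kernel sketch of the i915/ttm-style usage described
 * in the comment above.  example_pin_page() is a hypothetical helper.
 */
#if 0
static struct page *example_pin_page(struct file *filp, pgoff_t index)
{
	struct address_space *mapping = filp->f_mapping;
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;

	/* On success the page carries an extra reference: put_page()
	 * when done.  On failure an ERR_PTR() is returned. */
	return shmem_read_mapping_page_gfp(mapping, index, gfp);
}
#endif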
4205