linux/mm/shmem.c
   1/*
   2 * Resizable virtual memory filesystem for Linux.
   3 *
   4 * Copyright (C) 2000 Linus Torvalds.
   5 *               2000 Transmeta Corp.
   6 *               2000-2001 Christoph Rohland
   7 *               2000-2001 SAP AG
   8 *               2002 Red Hat Inc.
   9 * Copyright (C) 2002-2011 Hugh Dickins.
  10 * Copyright (C) 2011 Google Inc.
  11 * Copyright (C) 2002-2005 VERITAS Software Corporation.
  12 * Copyright (C) 2004 Andi Kleen, SuSE Labs
  13 *
  14 * Extended attribute support for tmpfs:
  15 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
  16 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
  17 *
  18 * tiny-shmem:
  19 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
  20 *
  21 * This file is released under the GPL.
  22 */
  23
  24#include <linux/fs.h>
  25#include <linux/init.h>
  26#include <linux/vfs.h>
  27#include <linux/mount.h>
  28#include <linux/ramfs.h>
  29#include <linux/pagemap.h>
  30#include <linux/file.h>
  31#include <linux/mm.h>
  32#include <linux/random.h>
  33#include <linux/sched/signal.h>
  34#include <linux/export.h>
  35#include <linux/swap.h>
  36#include <linux/uio.h>
  37#include <linux/khugepaged.h>
  38#include <linux/hugetlb.h>
  39#include <linux/frontswap.h>
  40
  41#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */
  42
  43static struct vfsmount *shm_mnt;
  44
  45#ifdef CONFIG_SHMEM
  46/*
  47 * This virtual memory filesystem is heavily based on the ramfs. It
   48 * extends ramfs with the ability to use swap and honor resource limits,
   49 * which makes it a completely usable filesystem.
  50 */
  51
  52#include <linux/xattr.h>
  53#include <linux/exportfs.h>
  54#include <linux/posix_acl.h>
  55#include <linux/posix_acl_xattr.h>
  56#include <linux/mman.h>
  57#include <linux/string.h>
  58#include <linux/slab.h>
  59#include <linux/backing-dev.h>
  60#include <linux/shmem_fs.h>
  61#include <linux/writeback.h>
  62#include <linux/blkdev.h>
  63#include <linux/pagevec.h>
  64#include <linux/percpu_counter.h>
  65#include <linux/falloc.h>
  66#include <linux/splice.h>
  67#include <linux/security.h>
  68#include <linux/swapops.h>
  69#include <linux/mempolicy.h>
  70#include <linux/namei.h>
  71#include <linux/ctype.h>
  72#include <linux/migrate.h>
  73#include <linux/highmem.h>
  74#include <linux/seq_file.h>
  75#include <linux/magic.h>
  76#include <linux/syscalls.h>
  77#include <linux/fcntl.h>
  78#include <uapi/linux/memfd.h>
  79#include <linux/userfaultfd_k.h>
  80#include <linux/rmap.h>
  81#include <linux/uuid.h>
  82
  83#include <linux/uaccess.h>
  84#include <asm/pgtable.h>
  85
  86#include "internal.h"
  87
  88#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
  89#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
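/*
 * For illustration, assuming the common 4 KiB PAGE_SIZE: BLOCKS_PER_PAGE
 * is 8 because inode->i_blocks counts 512-byte units, and
 * VM_ACCT(5000) = PAGE_ALIGN(5000) >> PAGE_SHIFT = 8192 >> 12 = 2,
 * so a 5000-byte object is charged as two pages against the overcommit
 * accounting.
 */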
  90
  91/* Pretend that each entry is of this size in directory's i_size */
  92#define BOGO_DIRENT_SIZE 20
  93
  94/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
  95#define SHORT_SYMLINK_LEN 128
  96
  97/*
  98 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
  99 * inode->i_private (with i_mutex making sure that it has only one user at
 100 * a time): we would prefer not to enlarge the shmem inode just for that.
 101 */
 102struct shmem_falloc {
 103        wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
 104        pgoff_t start;          /* start of range currently being fallocated */
 105        pgoff_t next;           /* the next page offset to be fallocated */
 106        pgoff_t nr_falloced;    /* how many new pages have been fallocated */
 107        pgoff_t nr_unswapped;   /* how often writepage refused to swap out */
 108};
 109
 110#ifdef CONFIG_TMPFS
 111static unsigned long shmem_default_max_blocks(void)
 112{
 113        return totalram_pages() / 2;
 114}
 115
 116static unsigned long shmem_default_max_inodes(void)
 117{
 118        unsigned long nr_pages = totalram_pages();
 119
 120        return min(nr_pages - totalhigh_pages(), nr_pages / 2);
 121}
 122#endif
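/*
 * Note: these are only the defaults used when no size= or nr_inodes=
 * mount option is given.  As a rough example, assuming 4 KiB pages and
 * about 1 GiB of usable RAM (totalram_pages() around 262144), the
 * default tmpfs size limit is about 512 MiB (half of RAM) and the
 * default inode limit is min(lowmem pages, half of RAM in pages);
 * exact figures vary with reserved memory and highmem configuration.
 */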
 123
 124static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
 125static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 126                                struct shmem_inode_info *info, pgoff_t index);
 127static int shmem_swapin_page(struct inode *inode, pgoff_t index,
 128                             struct page **pagep, enum sgp_type sgp,
 129                             gfp_t gfp, struct vm_area_struct *vma,
 130                             vm_fault_t *fault_type);
 131static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 132                struct page **pagep, enum sgp_type sgp,
 133                gfp_t gfp, struct vm_area_struct *vma,
 134                struct vm_fault *vmf, vm_fault_t *fault_type);
 135
 136int shmem_getpage(struct inode *inode, pgoff_t index,
 137                struct page **pagep, enum sgp_type sgp)
 138{
 139        return shmem_getpage_gfp(inode, index, pagep, sgp,
 140                mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
 141}
 142
 143static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
 144{
 145        return sb->s_fs_info;
 146}
 147
 148/*
 149 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 150 * for shared memory and for shared anonymous (/dev/zero) mappings
 151 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 152 * consistent with the pre-accounting of private mappings ...
 153 */
 154static inline int shmem_acct_size(unsigned long flags, loff_t size)
 155{
 156        return (flags & VM_NORESERVE) ?
 157                0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
 158}
 159
 160static inline void shmem_unacct_size(unsigned long flags, loff_t size)
 161{
 162        if (!(flags & VM_NORESERVE))
 163                vm_unacct_memory(VM_ACCT(size));
 164}
 165
 166static inline int shmem_reacct_size(unsigned long flags,
 167                loff_t oldsize, loff_t newsize)
 168{
 169        if (!(flags & VM_NORESERVE)) {
 170                if (VM_ACCT(newsize) > VM_ACCT(oldsize))
 171                        return security_vm_enough_memory_mm(current->mm,
 172                                        VM_ACCT(newsize) - VM_ACCT(oldsize));
 173                else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
 174                        vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
 175        }
 176        return 0;
 177}
 178
 179/*
 180 * ... whereas tmpfs objects are accounted incrementally as
 181 * pages are allocated, in order to allow large sparse files.
 182 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 183 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 184 */
 185static inline int shmem_acct_block(unsigned long flags, long pages)
 186{
 187        if (!(flags & VM_NORESERVE))
 188                return 0;
 189
 190        return security_vm_enough_memory_mm(current->mm,
 191                        pages * VM_ACCT(PAGE_SIZE));
 192}
 193
 194static inline void shmem_unacct_blocks(unsigned long flags, long pages)
 195{
 196        if (flags & VM_NORESERVE)
 197                vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
 198}
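/*
 * Note: the VM_NORESERVE test above is deliberately the inverse of
 * shmem_acct_size().  Objects whose whole size was charged up front
 * (SysV SHM, shared anonymous mappings) skip the per-block charge here,
 * whereas tmpfs files, which are created with VM_NORESERVE, are charged
 * page by page as blocks are actually allocated, which is what allows
 * large sparse files.
 */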
 199
 200static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
 201{
 202        struct shmem_inode_info *info = SHMEM_I(inode);
 203        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 204
 205        if (shmem_acct_block(info->flags, pages))
 206                return false;
 207
 208        if (sbinfo->max_blocks) {
 209                if (percpu_counter_compare(&sbinfo->used_blocks,
 210                                           sbinfo->max_blocks - pages) > 0)
 211                        goto unacct;
 212                percpu_counter_add(&sbinfo->used_blocks, pages);
 213        }
 214
 215        return true;
 216
 217unacct:
 218        shmem_unacct_blocks(info->flags, pages);
 219        return false;
 220}
 221
 222static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
 223{
 224        struct shmem_inode_info *info = SHMEM_I(inode);
 225        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 226
 227        if (sbinfo->max_blocks)
 228                percpu_counter_sub(&sbinfo->used_blocks, pages);
 229        shmem_unacct_blocks(info->flags, pages);
 230}
 231
 232static const struct super_operations shmem_ops;
 233static const struct address_space_operations shmem_aops;
 234static const struct file_operations shmem_file_operations;
 235static const struct inode_operations shmem_inode_operations;
 236static const struct inode_operations shmem_dir_inode_operations;
 237static const struct inode_operations shmem_special_inode_operations;
 238static const struct vm_operations_struct shmem_vm_ops;
 239static struct file_system_type shmem_fs_type;
 240
 241bool vma_is_shmem(struct vm_area_struct *vma)
 242{
 243        return vma->vm_ops == &shmem_vm_ops;
 244}
 245
 246static LIST_HEAD(shmem_swaplist);
 247static DEFINE_MUTEX(shmem_swaplist_mutex);
 248
 249static int shmem_reserve_inode(struct super_block *sb)
 250{
 251        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
 252        if (sbinfo->max_inodes) {
 253                spin_lock(&sbinfo->stat_lock);
 254                if (!sbinfo->free_inodes) {
 255                        spin_unlock(&sbinfo->stat_lock);
 256                        return -ENOSPC;
 257                }
 258                sbinfo->free_inodes--;
 259                spin_unlock(&sbinfo->stat_lock);
 260        }
 261        return 0;
 262}
 263
 264static void shmem_free_inode(struct super_block *sb)
 265{
 266        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
 267        if (sbinfo->max_inodes) {
 268                spin_lock(&sbinfo->stat_lock);
 269                sbinfo->free_inodes++;
 270                spin_unlock(&sbinfo->stat_lock);
 271        }
 272}
 273
 274/**
 275 * shmem_recalc_inode - recalculate the block usage of an inode
 276 * @inode: inode to recalc
 277 *
 278 * We have to calculate the free blocks since the mm can drop
 279 * undirtied hole pages behind our back.
 280 *
 281 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 282 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 283 *
 284 * It has to be called with the spinlock held.
 285 */
 286static void shmem_recalc_inode(struct inode *inode)
 287{
 288        struct shmem_inode_info *info = SHMEM_I(inode);
 289        long freed;
 290
 291        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
 292        if (freed > 0) {
 293                info->alloced -= freed;
 294                inode->i_blocks -= freed * BLOCKS_PER_PAGE;
 295                shmem_inode_unacct_blocks(inode, freed);
 296        }
 297}
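/*
 * A small worked example: if info->alloced is 10 while only 5 pages
 * remain in the page cache and 2 are in swap, then 3 clean hole pages
 * were reclaimed behind our back; freed = 10 - 2 - 5 = 3, and those 3
 * pages are handed back to the block accounting.
 */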
 298
 299bool shmem_charge(struct inode *inode, long pages)
 300{
 301        struct shmem_inode_info *info = SHMEM_I(inode);
 302        unsigned long flags;
 303
 304        if (!shmem_inode_acct_block(inode, pages))
 305                return false;
 306
 307        /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
 308        inode->i_mapping->nrpages += pages;
 309
 310        spin_lock_irqsave(&info->lock, flags);
 311        info->alloced += pages;
 312        inode->i_blocks += pages * BLOCKS_PER_PAGE;
 313        shmem_recalc_inode(inode);
 314        spin_unlock_irqrestore(&info->lock, flags);
 315
 316        return true;
 317}
 318
 319void shmem_uncharge(struct inode *inode, long pages)
 320{
 321        struct shmem_inode_info *info = SHMEM_I(inode);
 322        unsigned long flags;
 323
 324        /* nrpages adjustment done by __delete_from_page_cache() or caller */
 325
 326        spin_lock_irqsave(&info->lock, flags);
 327        info->alloced -= pages;
 328        inode->i_blocks -= pages * BLOCKS_PER_PAGE;
 329        shmem_recalc_inode(inode);
 330        spin_unlock_irqrestore(&info->lock, flags);
 331
 332        shmem_inode_unacct_blocks(inode, pages);
 333}
 334
 335/*
 336 * Replace item expected in xarray by a new item, while holding xa_lock.
 337 */
 338static int shmem_replace_entry(struct address_space *mapping,
 339                        pgoff_t index, void *expected, void *replacement)
 340{
 341        XA_STATE(xas, &mapping->i_pages, index);
 342        void *item;
 343
 344        VM_BUG_ON(!expected);
 345        VM_BUG_ON(!replacement);
 346        item = xas_load(&xas);
 347        if (item != expected)
 348                return -ENOENT;
 349        xas_store(&xas, replacement);
 350        return 0;
 351}
 352
 353/*
 354 * Sometimes, before we decide whether to proceed or to fail, we must check
 355 * that an entry was not already brought back from swap by a racing thread.
 356 *
 357 * Checking page is not enough: by the time a SwapCache page is locked, it
 358 * might be reused, and again be SwapCache, using the same swap as before.
 359 */
 360static bool shmem_confirm_swap(struct address_space *mapping,
 361                               pgoff_t index, swp_entry_t swap)
 362{
 363        return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
 364}
 365
 366/*
 367 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 368 *
 369 * SHMEM_HUGE_NEVER:
 370 *      disables huge pages for the mount;
 371 * SHMEM_HUGE_ALWAYS:
 372 *      enables huge pages for the mount;
 373 * SHMEM_HUGE_WITHIN_SIZE:
 374 *      only allocate huge pages if the page will be fully within i_size,
 375 *      also respect fadvise()/madvise() hints;
 376 * SHMEM_HUGE_ADVISE:
 377 *      only allocate huge pages if requested with fadvise()/madvise();
 378 */
 379
 380#define SHMEM_HUGE_NEVER        0
 381#define SHMEM_HUGE_ALWAYS       1
 382#define SHMEM_HUGE_WITHIN_SIZE  2
 383#define SHMEM_HUGE_ADVISE       3
 384
 385/*
 386 * Special values.
  387 * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 388 *
 389 * SHMEM_HUGE_DENY:
 390 *      disables huge on shm_mnt and all mounts, for emergency use;
 391 * SHMEM_HUGE_FORCE:
 392 *      enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 393 *
 394 */
 395#define SHMEM_HUGE_DENY         (-1)
 396#define SHMEM_HUGE_FORCE        (-2)
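/*
 * For illustration, these settings are normally reached from userspace
 * with something like:
 *
 *	mount -t tmpfs -o huge=within_size tmpfs /mnt
 *	echo force > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *
 * The mount option accepts never/always/within_size/advise; deny and
 * force are reserved for the sysfs knob, as the comment above says.
 */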
 397
 398#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
 399/* ifdef here to avoid bloating shmem.o when not necessary */
 400
 401static int shmem_huge __read_mostly;
 402
 403#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
 404static int shmem_parse_huge(const char *str)
 405{
 406        if (!strcmp(str, "never"))
 407                return SHMEM_HUGE_NEVER;
 408        if (!strcmp(str, "always"))
 409                return SHMEM_HUGE_ALWAYS;
 410        if (!strcmp(str, "within_size"))
 411                return SHMEM_HUGE_WITHIN_SIZE;
 412        if (!strcmp(str, "advise"))
 413                return SHMEM_HUGE_ADVISE;
 414        if (!strcmp(str, "deny"))
 415                return SHMEM_HUGE_DENY;
 416        if (!strcmp(str, "force"))
 417                return SHMEM_HUGE_FORCE;
 418        return -EINVAL;
 419}
 420
 421static const char *shmem_format_huge(int huge)
 422{
 423        switch (huge) {
 424        case SHMEM_HUGE_NEVER:
 425                return "never";
 426        case SHMEM_HUGE_ALWAYS:
 427                return "always";
 428        case SHMEM_HUGE_WITHIN_SIZE:
 429                return "within_size";
 430        case SHMEM_HUGE_ADVISE:
 431                return "advise";
 432        case SHMEM_HUGE_DENY:
 433                return "deny";
 434        case SHMEM_HUGE_FORCE:
 435                return "force";
 436        default:
 437                VM_BUG_ON(1);
 438                return "bad_val";
 439        }
 440}
 441#endif
 442
 443static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 444                struct shrink_control *sc, unsigned long nr_to_split)
 445{
 446        LIST_HEAD(list), *pos, *next;
 447        LIST_HEAD(to_remove);
 448        struct inode *inode;
 449        struct shmem_inode_info *info;
 450        struct page *page;
 451        unsigned long batch = sc ? sc->nr_to_scan : 128;
 452        int removed = 0, split = 0;
 453
 454        if (list_empty(&sbinfo->shrinklist))
 455                return SHRINK_STOP;
 456
 457        spin_lock(&sbinfo->shrinklist_lock);
 458        list_for_each_safe(pos, next, &sbinfo->shrinklist) {
 459                info = list_entry(pos, struct shmem_inode_info, shrinklist);
 460
 461                /* pin the inode */
 462                inode = igrab(&info->vfs_inode);
 463
 464                /* inode is about to be evicted */
 465                if (!inode) {
 466                        list_del_init(&info->shrinklist);
 467                        removed++;
 468                        goto next;
 469                }
 470
 471                /* Check if there's anything to gain */
 472                if (round_up(inode->i_size, PAGE_SIZE) ==
 473                                round_up(inode->i_size, HPAGE_PMD_SIZE)) {
 474                        list_move(&info->shrinklist, &to_remove);
 475                        removed++;
 476                        goto next;
 477                }
 478
 479                list_move(&info->shrinklist, &list);
 480next:
 481                if (!--batch)
 482                        break;
 483        }
 484        spin_unlock(&sbinfo->shrinklist_lock);
 485
 486        list_for_each_safe(pos, next, &to_remove) {
 487                info = list_entry(pos, struct shmem_inode_info, shrinklist);
 488                inode = &info->vfs_inode;
 489                list_del_init(&info->shrinklist);
 490                iput(inode);
 491        }
 492
 493        list_for_each_safe(pos, next, &list) {
 494                int ret;
 495
 496                info = list_entry(pos, struct shmem_inode_info, shrinklist);
 497                inode = &info->vfs_inode;
 498
 499                if (nr_to_split && split >= nr_to_split)
 500                        goto leave;
 501
 502                page = find_get_page(inode->i_mapping,
 503                                (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
 504                if (!page)
 505                        goto drop;
 506
 507                /* No huge page at the end of the file: nothing to split */
 508                if (!PageTransHuge(page)) {
 509                        put_page(page);
 510                        goto drop;
 511                }
 512
 513                /*
 514                 * Leave the inode on the list if we failed to lock
 515                 * the page at this time.
 516                 *
 517                 * Waiting for the lock may lead to deadlock in the
 518                 * reclaim path.
 519                 */
 520                if (!trylock_page(page)) {
 521                        put_page(page);
 522                        goto leave;
 523                }
 524
 525                ret = split_huge_page(page);
 526                unlock_page(page);
 527                put_page(page);
 528
 529                /* If split failed leave the inode on the list */
 530                if (ret)
 531                        goto leave;
 532
 533                split++;
 534drop:
 535                list_del_init(&info->shrinklist);
 536                removed++;
 537leave:
 538                iput(inode);
 539        }
 540
 541        spin_lock(&sbinfo->shrinklist_lock);
 542        list_splice_tail(&list, &sbinfo->shrinklist);
 543        sbinfo->shrinklist_len -= removed;
 544        spin_unlock(&sbinfo->shrinklist_lock);
 545
 546        return split;
 547}
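/*
 * Background: inodes are put on sbinfo->shrinklist when a huge page was
 * allocated but i_size leaves part of it beyond EOF (see shmem_setattr()
 * below).  Under memory pressure the function above splits such THPs so
 * the subpages past EOF can be freed; inodes whose trailing huge page
 * could not be locked or split are left on the list to be retried later.
 */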
 548
 549static long shmem_unused_huge_scan(struct super_block *sb,
 550                struct shrink_control *sc)
 551{
 552        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
 553
 554        if (!READ_ONCE(sbinfo->shrinklist_len))
 555                return SHRINK_STOP;
 556
 557        return shmem_unused_huge_shrink(sbinfo, sc, 0);
 558}
 559
 560static long shmem_unused_huge_count(struct super_block *sb,
 561                struct shrink_control *sc)
 562{
 563        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
 564        return READ_ONCE(sbinfo->shrinklist_len);
 565}
 566#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */
 567
 568#define shmem_huge SHMEM_HUGE_DENY
 569
 570static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 571                struct shrink_control *sc, unsigned long nr_to_split)
 572{
 573        return 0;
 574}
 575#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
 576
 577static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
 578{
 579        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
 580            (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
 581            shmem_huge != SHMEM_HUGE_DENY)
 582                return true;
 583        return false;
 584}
 585
 586/*
 587 * Like add_to_page_cache_locked, but error if expected item has gone.
 588 */
 589static int shmem_add_to_page_cache(struct page *page,
 590                                   struct address_space *mapping,
 591                                   pgoff_t index, void *expected, gfp_t gfp)
 592{
 593        XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
 594        unsigned long i = 0;
 595        unsigned long nr = 1UL << compound_order(page);
 596
 597        VM_BUG_ON_PAGE(PageTail(page), page);
 598        VM_BUG_ON_PAGE(index != round_down(index, nr), page);
 599        VM_BUG_ON_PAGE(!PageLocked(page), page);
 600        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 601        VM_BUG_ON(expected && PageTransHuge(page));
 602
 603        page_ref_add(page, nr);
 604        page->mapping = mapping;
 605        page->index = index;
 606
 607        do {
 608                void *entry;
 609                xas_lock_irq(&xas);
 610                entry = xas_find_conflict(&xas);
 611                if (entry != expected)
 612                        xas_set_err(&xas, -EEXIST);
 613                xas_create_range(&xas);
 614                if (xas_error(&xas))
 615                        goto unlock;
 616next:
 617                xas_store(&xas, page + i);
 618                if (++i < nr) {
 619                        xas_next(&xas);
 620                        goto next;
 621                }
 622                if (PageTransHuge(page)) {
 623                        count_vm_event(THP_FILE_ALLOC);
 624                        __inc_node_page_state(page, NR_SHMEM_THPS);
 625                }
 626                mapping->nrpages += nr;
 627                __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
 628                __mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
 629unlock:
 630                xas_unlock_irq(&xas);
 631        } while (xas_nomem(&xas, gfp));
 632
 633        if (xas_error(&xas)) {
 634                page->mapping = NULL;
 635                page_ref_sub(page, nr);
 636                return xas_error(&xas);
 637        }
 638
 639        return 0;
 640}
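/*
 * Note: for a THP the loop above fills all nr consecutive slots (512
 * with 4 KiB pages and 2 MiB huge pages) under one xa_lock_irq section.
 * If an xarray node allocation is needed, xas_nomem() allocates it with
 * the caller's gfp after the lock has been dropped and the whole attempt
 * is retried; on final failure the nr page references taken up front are
 * released again.
 */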
 641
 642/*
 643 * Like delete_from_page_cache, but substitutes swap for page.
 644 */
 645static void shmem_delete_from_page_cache(struct page *page, void *radswap)
 646{
 647        struct address_space *mapping = page->mapping;
 648        int error;
 649
 650        VM_BUG_ON_PAGE(PageCompound(page), page);
 651
 652        xa_lock_irq(&mapping->i_pages);
 653        error = shmem_replace_entry(mapping, page->index, page, radswap);
 654        page->mapping = NULL;
 655        mapping->nrpages--;
 656        __dec_node_page_state(page, NR_FILE_PAGES);
 657        __dec_node_page_state(page, NR_SHMEM);
 658        xa_unlock_irq(&mapping->i_pages);
 659        put_page(page);
 660        BUG_ON(error);
 661}
 662
 663/*
 664 * Remove swap entry from page cache, free the swap and its page cache.
 665 */
 666static int shmem_free_swap(struct address_space *mapping,
 667                           pgoff_t index, void *radswap)
 668{
 669        void *old;
 670
 671        old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
 672        if (old != radswap)
 673                return -ENOENT;
 674        free_swap_and_cache(radix_to_swp_entry(radswap));
 675        return 0;
 676}
 677
 678/*
 679 * Determine (in bytes) how many of the shmem object's pages mapped by the
 680 * given offsets are swapped out.
 681 *
 682 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 683 * as long as the inode doesn't go away and racy results are not a problem.
 684 */
 685unsigned long shmem_partial_swap_usage(struct address_space *mapping,
 686                                                pgoff_t start, pgoff_t end)
 687{
 688        XA_STATE(xas, &mapping->i_pages, start);
 689        struct page *page;
 690        unsigned long swapped = 0;
 691
 692        rcu_read_lock();
 693        xas_for_each(&xas, page, end - 1) {
 694                if (xas_retry(&xas, page))
 695                        continue;
 696                if (xa_is_value(page))
 697                        swapped++;
 698
 699                if (need_resched()) {
 700                        xas_pause(&xas);
 701                        cond_resched_rcu();
 702                }
 703        }
 704
 705        rcu_read_unlock();
 706
 707        return swapped << PAGE_SHIFT;
 708}
 709
 710/*
 711 * Determine (in bytes) how many of the shmem object's pages mapped by the
  712 * given vma are swapped out.
 713 *
 714 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 715 * as long as the inode doesn't go away and racy results are not a problem.
 716 */
 717unsigned long shmem_swap_usage(struct vm_area_struct *vma)
 718{
 719        struct inode *inode = file_inode(vma->vm_file);
 720        struct shmem_inode_info *info = SHMEM_I(inode);
 721        struct address_space *mapping = inode->i_mapping;
 722        unsigned long swapped;
 723
 724        /* Be careful as we don't hold info->lock */
 725        swapped = READ_ONCE(info->swapped);
 726
 727        /*
 728         * The easier cases are when the shmem object has nothing in swap, or
 729         * the vma maps it whole. Then we can simply use the stats that we
 730         * already track.
 731         */
 732        if (!swapped)
 733                return 0;
 734
 735        if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
 736                return swapped << PAGE_SHIFT;
 737
 738        /* Here comes the more involved part */
 739        return shmem_partial_swap_usage(mapping,
 740                        linear_page_index(vma, vma->vm_start),
 741                        linear_page_index(vma, vma->vm_end));
 742}
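/*
 * For illustration, assuming 4 KiB pages: a vma covering the whole of a
 * tmpfs file that has 16 pages in swap reports 16 << PAGE_SHIFT = 64 KiB
 * straight from info->swapped, while a vma mapping only part of the file
 * falls through to shmem_partial_swap_usage() and walks the xarray for
 * just its own range of offsets.
 */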
 743
 744/*
  746 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 746 */
 747void shmem_unlock_mapping(struct address_space *mapping)
 748{
 749        struct pagevec pvec;
 750        pgoff_t indices[PAGEVEC_SIZE];
 751        pgoff_t index = 0;
 752
 753        pagevec_init(&pvec);
 754        /*
 755         * Minor point, but we might as well stop if someone else SHM_LOCKs it.
 756         */
 757        while (!mapping_unevictable(mapping)) {
 758                /*
 759                 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
 760                 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
 761                 */
 762                pvec.nr = find_get_entries(mapping, index,
 763                                           PAGEVEC_SIZE, pvec.pages, indices);
 764                if (!pvec.nr)
 765                        break;
 766                index = indices[pvec.nr - 1] + 1;
 767                pagevec_remove_exceptionals(&pvec);
 768                check_move_unevictable_pages(&pvec);
 769                pagevec_release(&pvec);
 770                cond_resched();
 771        }
 772}
 773
 774/*
 775 * Remove range of pages and swap entries from page cache, and free them.
 776 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 777 */
 778static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 779                                                                 bool unfalloc)
 780{
 781        struct address_space *mapping = inode->i_mapping;
 782        struct shmem_inode_info *info = SHMEM_I(inode);
 783        pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
 784        pgoff_t end = (lend + 1) >> PAGE_SHIFT;
 785        unsigned int partial_start = lstart & (PAGE_SIZE - 1);
 786        unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
 787        struct pagevec pvec;
 788        pgoff_t indices[PAGEVEC_SIZE];
 789        long nr_swaps_freed = 0;
 790        pgoff_t index;
 791        int i;
 792
 793        if (lend == -1)
 794                end = -1;       /* unsigned, so actually very big */
 795
 796        pagevec_init(&pvec);
 797        index = start;
 798        while (index < end) {
 799                pvec.nr = find_get_entries(mapping, index,
 800                        min(end - index, (pgoff_t)PAGEVEC_SIZE),
 801                        pvec.pages, indices);
 802                if (!pvec.nr)
 803                        break;
 804                for (i = 0; i < pagevec_count(&pvec); i++) {
 805                        struct page *page = pvec.pages[i];
 806
 807                        index = indices[i];
 808                        if (index >= end)
 809                                break;
 810
 811                        if (xa_is_value(page)) {
 812                                if (unfalloc)
 813                                        continue;
 814                                nr_swaps_freed += !shmem_free_swap(mapping,
 815                                                                index, page);
 816                                continue;
 817                        }
 818
 819                        VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);
 820
 821                        if (!trylock_page(page))
 822                                continue;
 823
 824                        if (PageTransTail(page)) {
 825                                /* Middle of THP: zero out the page */
 826                                clear_highpage(page);
 827                                unlock_page(page);
 828                                continue;
 829                        } else if (PageTransHuge(page)) {
 830                                if (index == round_down(end, HPAGE_PMD_NR)) {
 831                                        /*
 832                                         * Range ends in the middle of THP:
 833                                         * zero out the page
 834                                         */
 835                                        clear_highpage(page);
 836                                        unlock_page(page);
 837                                        continue;
 838                                }
 839                                index += HPAGE_PMD_NR - 1;
 840                                i += HPAGE_PMD_NR - 1;
 841                        }
 842
 843                        if (!unfalloc || !PageUptodate(page)) {
 844                                VM_BUG_ON_PAGE(PageTail(page), page);
 845                                if (page_mapping(page) == mapping) {
 846                                        VM_BUG_ON_PAGE(PageWriteback(page), page);
 847                                        truncate_inode_page(mapping, page);
 848                                }
 849                        }
 850                        unlock_page(page);
 851                }
 852                pagevec_remove_exceptionals(&pvec);
 853                pagevec_release(&pvec);
 854                cond_resched();
 855                index++;
 856        }
 857
 858        if (partial_start) {
 859                struct page *page = NULL;
 860                shmem_getpage(inode, start - 1, &page, SGP_READ);
 861                if (page) {
 862                        unsigned int top = PAGE_SIZE;
 863                        if (start > end) {
 864                                top = partial_end;
 865                                partial_end = 0;
 866                        }
 867                        zero_user_segment(page, partial_start, top);
 868                        set_page_dirty(page);
 869                        unlock_page(page);
 870                        put_page(page);
 871                }
 872        }
 873        if (partial_end) {
 874                struct page *page = NULL;
 875                shmem_getpage(inode, end, &page, SGP_READ);
 876                if (page) {
 877                        zero_user_segment(page, 0, partial_end);
 878                        set_page_dirty(page);
 879                        unlock_page(page);
 880                        put_page(page);
 881                }
 882        }
 883        if (start >= end)
 884                return;
 885
 886        index = start;
 887        while (index < end) {
 888                cond_resched();
 889
 890                pvec.nr = find_get_entries(mapping, index,
 891                                min(end - index, (pgoff_t)PAGEVEC_SIZE),
 892                                pvec.pages, indices);
 893                if (!pvec.nr) {
 894                        /* If all gone or hole-punch or unfalloc, we're done */
 895                        if (index == start || end != -1)
 896                                break;
 897                        /* But if truncating, restart to make sure all gone */
 898                        index = start;
 899                        continue;
 900                }
 901                for (i = 0; i < pagevec_count(&pvec); i++) {
 902                        struct page *page = pvec.pages[i];
 903
 904                        index = indices[i];
 905                        if (index >= end)
 906                                break;
 907
 908                        if (xa_is_value(page)) {
 909                                if (unfalloc)
 910                                        continue;
 911                                if (shmem_free_swap(mapping, index, page)) {
 912                                        /* Swap was replaced by page: retry */
 913                                        index--;
 914                                        break;
 915                                }
 916                                nr_swaps_freed++;
 917                                continue;
 918                        }
 919
 920                        lock_page(page);
 921
 922                        if (PageTransTail(page)) {
 923                                /* Middle of THP: zero out the page */
 924                                clear_highpage(page);
 925                                unlock_page(page);
 926                                /*
  927                                 * Partial THP truncate due to 'start' in the
  928                                 * middle of the THP: no need to look at these
  929                                 * pages again on the !pvec.nr restart.
 930                                 */
 931                                if (index != round_down(end, HPAGE_PMD_NR))
 932                                        start++;
 933                                continue;
 934                        } else if (PageTransHuge(page)) {
 935                                if (index == round_down(end, HPAGE_PMD_NR)) {
 936                                        /*
 937                                         * Range ends in the middle of THP:
 938                                         * zero out the page
 939                                         */
 940                                        clear_highpage(page);
 941                                        unlock_page(page);
 942                                        continue;
 943                                }
 944                                index += HPAGE_PMD_NR - 1;
 945                                i += HPAGE_PMD_NR - 1;
 946                        }
 947
 948                        if (!unfalloc || !PageUptodate(page)) {
 949                                VM_BUG_ON_PAGE(PageTail(page), page);
 950                                if (page_mapping(page) == mapping) {
 951                                        VM_BUG_ON_PAGE(PageWriteback(page), page);
 952                                        truncate_inode_page(mapping, page);
 953                                } else {
 954                                        /* Page was replaced by swap: retry */
 955                                        unlock_page(page);
 956                                        index--;
 957                                        break;
 958                                }
 959                        }
 960                        unlock_page(page);
 961                }
 962                pagevec_remove_exceptionals(&pvec);
 963                pagevec_release(&pvec);
 964                index++;
 965        }
 966
 967        spin_lock_irq(&info->lock);
 968        info->swapped -= nr_swaps_freed;
 969        shmem_recalc_inode(inode);
 970        spin_unlock_irq(&info->lock);
 971}
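/*
 * Note on the shape of shmem_undo_range(): the first pass uses
 * trylock_page() and simply skips anything it cannot lock; after the
 * partial first and last pages have been zeroed, the second pass uses
 * lock_page() and, whenever a page or swap entry changed under it, steps
 * back (index--) and retries, so that a full truncate leaves nothing
 * behind.
 */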
 972
 973void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 974{
 975        shmem_undo_range(inode, lstart, lend, false);
 976        inode->i_ctime = inode->i_mtime = current_time(inode);
 977}
 978EXPORT_SYMBOL_GPL(shmem_truncate_range);
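/*
 * For illustration (assuming 4 KiB pages), a userspace hole punch such as
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  4096, 2 * 4096);
 *
 * reaches shmem_truncate_range() via shmem_fallocate() with
 * lstart = 4096 and lend = 3 * 4096 - 1, freeing pages 1 and 2 of the
 * file while leaving i_size untouched.
 */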
 979
 980static int shmem_getattr(const struct path *path, struct kstat *stat,
 981                         u32 request_mask, unsigned int query_flags)
 982{
 983        struct inode *inode = path->dentry->d_inode;
 984        struct shmem_inode_info *info = SHMEM_I(inode);
 985        struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);
 986
 987        if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
 988                spin_lock_irq(&info->lock);
 989                shmem_recalc_inode(inode);
 990                spin_unlock_irq(&info->lock);
 991        }
 992        generic_fillattr(inode, stat);
 993
 994        if (is_huge_enabled(sb_info))
 995                stat->blksize = HPAGE_PMD_SIZE;
 996
 997        return 0;
 998}
 999
1000static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
1001{
1002        struct inode *inode = d_inode(dentry);
1003        struct shmem_inode_info *info = SHMEM_I(inode);
1004        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1005        int error;
1006
1007        error = setattr_prepare(dentry, attr);
1008        if (error)
1009                return error;
1010
1011        if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
1012                loff_t oldsize = inode->i_size;
1013                loff_t newsize = attr->ia_size;
1014
1015                /* protected by i_mutex */
1016                if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
1017                    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
1018                        return -EPERM;
1019
1020                if (newsize != oldsize) {
1021                        error = shmem_reacct_size(SHMEM_I(inode)->flags,
1022                                        oldsize, newsize);
1023                        if (error)
1024                                return error;
1025                        i_size_write(inode, newsize);
1026                        inode->i_ctime = inode->i_mtime = current_time(inode);
1027                }
1028                if (newsize <= oldsize) {
1029                        loff_t holebegin = round_up(newsize, PAGE_SIZE);
1030                        if (oldsize > holebegin)
1031                                unmap_mapping_range(inode->i_mapping,
1032                                                        holebegin, 0, 1);
1033                        if (info->alloced)
1034                                shmem_truncate_range(inode,
1035                                                        newsize, (loff_t)-1);
1036                        /* unmap again to remove racily COWed private pages */
1037                        if (oldsize > holebegin)
1038                                unmap_mapping_range(inode->i_mapping,
1039                                                        holebegin, 0, 1);
1040
1041                        /*
1042                         * Part of the huge page can be beyond i_size: subject
1043                         * to shrink under memory pressure.
1044                         */
1045                        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
1046                                spin_lock(&sbinfo->shrinklist_lock);
1047                                /*
1048                                 * _careful to defend against unlocked access to
1049                                 * ->shrink_list in shmem_unused_huge_shrink()
1050                                 */
1051                                if (list_empty_careful(&info->shrinklist)) {
1052                                        list_add_tail(&info->shrinklist,
1053                                                        &sbinfo->shrinklist);
1054                                        sbinfo->shrinklist_len++;
1055                                }
1056                                spin_unlock(&sbinfo->shrinklist_lock);
1057                        }
1058                }
1059        }
1060
1061        setattr_copy(inode, attr);
1062        if (attr->ia_valid & ATTR_MODE)
1063                error = posix_acl_chmod(inode, inode->i_mode);
1064        return error;
1065}
1066
1067static void shmem_evict_inode(struct inode *inode)
1068{
1069        struct shmem_inode_info *info = SHMEM_I(inode);
1070        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1071
1072        if (inode->i_mapping->a_ops == &shmem_aops) {
1073                shmem_unacct_size(info->flags, inode->i_size);
1074                inode->i_size = 0;
1075                shmem_truncate_range(inode, 0, (loff_t)-1);
1076                if (!list_empty(&info->shrinklist)) {
1077                        spin_lock(&sbinfo->shrinklist_lock);
1078                        if (!list_empty(&info->shrinklist)) {
1079                                list_del_init(&info->shrinklist);
1080                                sbinfo->shrinklist_len--;
1081                        }
1082                        spin_unlock(&sbinfo->shrinklist_lock);
1083                }
1084                while (!list_empty(&info->swaplist)) {
1085                        /* Wait while shmem_unuse() is scanning this inode... */
1086                        wait_var_event(&info->stop_eviction,
1087                                       !atomic_read(&info->stop_eviction));
1088                        mutex_lock(&shmem_swaplist_mutex);
1089                        /* ...but beware of the race if we peeked too early */
1090                        if (!atomic_read(&info->stop_eviction))
1091                                list_del_init(&info->swaplist);
1092                        mutex_unlock(&shmem_swaplist_mutex);
1093                }
1094        }
1095
1096        simple_xattrs_free(&info->xattrs);
1097        WARN_ON(inode->i_blocks);
1098        shmem_free_inode(inode->i_sb);
1099        clear_inode(inode);
1100}
1101
1102extern struct swap_info_struct *swap_info[];
1103
1104static int shmem_find_swap_entries(struct address_space *mapping,
1105                                   pgoff_t start, unsigned int nr_entries,
1106                                   struct page **entries, pgoff_t *indices,
1107                                   unsigned int type, bool frontswap)
1108{
1109        XA_STATE(xas, &mapping->i_pages, start);
1110        struct page *page;
1111        swp_entry_t entry;
1112        unsigned int ret = 0;
1113
1114        if (!nr_entries)
1115                return 0;
1116
1117        rcu_read_lock();
1118        xas_for_each(&xas, page, ULONG_MAX) {
1119                if (xas_retry(&xas, page))
1120                        continue;
1121
1122                if (!xa_is_value(page))
1123                        continue;
1124
1125                entry = radix_to_swp_entry(page);
1126                if (swp_type(entry) != type)
1127                        continue;
1128                if (frontswap &&
1129                    !frontswap_test(swap_info[type], swp_offset(entry)))
1130                        continue;
1131
1132                indices[ret] = xas.xa_index;
1133                entries[ret] = page;
1134
1135                if (need_resched()) {
1136                        xas_pause(&xas);
1137                        cond_resched_rcu();
1138                }
1139                if (++ret == nr_entries)
1140                        break;
1141        }
1142        rcu_read_unlock();
1143
1144        return ret;
1145}
1146
1147/*
1148 * Move the swapped pages for an inode to page cache. Returns the count
1149 * of pages swapped in, or the error in case of failure.
1150 */
1151static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
1152                                    pgoff_t *indices)
1153{
1154        int i = 0;
1155        int ret = 0;
1156        int error = 0;
1157        struct address_space *mapping = inode->i_mapping;
1158
1159        for (i = 0; i < pvec.nr; i++) {
1160                struct page *page = pvec.pages[i];
1161
1162                if (!xa_is_value(page))
1163                        continue;
1164                error = shmem_swapin_page(inode, indices[i],
1165                                          &page, SGP_CACHE,
1166                                          mapping_gfp_mask(mapping),
1167                                          NULL, NULL);
1168                if (error == 0) {
1169                        unlock_page(page);
1170                        put_page(page);
1171                        ret++;
1172                }
1173                if (error == -ENOMEM)
1174                        break;
1175                error = 0;
1176        }
1177        return error ? error : ret;
1178}
1179
1180/*
1181 * If swap found in inode, free it and move page from swapcache to filecache.
1182 */
1183static int shmem_unuse_inode(struct inode *inode, unsigned int type,
1184                             bool frontswap, unsigned long *fs_pages_to_unuse)
1185{
1186        struct address_space *mapping = inode->i_mapping;
1187        pgoff_t start = 0;
1188        struct pagevec pvec;
1189        pgoff_t indices[PAGEVEC_SIZE];
1190        bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0);
1191        int ret = 0;
1192
1193        pagevec_init(&pvec);
1194        do {
1195                unsigned int nr_entries = PAGEVEC_SIZE;
1196
1197                if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE)
1198                        nr_entries = *fs_pages_to_unuse;
1199
1200                pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
1201                                                  pvec.pages, indices,
1202                                                  type, frontswap);
1203                if (pvec.nr == 0) {
1204                        ret = 0;
1205                        break;
1206                }
1207
1208                ret = shmem_unuse_swap_entries(inode, pvec, indices);
1209                if (ret < 0)
1210                        break;
1211
1212                if (frontswap_partial) {
1213                        *fs_pages_to_unuse -= ret;
1214                        if (*fs_pages_to_unuse == 0) {
1215                                ret = FRONTSWAP_PAGES_UNUSED;
1216                                break;
1217                        }
1218                }
1219
1220                start = indices[pvec.nr - 1];
1221        } while (true);
1222
1223        return ret;
1224}
1225
1226/*
1227 * Read all the shared memory data that resides in the swap
1228 * device 'type' back into memory, so the swap device can be
1229 * unused.
1230 */
1231int shmem_unuse(unsigned int type, bool frontswap,
1232                unsigned long *fs_pages_to_unuse)
1233{
1234        struct shmem_inode_info *info, *next;
1235        int error = 0;
1236
1237        if (list_empty(&shmem_swaplist))
1238                return 0;
1239
1240        mutex_lock(&shmem_swaplist_mutex);
1241        list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1242                if (!info->swapped) {
1243                        list_del_init(&info->swaplist);
1244                        continue;
1245                }
1246                /*
1247                 * Drop the swaplist mutex while searching the inode for swap;
1248                 * but before doing so, make sure shmem_evict_inode() will not
1249                 * remove placeholder inode from swaplist, nor let it be freed
1250                 * (igrab() would protect from unlink, but not from unmount).
1251                 */
1252                atomic_inc(&info->stop_eviction);
1253                mutex_unlock(&shmem_swaplist_mutex);
1254
1255                error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
1256                                          fs_pages_to_unuse);
1257                cond_resched();
1258
1259                mutex_lock(&shmem_swaplist_mutex);
1260                next = list_next_entry(info, swaplist);
1261                if (!info->swapped)
1262                        list_del_init(&info->swaplist);
1263                if (atomic_dec_and_test(&info->stop_eviction))
1264                        wake_up_var(&info->stop_eviction);
1265                if (error)
1266                        break;
1267        }
1268        mutex_unlock(&shmem_swaplist_mutex);
1269
1270        return error;
1271}
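/*
 * Background: shmem_unuse() is the tmpfs half of swapoff, called from
 * try_to_unuse() in mm/swapfile.c to bring every shmem page still on
 * swap device 'type' back into the page cache.  The stop_eviction count
 * raised above is what shmem_evict_inode() waits on, so an inode cannot
 * be freed while it is being scanned here.
 */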
1272
1273/*
1274 * Move the page from the page cache to the swap cache.
1275 */
1276static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1277{
1278        struct shmem_inode_info *info;
1279        struct address_space *mapping;
1280        struct inode *inode;
1281        swp_entry_t swap;
1282        pgoff_t index;
1283
1284        VM_BUG_ON_PAGE(PageCompound(page), page);
1285        BUG_ON(!PageLocked(page));
1286        mapping = page->mapping;
1287        index = page->index;
1288        inode = mapping->host;
1289        info = SHMEM_I(inode);
1290        if (info->flags & VM_LOCKED)
1291                goto redirty;
1292        if (!total_swap_pages)
1293                goto redirty;
1294
1295        /*
1296         * Our capabilities prevent regular writeback or sync from ever calling
1297         * shmem_writepage; but a stacking filesystem might use ->writepage of
1298         * its underlying filesystem, in which case tmpfs should write out to
1299         * swap only in response to memory pressure, and not for the writeback
1300         * threads or sync.
1301         */
1302        if (!wbc->for_reclaim) {
1303                WARN_ON_ONCE(1);        /* Still happens? Tell us about it! */
1304                goto redirty;
1305        }
1306
1307        /*
1308         * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
1309         * value into swapfile.c, the only way we can correctly account for a
1310         * fallocated page arriving here is now to initialize it and write it.
1311         *
1312         * That's okay for a page already fallocated earlier, but if we have
1313         * not yet completed the fallocation, then (a) we want to keep track
1314         * of this page in case we have to undo it, and (b) it may not be a
1315         * good idea to continue anyway, once we're pushing into swap.  So
1316         * reactivate the page, and let shmem_fallocate() quit when too many.
1317         */
1318        if (!PageUptodate(page)) {
1319                if (inode->i_private) {
1320                        struct shmem_falloc *shmem_falloc;
1321                        spin_lock(&inode->i_lock);
1322                        shmem_falloc = inode->i_private;
1323                        if (shmem_falloc &&
1324                            !shmem_falloc->waitq &&
1325                            index >= shmem_falloc->start &&
1326                            index < shmem_falloc->next)
1327                                shmem_falloc->nr_unswapped++;
1328                        else
1329                                shmem_falloc = NULL;
1330                        spin_unlock(&inode->i_lock);
1331                        if (shmem_falloc)
1332                                goto redirty;
1333                }
1334                clear_highpage(page);
1335                flush_dcache_page(page);
1336                SetPageUptodate(page);
1337        }
1338
1339        swap = get_swap_page(page);
1340        if (!swap.val)
1341                goto redirty;
1342
1343        /*
1344         * Add inode to shmem_unuse()'s list of swapped-out inodes,
1345         * if it's not already there.  Do it now before the page is
1346         * moved to swap cache, when its pagelock no longer protects
1347         * the inode from eviction.  But don't unlock the mutex until
1348         * we've incremented swapped, because shmem_unuse_inode() will
1349         * prune a !swapped inode from the swaplist under this mutex.
1350         */
1351        mutex_lock(&shmem_swaplist_mutex);
1352        if (list_empty(&info->swaplist))
1353                list_add(&info->swaplist, &shmem_swaplist);
1354
1355        if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
1356                spin_lock_irq(&info->lock);
1357                shmem_recalc_inode(inode);
1358                info->swapped++;
1359                spin_unlock_irq(&info->lock);
1360
1361                swap_shmem_alloc(swap);
1362                shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
1363
1364                mutex_unlock(&shmem_swaplist_mutex);
1365                BUG_ON(page_mapped(page));
1366                swap_writepage(page, wbc);
1367                return 0;
1368        }
1369
1370        mutex_unlock(&shmem_swaplist_mutex);
1371        put_swap_page(page, swap);
1372redirty:
1373        set_page_dirty(page);
1374        if (wbc->for_reclaim)
1375                return AOP_WRITEPAGE_ACTIVATE;  /* Return with page locked */
1376        unlock_page(page);
1377        return 0;
1378}
1379
1380#if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
1381static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1382{
1383        char buffer[64];
1384
1385        if (!mpol || mpol->mode == MPOL_DEFAULT)
1386                return;         /* show nothing */
1387
1388        mpol_to_str(buffer, sizeof(buffer), mpol);
1389
1390        seq_printf(seq, ",mpol=%s", buffer);
1391}
1392
1393static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1394{
1395        struct mempolicy *mpol = NULL;
1396        if (sbinfo->mpol) {
1397                spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
1398                mpol = sbinfo->mpol;
1399                mpol_get(mpol);
1400                spin_unlock(&sbinfo->stat_lock);
1401        }
1402        return mpol;
1403}
1404#else /* !CONFIG_NUMA || !CONFIG_TMPFS */
1405static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1406{
1407}
1408static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1409{
1410        return NULL;
1411}
1412#endif /* CONFIG_NUMA && CONFIG_TMPFS */
1413#ifndef CONFIG_NUMA
1414#define vm_policy vm_private_data
1415#endif
1416
1417static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1418                struct shmem_inode_info *info, pgoff_t index)
1419{
1420        /* Create a pseudo vma that just contains the policy */
1421        vma_init(vma, NULL);
1422        /* Bias interleave by inode number to distribute better across nodes */
1423        vma->vm_pgoff = index + info->vfs_inode.i_ino;
1424        vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1425}
1426
1427static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1428{
1429        /* Drop reference taken by mpol_shared_policy_lookup() */
1430        mpol_cond_put(vma->vm_policy);
1431}
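/*
 * Note: the pseudo-vma is a throwaway on-stack vm_area_struct whose only
 * meaningful fields are vm_pgoff and vm_policy.  It lets the generic
 * allocators (alloc_page_vma(), swap_cluster_readahead()) apply the
 * file's shared NUMA mempolicy even though no real user mapping is
 * involved in this allocation.
 */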
1432
1433static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
1434                        struct shmem_inode_info *info, pgoff_t index)
1435{
1436        struct vm_area_struct pvma;
1437        struct page *page;
1438        struct vm_fault vmf;
1439
1440        shmem_pseudo_vma_init(&pvma, info, index);
1441        vmf.vma = &pvma;
1442        vmf.address = 0;
1443        page = swap_cluster_readahead(swap, gfp, &vmf);
1444        shmem_pseudo_vma_destroy(&pvma);
1445
1446        return page;
1447}
1448
1449static struct page *shmem_alloc_hugepage(gfp_t gfp,
1450                struct shmem_inode_info *info, pgoff_t index)
1451{
1452        struct vm_area_struct pvma;
1453        struct address_space *mapping = info->vfs_inode.i_mapping;
1454        pgoff_t hindex;
1455        struct page *page;
1456
1457        if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1458                return NULL;
1459
1460        hindex = round_down(index, HPAGE_PMD_NR);
1461        if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
1462                                                                XA_PRESENT))
1463                return NULL;
1464
1465        shmem_pseudo_vma_init(&pvma, info, hindex);
1466        page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
1467                        HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
1468        shmem_pseudo_vma_destroy(&pvma);
1469        if (page)
1470                prep_transhuge_page(page);
1471        return page;
1472}
1473
1474static struct page *shmem_alloc_page(gfp_t gfp,
1475                        struct shmem_inode_info *info, pgoff_t index)
1476{
1477        struct vm_area_struct pvma;
1478        struct page *page;
1479
1480        shmem_pseudo_vma_init(&pvma, info, index);
1481        page = alloc_page_vma(gfp, &pvma, 0);
1482        shmem_pseudo_vma_destroy(&pvma);
1483
1484        return page;
1485}
1486
1487static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1488                struct inode *inode,
1489                pgoff_t index, bool huge)
1490{
1491        struct shmem_inode_info *info = SHMEM_I(inode);
1492        struct page *page;
1493        int nr;
1494        int err = -ENOSPC;
1495
1496        if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1497                huge = false;
1498        nr = huge ? HPAGE_PMD_NR : 1;
1499
1500        if (!shmem_inode_acct_block(inode, nr))
1501                goto failed;
1502
1503        if (huge)
1504                page = shmem_alloc_hugepage(gfp, info, index);
1505        else
1506                page = shmem_alloc_page(gfp, info, index);
1507        if (page) {
1508                __SetPageLocked(page);
1509                __SetPageSwapBacked(page);
1510                return page;
1511        }
1512
1513        err = -ENOMEM;
1514        shmem_inode_unacct_blocks(inode, nr);
1515failed:
1516        return ERR_PTR(err);
1517}
1518
1519/*
1520 * When a page is moved from swapcache to shmem filecache (either by the
1521 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1522 * shmem_unuse_inode()), it may have been read in earlier from swap, in
1523 * ignorance of the mapping it belongs to.  If that mapping has special
1524 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1525 * we may need to copy to a suitable page before moving to filecache.
1526 *
1527 * In a future release, this may well be extended to respect cpuset and
1528 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1529 * but for now it is a simple matter of zone.
1530 */
1531static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1532{
1533        return page_zonenum(page) > gfp_zone(gfp);
1534}
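/*
 * A concrete example of the check above, as a hedged illustration: a driver
 * such as gma500 restricts its shmem mapping's gfp mask to memory below 4GB
 * (so gfp_zone(gfp) is ZONE_DMA32), but swap readahead may have placed the
 * page in ZONE_NORMAL; then page_zonenum(page) > gfp_zone(gfp), and
 * shmem_replace_page() below copies it into a page that does satisfy the
 * mapping's constraint before it goes into the filecache.
 */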
1535
1536static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1537                                struct shmem_inode_info *info, pgoff_t index)
1538{
1539        struct page *oldpage, *newpage;
1540        struct address_space *swap_mapping;
1541        swp_entry_t entry;
1542        pgoff_t swap_index;
1543        int error;
1544
1545        oldpage = *pagep;
1546        entry.val = page_private(oldpage);
1547        swap_index = swp_offset(entry);
1548        swap_mapping = page_mapping(oldpage);
1549
1550        /*
1551         * We have arrived here because our zones are constrained, so don't
1552         * limit chance of success by further cpuset and node constraints.
1553         */
1554        gfp &= ~GFP_CONSTRAINT_MASK;
1555        newpage = shmem_alloc_page(gfp, info, index);
1556        if (!newpage)
1557                return -ENOMEM;
1558
1559        get_page(newpage);
1560        copy_highpage(newpage, oldpage);
1561        flush_dcache_page(newpage);
1562
1563        __SetPageLocked(newpage);
1564        __SetPageSwapBacked(newpage);
1565        SetPageUptodate(newpage);
1566        set_page_private(newpage, entry.val);
1567        SetPageSwapCache(newpage);
1568
1569        /*
1570         * Our caller will very soon move newpage out of swapcache, but it's
1571         * a nice clean interface for us to replace oldpage by newpage there.
1572         */
1573        xa_lock_irq(&swap_mapping->i_pages);
1574        error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
1575        if (!error) {
1576                __inc_node_page_state(newpage, NR_FILE_PAGES);
1577                __dec_node_page_state(oldpage, NR_FILE_PAGES);
1578        }
1579        xa_unlock_irq(&swap_mapping->i_pages);
1580
1581        if (unlikely(error)) {
1582                /*
1583                 * Is this possible?  I think not, now that our callers check
1584                 * both PageSwapCache and page_private after getting page lock;
1585                 * but be defensive.  Reverse old to newpage for clear and free.
1586                 */
1587                oldpage = newpage;
1588        } else {
1589                mem_cgroup_migrate(oldpage, newpage);
1590                lru_cache_add_anon(newpage);
1591                *pagep = newpage;
1592        }
1593
1594        ClearPageSwapCache(oldpage);
1595        set_page_private(oldpage, 0);
1596
1597        unlock_page(oldpage);
1598        put_page(oldpage);
1599        put_page(oldpage);
1600        return error;
1601}
1602
1603/*
1604 * Swap in the page pointed to by *pagep.
1605 * Caller has to make sure that *pagep contains a valid swapped page.
1606 * Returns 0 and the page in *pagep if successful. On failure, returns the
1607 * error code and NULL in *pagep.
1608 */
1609static int shmem_swapin_page(struct inode *inode, pgoff_t index,
1610                             struct page **pagep, enum sgp_type sgp,
1611                             gfp_t gfp, struct vm_area_struct *vma,
1612                             vm_fault_t *fault_type)
1613{
1614        struct address_space *mapping = inode->i_mapping;
1615        struct shmem_inode_info *info = SHMEM_I(inode);
1616        struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
1617        struct mem_cgroup *memcg;
1618        struct page *page;
1619        swp_entry_t swap;
1620        int error;
1621
1622        VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
1623        swap = radix_to_swp_entry(*pagep);
1624        *pagep = NULL;
1625
1626        /* Look it up and read it in.. */
1627        page = lookup_swap_cache(swap, NULL, 0);
1628        if (!page) {
1629                /* Or update major stats only when swapin succeeds?? */
1630                if (fault_type) {
1631                        *fault_type |= VM_FAULT_MAJOR;
1632                        count_vm_event(PGMAJFAULT);
1633                        count_memcg_event_mm(charge_mm, PGMAJFAULT);
1634                }
1635                /* Here we actually start the io */
1636                page = shmem_swapin(swap, gfp, info, index);
1637                if (!page) {
1638                        error = -ENOMEM;
1639                        goto failed;
1640                }
1641        }
1642
1643        /* We have to do this with page locked to prevent races */
1644        lock_page(page);
1645        if (!PageSwapCache(page) || page_private(page) != swap.val ||
1646            !shmem_confirm_swap(mapping, index, swap)) {
1647                error = -EEXIST;
1648                goto unlock;
1649        }
1650        if (!PageUptodate(page)) {
1651                error = -EIO;
1652                goto failed;
1653        }
1654        wait_on_page_writeback(page);
1655
1656        if (shmem_should_replace_page(page, gfp)) {
1657                error = shmem_replace_page(&page, gfp, info, index);
1658                if (error)
1659                        goto failed;
1660        }
1661
1662        error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
1663                                            false);
1664        if (!error) {
1665                error = shmem_add_to_page_cache(page, mapping, index,
1666                                                swp_to_radix_entry(swap), gfp);
1667                /*
1668                 * We already confirmed swap under page lock, and make
1669                 * no memory allocation here, so usually no possibility
1670                 * of error; but free_swap_and_cache() only trylocks a
1671                 * page, so it is just possible that the entry has been
1672                 * truncated or holepunched since swap was confirmed.
1673                 * shmem_undo_range() will have done some of the
1674                 * unaccounting, now delete_from_swap_cache() will do
1675                 * the rest.
1676                 */
1677                if (error) {
1678                        mem_cgroup_cancel_charge(page, memcg, false);
1679                        delete_from_swap_cache(page);
1680                }
1681        }
1682        if (error)
1683                goto failed;
1684
1685        mem_cgroup_commit_charge(page, memcg, true, false);
1686
1687        spin_lock_irq(&info->lock);
1688        info->swapped--;
1689        shmem_recalc_inode(inode);
1690        spin_unlock_irq(&info->lock);
1691
1692        if (sgp == SGP_WRITE)
1693                mark_page_accessed(page);
1694
1695        delete_from_swap_cache(page);
1696        set_page_dirty(page);
1697        swap_free(swap);
1698
1699        *pagep = page;
1700        return 0;
1701failed:
1702        if (!shmem_confirm_swap(mapping, index, swap))
1703                error = -EEXIST;
1704unlock:
1705        if (page) {
1706                unlock_page(page);
1707                put_page(page);
1708        }
1709
1710        return error;
1711}
1712
1713/*
1714 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1715 *
1716 * If we allocate a new one we do not mark it dirty. That's up to the
1717 * vm. If we swap it in we mark it dirty, since we also free the swap
1718 * entry: a page cannot live in both swap and the page cache.
1719 *
1720 * fault_mm and fault_type are only supplied by shmem_fault:
1721 * otherwise they are NULL.
1722 */
1723static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1724        struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1725        struct vm_area_struct *vma, struct vm_fault *vmf,
1726                        vm_fault_t *fault_type)
1727{
1728        struct address_space *mapping = inode->i_mapping;
1729        struct shmem_inode_info *info = SHMEM_I(inode);
1730        struct shmem_sb_info *sbinfo;
1731        struct mm_struct *charge_mm;
1732        struct mem_cgroup *memcg;
1733        struct page *page;
1734        enum sgp_type sgp_huge = sgp;
1735        pgoff_t hindex = index;
1736        int error;
1737        int once = 0;
1738        int alloced = 0;
1739
1740        if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1741                return -EFBIG;
1742        if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
1743                sgp = SGP_CACHE;
1744repeat:
1745        if (sgp <= SGP_CACHE &&
1746            ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1747                return -EINVAL;
1748        }
1749
1750        sbinfo = SHMEM_SB(inode->i_sb);
1751        charge_mm = vma ? vma->vm_mm : current->mm;
1752
1753        page = find_lock_entry(mapping, index);
1754        if (xa_is_value(page)) {
1755                error = shmem_swapin_page(inode, index, &page,
1756                                          sgp, gfp, vma, fault_type);
1757                if (error == -EEXIST)
1758                        goto repeat;
1759
1760                *pagep = page;
1761                return error;
1762        }
1763
1764        if (page && sgp == SGP_WRITE)
1765                mark_page_accessed(page);
1766
1767        /* fallocated page? */
1768        if (page && !PageUptodate(page)) {
1769                if (sgp != SGP_READ)
1770                        goto clear;
1771                unlock_page(page);
1772                put_page(page);
1773                page = NULL;
1774        }
1775        if (page || sgp == SGP_READ) {
1776                *pagep = page;
1777                return 0;
1778        }
1779
1780        /*
1781         * Fast cache lookup did not find it:
1782         * bring it back from swap or allocate.
1783         */
1784
1785        if (vma && userfaultfd_missing(vma)) {
1786                *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1787                return 0;
1788        }
1789
1790        /* shmem_symlink() */
1791        if (mapping->a_ops != &shmem_aops)
1792                goto alloc_nohuge;
1793        if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
1794                goto alloc_nohuge;
1795        if (shmem_huge == SHMEM_HUGE_FORCE)
1796                goto alloc_huge;
1797        switch (sbinfo->huge) {
1798                loff_t i_size;
1799                pgoff_t off;
1800        case SHMEM_HUGE_NEVER:
1801                goto alloc_nohuge;
1802        case SHMEM_HUGE_WITHIN_SIZE:
1803                off = round_up(index, HPAGE_PMD_NR);
1804                i_size = round_up(i_size_read(inode), PAGE_SIZE);
1805                if (i_size >= HPAGE_PMD_SIZE &&
1806                    i_size >> PAGE_SHIFT >= off)
1807                        goto alloc_huge;
1808                /* fallthrough */
1809        case SHMEM_HUGE_ADVISE:
1810                if (sgp_huge == SGP_HUGE)
1811                        goto alloc_huge;
1812                /* TODO: implement fadvise() hints */
1813                goto alloc_nohuge;
1814        }
1815
1816alloc_huge:
1817        page = shmem_alloc_and_acct_page(gfp, inode, index, true);
1818        if (IS_ERR(page)) {
1819alloc_nohuge:
1820                page = shmem_alloc_and_acct_page(gfp, inode,
1821                                                 index, false);
1822        }
1823        if (IS_ERR(page)) {
1824                int retry = 5;
1825
1826                error = PTR_ERR(page);
1827                page = NULL;
1828                if (error != -ENOSPC)
1829                        goto unlock;
1830                /*
1831                 * Try to reclaim some space by splitting a huge page
1832                 * beyond i_size on the filesystem.
1833                 */
1834                while (retry--) {
1835                        int ret;
1836
1837                        ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1838                        if (ret == SHRINK_STOP)
1839                                break;
1840                        if (ret)
1841                                goto alloc_nohuge;
1842                }
1843                goto unlock;
1844        }
1845
1846        if (PageTransHuge(page))
1847                hindex = round_down(index, HPAGE_PMD_NR);
1848        else
1849                hindex = index;
1850
1851        if (sgp == SGP_WRITE)
1852                __SetPageReferenced(page);
1853
1854        error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
1855                                            PageTransHuge(page));
1856        if (error)
1857                goto unacct;
1858        error = shmem_add_to_page_cache(page, mapping, hindex,
1859                                        NULL, gfp & GFP_RECLAIM_MASK);
1860        if (error) {
1861                mem_cgroup_cancel_charge(page, memcg,
1862                                         PageTransHuge(page));
1863                goto unacct;
1864        }
1865        mem_cgroup_commit_charge(page, memcg, false,
1866                                 PageTransHuge(page));
1867        lru_cache_add_anon(page);
1868
1869        spin_lock_irq(&info->lock);
1870        info->alloced += 1 << compound_order(page);
1871        inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
1872        shmem_recalc_inode(inode);
1873        spin_unlock_irq(&info->lock);
1874        alloced = true;
1875
1876        if (PageTransHuge(page) &&
1877            DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1878                        hindex + HPAGE_PMD_NR - 1) {
1879                /*
1880                 * Part of the huge page is beyond i_size: subject
1881                 * to shrink under memory pressure.
1882                 */
1883                spin_lock(&sbinfo->shrinklist_lock);
1884                /*
1885                 * _careful to defend against unlocked access to
1886                  * ->shrinklist in shmem_unused_huge_shrink()
1887                 */
1888                if (list_empty_careful(&info->shrinklist)) {
1889                        list_add_tail(&info->shrinklist,
1890                                      &sbinfo->shrinklist);
1891                        sbinfo->shrinklist_len++;
1892                }
1893                spin_unlock(&sbinfo->shrinklist_lock);
1894        }
1895
1896        /*
1897         * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
1898         */
1899        if (sgp == SGP_FALLOC)
1900                sgp = SGP_WRITE;
1901clear:
1902        /*
1903         * Let SGP_WRITE caller clear ends if write does not fill page;
1904         * but SGP_FALLOC on a page fallocated earlier must initialize
1905         * it now, lest undo on failure cancel our earlier guarantee.
1906         */
1907        if (sgp != SGP_WRITE && !PageUptodate(page)) {
1908                struct page *head = compound_head(page);
1909                int i;
1910
1911                for (i = 0; i < (1 << compound_order(head)); i++) {
1912                        clear_highpage(head + i);
1913                        flush_dcache_page(head + i);
1914                }
1915                SetPageUptodate(head);
1916        }
1917
1918        /* Perhaps the file has been truncated since we checked */
1919        if (sgp <= SGP_CACHE &&
1920            ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1921                if (alloced) {
1922                        ClearPageDirty(page);
1923                        delete_from_page_cache(page);
1924                        spin_lock_irq(&info->lock);
1925                        shmem_recalc_inode(inode);
1926                        spin_unlock_irq(&info->lock);
1927                }
1928                error = -EINVAL;
1929                goto unlock;
1930        }
1931        *pagep = page + index - hindex;
1932        return 0;
1933
1934        /*
1935         * Error recovery.
1936         */
1937unacct:
1938        shmem_inode_unacct_blocks(inode, 1 << compound_order(page));
1939
1940        if (PageTransHuge(page)) {
1941                unlock_page(page);
1942                put_page(page);
1943                goto alloc_nohuge;
1944        }
1945unlock:
1946        if (page) {
1947                unlock_page(page);
1948                put_page(page);
1949        }
1950        if (error == -ENOSPC && !once++) {
1951                spin_lock_irq(&info->lock);
1952                shmem_recalc_inode(inode);
1953                spin_unlock_irq(&info->lock);
1954                goto repeat;
1955        }
1956        if (error == -EEXIST)
1957                goto repeat;
1958        return error;
1959}
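/*
 * A hedged sketch of the caller-side contract (kept out of the build):
 * shmem_getpage(), the wrapper used by shmem_write_begin() and others below,
 * hands back the page locked and with a reference held, so the caller unlocks
 * and releases it.  example_read_one_page() is a hypothetical helper, shown
 * only for illustration.
 */
#if 0
static int example_read_one_page(struct inode *inode, pgoff_t index)
{
	struct page *page;
	int err;

	/* SGP_CACHE allocates or swaps the page in as needed. */
	err = shmem_getpage(inode, index, &page, SGP_CACHE);
	if (err)
		return err;
	/* ... read or modify the uptodate, locked page here ... */
	set_page_dirty(page);
	unlock_page(page);
	put_page(page);
	return 0;
}
#endif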
1960
1961/*
1962 * This is like autoremove_wake_function, but it removes the wait queue
1963 * entry unconditionally - even if something else had already woken the
1964 * target.
1965 */
1966static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
1967{
1968        int ret = default_wake_function(wait, mode, sync, key);
1969        list_del_init(&wait->entry);
1970        return ret;
1971}
1972
1973static vm_fault_t shmem_fault(struct vm_fault *vmf)
1974{
1975        struct vm_area_struct *vma = vmf->vma;
1976        struct inode *inode = file_inode(vma->vm_file);
1977        gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
1978        enum sgp_type sgp;
1979        int err;
1980        vm_fault_t ret = VM_FAULT_LOCKED;
1981
1982        /*
1983         * Trinity finds that probing a hole which tmpfs is punching can
1984         * prevent the hole-punch from ever completing: which in turn
1985         * locks writers out with its hold on i_mutex.  So refrain from
1986         * faulting pages into the hole while it's being punched.  Although
1987         * shmem_undo_range() does remove the additions, it may be unable to
1988         * keep up, as each new page needs its own unmap_mapping_range() call,
1989         * and the i_mmap tree grows ever slower to scan if new vmas are added.
1990         *
1991         * It does not matter if we sometimes reach this check just before the
1992         * hole-punch begins, so that one fault then races with the punch:
1993         * we just need to make racing faults a rare case.
1994         *
1995         * The implementation below would be much simpler if we just used a
1996         * standard mutex or completion: but we cannot take i_mutex in fault,
1997         * and bloating every shmem inode for this unlikely case would be sad.
1998         */
1999        if (unlikely(inode->i_private)) {
2000                struct shmem_falloc *shmem_falloc;
2001
2002                spin_lock(&inode->i_lock);
2003                shmem_falloc = inode->i_private;
2004                if (shmem_falloc &&
2005                    shmem_falloc->waitq &&
2006                    vmf->pgoff >= shmem_falloc->start &&
2007                    vmf->pgoff < shmem_falloc->next) {
2008                        wait_queue_head_t *shmem_falloc_waitq;
2009                        DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2010
2011                        ret = VM_FAULT_NOPAGE;
2012                        if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
2013                           !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
2014                                /* It's polite to up mmap_sem if we can */
2015                                up_read(&vma->vm_mm->mmap_sem);
2016                                ret = VM_FAULT_RETRY;
2017                        }
2018
2019                        shmem_falloc_waitq = shmem_falloc->waitq;
2020                        prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2021                                        TASK_UNINTERRUPTIBLE);
2022                        spin_unlock(&inode->i_lock);
2023                        schedule();
2024
2025                        /*
2026                         * shmem_falloc_waitq points into the shmem_fallocate()
2027                         * stack of the hole-punching task: shmem_falloc_waitq
2028                         * is usually invalid by the time we reach here, but
2029                         * finish_wait() does not dereference it in that case;
2030                         * though i_lock is still needed lest we race with wake_up_all().
2031                         */
2032                        spin_lock(&inode->i_lock);
2033                        finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2034                        spin_unlock(&inode->i_lock);
2035                        return ret;
2036                }
2037                spin_unlock(&inode->i_lock);
2038        }
2039
2040        sgp = SGP_CACHE;
2041
2042        if ((vma->vm_flags & VM_NOHUGEPAGE) ||
2043            test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
2044                sgp = SGP_NOHUGE;
2045        else if (vma->vm_flags & VM_HUGEPAGE)
2046                sgp = SGP_HUGE;
2047
2048        err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
2049                                  gfp, vma, vmf, &ret);
2050        if (err)
2051                return vmf_error(err);
2052        return ret;
2053}
2054
2055unsigned long shmem_get_unmapped_area(struct file *file,
2056                                      unsigned long uaddr, unsigned long len,
2057                                      unsigned long pgoff, unsigned long flags)
2058{
2059        unsigned long (*get_area)(struct file *,
2060                unsigned long, unsigned long, unsigned long, unsigned long);
2061        unsigned long addr;
2062        unsigned long offset;
2063        unsigned long inflated_len;
2064        unsigned long inflated_addr;
2065        unsigned long inflated_offset;
2066
2067        if (len > TASK_SIZE)
2068                return -ENOMEM;
2069
2070        get_area = current->mm->get_unmapped_area;
2071        addr = get_area(file, uaddr, len, pgoff, flags);
2072
2073        if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
2074                return addr;
2075        if (IS_ERR_VALUE(addr))
2076                return addr;
2077        if (addr & ~PAGE_MASK)
2078                return addr;
2079        if (addr > TASK_SIZE - len)
2080                return addr;
2081
2082        if (shmem_huge == SHMEM_HUGE_DENY)
2083                return addr;
2084        if (len < HPAGE_PMD_SIZE)
2085                return addr;
2086        if (flags & MAP_FIXED)
2087                return addr;
2088        /*
2089         * Our priority is to support MAP_SHARED mapped hugely;
2090         * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2091         * But if caller specified an address hint, respect that as before.
2092         */
2093        if (uaddr)
2094                return addr;
2095
2096        if (shmem_huge != SHMEM_HUGE_FORCE) {
2097                struct super_block *sb;
2098
2099                if (file) {
2100                        VM_BUG_ON(file->f_op != &shmem_file_operations);
2101                        sb = file_inode(file)->i_sb;
2102                } else {
2103                        /*
2104                         * Called directly from mm/mmap.c, or drivers/char/mem.c
2105                         * for "/dev/zero", to create a shared anonymous object.
2106                         */
2107                        if (IS_ERR(shm_mnt))
2108                                return addr;
2109                        sb = shm_mnt->mnt_sb;
2110                }
2111                if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2112                        return addr;
2113        }
2114
2115        offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2116        if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2117                return addr;
2118        if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2119                return addr;
2120
2121        inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2122        if (inflated_len > TASK_SIZE)
2123                return addr;
2124        if (inflated_len < len)
2125                return addr;
2126
2127        inflated_addr = get_area(NULL, 0, inflated_len, 0, flags);
2128        if (IS_ERR_VALUE(inflated_addr))
2129                return addr;
2130        if (inflated_addr & ~PAGE_MASK)
2131                return addr;
2132
2133        inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2134        inflated_addr += offset - inflated_offset;
2135        if (inflated_offset > offset)
2136                inflated_addr += HPAGE_PMD_SIZE;
2137
2138        if (inflated_addr > TASK_SIZE - len)
2139                return addr;
2140        return inflated_addr;
2141}
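/*
 * Worked example of the inflation above, assuming HPAGE_PMD_SIZE is 2MB (as
 * on x86_64): a 4MB MAP_SHARED request at pgoff 0 has offset 0, so the second
 * search asks for inflated_len = 4MB + 2MB - 4KB; whatever page-aligned
 * address comes back is then adjusted within that slack until
 * (inflated_addr & (HPAGE_PMD_SIZE-1)) == offset, here rounding up to the
 * next 2MB boundary, so the object can be mapped with huge pages from its
 * first page onwards.
 */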
2142
2143#ifdef CONFIG_NUMA
2144static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2145{
2146        struct inode *inode = file_inode(vma->vm_file);
2147        return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2148}
2149
2150static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2151                                          unsigned long addr)
2152{
2153        struct inode *inode = file_inode(vma->vm_file);
2154        pgoff_t index;
2155
2156        index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2157        return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2158}
2159#endif
2160
2161int shmem_lock(struct file *file, int lock, struct user_struct *user)
2162{
2163        struct inode *inode = file_inode(file);
2164        struct shmem_inode_info *info = SHMEM_I(inode);
2165        int retval = -ENOMEM;
2166
2167        spin_lock_irq(&info->lock);
2168        if (lock && !(info->flags & VM_LOCKED)) {
2169                if (!user_shm_lock(inode->i_size, user))
2170                        goto out_nomem;
2171                info->flags |= VM_LOCKED;
2172                mapping_set_unevictable(file->f_mapping);
2173        }
2174        if (!lock && (info->flags & VM_LOCKED) && user) {
2175                user_shm_unlock(inode->i_size, user);
2176                info->flags &= ~VM_LOCKED;
2177                mapping_clear_unevictable(file->f_mapping);
2178        }
2179        retval = 0;
2180
2181out_nomem:
2182        spin_unlock_irq(&info->lock);
2183        return retval;
2184}
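/*
 * shmem_lock() is reached from the SysV shared memory code (SHM_LOCK /
 * SHM_UNLOCK in ipc/shm.c).  A minimal userspace sketch, kept out of the
 * build; the 1MB size is arbitrary and the call is subject to the usual
 * RLIMIT_MEMLOCK / capability checks.
 */
#if 0
#include <sys/ipc.h>
#include <sys/shm.h>
#include <stdio.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);

	if (id < 0) {
		perror("shmget");
		return 1;
	}
	/* SHM_LOCK marks the segment unevictable (kept off swap). */
	if (shmctl(id, SHM_LOCK, NULL) != 0)
		perror("shmctl(SHM_LOCK)");
	shmctl(id, SHM_UNLOCK, NULL);
	shmctl(id, IPC_RMID, NULL);
	return 0;
}
#endif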
2185
2186static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2187{
2188        struct shmem_inode_info *info = SHMEM_I(file_inode(file));
2189
2190        if (info->seals & F_SEAL_FUTURE_WRITE) {
2191                /*
2192                 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
2193                  * the "future write" seal is active.
2194                 */
2195                if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
2196                        return -EPERM;
2197
2198                /*
2199                  * Since the F_SEAL_FUTURE_WRITE seal allows a MAP_SHARED
2200                  * read-only mapping, take care not to allow mprotect to revert
2201                 * protections.
2202                 */
2203                vma->vm_flags &= ~(VM_MAYWRITE);
2204        }
2205
2206        file_accessed(file);
2207        vma->vm_ops = &shmem_vm_ops;
2208        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
2209                        ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
2210                        (vma->vm_end & HPAGE_PMD_MASK)) {
2211                khugepaged_enter(vma, vma->vm_flags);
2212        }
2213        return 0;
2214}
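/*
 * A userspace sketch of the F_SEAL_FUTURE_WRITE behaviour enforced above,
 * kept out of the build.  It assumes Linux 5.1+ headers for
 * F_SEAL_FUTURE_WRITE; the memfd name and size are arbitrary.
 */
#if 0
#define _GNU_SOURCE
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	int fd = memfd_create("sealed", MFD_ALLOW_SEALING);

	if (fd < 0 || ftruncate(fd, 4096) != 0)
		return 1;
	fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE);

	/* Shared writable mappings are now refused by shmem_mmap()... */
	if (mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0) ==
	    MAP_FAILED)
		perror("mmap rw");	/* expected: EPERM */

	/* ...but a shared read-only mapping still works, with VM_MAYWRITE
	 * cleared so mprotect() cannot upgrade it to writable. */
	if (mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0) == MAP_FAILED)
		perror("mmap ro");
	close(fd);
	return 0;
}
#endif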
2215
2216static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
2217                                     umode_t mode, dev_t dev, unsigned long flags)
2218{
2219        struct inode *inode;
2220        struct shmem_inode_info *info;
2221        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2222
2223        if (shmem_reserve_inode(sb))
2224                return NULL;
2225
2226        inode = new_inode(sb);
2227        if (inode) {
2228                inode->i_ino = get_next_ino();
2229                inode_init_owner(inode, dir, mode);
2230                inode->i_blocks = 0;
2231                inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
2232                inode->i_generation = prandom_u32();
2233                info = SHMEM_I(inode);
2234                memset(info, 0, (char *)inode - (char *)info);
2235                spin_lock_init(&info->lock);
2236                atomic_set(&info->stop_eviction, 0);
2237                info->seals = F_SEAL_SEAL;
2238                info->flags = flags & VM_NORESERVE;
2239                INIT_LIST_HEAD(&info->shrinklist);
2240                INIT_LIST_HEAD(&info->swaplist);
2241                simple_xattrs_init(&info->xattrs);
2242                cache_no_acl(inode);
2243
2244                switch (mode & S_IFMT) {
2245                default:
2246                        inode->i_op = &shmem_special_inode_operations;
2247                        init_special_inode(inode, mode, dev);
2248                        break;
2249                case S_IFREG:
2250                        inode->i_mapping->a_ops = &shmem_aops;
2251                        inode->i_op = &shmem_inode_operations;
2252                        inode->i_fop = &shmem_file_operations;
2253                        mpol_shared_policy_init(&info->policy,
2254                                                 shmem_get_sbmpol(sbinfo));
2255                        break;
2256                case S_IFDIR:
2257                        inc_nlink(inode);
2258                        /* Some things misbehave if size == 0 on a directory */
2259                        inode->i_size = 2 * BOGO_DIRENT_SIZE;
2260                        inode->i_op = &shmem_dir_inode_operations;
2261                        inode->i_fop = &simple_dir_operations;
2262                        break;
2263                case S_IFLNK:
2264                        /*
2265                         * Must not load anything in the rbtree,
2266                         * mpol_free_shared_policy will not be called.
2267                         */
2268                        mpol_shared_policy_init(&info->policy, NULL);
2269                        break;
2270                }
2271
2272                lockdep_annotate_inode_mutex_key(inode);
2273        } else
2274                shmem_free_inode(sb);
2275        return inode;
2276}
2277
2278bool shmem_mapping(struct address_space *mapping)
2279{
2280        return mapping->a_ops == &shmem_aops;
2281}
2282
2283static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2284                                  pmd_t *dst_pmd,
2285                                  struct vm_area_struct *dst_vma,
2286                                  unsigned long dst_addr,
2287                                  unsigned long src_addr,
2288                                  bool zeropage,
2289                                  struct page **pagep)
2290{
2291        struct inode *inode = file_inode(dst_vma->vm_file);
2292        struct shmem_inode_info *info = SHMEM_I(inode);
2293        struct address_space *mapping = inode->i_mapping;
2294        gfp_t gfp = mapping_gfp_mask(mapping);
2295        pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2296        struct mem_cgroup *memcg;
2297        spinlock_t *ptl;
2298        void *page_kaddr;
2299        struct page *page;
2300        pte_t _dst_pte, *dst_pte;
2301        int ret;
2302        pgoff_t offset, max_off;
2303
2304        ret = -ENOMEM;
2305        if (!shmem_inode_acct_block(inode, 1))
2306                goto out;
2307
2308        if (!*pagep) {
2309                page = shmem_alloc_page(gfp, info, pgoff);
2310                if (!page)
2311                        goto out_unacct_blocks;
2312
2313                if (!zeropage) {        /* mcopy_atomic */
2314                        page_kaddr = kmap_atomic(page);
2315                        ret = copy_from_user(page_kaddr,
2316                                             (const void __user *)src_addr,
2317                                             PAGE_SIZE);
2318                        kunmap_atomic(page_kaddr);
2319
2320                        /* fallback to copy_from_user outside mmap_sem */
2321                        if (unlikely(ret)) {
2322                                *pagep = page;
2323                                shmem_inode_unacct_blocks(inode, 1);
2324                                /* don't free the page */
2325                                return -ENOENT;
2326                        }
2327                } else {                /* mfill_zeropage_atomic */
2328                        clear_highpage(page);
2329                }
2330        } else {
2331                page = *pagep;
2332                *pagep = NULL;
2333        }
2334
2335        VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
2336        __SetPageLocked(page);
2337        __SetPageSwapBacked(page);
2338        __SetPageUptodate(page);
2339
2340        ret = -EFAULT;
2341        offset = linear_page_index(dst_vma, dst_addr);
2342        max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2343        if (unlikely(offset >= max_off))
2344                goto out_release;
2345
2346        ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false);
2347        if (ret)
2348                goto out_release;
2349
2350        ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
2351                                                gfp & GFP_RECLAIM_MASK);
2352        if (ret)
2353                goto out_release_uncharge;
2354
2355        mem_cgroup_commit_charge(page, memcg, false, false);
2356
2357        _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
2358        if (dst_vma->vm_flags & VM_WRITE)
2359                _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
2360        else {
2361                /*
2362                 * We don't set the pte dirty if the vma has no
2363                 * VM_WRITE permission, so mark the page dirty or it
2364                 * could be freed from under us. We could do it
2365                 * unconditionally before unlock_page(), but doing it
2366                 * only if VM_WRITE is not set is faster.
2367                 */
2368                set_page_dirty(page);
2369        }
2370
2371        dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
2372
2373        ret = -EFAULT;
2374        max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2375        if (unlikely(offset >= max_off))
2376                goto out_release_uncharge_unlock;
2377
2378        ret = -EEXIST;
2379        if (!pte_none(*dst_pte))
2380                goto out_release_uncharge_unlock;
2381
2382        lru_cache_add_anon(page);
2383
2384        spin_lock(&info->lock);
2385        info->alloced++;
2386        inode->i_blocks += BLOCKS_PER_PAGE;
2387        shmem_recalc_inode(inode);
2388        spin_unlock(&info->lock);
2389
2390        inc_mm_counter(dst_mm, mm_counter_file(page));
2391        page_add_file_rmap(page, false);
2392        set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
2393
2394        /* No need to invalidate - it was non-present before */
2395        update_mmu_cache(dst_vma, dst_addr, dst_pte);
2396        pte_unmap_unlock(dst_pte, ptl);
2397        unlock_page(page);
2398        ret = 0;
2399out:
2400        return ret;
2401out_release_uncharge_unlock:
2402        pte_unmap_unlock(dst_pte, ptl);
2403        ClearPageDirty(page);
2404        delete_from_page_cache(page);
2405out_release_uncharge:
2406        mem_cgroup_cancel_charge(page, memcg, false);
2407out_release:
2408        unlock_page(page);
2409        put_page(page);
2410out_unacct_blocks:
2411        shmem_inode_unacct_blocks(inode, 1);
2412        goto out;
2413}
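/*
 * Note on the -ENOENT return above: the first copy_from_user() runs with
 * mmap_sem held, so if the source would fault we hand the preallocated page
 * back through *pagep instead of sleeping; mm/userfaultfd.c then redoes the
 * copy without mmap_sem and calls back in with *pagep already filled, which
 * is the "else" branch that consumes *pagep.
 */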
2414
2415int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
2416                           pmd_t *dst_pmd,
2417                           struct vm_area_struct *dst_vma,
2418                           unsigned long dst_addr,
2419                           unsigned long src_addr,
2420                           struct page **pagep)
2421{
2422        return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
2423                                      dst_addr, src_addr, false, pagep);
2424}
2425
2426int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
2427                             pmd_t *dst_pmd,
2428                             struct vm_area_struct *dst_vma,
2429                             unsigned long dst_addr)
2430{
2431        struct page *page = NULL;
2432
2433        return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
2434                                      dst_addr, 0, true, &page);
2435}
2436
2437#ifdef CONFIG_TMPFS
2438static const struct inode_operations shmem_symlink_inode_operations;
2439static const struct inode_operations shmem_short_symlink_operations;
2440
2441#ifdef CONFIG_TMPFS_XATTR
2442static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2443#else
2444#define shmem_initxattrs NULL
2445#endif
2446
2447static int
2448shmem_write_begin(struct file *file, struct address_space *mapping,
2449                        loff_t pos, unsigned len, unsigned flags,
2450                        struct page **pagep, void **fsdata)
2451{
2452        struct inode *inode = mapping->host;
2453        struct shmem_inode_info *info = SHMEM_I(inode);
2454        pgoff_t index = pos >> PAGE_SHIFT;
2455
2456        /* i_mutex is held by caller */
2457        if (unlikely(info->seals & (F_SEAL_GROW |
2458                                   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2459                if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
2460                        return -EPERM;
2461                if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2462                        return -EPERM;
2463        }
2464
2465        return shmem_getpage(inode, index, pagep, SGP_WRITE);
2466}
2467
2468static int
2469shmem_write_end(struct file *file, struct address_space *mapping,
2470                        loff_t pos, unsigned len, unsigned copied,
2471                        struct page *page, void *fsdata)
2472{
2473        struct inode *inode = mapping->host;
2474
2475        if (pos + copied > inode->i_size)
2476                i_size_write(inode, pos + copied);
2477
2478        if (!PageUptodate(page)) {
2479                struct page *head = compound_head(page);
2480                if (PageTransCompound(page)) {
2481                        int i;
2482
2483                        for (i = 0; i < HPAGE_PMD_NR; i++) {
2484                                if (head + i == page)
2485                                        continue;
2486                                clear_highpage(head + i);
2487                                flush_dcache_page(head + i);
2488                        }
2489                }
2490                if (copied < PAGE_SIZE) {
2491                        unsigned from = pos & (PAGE_SIZE - 1);
2492                        zero_user_segments(page, 0, from,
2493                                        from + copied, PAGE_SIZE);
2494                }
2495                SetPageUptodate(head);
2496        }
2497        set_page_dirty(page);
2498        unlock_page(page);
2499        put_page(page);
2500
2501        return copied;
2502}
2503
2504static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
2505{
2506        struct file *file = iocb->ki_filp;
2507        struct inode *inode = file_inode(file);
2508        struct address_space *mapping = inode->i_mapping;
2509        pgoff_t index;
2510        unsigned long offset;
2511        enum sgp_type sgp = SGP_READ;
2512        int error = 0;
2513        ssize_t retval = 0;
2514        loff_t *ppos = &iocb->ki_pos;
2515
2516        /*
2517         * Might this read be for a stacking filesystem?  Then when reading
2518         * holes of a sparse file, we actually need to allocate those pages,
2519         * and even mark them dirty, so it cannot exceed the max_blocks limit.
2520         */
2521        if (!iter_is_iovec(to))
2522                sgp = SGP_CACHE;
2523
2524        index = *ppos >> PAGE_SHIFT;
2525        offset = *ppos & ~PAGE_MASK;
2526
2527        for (;;) {
2528                struct page *page = NULL;
2529                pgoff_t end_index;
2530                unsigned long nr, ret;
2531                loff_t i_size = i_size_read(inode);
2532
2533                end_index = i_size >> PAGE_SHIFT;
2534                if (index > end_index)
2535                        break;
2536                if (index == end_index) {
2537                        nr = i_size & ~PAGE_MASK;
2538                        if (nr <= offset)
2539                                break;
2540                }
2541
2542                error = shmem_getpage(inode, index, &page, sgp);
2543                if (error) {
2544                        if (error == -EINVAL)
2545                                error = 0;
2546                        break;
2547                }
2548                if (page) {
2549                        if (sgp == SGP_CACHE)
2550                                set_page_dirty(page);
2551                        unlock_page(page);
2552                }
2553
2554                /*
2555                 * We must evaluate after, since reads (unlike writes)
2556                 * are called without i_mutex protection against truncate
2557                 */
2558                nr = PAGE_SIZE;
2559                i_size = i_size_read(inode);
2560                end_index = i_size >> PAGE_SHIFT;
2561                if (index == end_index) {
2562                        nr = i_size & ~PAGE_MASK;
2563                        if (nr <= offset) {
2564                                if (page)
2565                                        put_page(page);
2566                                break;
2567                        }
2568                }
2569                nr -= offset;
2570
2571                if (page) {
2572                        /*
2573                         * If users can be writing to this page using arbitrary
2574                         * virtual addresses, take care about potential aliasing
2575                         * before reading the page on the kernel side.
2576                         */
2577                        if (mapping_writably_mapped(mapping))
2578                                flush_dcache_page(page);
2579                        /*
2580                         * Mark the page accessed if we read the beginning.
2581                         */
2582                        if (!offset)
2583                                mark_page_accessed(page);
2584                } else {
2585                        page = ZERO_PAGE(0);
2586                        get_page(page);
2587                }
2588
2589                /*
2590                 * Ok, we have the page, and it's up-to-date, so
2591                 * now we can copy it to user space...
2592                 */
2593                ret = copy_page_to_iter(page, offset, nr, to);
2594                retval += ret;
2595                offset += ret;
2596                index += offset >> PAGE_SHIFT;
2597                offset &= ~PAGE_MASK;
2598
2599                put_page(page);
2600                if (!iov_iter_count(to))
2601                        break;
2602                if (ret < nr) {
2603                        error = -EFAULT;
2604                        break;
2605                }
2606                cond_resched();
2607        }
2608
2609        *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
2610        file_accessed(file);
2611        return retval ? retval : error;
2612}
2613
2614/*
2615 * llseek SEEK_DATA or SEEK_HOLE through the page cache.
2616 */
2617static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
2618                                    pgoff_t index, pgoff_t end, int whence)
2619{
2620        struct page *page;
2621        struct pagevec pvec;
2622        pgoff_t indices[PAGEVEC_SIZE];
2623        bool done = false;
2624        int i;
2625
2626        pagevec_init(&pvec);
2627        pvec.nr = 1;            /* start small: we may be there already */
2628        while (!done) {
2629                pvec.nr = find_get_entries(mapping, index,
2630                                        pvec.nr, pvec.pages, indices);
2631                if (!pvec.nr) {
2632                        if (whence == SEEK_DATA)
2633                                index = end;
2634                        break;
2635                }
2636                for (i = 0; i < pvec.nr; i++, index++) {
2637                        if (index < indices[i]) {
2638                                if (whence == SEEK_HOLE) {
2639                                        done = true;
2640                                        break;
2641                                }
2642                                index = indices[i];
2643                        }
2644                        page = pvec.pages[i];
2645                        if (page && !xa_is_value(page)) {
2646                                if (!PageUptodate(page))
2647                                        page = NULL;
2648                        }
2649                        if (index >= end ||
2650                            (page && whence == SEEK_DATA) ||
2651                            (!page && whence == SEEK_HOLE)) {
2652                                done = true;
2653                                break;
2654                        }
2655                }
2656                pagevec_remove_exceptionals(&pvec);
2657                pagevec_release(&pvec);
2658                pvec.nr = PAGEVEC_SIZE;
2659                cond_resched();
2660        }
2661        return index;
2662}
2663
2664static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2665{
2666        struct address_space *mapping = file->f_mapping;
2667        struct inode *inode = mapping->host;
2668        pgoff_t start, end;
2669        loff_t new_offset;
2670
2671        if (whence != SEEK_DATA && whence != SEEK_HOLE)
2672                return generic_file_llseek_size(file, offset, whence,
2673                                        MAX_LFS_FILESIZE, i_size_read(inode));
2674        inode_lock(inode);
2675        /* We're holding i_mutex so we can access i_size directly */
2676
2677        if (offset < 0 || offset >= inode->i_size)
2678                offset = -ENXIO;
2679        else {
2680                start = offset >> PAGE_SHIFT;
2681                end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2682                new_offset = shmem_seek_hole_data(mapping, start, end, whence);
2683                new_offset <<= PAGE_SHIFT;
2684                if (new_offset > offset) {
2685                        if (new_offset < inode->i_size)
2686                                offset = new_offset;
2687                        else if (whence == SEEK_DATA)
2688                                offset = -ENXIO;
2689                        else
2690                                offset = inode->i_size;
2691                }
2692        }
2693
2694        if (offset >= 0)
2695                offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
2696        inode_unlock(inode);
2697        return offset;
2698}
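/*
 * A userspace sketch of SEEK_DATA/SEEK_HOLE on a sparse tmpfs file, kept out
 * of the build.  It assumes /dev/shm is a tmpfs mount; the file name and
 * offsets are arbitrary.
 */
#if 0
#define _GNU_SOURCE
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	int fd = open("/dev/shm/sparse-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
	off_t data, hole;

	if (fd < 0)
		return 1;
	ftruncate(fd, 1 << 20);			/* 1MB file, all hole */
	pwrite(fd, "x", 1, 512 * 1024);		/* one page of data in the middle */

	data = lseek(fd, 0, SEEK_DATA);		/* -> 512KB (page with the byte) */
	hole = lseek(fd, data, SEEK_HOLE);	/* -> end of that data run */
	printf("data at %lld, hole at %lld\n", (long long)data, (long long)hole);
	close(fd);
	return 0;
}
#endif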
2699
2700static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2701                                                         loff_t len)
2702{
2703        struct inode *inode = file_inode(file);
2704        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2705        struct shmem_inode_info *info = SHMEM_I(inode);
2706        struct shmem_falloc shmem_falloc;
2707        pgoff_t start, index, end;
2708        int error;
2709
2710        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2711                return -EOPNOTSUPP;
2712
2713        inode_lock(inode);
2714
2715        if (mode & FALLOC_FL_PUNCH_HOLE) {
2716                struct address_space *mapping = file->f_mapping;
2717                loff_t unmap_start = round_up(offset, PAGE_SIZE);
2718                loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
2719                DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
2720
2721                /* protected by i_mutex */
2722                if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
2723                        error = -EPERM;
2724                        goto out;
2725                }
2726
2727                shmem_falloc.waitq = &shmem_falloc_waitq;
2728                shmem_falloc.start = unmap_start >> PAGE_SHIFT;
2729                shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2730                spin_lock(&inode->i_lock);
2731                inode->i_private = &shmem_falloc;
2732                spin_unlock(&inode->i_lock);
2733
2734                if ((u64)unmap_end > (u64)unmap_start)
2735                        unmap_mapping_range(mapping, unmap_start,
2736                                            1 + unmap_end - unmap_start, 0);
2737                shmem_truncate_range(inode, offset, offset + len - 1);
2738                /* No need to unmap again: hole-punching leaves COWed pages */
2739
2740                spin_lock(&inode->i_lock);
2741                inode->i_private = NULL;
2742                wake_up_all(&shmem_falloc_waitq);
2743                WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
2744                spin_unlock(&inode->i_lock);
2745                error = 0;
2746                goto out;
2747        }
2748
2749        /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2750        error = inode_newsize_ok(inode, offset + len);
2751        if (error)
2752                goto out;
2753
2754        if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2755                error = -EPERM;
2756                goto out;
2757        }
2758
2759        start = offset >> PAGE_SHIFT;
2760        end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2761        /* Try to avoid a swapstorm if len is impossible to satisfy */
2762        if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2763                error = -ENOSPC;
2764                goto out;
2765        }
2766
2767        shmem_falloc.waitq = NULL;
2768        shmem_falloc.start = start;
2769        shmem_falloc.next  = start;
2770        shmem_falloc.nr_falloced = 0;
2771        shmem_falloc.nr_unswapped = 0;
2772        spin_lock(&inode->i_lock);
2773        inode->i_private = &shmem_falloc;
2774        spin_unlock(&inode->i_lock);
2775
2776        for (index = start; index < end; index++) {
2777                struct page *page;
2778
2779                /*
2780                 * Good, the fallocate(2) manpage permits EINTR: we may have
2781                 * been interrupted because we are using up too much memory.
2782                 */
2783                if (signal_pending(current))
2784                        error = -EINTR;
2785                else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2786                        error = -ENOMEM;
2787                else
2788                        error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2789                if (error) {
2790                        /* Remove the !PageUptodate pages we added */
2791                        if (index > start) {
2792                                shmem_undo_range(inode,
2793                                    (loff_t)start << PAGE_SHIFT,
2794                                    ((loff_t)index << PAGE_SHIFT) - 1, true);
2795                        }
2796                        goto undone;
2797                }
2798
2799                /*
2800                 * Inform shmem_writepage() how far we have reached.
2801                 * No need for lock or barrier: we have the page lock.
2802                 */
2803                shmem_falloc.next++;
2804                if (!PageUptodate(page))
2805                        shmem_falloc.nr_falloced++;
2806
2807                /*
2808                 * If !PageUptodate, leave it that way so that freeable pages
2809                  * can be recognized if we need to roll back on error later.
2810                 * But set_page_dirty so that memory pressure will swap rather
2811                 * than free the pages we are allocating (and SGP_CACHE pages
2812                 * might still be clean: we now need to mark those dirty too).
2813                 */
2814                set_page_dirty(page);
2815                unlock_page(page);
2816                put_page(page);
2817                cond_resched();
2818        }
2819
2820        if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2821                i_size_write(inode, offset + len);
2822        inode->i_ctime = current_time(inode);
2823undone:
2824        spin_lock(&inode->i_lock);
2825        inode->i_private = NULL;
2826        spin_unlock(&inode->i_lock);
2827out:
2828        inode_unlock(inode);
2829        return error;
2830}
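/*
 * A userspace sketch of the two fallocate() modes handled above, kept out of
 * the build.  The memfd name and sizes are arbitrary; both calls may be
 * retried on EINTR, as the loop above allows.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	int fd = memfd_create("falloc-demo", 0);

	if (fd < 0)
		return 1;

	/* Preallocate 8MB: pages are instantiated now (SGP_FALLOC above),
	 * so later writes into this range cannot fail with ENOSPC. */
	if (fallocate(fd, 0, 0, 8 << 20) != 0)
		perror("fallocate");

	/* Punch a 2MB hole at 1MB, keeping the file size; faults into the
	 * hole wait on shmem_falloc_waitq until the punch completes. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      1 << 20, 2 << 20) != 0)
		perror("fallocate(PUNCH_HOLE)");
	close(fd);
	return 0;
}
#endif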
2831
2832static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2833{
2834        struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2835
2836        buf->f_type = TMPFS_MAGIC;
2837        buf->f_bsize = PAGE_SIZE;
2838        buf->f_namelen = NAME_MAX;
2839        if (sbinfo->max_blocks) {
2840                buf->f_blocks = sbinfo->max_blocks;
2841                buf->f_bavail =
2842                buf->f_bfree  = sbinfo->max_blocks -
2843                                percpu_counter_sum(&sbinfo->used_blocks);
2844        }
2845        if (sbinfo->max_inodes) {
2846                buf->f_files = sbinfo->max_inodes;
2847                buf->f_ffree = sbinfo->free_inodes;
2848        }
2849        /* else leave those fields 0 like simple_statfs */
2850        return 0;
2851}
2852
2853/*
2854 * File creation. Allocate an inode, and we're done.
2855 */
2856static int
2857shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
2858{
2859        struct inode *inode;
2860        int error = -ENOSPC;
2861
2862        inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
2863        if (inode) {
2864                error = simple_acl_create(dir, inode);
2865                if (error)
2866                        goto out_iput;
2867                error = security_inode_init_security(inode, dir,
2868                                                     &dentry->d_name,
2869                                                     shmem_initxattrs, NULL);
2870                if (error && error != -EOPNOTSUPP)
2871                        goto out_iput;
2872
2873                error = 0;
2874                dir->i_size += BOGO_DIRENT_SIZE;
2875                dir->i_ctime = dir->i_mtime = current_time(dir);
2876                d_instantiate(dentry, inode);
2877                dget(dentry); /* Extra count - pin the dentry in core */
2878        }
2879        return error;
2880out_iput:
2881        iput(inode);
2882        return error;
2883}
2884
2885static int
2886shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
2887{
2888        struct inode *inode;
2889        int error = -ENOSPC;
2890
2891        inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
2892        if (inode) {
2893                error = security_inode_init_security(inode, dir,
2894                                                     NULL,
2895                                                     shmem_initxattrs, NULL);
2896                if (error && error != -EOPNOTSUPP)
2897                        goto out_iput;
2898                error = simple_acl_create(dir, inode);
2899                if (error)
2900                        goto out_iput;
2901                d_tmpfile(dentry, inode);
2902        }
2903        return error;
2904out_iput:
2905        iput(inode);
2906        return error;
2907}
2908
2909static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
2910{
2911        int error;
2912
2913        if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
2914                return error;
2915        inc_nlink(dir);
2916        return 0;
2917}
2918
2919static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2920                bool excl)
2921{
2922        return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
2923}
2924
2925/*
2926 * Link a file.
2927 */
2928static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2929{
2930        struct inode *inode = d_inode(old_dentry);
2931        int ret = 0;
2932
2933        /*
2934         * No ordinary (disk-based) filesystem counts links as inodes;
2935         * but each new link needs a new dentry, pinning lowmem, and
2936         * tmpfs dentries cannot be pruned until they are unlinked.
2937         * But if an O_TMPFILE file is linked into the tmpfs, the
2938         * first link must skip that, to get the accounting right.
2939         */
2940        if (inode->i_nlink) {
2941                ret = shmem_reserve_inode(inode->i_sb);
2942                if (ret)
2943                        goto out;
2944        }
2945
2946        dir->i_size += BOGO_DIRENT_SIZE;
2947        inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2948        inc_nlink(inode);
2949        ihold(inode);   /* New dentry reference */
2950        dget(dentry);           /* Extra pinning count for the created dentry */
2951        d_instantiate(dentry, inode);
2952out:
2953        return ret;
2954}
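
/*
 * Editor's note -- illustrative userspace sketch, not part of the original
 * source: the i_nlink check above is what lets an O_TMPFILE tmpfs file be
 * given a name without charging a second inode.  A typical sequence (the
 * paths are assumptions; /dev/shm is assumed to be tmpfs):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	char proc[64];
 *	int fd = open("/dev/shm", O_TMPFILE | O_RDWR, 0600);
 *
 *	... write the file's contents through fd ...
 *	snprintf(proc, sizeof(proc), "/proc/self/fd/%d", fd);
 *	linkat(AT_FDCWD, proc, AT_FDCWD, "/dev/shm/now-visible",
 *	       AT_SYMLINK_FOLLOW);
 *
 * (linkat(fd, "", ..., AT_EMPTY_PATH) also works, but needs
 * CAP_DAC_READ_SEARCH.)
 */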
2955
2956static int shmem_unlink(struct inode *dir, struct dentry *dentry)
2957{
2958        struct inode *inode = d_inode(dentry);
2959
2960        if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
2961                shmem_free_inode(inode->i_sb);
2962
2963        dir->i_size -= BOGO_DIRENT_SIZE;
2964        inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2965        drop_nlink(inode);
2966        dput(dentry);   /* Undo the count from "create" - this does all the work */
2967        return 0;
2968}
2969
2970static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
2971{
2972        if (!simple_empty(dentry))
2973                return -ENOTEMPTY;
2974
2975        drop_nlink(d_inode(dentry));
2976        drop_nlink(dir);
2977        return shmem_unlink(dir, dentry);
2978}
2979
2980static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
2981{
2982        bool old_is_dir = d_is_dir(old_dentry);
2983        bool new_is_dir = d_is_dir(new_dentry);
2984
2985        if (old_dir != new_dir && old_is_dir != new_is_dir) {
2986                if (old_is_dir) {
2987                        drop_nlink(old_dir);
2988                        inc_nlink(new_dir);
2989                } else {
2990                        drop_nlink(new_dir);
2991                        inc_nlink(old_dir);
2992                }
2993        }
2994        old_dir->i_ctime = old_dir->i_mtime =
2995        new_dir->i_ctime = new_dir->i_mtime =
2996        d_inode(old_dentry)->i_ctime =
2997        d_inode(new_dentry)->i_ctime = current_time(old_dir);
2998
2999        return 0;
3000}
3001
3002static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
3003{
3004        struct dentry *whiteout;
3005        int error;
3006
3007        whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
3008        if (!whiteout)
3009                return -ENOMEM;
3010
3011        error = shmem_mknod(old_dir, whiteout,
3012                            S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
3013        dput(whiteout);
3014        if (error)
3015                return error;
3016
3017        /*
3018         * Cheat and hash the whiteout while the old dentry is still in
3019         * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
3020         *
3021         * d_lookup() will consistently find one of them at this point,
3022         * not sure which one, but that isn't even important.
3023         */
3024        d_rehash(whiteout);
3025        return 0;
3026}
3027
3028/*
3029 * The VFS layer already does all the dentry stuff for rename,
3030 * we just have to decrement the usage count for the target if
3031 * it exists so that the VFS layer correctly frees it when it
3032 * gets overwritten.
3033 */
3034static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags)
3035{
3036        struct inode *inode = d_inode(old_dentry);
3037        int they_are_dirs = S_ISDIR(inode->i_mode);
3038
3039        if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3040                return -EINVAL;
3041
3042        if (flags & RENAME_EXCHANGE)
3043                return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
3044
3045        if (!simple_empty(new_dentry))
3046                return -ENOTEMPTY;
3047
3048        if (flags & RENAME_WHITEOUT) {
3049                int error;
3050
3051                error = shmem_whiteout(old_dir, old_dentry);
3052                if (error)
3053                        return error;
3054        }
3055
3056        if (d_really_is_positive(new_dentry)) {
3057                (void) shmem_unlink(new_dir, new_dentry);
3058                if (they_are_dirs) {
3059                        drop_nlink(d_inode(new_dentry));
3060                        drop_nlink(old_dir);
3061                }
3062        } else if (they_are_dirs) {
3063                drop_nlink(old_dir);
3064                inc_nlink(new_dir);
3065        }
3066
3067        old_dir->i_size -= BOGO_DIRENT_SIZE;
3068        new_dir->i_size += BOGO_DIRENT_SIZE;
3069        old_dir->i_ctime = old_dir->i_mtime =
3070        new_dir->i_ctime = new_dir->i_mtime =
3071        inode->i_ctime = current_time(old_dir);
3072        return 0;
3073}
3074
3075static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
3076{
3077        int error;
3078        int len;
3079        struct inode *inode;
3080        struct page *page;
3081
3082        len = strlen(symname) + 1;
3083        if (len > PAGE_SIZE)
3084                return -ENAMETOOLONG;
3085
3086        inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
3087                                VM_NORESERVE);
3088        if (!inode)
3089                return -ENOSPC;
3090
3091        error = security_inode_init_security(inode, dir, &dentry->d_name,
3092                                             shmem_initxattrs, NULL);
3093        if (error) {
3094                if (error != -EOPNOTSUPP) {
3095                        iput(inode);
3096                        return error;
3097                }
3098                error = 0;
3099        }
3100
3101        inode->i_size = len-1;
3102        if (len <= SHORT_SYMLINK_LEN) {
3103                inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3104                if (!inode->i_link) {
3105                        iput(inode);
3106                        return -ENOMEM;
3107                }
3108                inode->i_op = &shmem_short_symlink_operations;
3109        } else {
3110                inode_nohighmem(inode);
3111                error = shmem_getpage(inode, 0, &page, SGP_WRITE);
3112                if (error) {
3113                        iput(inode);
3114                        return error;
3115                }
3116                inode->i_mapping->a_ops = &shmem_aops;
3117                inode->i_op = &shmem_symlink_inode_operations;
3118                memcpy(page_address(page), symname, len);
3119                SetPageUptodate(page);
3120                set_page_dirty(page);
3121                unlock_page(page);
3122                put_page(page);
3123        }
3124        dir->i_size += BOGO_DIRENT_SIZE;
3125        dir->i_ctime = dir->i_mtime = current_time(dir);
3126        d_instantiate(dentry, inode);
3127        dget(dentry);
3128        return 0;
3129}
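
/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * because len above includes the trailing NUL, a target of up to 127
 * characters is kmemdup'ed into inode->i_link (the "short" case), while
 * anything longer is written into page 0 of the mapping so it stays
 * swappable.  Both cases read back identically via readlink(2); the path
 * "/dev/shm/somelink" is an assumption:
 *
 *	#include <unistd.h>
 *
 *	char target[128];
 *	ssize_t n = readlink("/dev/shm/somelink", target, sizeof(target) - 1);
 *
 *	if (n >= 0)
 *		target[n] = '\0';
 */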
3130
3131static void shmem_put_link(void *arg)
3132{
3133        mark_page_accessed(arg);
3134        put_page(arg);
3135}
3136
3137static const char *shmem_get_link(struct dentry *dentry,
3138                                  struct inode *inode,
3139                                  struct delayed_call *done)
3140{
3141        struct page *page = NULL;
3142        int error;
3143        if (!dentry) {
3144                page = find_get_page(inode->i_mapping, 0);
3145                if (!page)
3146                        return ERR_PTR(-ECHILD);
3147                if (!PageUptodate(page)) {
3148                        put_page(page);
3149                        return ERR_PTR(-ECHILD);
3150                }
3151        } else {
3152                error = shmem_getpage(inode, 0, &page, SGP_READ);
3153                if (error)
3154                        return ERR_PTR(error);
3155                unlock_page(page);
3156        }
3157        set_delayed_call(done, shmem_put_link, page);
3158        return page_address(page);
3159}
3160
3161#ifdef CONFIG_TMPFS_XATTR
3162/*
3163 * Superblocks without xattr inode operations may get some security.* xattr
3164 * support from the LSM "for free". As soon as we have any other xattrs
3165 * like ACLs, we also need to implement the security.* handlers at
3166 * filesystem level, though.
3167 */
3168
3169/*
3170 * Callback for security_inode_init_security() for acquiring xattrs.
3171 */
3172static int shmem_initxattrs(struct inode *inode,
3173                            const struct xattr *xattr_array,
3174                            void *fs_info)
3175{
3176        struct shmem_inode_info *info = SHMEM_I(inode);
3177        const struct xattr *xattr;
3178        struct simple_xattr *new_xattr;
3179        size_t len;
3180
3181        for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3182                new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3183                if (!new_xattr)
3184                        return -ENOMEM;
3185
3186                len = strlen(xattr->name) + 1;
3187                new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3188                                          GFP_KERNEL);
3189                if (!new_xattr->name) {
3190                        kfree(new_xattr);
3191                        return -ENOMEM;
3192                }
3193
3194                memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3195                       XATTR_SECURITY_PREFIX_LEN);
3196                memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3197                       xattr->name, len);
3198
3199                simple_xattr_list_add(&info->xattrs, new_xattr);
3200        }
3201
3202        return 0;
3203}
3204
3205static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3206                                   struct dentry *unused, struct inode *inode,
3207                                   const char *name, void *buffer, size_t size)
3208{
3209        struct shmem_inode_info *info = SHMEM_I(inode);
3210
3211        name = xattr_full_name(handler, name);
3212        return simple_xattr_get(&info->xattrs, name, buffer, size);
3213}
3214
3215static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3216                                   struct dentry *unused, struct inode *inode,
3217                                   const char *name, const void *value,
3218                                   size_t size, int flags)
3219{
3220        struct shmem_inode_info *info = SHMEM_I(inode);
3221
3222        name = xattr_full_name(handler, name);
3223        return simple_xattr_set(&info->xattrs, name, value, size, flags);
3224}
3225
3226static const struct xattr_handler shmem_security_xattr_handler = {
3227        .prefix = XATTR_SECURITY_PREFIX,
3228        .get = shmem_xattr_handler_get,
3229        .set = shmem_xattr_handler_set,
3230};
3231
3232static const struct xattr_handler shmem_trusted_xattr_handler = {
3233        .prefix = XATTR_TRUSTED_PREFIX,
3234        .get = shmem_xattr_handler_get,
3235        .set = shmem_xattr_handler_set,
3236};
3237
3238static const struct xattr_handler *shmem_xattr_handlers[] = {
3239#ifdef CONFIG_TMPFS_POSIX_ACL
3240        &posix_acl_access_xattr_handler,
3241        &posix_acl_default_xattr_handler,
3242#endif
3243        &shmem_security_xattr_handler,
3244        &shmem_trusted_xattr_handler,
3245        NULL
3246};
3247
3248static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3249{
3250        struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3251        return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3252}
3253#endif /* CONFIG_TMPFS_XATTR */
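
/*
 * Editor's note -- illustrative userspace sketch, not part of the original
 * source: with the handlers above, tmpfs accepts security.* and trusted.*
 * names (plus POSIX ACLs when configured); trusted.* needs CAP_SYS_ADMIN,
 * and user.* is not handled here at all.  The file name and value below are
 * assumptions:
 *
 *	#include <sys/xattr.h>
 *
 *	setxattr("/dev/shm/file", "trusted.example", "1", 1, 0);
 *
 *	char label[256];
 *	ssize_t n = getxattr("/dev/shm/file", "security.selinux",
 *			     label, sizeof(label));
 *
 * The security.selinux read only succeeds when an LSM has actually attached
 * such an attribute.
 */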
3254
3255static const struct inode_operations shmem_short_symlink_operations = {
3256        .get_link       = simple_get_link,
3257#ifdef CONFIG_TMPFS_XATTR
3258        .listxattr      = shmem_listxattr,
3259#endif
3260};
3261
3262static const struct inode_operations shmem_symlink_inode_operations = {
3263        .get_link       = shmem_get_link,
3264#ifdef CONFIG_TMPFS_XATTR
3265        .listxattr      = shmem_listxattr,
3266#endif
3267};
3268
3269static struct dentry *shmem_get_parent(struct dentry *child)
3270{
3271        return ERR_PTR(-ESTALE);
3272}
3273
3274static int shmem_match(struct inode *ino, void *vfh)
3275{
3276        __u32 *fh = vfh;
3277        __u64 inum = fh[2];
3278        inum = (inum << 32) | fh[1];
3279        return ino->i_ino == inum && fh[0] == ino->i_generation;
3280}
3281
3282/* Find any alias of inode, but prefer a hashed alias */
3283static struct dentry *shmem_find_alias(struct inode *inode)
3284{
3285        struct dentry *alias = d_find_alias(inode);
3286
3287        return alias ?: d_find_any_alias(inode);
3288}
3289
3290
3291static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3292                struct fid *fid, int fh_len, int fh_type)
3293{
3294        struct inode *inode;
3295        struct dentry *dentry = NULL;
3296        u64 inum;
3297
3298        if (fh_len < 3)
3299                return NULL;
3300
3301        inum = fid->raw[2];
3302        inum = (inum << 32) | fid->raw[1];
3303
3304        inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3305                        shmem_match, fid->raw);
3306        if (inode) {
3307                dentry = shmem_find_alias(inode);
3308                iput(inode);
3309        }
3310
3311        return dentry;
3312}
3313
3314static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3315                                struct inode *parent)
3316{
3317        if (*len < 3) {
3318                *len = 3;
3319                return FILEID_INVALID;
3320        }
3321
3322        if (inode_unhashed(inode)) {
3323                /* Unfortunately insert_inode_hash is not idempotent,
3324                 * so as we hash inodes here rather than at creation
3325                 * time, we need a lock to ensure we only try
3326                 * to do it once
3327                 */
3328                static DEFINE_SPINLOCK(lock);
3329                spin_lock(&lock);
3330                if (inode_unhashed(inode))
3331                        __insert_inode_hash(inode,
3332                                            inode->i_ino + inode->i_generation);
3333                spin_unlock(&lock);
3334        }
3335
3336        fh[0] = inode->i_generation;
3337        fh[1] = inode->i_ino;
3338        fh[2] = ((__u64)inode->i_ino) >> 32;
3339
3340        *len = 3;
3341        return 1;
3342}
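
/*
 * Editor's note, not part of the original source: the handle produced above
 * is three 32-bit words, fh[0] = i_generation, fh[1] = low half of i_ino,
 * fh[2] = high half of i_ino, and shmem_fh_to_dentry()/shmem_match() undo
 * that packing:
 *
 *	u64 inum = ((u64)fh[2] << 32) | fh[1];
 *	bool match = (inode->i_ino == inum && inode->i_generation == fh[0]);
 *
 * Userspace sees these handles via name_to_handle_at(2) /
 * open_by_handle_at(2) when a tmpfs is exported, e.g. over NFS.
 */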
3343
3344static const struct export_operations shmem_export_ops = {
3345        .get_parent     = shmem_get_parent,
3346        .encode_fh      = shmem_encode_fh,
3347        .fh_to_dentry   = shmem_fh_to_dentry,
3348};
3349
3350static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
3351                               bool remount)
3352{
3353        char *this_char, *value, *rest;
3354        struct mempolicy *mpol = NULL;
3355        uid_t uid;
3356        gid_t gid;
3357
3358        while (options != NULL) {
3359                this_char = options;
3360                for (;;) {
3361                        /*
3362                         * NUL-terminate this option: unfortunately,
3363                         * mount options form a comma-separated list,
3364                         * but mpol's nodelist may also contain commas.
3365                         */
3366                        options = strchr(options, ',');
3367                        if (options == NULL)
3368                                break;
3369                        options++;
3370                        if (!isdigit(*options)) {
3371                                options[-1] = '\0';
3372                                break;
3373                        }
3374                }
3375                if (!*this_char)
3376                        continue;
3377                if ((value = strchr(this_char,'=')) != NULL) {
3378                        *value++ = 0;
3379                } else {
3380                        pr_err("tmpfs: No value for mount option '%s'\n",
3381                               this_char);
3382                        goto error;
3383                }
3384
3385                if (!strcmp(this_char,"size")) {
3386                        unsigned long long size;
3387                        size = memparse(value,&rest);
3388                        if (*rest == '%') {
3389                                size <<= PAGE_SHIFT;
3390                                size *= totalram_pages();
3391                                do_div(size, 100);
3392                                rest++;
3393                        }
3394                        if (*rest)
3395                                goto bad_val;
3396                        sbinfo->max_blocks =
3397                                DIV_ROUND_UP(size, PAGE_SIZE);
3398                } else if (!strcmp(this_char,"nr_blocks")) {
3399                        sbinfo->max_blocks = memparse(value, &rest);
3400                        if (*rest)
3401                                goto bad_val;
3402                } else if (!strcmp(this_char,"nr_inodes")) {
3403                        sbinfo->max_inodes = memparse(value, &rest);
3404                        if (*rest)
3405                                goto bad_val;
3406                } else if (!strcmp(this_char,"mode")) {
3407                        if (remount)
3408                                continue;
3409                        sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
3410                        if (*rest)
3411                                goto bad_val;
3412                } else if (!strcmp(this_char,"uid")) {
3413                        if (remount)
3414                                continue;
3415                        uid = simple_strtoul(value, &rest, 0);
3416                        if (*rest)
3417                                goto bad_val;
3418                        sbinfo->uid = make_kuid(current_user_ns(), uid);
3419                        if (!uid_valid(sbinfo->uid))
3420                                goto bad_val;
3421                } else if (!strcmp(this_char,"gid")) {
3422                        if (remount)
3423                                continue;
3424                        gid = simple_strtoul(value, &rest, 0);
3425                        if (*rest)
3426                                goto bad_val;
3427                        sbinfo->gid = make_kgid(current_user_ns(), gid);
3428                        if (!gid_valid(sbinfo->gid))
3429                                goto bad_val;
3430#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3431                } else if (!strcmp(this_char, "huge")) {
3432                        int huge;
3433                        huge = shmem_parse_huge(value);
3434                        if (huge < 0)
3435                                goto bad_val;
3436                        if (!has_transparent_hugepage() &&
3437                                        huge != SHMEM_HUGE_NEVER)
3438                                goto bad_val;
3439                        sbinfo->huge = huge;
3440#endif
3441#ifdef CONFIG_NUMA
3442                } else if (!strcmp(this_char,"mpol")) {
3443                        mpol_put(mpol);
3444                        mpol = NULL;
3445                        if (mpol_parse_str(value, &mpol))
3446                                goto bad_val;
3447#endif
3448                } else {
3449                        pr_err("tmpfs: Bad mount option %s\n", this_char);
3450                        goto error;
3451                }
3452        }
3453        sbinfo->mpol = mpol;
3454        return 0;
3455
3456bad_val:
3457        pr_err("tmpfs: Bad value '%s' for mount option '%s'\n",
3458               value, this_char);
3459error:
3460        mpol_put(mpol);
3461        return 1;
3462
3463}
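
/*
 * Editor's note -- illustrative example, not part of the original source:
 * a typical mount data string, as this parser receives it, might be
 *
 *	"size=50%,nr_inodes=1000000,mode=1777,uid=1000,gid=1000,huge=within_size,mpol=interleave:0-3"
 *
 * "size=50%" becomes max_blocks = totalram_pages() * 50 / 100 via the
 * arithmetic above; "mpol=interleave:0-3" survives the comma scan because
 * the isdigit() check keeps commas inside a nodelist (e.g. "0-3,8") from
 * being treated as option separators; and uid/gid/mode are silently skipped
 * on remount.
 */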
3464
3465static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
3466{
3467        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3468        struct shmem_sb_info config = *sbinfo;
3469        unsigned long inodes;
3470        int error = -EINVAL;
3471
3472        config.mpol = NULL;
3473        if (shmem_parse_options(data, &config, true))
3474                return error;
3475
3476        spin_lock(&sbinfo->stat_lock);
3477        inodes = sbinfo->max_inodes - sbinfo->free_inodes;
3478        if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
3479                goto out;
3480        if (config.max_inodes < inodes)
3481                goto out;
3482        /*
3483         * Those tests disallow limited->unlimited while any are in use;
3484         * but we must separately disallow unlimited->limited, because
3485         * in that case we have no record of how much is already in use.
3486         */
3487        if (config.max_blocks && !sbinfo->max_blocks)
3488                goto out;
3489        if (config.max_inodes && !sbinfo->max_inodes)
3490                goto out;
3491
3492        error = 0;
3493        sbinfo->huge = config.huge;
3494        sbinfo->max_blocks  = config.max_blocks;
3495        sbinfo->max_inodes  = config.max_inodes;
3496        sbinfo->free_inodes = config.max_inodes - inodes;
3497
3498        /*
3499         * Preserve previous mempolicy unless mpol remount option was specified.
3500         */
3501        if (config.mpol) {
3502                mpol_put(sbinfo->mpol);
3503                sbinfo->mpol = config.mpol;     /* transfers initial ref */
3504        }
3505out:
3506        spin_unlock(&sbinfo->stat_lock);
3507        return error;
3508}
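
/*
 * Editor's note, not part of the original source: in practice a remount
 * string such as "size=2g,nr_inodes=100000" is accepted only while current
 * usage fits under the new limits; dropping a limit ("size=0" or
 * "nr_inodes=0") is refused while any blocks or inodes are in use; and
 * going from unlimited back to a limit is refused outright, because there
 * is no record of how much is already in use.
 */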
3509
3510static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3511{
3512        struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3513
3514        if (sbinfo->max_blocks != shmem_default_max_blocks())
3515                seq_printf(seq, ",size=%luk",
3516                        sbinfo->max_blocks << (PAGE_SHIFT - 10));
3517        if (sbinfo->max_inodes != shmem_default_max_inodes())
3518                seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
3519        if (sbinfo->mode != (0777 | S_ISVTX))
3520                seq_printf(seq, ",mode=%03ho", sbinfo->mode);
3521        if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
3522                seq_printf(seq, ",uid=%u",
3523                                from_kuid_munged(&init_user_ns, sbinfo->uid));
3524        if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
3525                seq_printf(seq, ",gid=%u",
3526                                from_kgid_munged(&init_user_ns, sbinfo->gid));
3527#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3528        /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
3529        if (sbinfo->huge)
3530                seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
3531#endif
3532        shmem_show_mpol(seq, sbinfo->mpol);
3533        return 0;
3534}
3535
3536#endif /* CONFIG_TMPFS */
3537
3538static void shmem_put_super(struct super_block *sb)
3539{
3540        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3541
3542        percpu_counter_destroy(&sbinfo->used_blocks);
3543        mpol_put(sbinfo->mpol);
3544        kfree(sbinfo);
3545        sb->s_fs_info = NULL;
3546}
3547
3548int shmem_fill_super(struct super_block *sb, void *data, int silent)
3549{
3550        struct inode *inode;
3551        struct shmem_sb_info *sbinfo;
3552        int err = -ENOMEM;
3553
3554        /* Round up to L1_CACHE_BYTES to resist false sharing */
3555        sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3556                                L1_CACHE_BYTES), GFP_KERNEL);
3557        if (!sbinfo)
3558                return -ENOMEM;
3559
3560        sbinfo->mode = 0777 | S_ISVTX;
3561        sbinfo->uid = current_fsuid();
3562        sbinfo->gid = current_fsgid();
3563        sb->s_fs_info = sbinfo;
3564
3565#ifdef CONFIG_TMPFS
3566        /*
3567         * By default we only allow half of the physical RAM per
3568         * tmpfs instance, limiting inodes to one per page of lowmem;
3569         * but the internal instance is left unlimited.
3570         */
3571        if (!(sb->s_flags & SB_KERNMOUNT)) {
3572                sbinfo->max_blocks = shmem_default_max_blocks();
3573                sbinfo->max_inodes = shmem_default_max_inodes();
3574                if (shmem_parse_options(data, sbinfo, false)) {
3575                        err = -EINVAL;
3576                        goto failed;
3577                }
3578        } else {
3579                sb->s_flags |= SB_NOUSER;
3580        }
3581        sb->s_export_op = &shmem_export_ops;
3582        sb->s_flags |= SB_NOSEC;
3583#else
3584        sb->s_flags |= SB_NOUSER;
3585#endif
3586
3587        spin_lock_init(&sbinfo->stat_lock);
3588        if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3589                goto failed;
3590        sbinfo->free_inodes = sbinfo->max_inodes;
3591        spin_lock_init(&sbinfo->shrinklist_lock);
3592        INIT_LIST_HEAD(&sbinfo->shrinklist);
3593
3594        sb->s_maxbytes = MAX_LFS_FILESIZE;
3595        sb->s_blocksize = PAGE_SIZE;
3596        sb->s_blocksize_bits = PAGE_SHIFT;
3597        sb->s_magic = TMPFS_MAGIC;
3598        sb->s_op = &shmem_ops;
3599        sb->s_time_gran = 1;
3600#ifdef CONFIG_TMPFS_XATTR
3601        sb->s_xattr = shmem_xattr_handlers;
3602#endif
3603#ifdef CONFIG_TMPFS_POSIX_ACL
3604        sb->s_flags |= SB_POSIXACL;
3605#endif
3606        uuid_gen(&sb->s_uuid);
3607
3608        inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
3609        if (!inode)
3610                goto failed;
3611        inode->i_uid = sbinfo->uid;
3612        inode->i_gid = sbinfo->gid;
3613        sb->s_root = d_make_root(inode);
3614        if (!sb->s_root)
3615                goto failed;
3616        return 0;
3617
3618failed:
3619        shmem_put_super(sb);
3620        return err;
3621}
3622
3623static struct kmem_cache *shmem_inode_cachep;
3624
3625static struct inode *shmem_alloc_inode(struct super_block *sb)
3626{
3627        struct shmem_inode_info *info;
3628        info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
3629        if (!info)
3630                return NULL;
3631        return &info->vfs_inode;
3632}
3633
3634static void shmem_destroy_callback(struct rcu_head *head)
3635{
3636        struct inode *inode = container_of(head, struct inode, i_rcu);
3637        if (S_ISLNK(inode->i_mode))
3638                kfree(inode->i_link);
3639        kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3640}
3641
3642static void shmem_destroy_inode(struct inode *inode)
3643{
3644        if (S_ISREG(inode->i_mode))
3645                mpol_free_shared_policy(&SHMEM_I(inode)->policy);
3646        call_rcu(&inode->i_rcu, shmem_destroy_callback);
3647}
3648
3649static void shmem_init_inode(void *foo)
3650{
3651        struct shmem_inode_info *info = foo;
3652        inode_init_once(&info->vfs_inode);
3653}
3654
3655static void shmem_init_inodecache(void)
3656{
3657        shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
3658                                sizeof(struct shmem_inode_info),
3659                                0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
3660}
3661
3662static void shmem_destroy_inodecache(void)
3663{
3664        kmem_cache_destroy(shmem_inode_cachep);
3665}
3666
3667static const struct address_space_operations shmem_aops = {
3668        .writepage      = shmem_writepage,
3669        .set_page_dirty = __set_page_dirty_no_writeback,
3670#ifdef CONFIG_TMPFS
3671        .write_begin    = shmem_write_begin,
3672        .write_end      = shmem_write_end,
3673#endif
3674#ifdef CONFIG_MIGRATION
3675        .migratepage    = migrate_page,
3676#endif
3677        .error_remove_page = generic_error_remove_page,
3678};
3679
3680static const struct file_operations shmem_file_operations = {
3681        .mmap           = shmem_mmap,
3682        .get_unmapped_area = shmem_get_unmapped_area,
3683#ifdef CONFIG_TMPFS
3684        .llseek         = shmem_file_llseek,
3685        .read_iter      = shmem_file_read_iter,
3686        .write_iter     = generic_file_write_iter,
3687        .fsync          = noop_fsync,
3688        .splice_read    = generic_file_splice_read,
3689        .splice_write   = iter_file_splice_write,
3690        .fallocate      = shmem_fallocate,
3691#endif
3692};
3693
3694static const struct inode_operations shmem_inode_operations = {
3695        .getattr        = shmem_getattr,
3696        .setattr        = shmem_setattr,
3697#ifdef CONFIG_TMPFS_XATTR
3698        .listxattr      = shmem_listxattr,
3699        .set_acl        = simple_set_acl,
3700#endif
3701};
3702
3703static const struct inode_operations shmem_dir_inode_operations = {
3704#ifdef CONFIG_TMPFS
3705        .create         = shmem_create,
3706        .lookup         = simple_lookup,
3707        .link           = shmem_link,
3708        .unlink         = shmem_unlink,
3709        .symlink        = shmem_symlink,
3710        .mkdir          = shmem_mkdir,
3711        .rmdir          = shmem_rmdir,
3712        .mknod          = shmem_mknod,
3713        .rename         = shmem_rename2,
3714        .tmpfile        = shmem_tmpfile,
3715#endif
3716#ifdef CONFIG_TMPFS_XATTR
3717        .listxattr      = shmem_listxattr,
3718#endif
3719#ifdef CONFIG_TMPFS_POSIX_ACL
3720        .setattr        = shmem_setattr,
3721        .set_acl        = simple_set_acl,
3722#endif
3723};
3724
3725static const struct inode_operations shmem_special_inode_operations = {
3726#ifdef CONFIG_TMPFS_XATTR
3727        .listxattr      = shmem_listxattr,
3728#endif
3729#ifdef CONFIG_TMPFS_POSIX_ACL
3730        .setattr        = shmem_setattr,
3731        .set_acl        = simple_set_acl,
3732#endif
3733};
3734
3735static const struct super_operations shmem_ops = {
3736        .alloc_inode    = shmem_alloc_inode,
3737        .destroy_inode  = shmem_destroy_inode,
3738#ifdef CONFIG_TMPFS
3739        .statfs         = shmem_statfs,
3740        .remount_fs     = shmem_remount_fs,
3741        .show_options   = shmem_show_options,
3742#endif
3743        .evict_inode    = shmem_evict_inode,
3744        .drop_inode     = generic_delete_inode,
3745        .put_super      = shmem_put_super,
3746#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3747        .nr_cached_objects      = shmem_unused_huge_count,
3748        .free_cached_objects    = shmem_unused_huge_scan,
3749#endif
3750};
3751
3752static const struct vm_operations_struct shmem_vm_ops = {
3753        .fault          = shmem_fault,
3754        .map_pages      = filemap_map_pages,
3755#ifdef CONFIG_NUMA
3756        .set_policy     = shmem_set_policy,
3757        .get_policy     = shmem_get_policy,
3758#endif
3759};
3760
3761static struct dentry *shmem_mount(struct file_system_type *fs_type,
3762        int flags, const char *dev_name, void *data)
3763{
3764        return mount_nodev(fs_type, flags, data, shmem_fill_super);
3765}
3766
3767static struct file_system_type shmem_fs_type = {
3768        .owner          = THIS_MODULE,
3769        .name           = "tmpfs",
3770        .mount          = shmem_mount,
3771        .kill_sb        = kill_litter_super,
3772        .fs_flags       = FS_USERNS_MOUNT,
3773};
3774
3775int __init shmem_init(void)
3776{
3777        int error;
3778
3779        /* If rootfs called this, don't re-init */
3780        if (shmem_inode_cachep)
3781                return 0;
3782
3783        shmem_init_inodecache();
3784
3785        error = register_filesystem(&shmem_fs_type);
3786        if (error) {
3787                pr_err("Could not register tmpfs\n");
3788                goto out2;
3789        }
3790
3791        shm_mnt = kern_mount(&shmem_fs_type);
3792        if (IS_ERR(shm_mnt)) {
3793                error = PTR_ERR(shm_mnt);
3794                pr_err("Could not kern_mount tmpfs\n");
3795                goto out1;
3796        }
3797
3798#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3799        if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
3800                SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3801        else
3802                shmem_huge = 0; /* just in case it was patched */
3803#endif
3804        return 0;
3805
3806out1:
3807        unregister_filesystem(&shmem_fs_type);
3808out2:
3809        shmem_destroy_inodecache();
3810        shm_mnt = ERR_PTR(error);
3811        return error;
3812}
3813
3814#if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS)
3815static ssize_t shmem_enabled_show(struct kobject *kobj,
3816                struct kobj_attribute *attr, char *buf)
3817{
3818        int values[] = {
3819                SHMEM_HUGE_ALWAYS,
3820                SHMEM_HUGE_WITHIN_SIZE,
3821                SHMEM_HUGE_ADVISE,
3822                SHMEM_HUGE_NEVER,
3823                SHMEM_HUGE_DENY,
3824                SHMEM_HUGE_FORCE,
3825        };
3826        int i, count;
3827
3828        for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) {
3829                const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s ";
3830
3831                count += sprintf(buf + count, fmt,
3832                                shmem_format_huge(values[i]));
3833        }
3834        buf[count - 1] = '\n';
3835        return count;
3836}
3837
3838static ssize_t shmem_enabled_store(struct kobject *kobj,
3839                struct kobj_attribute *attr, const char *buf, size_t count)
3840{
3841        char tmp[16];
3842        int huge;
3843
3844        if (count + 1 > sizeof(tmp))
3845                return -EINVAL;
3846        memcpy(tmp, buf, count);
3847        tmp[count] = '\0';
3848        if (count && tmp[count - 1] == '\n')
3849                tmp[count - 1] = '\0';
3850
3851        huge = shmem_parse_huge(tmp);
3852        if (huge == -EINVAL)
3853                return -EINVAL;
3854        if (!has_transparent_hugepage() &&
3855                        huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
3856                return -EINVAL;
3857
3858        shmem_huge = huge;
3859        if (shmem_huge > SHMEM_HUGE_DENY)
3860                SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3861        return count;
3862}
3863
3864struct kobj_attribute shmem_enabled_attr =
3865        __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
3866#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
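
/*
 * Editor's note -- illustrative userspace sketch, not part of the original
 * source: this attribute is normally exposed as
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled.  Reading it prints
 * "always within_size advise never deny force" with the current choice in
 * brackets; writing accepts any one of those words, with or without a
 * trailing newline:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/sys/kernel/mm/transparent_hugepage/shmem_enabled",
 *		      O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "within_size", strlen("within_size"));
 *		close(fd);
 *	}
 */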
3867
3868#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3869bool shmem_huge_enabled(struct vm_area_struct *vma)
3870{
3871        struct inode *inode = file_inode(vma->vm_file);
3872        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3873        loff_t i_size;
3874        pgoff_t off;
3875
3876        if (shmem_huge == SHMEM_HUGE_FORCE)
3877                return true;
3878        if (shmem_huge == SHMEM_HUGE_DENY)
3879                return false;
3880        switch (sbinfo->huge) {
3881                case SHMEM_HUGE_NEVER:
3882                        return false;
3883                case SHMEM_HUGE_ALWAYS:
3884                        return true;
3885                case SHMEM_HUGE_WITHIN_SIZE:
3886                        off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
3887                        i_size = round_up(i_size_read(inode), PAGE_SIZE);
3888                        if (i_size >= HPAGE_PMD_SIZE &&
3889                                        i_size >> PAGE_SHIFT >= off)
3890                                return true;
3891                        /* fall through */
3892                case SHMEM_HUGE_ADVISE:
3893                        /* TODO: implement fadvise() hints */
3894                        return (vma->vm_flags & VM_HUGEPAGE);
3895                default:
3896                        VM_BUG_ON(1);
3897                        return false;
3898        }
3899}
3900#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
3901
3902#else /* !CONFIG_SHMEM */
3903
3904/*
3905 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
3906 *
3907 * This is intended for small systems where the benefits of the full
3908 * shmem code (swap-backed and resource-limited) are outweighed by
3909 * its complexity. On systems without swap this code should be
3910 * effectively equivalent, but much lighter weight.
3911 */
3912
3913static struct file_system_type shmem_fs_type = {
3914        .name           = "tmpfs",
3915        .mount          = ramfs_mount,
3916        .kill_sb        = kill_litter_super,
3917        .fs_flags       = FS_USERNS_MOUNT,
3918};
3919
3920int __init shmem_init(void)
3921{
3922        BUG_ON(register_filesystem(&shmem_fs_type) != 0);
3923
3924        shm_mnt = kern_mount(&shmem_fs_type);
3925        BUG_ON(IS_ERR(shm_mnt));
3926
3927        return 0;
3928}
3929
3930int shmem_unuse(unsigned int type, bool frontswap,
3931                unsigned long *fs_pages_to_unuse)
3932{
3933        return 0;
3934}
3935
3936int shmem_lock(struct file *file, int lock, struct user_struct *user)
3937{
3938        return 0;
3939}
3940
3941void shmem_unlock_mapping(struct address_space *mapping)
3942{
3943}
3944
3945#ifdef CONFIG_MMU
3946unsigned long shmem_get_unmapped_area(struct file *file,
3947                                      unsigned long addr, unsigned long len,
3948                                      unsigned long pgoff, unsigned long flags)
3949{
3950        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
3951}
3952#endif
3953
3954void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
3955{
3956        truncate_inode_pages_range(inode->i_mapping, lstart, lend);
3957}
3958EXPORT_SYMBOL_GPL(shmem_truncate_range);
3959
3960#define shmem_vm_ops                            generic_file_vm_ops
3961#define shmem_file_operations                   ramfs_file_operations
3962#define shmem_get_inode(sb, dir, mode, dev, flags)      ramfs_get_inode(sb, dir, mode, dev)
3963#define shmem_acct_size(flags, size)            0
3964#define shmem_unacct_size(flags, size)          do {} while (0)
3965
3966#endif /* CONFIG_SHMEM */
3967
3968/* common code */
3969
3970static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
3971                                       unsigned long flags, unsigned int i_flags)
3972{
3973        struct inode *inode;
3974        struct file *res;
3975
3976        if (IS_ERR(mnt))
3977                return ERR_CAST(mnt);
3978
3979        if (size < 0 || size > MAX_LFS_FILESIZE)
3980                return ERR_PTR(-EINVAL);
3981
3982        if (shmem_acct_size(flags, size))
3983                return ERR_PTR(-ENOMEM);
3984
3985        inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
3986                                flags);
3987        if (unlikely(!inode)) {
3988                shmem_unacct_size(flags, size);
3989                return ERR_PTR(-ENOSPC);
3990        }
3991        inode->i_flags |= i_flags;
3992        inode->i_size = size;
3993        clear_nlink(inode);     /* It is unlinked */
3994        res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
3995        if (!IS_ERR(res))
3996                res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
3997                                &shmem_file_operations);
3998        if (IS_ERR(res))
3999                iput(inode);
4000        return res;
4001}
4002
4003/**
4004 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4005 *      kernel internal.  There will be NO LSM permission checks against the
4006 *      underlying inode.  So users of this interface must do LSM checks at a
4007 *      higher layer.  The users are the big_key and shm implementations.  LSM
4008 *      checks are provided at the key or shm level rather than the inode.
4009 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4010 * @size: size to be set for the file
4011 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4012 */
4013struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4014{
4015        return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4016}
4017
4018/**
4019 * shmem_file_setup - get an unlinked file living in tmpfs
4020 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4021 * @size: size to be set for the file
4022 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4023 */
4024struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4025{
4026        return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4027}
4028EXPORT_SYMBOL_GPL(shmem_file_setup);
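
/*
 * Editor's note -- minimal kernel-side sketch, not part of the original
 * source: a typical in-kernel user (the name "my-buffer" and the 4MB size
 * are arbitrary) creates the file, uses its f_mapping or mmaps it, and
 * drops it with fput() when done:
 *
 *	struct file *filp;
 *
 *	filp = shmem_file_setup("my-buffer", 4 << 20, VM_NORESERVE);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	... use filp->f_mapping, e.g. with shmem_read_mapping_page_gfp() ...
 *	fput(filp);
 */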
4029
4030/**
4031 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4032 * @mnt: the tmpfs mount where the file will be created
4033 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4034 * @size: size to be set for the file
4035 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4036 */
4037struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4038                                       loff_t size, unsigned long flags)
4039{
4040        return __shmem_file_setup(mnt, name, size, flags, 0);
4041}
4042EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4043
4044/**
4045 * shmem_zero_setup - setup a shared anonymous mapping
4046 * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
4047 */
4048int shmem_zero_setup(struct vm_area_struct *vma)
4049{
4050        struct file *file;
4051        loff_t size = vma->vm_end - vma->vm_start;
4052
4053        /*
4054         * Cloning a new file under mmap_sem leads to a lock ordering conflict
4055         * between XFS directory reading and selinux: since this file is only
4056         * accessible to the user through its mapping, use S_PRIVATE flag to
4057         * bypass file security, in the same way as shmem_kernel_file_setup().
4058         */
4059        file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
4060        if (IS_ERR(file))
4061                return PTR_ERR(file);
4062
4063        if (vma->vm_file)
4064                fput(vma->vm_file);
4065        vma->vm_file = file;
4066        vma->vm_ops = &shmem_vm_ops;
4067
4068        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
4069                        ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
4070                        (vma->vm_end & HPAGE_PMD_MASK)) {
4071                khugepaged_enter(vma, vma->vm_flags);
4072        }
4073
4074        return 0;
4075}
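
/*
 * Editor's note, not part of the original source: this is the path behind
 * every MAP_SHARED | MAP_ANONYMOUS mapping (and MAP_SHARED mappings of
 * /dev/zero), which is why such memory shows up as a "/dev/zero (deleted)"
 * style entry in /proc/<pid>/maps.  A userspace sketch:
 *
 *	#include <sys/mman.h>
 *
 *	size_t len = 1 << 20;
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * The pages behind p live in this unlinked tmpfs file and can be swapped
 * like any other shmem page.
 */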
4076
4077/**
4078 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4079 * @mapping:    the page's address_space
4080 * @index:      the page index
4081 * @gfp:        the page allocator flags to use if allocating
4082 *
4083 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4084 * with any new page allocations done using the specified allocation flags.
4085 * But read_cache_page_gfp() uses the ->readpage() method, which does not
4086 * suit tmpfs, since it may have pages in swapcache, and needs to find those
4087 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4088 *
4089 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
4090 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4091 */
4092struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4093                                         pgoff_t index, gfp_t gfp)
4094{
4095#ifdef CONFIG_SHMEM
4096        struct inode *inode = mapping->host;
4097        struct page *page;
4098        int error;
4099
4100        BUG_ON(mapping->a_ops != &shmem_aops);
4101        error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4102                                  gfp, NULL, NULL, NULL);
4103        if (error)
4104                page = ERR_PTR(error);
4105        else
4106                unlock_page(page);
4107        return page;
4108#else
4109        /*
4110         * The tiny !SHMEM case uses ramfs without swap
4111         */
4112        return read_cache_page_gfp(mapping, index, gfp);
4113#endif
4114}
4115EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
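
/*
 * Editor's note -- minimal kernel-side sketch, not part of the original
 * source: callers get the page back unlocked with a reference held, and are
 * expected to put_page() it when finished.  Mirroring the i915 pattern
 * mentioned above (mapping and index are whatever the caller is working
 * with):
 *
 *	struct page *page;
 *
 *	page = shmem_read_mapping_page_gfp(mapping, index,
 *			mapping_gfp_mask(mapping) |
 *			__GFP_NORETRY | __GFP_NOWARN);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... read or modify the page contents ...
 *	put_page(page);
 */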
4116