linux/mm/page_io.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>

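/*
 * Completion handler for swap-out bios.  On error the page is re-dirtied so
 * its contents are not lost; in all cases writeback is ended and the bio
 * reference is dropped.
 */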
void end_swap_bio_write(struct bio *bio)
{
        struct page *page = bio_first_page_all(bio);

        if (bio->bi_status) {
                SetPageError(page);
                /*
                 * We failed to write the page out to swap-space.
                 * Re-dirty the page in order to avoid it being reclaimed.
                 * Also print a dire warning that things will go BAD (tm)
                 * very quickly.
                 *
                 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
                 */
                set_page_dirty(page);
                pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
                                     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
                                     (unsigned long long)bio->bi_iter.bi_sector);
                ClearPageReclaim(page);
        }
        end_page_writeback(page);
        bio_put(bio);
}

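/*
 * Tell the underlying block driver that a swap slot is no longer in use, so
 * in-memory backends such as zram can free their copy of the data.  Only
 * meaningful for SWP_BLKDEV devices whose driver implements
 * ->swap_slot_free_notify().
 */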
static void swap_slot_free_notify(struct page *page)
{
        struct swap_info_struct *sis;
        struct gendisk *disk;
        swp_entry_t entry;

        /*
         * There is no guarantee that the page is in swap cache - the software
         * suspend code (at least) uses end_swap_bio_read() against a non-
         * swapcache page.  So we must check PG_swapcache before proceeding with
         * this optimization.
         */
        if (unlikely(!PageSwapCache(page)))
                return;

        sis = page_swap_info(page);
        if (data_race(!(sis->flags & SWP_BLKDEV)))
                return;

        /*
         * The swap subsystem performs lazy swap slot freeing,
         * expecting that the page will be swapped out again.
         * So we can avoid an unnecessary write if the page
         * isn't redirtied.
         * This is good for real swap storage because we can
         * reduce unnecessary I/O and enhance wear-leveling
         * if an SSD is used as the swap device.
         * But if an in-memory swap device (eg zram) is used,
         * this causes a duplicated copy between uncompressed
         * data in VM-owned memory and compressed data in
         * zram-owned memory.  So let's free zram-owned memory
         * and make the VM-owned decompressed page *dirty*,
         * so the page has to be swapped out again if we
         * wish to reclaim it.
         */
        disk = sis->bdev->bd_disk;
        entry.val = page_private(page);
        if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
                unsigned long offset;

                offset = swp_offset(entry);

                SetPageDirty(page);
                disk->fops->swap_slot_free_notify(sis->bdev,
                                offset);
        }
}

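/*
 * Completion handler for swap-in bios.  On success the page is marked
 * up-to-date and the swap slot is handed back to the driver via
 * swap_slot_free_notify(); on error the page is flagged with PG_error.  The
 * page is then unlocked and, for synchronous reads, the task waiting in
 * swap_readpage() is woken.
 */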
static void end_swap_bio_read(struct bio *bio)
{
        struct page *page = bio_first_page_all(bio);
        struct task_struct *waiter = bio->bi_private;

        if (bio->bi_status) {
                SetPageError(page);
                ClearPageUptodate(page);
                pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
                                     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
                                     (unsigned long long)bio->bi_iter.bi_sector);
                goto out;
        }

        SetPageUptodate(page);
        swap_slot_free_notify(page);
out:
        unlock_page(page);
        WRITE_ONCE(bio->bi_private, NULL);
        bio_put(bio);
        if (waiter) {
                blk_wake_io_task(waiter);
                put_task_struct(waiter);
        }
}

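/*
 * Build the swap extent tree for a regular swapfile by probing its on-disk
 * layout with bmap().  Only runs of blocks that are PAGE_SIZE long and
 * PAGE_SIZE aligned on disk become extents; a hole in the file aborts
 * activation.  Returns the number of extents added (or a -ve error) and
 * stores the span of device blocks covered in *span.
 */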
int generic_swapfile_activate(struct swap_info_struct *sis,
                                struct file *swap_file,
                                sector_t *span)
{
        struct address_space *mapping = swap_file->f_mapping;
        struct inode *inode = mapping->host;
        unsigned blocks_per_page;
        unsigned long page_no;
        unsigned blkbits;
        sector_t probe_block;
        sector_t last_block;
        sector_t lowest_block = -1;
        sector_t highest_block = 0;
        int nr_extents = 0;
        int ret;

        blkbits = inode->i_blkbits;
        blocks_per_page = PAGE_SIZE >> blkbits;

        /*
         * Map all the blocks into the extent tree.  This code doesn't try
         * to be very smart.
         */
        probe_block = 0;
        page_no = 0;
        last_block = i_size_read(inode) >> blkbits;
        while ((probe_block + blocks_per_page) <= last_block &&
                        page_no < sis->max) {
                unsigned block_in_page;
                sector_t first_block;

                cond_resched();

                first_block = probe_block;
                ret = bmap(inode, &first_block);
                if (ret || !first_block)
                        goto bad_bmap;

                /*
                 * It must be PAGE_SIZE aligned on-disk
                 */
                if (first_block & (blocks_per_page - 1)) {
                        probe_block++;
                        goto reprobe;
                }

                for (block_in_page = 1; block_in_page < blocks_per_page;
                                        block_in_page++) {
                        sector_t block;

                        block = probe_block + block_in_page;
                        ret = bmap(inode, &block);
                        if (ret || !block)
                                goto bad_bmap;

                        if (block != first_block + block_in_page) {
                                /* Discontiguity */
                                probe_block++;
                                goto reprobe;
                        }
                }

                first_block >>= (PAGE_SHIFT - blkbits);
                if (page_no) {  /* exclude the header page */
                        if (first_block < lowest_block)
                                lowest_block = first_block;
                        if (first_block > highest_block)
                                highest_block = first_block;
                }

                /*
                 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
                 */
                ret = add_swap_extent(sis, page_no, 1, first_block);
                if (ret < 0)
                        goto out;
                nr_extents += ret;
                page_no++;
                probe_block += blocks_per_page;
reprobe:
                continue;
        }
        ret = nr_extents;
        *span = 1 + highest_block - lowest_block;
        if (page_no == 0)
                page_no = 1;    /* force Empty message */
        sis->max = page_no;
        sis->pages = page_no - 1;
        sis->highest_bit = page_no - 1;
out:
        return ret;
bad_bmap:
        pr_err("swapon: swapfile has holes\n");
        ret = -EINVAL;
        goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
        int ret = 0;

        if (try_to_free_swap(page)) {
                unlock_page(page);
                goto out;
        }
        /*
         * Arch code may have to preserve more data than just the page
         * contents, e.g. memory tags.
         */
        ret = arch_prepare_to_swap(page);
        if (ret) {
                set_page_dirty(page);
                unlock_page(page);
                goto out;
        }
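        /*
         * frontswap_store() returning zero means a frontswap backend (such
         * as zswap) has taken a copy of the page, so no block I/O is needed:
         * just run through the writeback state machine and return.
         */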
        if (frontswap_store(page) == 0) {
                set_page_writeback(page);
                unlock_page(page);
                end_page_writeback(page);
                goto out;
        }
        ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
        return ret;
}

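/*
 * Account a swap-out in vmstat: one PSWPOUT event per subpage, plus a
 * THP_SWPOUT event when a whole transparent huge page is written.
 */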
static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        if (unlikely(PageTransHuge(page)))
                count_vm_event(THP_SWPOUT);
#endif
        count_vm_events(PSWPOUT, thp_nr_pages(page));
}

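/*
 * Attribute the swap-out bio to the block cgroup that is effective for the
 * memory cgroup owning the page, so swap writeback is accounted (and
 * throttled) like the rest of that cgroup's I/O.
 */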
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
        struct cgroup_subsys_state *css;
        struct mem_cgroup *memcg;

        memcg = page_memcg(page);
        if (!memcg)
                return;

        rcu_read_lock();
        css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
        bio_associate_blkg_from_css(bio, css);
        rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, page)         do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

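/*
 * Write a single page to swap space.  Three paths exist: SWP_FS_OPS
 * swapfiles go through the filesystem's ->direct_IO(), block devices are
 * first offered the synchronous ->rw_page() fast path via bdev_write_page(),
 * and everything else falls back to a regular REQ_OP_WRITE bio completed by
 * @end_write_func.
 */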
int __swap_writepage(struct page *page, struct writeback_control *wbc,
                bio_end_io_t end_write_func)
{
        struct bio *bio;
        int ret;
        struct swap_info_struct *sis = page_swap_info(page);

        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        if (data_race(sis->flags & SWP_FS_OPS)) {
                struct kiocb kiocb;
                struct file *swap_file = sis->swap_file;
                struct address_space *mapping = swap_file->f_mapping;
                struct bio_vec bv = {
                        .bv_page = page,
                        .bv_len  = PAGE_SIZE,
                        .bv_offset = 0
                };
                struct iov_iter from;

                iov_iter_bvec(&from, WRITE, &bv, 1, PAGE_SIZE);
                init_sync_kiocb(&kiocb, swap_file);
                kiocb.ki_pos = page_file_offset(page);

                set_page_writeback(page);
                unlock_page(page);
                ret = mapping->a_ops->direct_IO(&kiocb, &from);
                if (ret == PAGE_SIZE) {
                        count_vm_event(PSWPOUT);
                        ret = 0;
                } else {
                        /*
                         * In the case of swap-over-nfs, this can be a
                         * temporary failure if the system has limited
                         * memory for allocating transmit buffers.
                         * Mark the page dirty and avoid
                         * rotate_reclaimable_page(), and rate-limit the
                         * messages, but do not flag PageError like the
                         * normal direct-to-bio case, as the failure could
                         * be temporary.
                         */
                        set_page_dirty(page);
                        ClearPageReclaim(page);
                        pr_err_ratelimited("Write error on dio swapfile (%llu)\n",
                                           page_file_offset(page));
                }
                end_page_writeback(page);
                return ret;
        }

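        /*
         * Try the synchronous ->rw_page() path first (implemented by drivers
         * such as zram and brd); only build a bio if the driver cannot take
         * the page directly.
         */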
        ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
        if (!ret) {
                count_swpout_vm_event(page);
                return 0;
        }

        bio = bio_alloc(GFP_NOIO, 1);
        bio_set_dev(bio, sis->bdev);
        bio->bi_iter.bi_sector = swap_page_sector(page);
        bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
        bio->bi_end_io = end_write_func;
        bio_add_page(bio, page, thp_size(page), 0);

        bio_associate_blkg_from_page(bio, page);
        count_swpout_vm_event(page);
        set_page_writeback(page);
        unlock_page(page);
        submit_bio(bio);

        return 0;
}

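/*
 * Read one page back in from swap.  Frontswap is tried first, then
 * SWP_FS_OPS swapfiles use the filesystem's ->readpage(), synchronous-I/O
 * block devices try ->rw_page(), and the general case submits a bio.  When
 * @synchronous is set, the calling task waits (polling the queue) until the
 * read completes.
 */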
int swap_readpage(struct page *page, bool synchronous)
{
        struct bio *bio;
        int ret = 0;
        struct swap_info_struct *sis = page_swap_info(page);
        blk_qc_t qc;
        struct gendisk *disk;
        unsigned long pflags;

        VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageUptodate(page), page);

        /*
         * Count submission time as memory stall. When the device is congested,
         * or the submitting cgroup is IO-throttled, submission can be a
         * significant part of overall IO time.
         */
        psi_memstall_enter(&pflags);

        if (frontswap_load(page) == 0) {
                SetPageUptodate(page);
                unlock_page(page);
                goto out;
        }

        if (data_race(sis->flags & SWP_FS_OPS)) {
                struct file *swap_file = sis->swap_file;
                struct address_space *mapping = swap_file->f_mapping;

                ret = mapping->a_ops->readpage(swap_file, page);
                if (!ret)
                        count_vm_event(PSWPIN);
                goto out;
        }

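        /*
         * SWP_SYNCHRONOUS_IO devices (e.g. zram) complete ->rw_page()
         * immediately, so the bio machinery can be skipped entirely when the
         * driver accepts the page.
         */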
        if (sis->flags & SWP_SYNCHRONOUS_IO) {
                ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
                if (!ret) {
                        if (trylock_page(page)) {
                                swap_slot_free_notify(page);
                                unlock_page(page);
                        }

                        count_vm_event(PSWPIN);
                        goto out;
                }
        }

        ret = 0;
        bio = bio_alloc(GFP_KERNEL, 1);
        bio_set_dev(bio, sis->bdev);
        bio->bi_opf = REQ_OP_READ;
        bio->bi_iter.bi_sector = swap_page_sector(page);
        bio->bi_end_io = end_swap_bio_read;
        bio_add_page(bio, page, thp_size(page), 0);

        disk = bio->bi_bdev->bd_disk;
        /*
         * Keep this task valid during swap readpage because the oom killer may
         * attempt to access it in the page fault retry time check.
         */
        if (synchronous) {
                bio->bi_opf |= REQ_HIPRI;
                get_task_struct(current);
                bio->bi_private = current;
        }
        count_vm_event(PSWPIN);
        bio_get(bio);
        qc = submit_bio(bio);
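        /*
         * For synchronous reads, wait here for completion: end_swap_bio_read()
         * clears ->bi_private when it finishes, and blk_poll() drives the
         * queue in the meantime (falling back to blk_io_schedule() if there
         * is nothing to poll).
         */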
        while (synchronous) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(bio->bi_private))
                        break;

                if (!blk_poll(disk->queue, qc, true))
                        blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);
        bio_put(bio);

out:
        psi_memstall_leave(&pflags);
        return ret;
}

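/*
 * Dirty a page that lives in the swap cache.  SWP_FS_OPS swapfiles defer to
 * the backing filesystem's ->set_page_dirty(); block-device swap just sets
 * the dirty flag without queueing writeback.
 */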
int swap_set_page_dirty(struct page *page)
{
        struct swap_info_struct *sis = page_swap_info(page);

        if (data_race(sis->flags & SWP_FS_OPS)) {
                struct address_space *mapping = sis->swap_file->f_mapping;

                VM_BUG_ON_PAGE(!PageSwapCache(page), page);
                return mapping->a_ops->set_page_dirty(page);
        } else {
                return __set_page_dirty_no_writeback(page);
        }
}