linux/fs/jfs/jfs_metapage.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) International Business Machines Corp., 2000-2005
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/seq_file.h>
#include <linux/writeback.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

#ifdef CONFIG_JFS_STATISTICS
static struct {
	uint	pagealloc;	/* # of page allocations */
	uint	pagefree;	/* # of page frees */
	uint	lockwait;	/* # of sleeping lock_metapage() calls */
} mpStat;
#endif

#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)

static inline void unlock_metapage(struct metapage *mp)
{
	clear_bit_unlock(META_locked, &mp->flag);
	wake_up(&mp->wait);
}

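/*
 * Slow path for lock_metapage(): sleep until the current holder clears
 * META_locked.  The page lock must be held on entry; it is dropped
 * while sleeping so the holder can make progress, then retaken before
 * retrying.
 */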
static inline void __lock_metapage(struct metapage *mp)
{
	DECLARE_WAITQUEUE(wait, current);
	INCREMENT(mpStat.lockwait);
	add_wait_queue_exclusive(&mp->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (metapage_locked(mp)) {
			unlock_page(mp->page);
			io_schedule();
			lock_page(mp->page);
		}
	} while (trylock_metapage(mp));
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&mp->wait, &wait);
}

/*
 * Must have mp->page locked
 */
static inline void lock_metapage(struct metapage *mp)
{
	if (trylock_metapage(mp))
		__lock_metapage(mp);
}

#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;

#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)

#if MPS_PER_PAGE > 1

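/*
 * When the metapage size (PSIZE) is smaller than the machine page
 * size, several metapages share one page.  A meta_anchor hangs off
 * page->private to track them, along with a count of I/Os in flight
 * against the page.
 */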
struct meta_anchor {
	int mp_count;
	atomic_t io_count;
	struct metapage *mp[MPS_PER_PAGE];
};
#define mp_anchor(page) ((struct meta_anchor *)page_private(page))

static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	if (!PagePrivate(page))
		return NULL;
	return mp_anchor(page)->mp[offset >> L2PSIZE];
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a;
	int index;
	int l2mp_blocks;	/* log2 blocks per metapage */

	if (PagePrivate(page))
		a = mp_anchor(page);
	else {
		a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
		if (!a)
			return -ENOMEM;
		set_page_private(page, (unsigned long)a);
		SetPagePrivate(page);
		kmap(page);
	}

	if (mp) {
		l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
		index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
		a->mp_count++;
		a->mp[index] = mp;
	}

	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a = mp_anchor(page);
	int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
	int index;

	index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

	BUG_ON(a->mp[index] != mp);

	a->mp[index] = NULL;
	if (--a->mp_count == 0) {
		kfree(a);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		kunmap(page);
	}
}

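/*
 * inc_io()/dec_io() count the bios in flight against a page; when the
 * last one completes, dec_io() invokes the handler (last_read_complete
 * or last_write_complete below).  In the one-metapage-per-page case
 * there is at most one bio, so inc_io is a no-op and dec_io calls the
 * handler directly.
 */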
static inline void inc_io(struct page *page)
{
	atomic_inc(&mp_anchor(page)->io_count);
}

static inline void dec_io(struct page *page, void (*handler) (struct page *))
{
	if (atomic_dec_and_test(&mp_anchor(page)->io_count))
		handler(page);
}

#else
static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	if (mp) {
		set_page_private(page, (unsigned long)mp);
		SetPagePrivate(page);
		kmap(page);
	}
	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	set_page_private(page, 0);
	ClearPagePrivate(page);
	kunmap(page);
}

#define inc_io(page) do {} while(0)
#define dec_io(page, handler) handler(page)

#endif

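/*
 * Metapage structs come from a mempool so that allocation can make
 * forward progress under memory pressure; the pool keeps
 * METAPOOL_MIN_PAGES elements in reserve.
 */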
static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
	struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask);

	if (mp) {
		mp->lid = 0;
		mp->lsn = 0;
		mp->data = NULL;
		mp->clsn = 0;
		mp->log = NULL;
		init_waitqueue_head(&mp->wait);
	}
	return mp;
}

static inline void free_metapage(struct metapage *mp)
{
	mempool_free(mp, metapage_mempool);
}

int __init metapage_init(void)
{
	/*
	 * Allocate the metapage structures
	 */
	metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
					   0, 0, NULL);
	if (metapage_cache == NULL)
		return -ENOMEM;

	metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
						    metapage_cache);

	if (metapage_mempool == NULL) {
		kmem_cache_destroy(metapage_cache);
		return -ENOMEM;
	}

	return 0;
}

void metapage_exit(void)
{
	mempool_destroy(metapage_mempool);
	kmem_cache_destroy(metapage_cache);
}

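/*
 * Detach and free a metapage, but only if nothing still references it:
 * no holders, no nohomeok pins, and no dirty or in-flight I/O state.
 * Caller must hold the page lock.
 */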
static inline void drop_metapage(struct page *page, struct metapage *mp)
{
	if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
	    test_bit(META_io, &mp->flag))
		return;
	remove_metapage(page, mp);
	INCREMENT(mpStat.pagefree);
	free_metapage(mp);
}

/*
 * Metapage address space operations
 */

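/*
 * Map a logical block of the metadata inode to a physical block,
 * clamping *len to the remaining extent.  For the "direct" block
 * device inode (i_ino == 0) the mapping is the identity; otherwise
 * xtLookup() walks the inode's xtree.  Returns 0 if the block is
 * unmapped.
 */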
static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
				    int *len)
{
	int rc = 0;
	int xflag;
	s64 xaddr;
	sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
			       inode->i_blkbits;

	if (lblock >= file_blocks)
		return 0;
	if (lblock + *len > file_blocks)
		*len = file_blocks - lblock;

	if (inode->i_ino) {
		rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
		if ((rc == 0) && *len)
			lblock = (sector_t)xaddr;
		else
			lblock = 0;
	} /* else no mapping */

	return lblock;
}

static void last_read_complete(struct page *page)
{
	if (!PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
}

static void metapage_read_end_io(struct bio *bio)
{
	struct page *page = bio->bi_private;

	if (bio->bi_status) {
		printk(KERN_ERR "metapage_read_end_io: I/O error\n");
		SetPageError(page);
	}

	dec_io(page, last_read_complete);
	bio_put(bio);
}

static void remove_from_logsync(struct metapage *mp)
{
	struct jfs_log *log = mp->log;
	unsigned long flags;
/*
 * This can race.  Recheck that log hasn't been set to null, and after
 * acquiring logsync lock, recheck lsn
 */
	if (!log)
		return;

	LOGSYNC_LOCK(log, flags);
	if (mp->lsn) {
		mp->log = NULL;
		mp->lsn = 0;
		mp->clsn = 0;
		log->count--;
		list_del(&mp->synclist);
	}
	LOGSYNC_UNLOCK(log, flags);
}

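/*
 * Called when the last write bio against a page completes: clear the
 * in-flight flag on each metapage that was under I/O, drop it from the
 * log sync list, and end writeback on the page.
 */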
static void last_write_complete(struct page *page)
{
	struct metapage *mp;
	unsigned int offset;

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);
		if (mp && test_bit(META_io, &mp->flag)) {
			if (mp->lsn)
				remove_from_logsync(mp);
			clear_bit(META_io, &mp->flag);
		}
		/*
		 * I'd like to call drop_metapage here, but I don't think it's
		 * safe unless I have the page locked
		 */
	}
	end_page_writeback(page);
}

static void metapage_write_end_io(struct bio *bio)
{
	struct page *page = bio->bi_private;

	BUG_ON(!PagePrivate(page));

	if (bio->bi_status) {
		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
		SetPageError(page);
	}
	dec_io(page, last_write_complete);
	bio_put(bio);
}

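/*
 * Write back the dirty metapages in a page.  Runs of metapages that
 * are contiguous both in the page and on disk are coalesced into a
 * single bio; a discontinuity submits the current bio and starts a new
 * one.  Metapages pinned by nohomeok are skipped and the page is
 * redirtied instead, after nudging the journal if it is idle.
 */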
static int metapage_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio = NULL;
	int block_offset;	/* block offset of mp within page */
	struct inode *inode = page->mapping->host;
	int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
	int len;
	int xlen;
	struct metapage *mp;
	int redirty = 0;
	sector_t lblock;
	int nr_underway = 0;
	sector_t pblock;
	sector_t next_block = 0;
	sector_t page_start;
	unsigned long bio_bytes = 0;
	unsigned long bio_offset = 0;
	int offset;
	int bad_blocks = 0;

	page_start = (sector_t)page->index <<
		     (PAGE_SHIFT - inode->i_blkbits);
	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp || !test_bit(META_dirty, &mp->flag))
			continue;

		if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
			redirty = 1;
			/*
			 * Make sure this page isn't blocked indefinitely.
			 * If the journal isn't undergoing I/O, push it
			 */
			if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
				jfs_flush_journal(mp->log, 0);
			continue;
		}

		clear_bit(META_dirty, &mp->flag);
		set_bit(META_io, &mp->flag);
		block_offset = offset >> inode->i_blkbits;
		lblock = page_start + block_offset;
		if (bio) {
			if (xlen && lblock == next_block) {
				/* Contiguous, in memory & on disk */
				len = min(xlen, blocks_per_mp);
				xlen -= len;
				bio_bytes += len << inode->i_blkbits;
				continue;
			}
			/* Not contiguous */
			if (bio_add_page(bio, page, bio_bytes, bio_offset) <
			    bio_bytes)
				goto add_failed;
			/*
			 * Increment counter before submitting i/o to keep
			 * count from hitting zero before we're through
			 */
			inc_io(page);
			if (!bio->bi_iter.bi_size)
				goto dump_bio;
			submit_bio(bio);
			nr_underway++;
			bio = NULL;
		} else
			inc_io(page);
		xlen = (PAGE_SIZE - offset) >> inode->i_blkbits;
		pblock = metapage_get_blocks(inode, lblock, &xlen);
		if (!pblock) {
			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
			/*
			 * We already called inc_io(), but can't cancel it
			 * with dec_io() until we're done with the page
			 */
			bad_blocks++;
			continue;
		}
		len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);

		bio = bio_alloc(GFP_NOFS, 1);
		bio_set_dev(bio, inode->i_sb->s_bdev);
		bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
		bio->bi_end_io = metapage_write_end_io;
		bio->bi_private = page;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		/* Don't call bio_add_page yet, we may add to this vec */
		bio_offset = offset;
		bio_bytes = len << inode->i_blkbits;

		xlen -= len;
		next_block = lblock + len;
	}
	if (bio) {
		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
			goto add_failed;
		if (!bio->bi_iter.bi_size)
			goto dump_bio;

		submit_bio(bio);
		nr_underway++;
	}
	if (redirty)
		redirty_page_for_writepage(wbc, page);

	unlock_page(page);

	if (bad_blocks)
		goto err_out;

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
add_failed:
	/* We should never reach here, since we're only adding one vec */
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	goto skip;
dump_bio:
	print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
		       4, bio, sizeof(*bio), 0);
skip:
	bio_put(bio);
	unlock_page(page);
	dec_io(page, last_write_complete);
err_out:
	while (bad_blocks--)
		dec_io(page, last_write_complete);
	return -EIO;
}

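/*
 * Read a full page of metadata.  Each mapped extent gets its own bio;
 * unmapped blocks are skipped.  The page is unlocked by
 * last_read_complete() once every bio has finished.
 */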
static int metapage_readpage(struct file *fp, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct bio *bio = NULL;
	int block_offset;
	int blocks_per_page = i_blocks_per_page(inode, page);
	sector_t page_start;	/* address of page in fs blocks */
	sector_t pblock;
	int xlen;
	unsigned int len;
	int offset;

	BUG_ON(!PageLocked(page));
	page_start = (sector_t)page->index <<
		     (PAGE_SHIFT - inode->i_blkbits);

	block_offset = 0;
	while (block_offset < blocks_per_page) {
		xlen = blocks_per_page - block_offset;
		pblock = metapage_get_blocks(inode, page_start + block_offset,
					     &xlen);
		if (pblock) {
			if (!PagePrivate(page))
				insert_metapage(page, NULL);
			inc_io(page);
			if (bio)
				submit_bio(bio);

			bio = bio_alloc(GFP_NOFS, 1);
			bio_set_dev(bio, inode->i_sb->s_bdev);
			bio->bi_iter.bi_sector =
				pblock << (inode->i_blkbits - 9);
			bio->bi_end_io = metapage_read_end_io;
			bio->bi_private = page;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			len = xlen << inode->i_blkbits;
			offset = block_offset << inode->i_blkbits;
			if (bio_add_page(bio, page, len, offset) < len)
				goto add_failed;
			block_offset += xlen;
		} else
			block_offset++;
	}
	if (bio)
		submit_bio(bio);
	else
		unlock_page(page);

	return 0;

add_failed:
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	bio_put(bio);
	dec_io(page, last_read_complete);
	return -EIO;
}

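/*
 * Attempt to free every metapage in the page.  Returns 1 if the page
 * can be released, 0 if any metapage is still held, pinned by a
 * transaction, or dirty.
 */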
static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct metapage *mp;
	int ret = 1;
	int offset;

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp)
			continue;

		jfs_info("metapage_releasepage: mp = 0x%p", mp);
		if (mp->count || mp->nohomeok ||
		    test_bit(META_dirty, &mp->flag)) {
			jfs_info("count = %ld, nohomeok = %d", mp->count,
				 mp->nohomeok);
			ret = 0;
			continue;
		}
		if (mp->lsn)
			remove_from_logsync(mp);
		remove_metapage(page, mp);
		INCREMENT(mpStat.pagefree);
		free_metapage(mp);
	}
	return ret;
}

static void metapage_invalidatepage(struct page *page, unsigned int offset,
				    unsigned int length)
{
	BUG_ON(offset || length < PAGE_SIZE);

	BUG_ON(PageWriteback(page));

	metapage_releasepage(page, 0);
}

const struct address_space_operations jfs_metapage_aops = {
	.readpage	= metapage_readpage,
	.writepage	= metapage_writepage,
	.releasepage	= metapage_releasepage,
	.invalidatepage	= metapage_invalidatepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
};

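/*
 * Look up a metapage, creating it if necessary.  With 'absolute' the
 * lookup goes through the block device's direct mapping rather than
 * the file's; with 'new' the contents are not read from disk and the
 * buffer is zeroed instead.  Callers normally reach this through the
 * read_metapage()/get_metapage() wrappers in jfs_metapage.h.  Returns
 * with the metapage locked and its reference count raised, or NULL on
 * failure.
 */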
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
				unsigned int size, int absolute,
				unsigned long new)
{
	int l2BlocksPerPage;
	int l2bsize;
	struct address_space *mapping;
	struct metapage *mp = NULL;
	struct page *page;
	unsigned long page_index;
	unsigned long page_offset;

	jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
		 inode->i_ino, lblock, absolute);

	l2bsize = inode->i_blkbits;
	l2BlocksPerPage = PAGE_SHIFT - l2bsize;
	page_index = lblock >> l2BlocksPerPage;
	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
	if ((page_offset + size) > PAGE_SIZE) {
		jfs_err("MetaData crosses page boundary!!");
		jfs_err("lblock = %lx, size = %d", lblock, size);
		dump_stack();
		return NULL;
	}
	if (absolute)
		mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
	else {
		/*
		 * If an nfs client tries to read an inode that is larger
		 * than any existing inodes, we may try to read past the
		 * end of the inode map
		 */
		if ((lblock << inode->i_blkbits) >= inode->i_size)
			return NULL;
		mapping = inode->i_mapping;
	}

	if (new && (PSIZE == PAGE_SIZE)) {
		page = grab_cache_page(mapping, page_index);
		if (!page) {
			jfs_err("grab_cache_page failed!");
			return NULL;
		}
		SetPageUptodate(page);
	} else {
		page = read_mapping_page(mapping, page_index, NULL);
		if (IS_ERR(page) || !PageUptodate(page)) {
			jfs_err("read_mapping_page failed!");
			return NULL;
		}
		lock_page(page);
	}

	mp = page_to_mp(page, page_offset);
	if (mp) {
		if (mp->logical_size != size) {
			jfs_error(inode->i_sb,
				  "get_mp->logical_size != size\n");
			jfs_err("logical_size = %d, size = %d",
				mp->logical_size, size);
			dump_stack();
			goto unlock;
		}
		mp->count++;
		lock_metapage(mp);
		if (test_bit(META_discard, &mp->flag)) {
			if (!new) {
				jfs_error(inode->i_sb,
					  "using a discarded metapage\n");
				discard_metapage(mp);
				goto unlock;
			}
			clear_bit(META_discard, &mp->flag);
		}
	} else {
		INCREMENT(mpStat.pagealloc);
		mp = alloc_metapage(GFP_NOFS);
		if (!mp)
			goto unlock;
		mp->page = page;
		mp->sb = inode->i_sb;
		mp->flag = 0;
		mp->xflag = COMMIT_PAGE;
		mp->count = 1;
		mp->nohomeok = 0;
		mp->logical_size = size;
		mp->data = page_address(page) + page_offset;
		mp->index = lblock;
		if (unlikely(insert_metapage(page, mp))) {
			free_metapage(mp);
			goto unlock;
		}
		lock_metapage(mp);
	}

	if (new) {
		jfs_info("zeroing mp = 0x%p", mp);
		memset(mp->data, 0, PSIZE);
	}

	unlock_page(page);
	jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
	return mp;

unlock:
	unlock_page(page);
	return NULL;
}

void grab_metapage(struct metapage * mp)
{
	jfs_info("grab_metapage: mp = 0x%p", mp);
	get_page(mp->page);
	lock_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
}

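/*
 * Force a metapage to disk immediately, bypassing the nohomeok hold by
 * setting META_forcewrite for the duration of the write.
 */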
void force_metapage(struct metapage *mp)
{
	struct page *page = mp->page;
	jfs_info("force_metapage: mp = 0x%p", mp);
	set_bit(META_forcewrite, &mp->flag);
	clear_bit(META_sync, &mp->flag);
	get_page(page);
	lock_page(page);
	set_page_dirty(page);
	if (write_one_page(page))
		jfs_error(mp->sb, "write_one_page() failed\n");
	clear_bit(META_forcewrite, &mp->flag);
	put_page(page);
}

void hold_metapage(struct metapage *mp)
{
	lock_page(mp->page);
}

void put_metapage(struct metapage *mp)
{
	if (mp->count || mp->nohomeok) {
		/* Someone else will release this */
		unlock_page(mp->page);
		return;
	}
	get_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
	release_metapage(mp);
}

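/*
 * Drop a reference to a metapage.  When the last reference goes away,
 * write the page out if META_sync is set, detach the metapage from the
 * log sync list if it is clean, and try to free it.
 */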
void release_metapage(struct metapage * mp)
{
	struct page *page = mp->page;
	jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

	BUG_ON(!page);

	lock_page(page);
	unlock_metapage(mp);

	assert(mp->count);
	if (--mp->count || mp->nohomeok) {
		unlock_page(page);
		put_page(page);
		return;
	}

	if (test_bit(META_dirty, &mp->flag)) {
		set_page_dirty(page);
		if (test_bit(META_sync, &mp->flag)) {
			clear_bit(META_sync, &mp->flag);
			if (write_one_page(page))
				jfs_error(mp->sb, "write_one_page() failed\n");
			lock_page(page); /* write_one_page unlocks the page */
		}
	} else if (mp->lsn)	/* discard_metapage doesn't remove it */
		remove_from_logsync(mp);

	/* Try to keep metapages from using up too much memory */
	drop_metapage(page, mp);

	unlock_page(page);
	put_page(page);
}

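/*
 * Invalidate the metapages covering a range of disk blocks that is
 * being freed or reused, so that stale metadata is never written back.
 */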
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
	sector_t lblock;
	int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
	int BlocksPerPage = 1 << l2BlocksPerPage;
	/* All callers are interested in block device's mapping */
	struct address_space *mapping =
		JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
	struct metapage *mp;
	struct page *page;
	unsigned int offset;

	/*
	 * Mark metapages to discard.  They will eventually be
	 * released, but should not be written.
	 */
	for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
	     lblock += BlocksPerPage) {
		page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
		if (!page)
			continue;
		for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
			mp = page_to_mp(page, offset);
			if (!mp)
				continue;
			if (mp->index < addr)
				continue;
			if (mp->index >= addr + len)
				break;

			clear_bit(META_dirty, &mp->flag);
			set_bit(META_discard, &mp->flag);
			if (mp->lsn)
				remove_from_logsync(mp);
		}
		unlock_page(page);
		put_page(page);
	}
}

#ifdef CONFIG_JFS_STATISTICS
int jfs_mpstat_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m,
		       "JFS Metapage statistics\n"
		       "=======================\n"
		       "page allocations = %d\n"
		       "page frees = %d\n"
		       "lock waits = %d\n",
		       mpStat.pagealloc,
		       mpStat.pagefree,
		       mpStat.lockwait);
	return 0;
}
#endif