linux/fs/jfs/jfs_metapage.c
/*
 *   Copyright (C) International Business Machines Corp., 2000-2005
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 *   This program is free software;  you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program;  if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/seq_file.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

#ifdef CONFIG_JFS_STATISTICS
static struct {
        uint    pagealloc;      /* # of page allocations */
        uint    pagefree;       /* # of page frees */
        uint    lockwait;       /* # of sleeping lock_metapage() calls */
} mpStat;
#endif

#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)

static inline void unlock_metapage(struct metapage *mp)
{
        clear_bit_unlock(META_locked, &mp->flag);
        wake_up(&mp->wait);
}

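/*
 * Slow path for lock_metapage(): sleep until META_locked is released.
 * The page lock is dropped while sleeping, since the current holder
 * needs it to release the metapage (see release_metapage()), and is
 * retaken before retesting the bit.
 */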
static inline void __lock_metapage(struct metapage *mp)
{
        DECLARE_WAITQUEUE(wait, current);
        INCREMENT(mpStat.lockwait);
        add_wait_queue_exclusive(&mp->wait, &wait);
        do {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (metapage_locked(mp)) {
                        unlock_page(mp->page);
                        io_schedule();
                        lock_page(mp->page);
                }
        } while (trylock_metapage(mp));
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&mp->wait, &wait);
}

/*
 * Must have mp->page locked
 */
static inline void lock_metapage(struct metapage *mp)
{
        if (trylock_metapage(mp))
                __lock_metapage(mp);
}

#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;

#define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE)

#if MPS_PER_PAGE > 1

struct meta_anchor {
        int mp_count;
        atomic_t io_count;
        struct metapage *mp[MPS_PER_PAGE];
};
#define mp_anchor(page) ((struct meta_anchor *)page_private(page))

static inline struct metapage *page_to_mp(struct page *page, int offset)
{
        if (!PagePrivate(page))
                return NULL;
        return mp_anchor(page)->mp[offset >> L2PSIZE];
}

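/*
 * Attach mp to the page's meta_anchor, allocating the anchor on first
 * use.  Passing mp == NULL just guarantees the anchor exists; the read
 * path uses this so io_count is available before any metapage is.
 */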
static inline int insert_metapage(struct page *page, struct metapage *mp)
{
        struct meta_anchor *a;
        int index;
        int l2mp_blocks;        /* log2 blocks per metapage */

        if (PagePrivate(page))
                a = mp_anchor(page);
        else {
                a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
                if (!a)
                        return -ENOMEM;
                set_page_private(page, (unsigned long)a);
                SetPagePrivate(page);
                kmap(page);
        }

        if (mp) {
                l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
                index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
                a->mp_count++;
                a->mp[index] = mp;
        }

        return 0;
}

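/*
 * Detach mp from the anchor; when the last metapage goes, free the
 * anchor itself and drop the kmap taken in insert_metapage().
 */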
static inline void remove_metapage(struct page *page, struct metapage *mp)
{
        struct meta_anchor *a = mp_anchor(page);
        int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
        int index;

        index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

        BUG_ON(a->mp[index] != mp);

        a->mp[index] = NULL;
        if (--a->mp_count == 0) {
                kfree(a);
                set_page_private(page, 0);
                ClearPagePrivate(page);
                kunmap(page);
        }
}

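/*
 * io_count tracks bios in flight against the page; the handler
 * (last_read_complete or last_write_complete) runs only when the last
 * bio for the page finishes.
 */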
static inline void inc_io(struct page *page)
{
        atomic_inc(&mp_anchor(page)->io_count);
}

static inline void dec_io(struct page *page, void (*handler) (struct page *))
{
        if (atomic_dec_and_test(&mp_anchor(page)->io_count))
                handler(page);
}

#else
static inline struct metapage *page_to_mp(struct page *page, int offset)
{
        return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
        if (mp) {
                set_page_private(page, (unsigned long)mp);
                SetPagePrivate(page);
                kmap(page);
        }
        return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
        set_page_private(page, 0);
        ClearPagePrivate(page);
        kunmap(page);
}

#define inc_io(page) do {} while(0)
#define dec_io(page, handler) handler(page)

#endif

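/*
 * Slab constructor: runs when an object is first added to the cache,
 * not on every mempool_alloc(), so free_metapage() must return objects
 * to this state before giving them back to the pool.
 */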
static void init_once(void *foo)
{
        struct metapage *mp = (struct metapage *)foo;

        mp->lid = 0;
        mp->lsn = 0;
        mp->flag = 0;
        mp->data = NULL;
        mp->clsn = 0;
        mp->log = NULL;
        set_bit(META_free, &mp->flag);
        init_waitqueue_head(&mp->wait);
}

static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
        return mempool_alloc(metapage_mempool, gfp_mask);
}

static inline void free_metapage(struct metapage *mp)
{
        mp->flag = 0;
        set_bit(META_free, &mp->flag);

        mempool_free(mp, metapage_mempool);
}

int __init metapage_init(void)
{
        /*
         * Allocate the metapage structures
         */
        metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
                                           0, 0, init_once);
        if (metapage_cache == NULL)
                return -ENOMEM;

        metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
                                                    metapage_cache);

        if (metapage_mempool == NULL) {
                kmem_cache_destroy(metapage_cache);
                return -ENOMEM;
        }

        return 0;
}

void metapage_exit(void)
{
        mempool_destroy(metapage_mempool);
        kmem_cache_destroy(metapage_cache);
}

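/*
 * Free mp unless it is still referenced, pinned by the journal, dirty,
 * or under I/O.  Caller must hold the page lock.
 */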
static inline void drop_metapage(struct page *page, struct metapage *mp)
{
        if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
            test_bit(META_io, &mp->flag))
                return;
        remove_metapage(page, mp);
        INCREMENT(mpStat.pagefree);
        free_metapage(mp);
}

/*
 * Metapage address space operations
 */

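/*
 * Map lblock in a metadata inode to an on-disk block via xtLookup(),
 * clamping *len to the end of the file.  Returns 0 for an unmapped
 * block.  An inode number of zero identifies the direct (block device)
 * inode, whose blocks map 1:1 and need no lookup.
 */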
static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
                                    int *len)
{
        int rc = 0;
        int xflag;
        s64 xaddr;
        sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
                               inode->i_blkbits;

        if (lblock >= file_blocks)
                return 0;
        if (lblock + *len > file_blocks)
                *len = file_blocks - lblock;

        if (inode->i_ino) {
                rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
                if ((rc == 0) && *len)
                        lblock = (sector_t)xaddr;
                else
                        lblock = 0;
        } /* else no mapping */

        return lblock;
}

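/*
 * Read completion: the page stays locked for the duration of the read,
 * so marking it uptodate and unlocking it here is what wakes readers.
 */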
static void last_read_complete(struct page *page)
{
        if (!PageError(page))
                SetPageUptodate(page);
        unlock_page(page);
}

static void metapage_read_end_io(struct bio *bio, int err)
{
        struct page *page = bio->bi_private;

        if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
                printk(KERN_ERR "metapage_read_end_io: I/O error\n");
                SetPageError(page);
        }

        dec_io(page, last_read_complete);
        bio_put(bio);
}

static void remove_from_logsync(struct metapage *mp)
{
        struct jfs_log *log = mp->log;
        unsigned long flags;
        /*
         * This can race.  Recheck that log hasn't been set to null, and after
         * acquiring logsync lock, recheck lsn
         */
        if (!log)
                return;

        LOGSYNC_LOCK(log, flags);
        if (mp->lsn) {
                mp->log = NULL;
                mp->lsn = 0;
                mp->clsn = 0;
                log->count--;
                list_del(&mp->synclist);
        }
        LOGSYNC_UNLOCK(log, flags);
}

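/*
 * Write completion: clear META_io on every metapage that was under
 * write-out and drop each one from the log's sync list before ending
 * page writeback.
 */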
static void last_write_complete(struct page *page)
{
        struct metapage *mp;
        unsigned int offset;

        for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
                mp = page_to_mp(page, offset);
                if (mp && test_bit(META_io, &mp->flag)) {
                        if (mp->lsn)
                                remove_from_logsync(mp);
                        clear_bit(META_io, &mp->flag);
                }
                /*
                 * I'd like to call drop_metapage here, but I don't think it's
                 * safe unless I have the page locked
                 */
        }
        end_page_writeback(page);
}

static void metapage_write_end_io(struct bio *bio, int err)
{
        struct page *page = bio->bi_private;

        BUG_ON(!PagePrivate(page));

        if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
                printk(KERN_ERR "metapage_write_end_io: I/O error\n");
                SetPageError(page);
        }
        dec_io(page, last_write_complete);
        bio_put(bio);
}

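/*
 * Write back every dirty metapage in the page.  Consecutive dirty
 * metapages that are also contiguous on disk are coalesced into a
 * single bio; a discontiguity submits the pending bio and starts a new
 * one.  Metapages pinned by the journal (nohomeok) are redirtied and
 * skipped, with a journal flush kicked off so they don't stay pinned
 * indefinitely.
 */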
static int metapage_writepage(struct page *page, struct writeback_control *wbc)
{
        struct bio *bio = NULL;
        int block_offset;       /* block offset of mp within page */
        struct inode *inode = page->mapping->host;
        int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
        int len;
        int xlen;
        struct metapage *mp;
        int redirty = 0;
        sector_t lblock;
        int nr_underway = 0;
        sector_t pblock;
        sector_t next_block = 0;
        sector_t page_start;
        unsigned long bio_bytes = 0;
        unsigned long bio_offset = 0;
        int offset;
        int bad_blocks = 0;

        page_start = (sector_t)page->index <<
                     (PAGE_CACHE_SHIFT - inode->i_blkbits);
        BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));
        set_page_writeback(page);

        for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
                mp = page_to_mp(page, offset);

                if (!mp || !test_bit(META_dirty, &mp->flag))
                        continue;

                if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
                        redirty = 1;
                        /*
                         * Make sure this page isn't blocked indefinitely.
                         * If the journal isn't undergoing I/O, push it
                         */
                        if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
                                jfs_flush_journal(mp->log, 0);
                        continue;
                }

                clear_bit(META_dirty, &mp->flag);
                set_bit(META_io, &mp->flag);
                block_offset = offset >> inode->i_blkbits;
                lblock = page_start + block_offset;
                if (bio) {
                        if (xlen && lblock == next_block) {
                                /* Contiguous, in memory & on disk */
                                len = min(xlen, blocks_per_mp);
                                xlen -= len;
                                bio_bytes += len << inode->i_blkbits;
                                continue;
                        }
                        /* Not contiguous */
                        if (bio_add_page(bio, page, bio_bytes, bio_offset) <
                            bio_bytes)
                                goto add_failed;
                        /*
                         * Increment counter before submitting i/o to keep
                         * count from hitting zero before we're through
                         */
                        inc_io(page);
                        if (!bio->bi_iter.bi_size)
                                goto dump_bio;
                        submit_bio(WRITE, bio);
                        nr_underway++;
                        bio = NULL;
                } else
                        inc_io(page);
                xlen = (PAGE_CACHE_SIZE - offset) >> inode->i_blkbits;
                pblock = metapage_get_blocks(inode, lblock, &xlen);
                if (!pblock) {
                        printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
                        /*
                         * We already called inc_io(), but can't cancel it
                         * with dec_io() until we're done with the page
                         */
                        bad_blocks++;
                        continue;
                }
                len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);

                bio = bio_alloc(GFP_NOFS, 1);
                bio->bi_bdev = inode->i_sb->s_bdev;
                bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
                bio->bi_end_io = metapage_write_end_io;
                bio->bi_private = page;

                /* Don't call bio_add_page yet, we may add to this vec */
                bio_offset = offset;
                bio_bytes = len << inode->i_blkbits;

                xlen -= len;
                next_block = lblock + len;
        }
        if (bio) {
                if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
                        goto add_failed;
                if (!bio->bi_iter.bi_size)
                        goto dump_bio;

                submit_bio(WRITE, bio);
                nr_underway++;
        }
        if (redirty)
                redirty_page_for_writepage(wbc, page);

        unlock_page(page);

        if (bad_blocks)
                goto err_out;

        if (nr_underway == 0)
                end_page_writeback(page);

        return 0;
add_failed:
        /* We should never reach here, since we're only adding one vec */
        printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
        goto skip;
dump_bio:
        print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
                       4, bio, sizeof(*bio), 0);
skip:
        bio_put(bio);
        unlock_page(page);
        dec_io(page, last_write_complete);
err_out:
        while (bad_blocks--)
                dec_io(page, last_write_complete);
        return -EIO;
}

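/*
 * Read the page with one bio per disk-contiguous extent, skipping over
 * unmapped blocks.  insert_metapage(page, NULL) makes sure the
 * meta_anchor exists so inc_io()/dec_io() can track in-flight bios
 * when a page holds several metapages.
 */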
static int metapage_readpage(struct file *fp, struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct bio *bio = NULL;
        int block_offset;
        int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
        sector_t page_start;    /* address of page in fs blocks */
        sector_t pblock;
        int xlen;
        unsigned int len;
        int offset;

        BUG_ON(!PageLocked(page));
        page_start = (sector_t)page->index <<
                     (PAGE_CACHE_SHIFT - inode->i_blkbits);

        block_offset = 0;
        while (block_offset < blocks_per_page) {
                xlen = blocks_per_page - block_offset;
                pblock = metapage_get_blocks(inode, page_start + block_offset,
                                             &xlen);
                if (pblock) {
                        if (!PagePrivate(page))
                                insert_metapage(page, NULL);
                        inc_io(page);
                        if (bio)
                                submit_bio(READ, bio);

                        bio = bio_alloc(GFP_NOFS, 1);
                        bio->bi_bdev = inode->i_sb->s_bdev;
                        bio->bi_iter.bi_sector =
                                pblock << (inode->i_blkbits - 9);
                        bio->bi_end_io = metapage_read_end_io;
                        bio->bi_private = page;
                        len = xlen << inode->i_blkbits;
                        offset = block_offset << inode->i_blkbits;
                        if (bio_add_page(bio, page, len, offset) < len)
                                goto add_failed;
                        block_offset += xlen;
                } else
                        block_offset++;
        }
        if (bio)
                submit_bio(READ, bio);
        else
                unlock_page(page);

        return 0;

add_failed:
        printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
        bio_put(bio);
        dec_io(page, last_read_complete);
        return -EIO;
}

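/*
 * Called by the VM to reclaim the page.  Returns 0 (don't release) if
 * any metapage in the page is still referenced, journal-pinned, or
 * dirty; otherwise frees every remaining metapage.
 */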
static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
{
        struct metapage *mp;
        int ret = 1;
        int offset;

        for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
                mp = page_to_mp(page, offset);

                if (!mp)
                        continue;

                jfs_info("metapage_releasepage: mp = 0x%p", mp);
                if (mp->count || mp->nohomeok ||
                    test_bit(META_dirty, &mp->flag)) {
                        jfs_info("count = %ld, nohomeok = %d", mp->count,
                                 mp->nohomeok);
                        ret = 0;
                        continue;
                }
                if (mp->lsn)
                        remove_from_logsync(mp);
                remove_metapage(page, mp);
                INCREMENT(mpStat.pagefree);
                free_metapage(mp);
        }
        return ret;
}

static void metapage_invalidatepage(struct page *page, unsigned int offset,
                                    unsigned int length)
{
        BUG_ON(offset || length < PAGE_CACHE_SIZE);

        BUG_ON(PageWriteback(page));

        metapage_releasepage(page, 0);
}

const struct address_space_operations jfs_metapage_aops = {
        .readpage       = metapage_readpage,
        .writepage      = metapage_writepage,
        .releasepage    = metapage_releasepage,
        .invalidatepage = metapage_invalidatepage,
        .set_page_dirty = __set_page_dirty_nobuffers,
};

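/*
 * Look up (or, when new is set, create without reading) the metapage
 * covering lblock.  Returns with the metapage lock held and its
 * reference count bumped; callers normally reach this through the
 * read_metapage()/get_metapage() wrappers in jfs_metapage.h.
 * 'absolute' selects the block device's direct_inode mapping instead
 * of the file's own mapping.
 */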
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
                                unsigned int size, int absolute,
                                unsigned long new)
{
        int l2BlocksPerPage;
        int l2bsize;
        struct address_space *mapping;
        struct metapage *mp = NULL;
        struct page *page;
        unsigned long page_index;
        unsigned long page_offset;

        jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
                 inode->i_ino, lblock, absolute);

        l2bsize = inode->i_blkbits;
        l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize;
        page_index = lblock >> l2BlocksPerPage;
        page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
        if ((page_offset + size) > PAGE_CACHE_SIZE) {
                jfs_err("MetaData crosses page boundary!!");
                jfs_err("lblock = %lx, size  = %d", lblock, size);
                dump_stack();
                return NULL;
        }
        if (absolute)
                mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
        else {
                /*
                 * If an nfs client tries to read an inode that is larger
                 * than any existing inodes, we may try to read past the
                 * end of the inode map
                 */
                if ((lblock << inode->i_blkbits) >= inode->i_size)
                        return NULL;
                mapping = inode->i_mapping;
        }

        if (new && (PSIZE == PAGE_CACHE_SIZE)) {
                page = grab_cache_page(mapping, page_index);
                if (!page) {
                        jfs_err("grab_cache_page failed!");
                        return NULL;
                }
                SetPageUptodate(page);
        } else {
                page = read_mapping_page(mapping, page_index, NULL);
                if (IS_ERR(page) || !PageUptodate(page)) {
                        jfs_err("read_mapping_page failed!");
                        return NULL;
                }
                lock_page(page);
        }

        mp = page_to_mp(page, page_offset);
        if (mp) {
                if (mp->logical_size != size) {
                        jfs_error(inode->i_sb,
                                  "get_mp->logical_size != size\n");
                        jfs_err("logical_size = %d, size = %d",
                                mp->logical_size, size);
                        dump_stack();
                        goto unlock;
                }
                mp->count++;
                lock_metapage(mp);
                if (test_bit(META_discard, &mp->flag)) {
                        if (!new) {
                                jfs_error(inode->i_sb,
                                          "using a discarded metapage\n");
                                discard_metapage(mp);
                                goto unlock;
                        }
                        clear_bit(META_discard, &mp->flag);
                }
        } else {
                INCREMENT(mpStat.pagealloc);
                mp = alloc_metapage(GFP_NOFS);
                mp->page = page;
                mp->flag = 0;
                mp->xflag = COMMIT_PAGE;
                mp->count = 1;
                mp->nohomeok = 0;
                mp->logical_size = size;
                mp->data = page_address(page) + page_offset;
                mp->index = lblock;
                if (unlikely(insert_metapage(page, mp))) {
                        free_metapage(mp);
                        goto unlock;
                }
                lock_metapage(mp);
        }

        if (new) {
                jfs_info("zeroing mp = 0x%p", mp);
                memset(mp->data, 0, PSIZE);
        }

        unlock_page(page);
        jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
        return mp;

unlock:
        unlock_page(page);
        return NULL;
}

void grab_metapage(struct metapage * mp)
{
        jfs_info("grab_metapage: mp = 0x%p", mp);
        page_cache_get(mp->page);
        lock_page(mp->page);
        mp->count++;
        lock_metapage(mp);
        unlock_page(mp->page);
}

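/*
 * Write the metapage out immediately, even if the journal is holding
 * it: META_forcewrite makes metapage_writepage() override the
 * nohomeok check for this metapage.
 */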
void force_metapage(struct metapage *mp)
{
        struct page *page = mp->page;
        jfs_info("force_metapage: mp = 0x%p", mp);
        set_bit(META_forcewrite, &mp->flag);
        clear_bit(META_sync, &mp->flag);
        page_cache_get(page);
        lock_page(page);
        set_page_dirty(page);
        write_one_page(page, 1);
        clear_bit(META_forcewrite, &mp->flag);
        page_cache_release(page);
}

void hold_metapage(struct metapage *mp)
{
        lock_page(mp->page);
}

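/*
 * Counterpart to hold_metapage(): drop the page lock taken there.  If
 * nobody else holds a reference, take one ourselves so the final
 * cleanup funnels through release_metapage().
 */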
void put_metapage(struct metapage *mp)
{
        if (mp->count || mp->nohomeok) {
                /* Someone else will release this */
                unlock_page(mp->page);
                return;
        }
        page_cache_get(mp->page);
        mp->count++;
        lock_metapage(mp);
        unlock_page(mp->page);
        release_metapage(mp);
}

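/*
 * Drop a reference to the metapage.  On the last put, propagate
 * META_dirty to the page (writing it synchronously if META_sync is
 * set), detach a clean metapage from the logsync list, and try to
 * free it.
 */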
void release_metapage(struct metapage * mp)
{
        struct page *page = mp->page;
        jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

        BUG_ON(!page);

        lock_page(page);
        unlock_metapage(mp);

        assert(mp->count);
        if (--mp->count || mp->nohomeok) {
                unlock_page(page);
                page_cache_release(page);
                return;
        }

        if (test_bit(META_dirty, &mp->flag)) {
                set_page_dirty(page);
                if (test_bit(META_sync, &mp->flag)) {
                        clear_bit(META_sync, &mp->flag);
                        write_one_page(page, 1);
                        lock_page(page); /* write_one_page unlocks the page */
                }
        } else if (mp->lsn)     /* discard_metapage doesn't remove it */
                remove_from_logsync(mp);

        /* Try to keep metapages from using up too much memory */
        drop_metapage(page, mp);

        unlock_page(page);
        page_cache_release(page);
}

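/*
 * Walk every page of the block device mapping that overlaps the block
 * range [addr, addr + len) and discard the metapages within it.
 */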
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
        sector_t lblock;
        int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_blkbits;
        int BlocksPerPage = 1 << l2BlocksPerPage;
        /* All callers are interested in block device's mapping */
        struct address_space *mapping =
                JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
        struct metapage *mp;
        struct page *page;
        unsigned int offset;

        /*
         * Mark metapages to discard.  They will eventually be
         * released, but should not be written.
         */
        for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
             lblock += BlocksPerPage) {
                page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
                if (!page)
                        continue;
                for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
                        mp = page_to_mp(page, offset);
                        if (!mp)
                                continue;
                        if (mp->index < addr)
                                continue;
                        if (mp->index >= addr + len)
                                break;

                        clear_bit(META_dirty, &mp->flag);
                        set_bit(META_discard, &mp->flag);
                        if (mp->lsn)
                                remove_from_logsync(mp);
                }
                unlock_page(page);
                page_cache_release(page);
        }
}

#ifdef CONFIG_JFS_STATISTICS
static int jfs_mpstat_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m,
                       "JFS Metapage statistics\n"
                       "=======================\n"
                       "page allocations = %d\n"
                       "page frees = %d\n"
                       "lock waits = %d\n",
                       mpStat.pagealloc,
                       mpStat.pagefree,
                       mpStat.lockwait);
        return 0;
}

static int jfs_mpstat_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, jfs_mpstat_proc_show, NULL);
}

const struct file_operations jfs_mpstat_proc_fops = {
        .owner          = THIS_MODULE,
        .open           = jfs_mpstat_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
#endif