linux/fs/jfs/jfs_metapage.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) International Business Machines Corp., 2000-2005
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/seq_file.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

#ifdef CONFIG_JFS_STATISTICS
static struct {
        uint    pagealloc;      /* # of page allocations */
        uint    pagefree;       /* # of page frees */
        uint    lockwait;       /* # of sleeping lock_metapage() calls */
} mpStat;
#endif

#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)

static inline void unlock_metapage(struct metapage *mp)
{
        clear_bit_unlock(META_locked, &mp->flag);
        wake_up(&mp->wait);
}

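/*
 * Sleep until the metapage lock is released.  mp->page must be locked
 * on entry.  The page lock is dropped while we sleep, since the
 * current lock holder may need it to finish with the metapage, and is
 * re-taken before the lock is retried.
 */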
static inline void __lock_metapage(struct metapage *mp)
{
        DECLARE_WAITQUEUE(wait, current);
        INCREMENT(mpStat.lockwait);
        add_wait_queue_exclusive(&mp->wait, &wait);
        do {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (metapage_locked(mp)) {
                        unlock_page(mp->page);
                        io_schedule();
                        lock_page(mp->page);
                }
        } while (trylock_metapage(mp));
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&mp->wait, &wait);
}

/*
 * Must have mp->page locked
 */
static inline void lock_metapage(struct metapage *mp)
{
        if (trylock_metapage(mp))
                __lock_metapage(mp);
}

#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;

#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)

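/*
 * A JFS metapage covers PSIZE (4K) bytes.  When PAGE_SIZE is also 4K,
 * each page holds exactly one metapage and page_private() points
 * straight at it (the #else branch below).  With larger pages,
 * MPS_PER_PAGE metapages share a page and page_private() points at a
 * meta_anchor that tracks all of them, plus the number of i/os in
 * flight against the page.
 */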
#if MPS_PER_PAGE > 1

struct meta_anchor {
        int mp_count;
        atomic_t io_count;
        struct metapage *mp[MPS_PER_PAGE];
};
#define mp_anchor(page) ((struct meta_anchor *)page_private(page))

static inline struct metapage *page_to_mp(struct page *page, int offset)
{
        if (!PagePrivate(page))
                return NULL;
        return mp_anchor(page)->mp[offset >> L2PSIZE];
}

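/*
 * mp->index is the metapage's address in filesystem blocks, so with
 * l2mp_blocks = log2(blocks per metapage), the anchor slot is
 * (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1).  For example, with
 * 4K blocks and 64K pages, l2mp_blocks is 0 and sixteen consecutive
 * blocks land in slots 0..15.
 */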
static inline int insert_metapage(struct page *page, struct metapage *mp)
{
        struct meta_anchor *a;
        int index;
        int l2mp_blocks;        /* log2 blocks per metapage */

        if (PagePrivate(page))
                a = mp_anchor(page);
        else {
                a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
                if (!a)
                        return -ENOMEM;
                set_page_private(page, (unsigned long)a);
                SetPagePrivate(page);
                kmap(page);
        }

        if (mp) {
                l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
                index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
                a->mp_count++;
                a->mp[index] = mp;
        }

        return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
        struct meta_anchor *a = mp_anchor(page);
        int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
        int index;

        index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

        BUG_ON(a->mp[index] != mp);

        a->mp[index] = NULL;
        if (--a->mp_count == 0) {
                kfree(a);
                set_page_private(page, 0);
                ClearPagePrivate(page);
                kunmap(page);
        }
}

static inline void inc_io(struct page *page)
{
        atomic_inc(&mp_anchor(page)->io_count);
}

static inline void dec_io(struct page *page, void (*handler) (struct page *))
{
        if (atomic_dec_and_test(&mp_anchor(page)->io_count))
                handler(page);
}

#else
static inline struct metapage *page_to_mp(struct page *page, int offset)
{
        return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
        if (mp) {
                set_page_private(page, (unsigned long)mp);
                SetPagePrivate(page);
                kmap(page);
        }
        return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
        set_page_private(page, 0);
        ClearPagePrivate(page);
        kunmap(page);
}

#define inc_io(page) do {} while(0)
#define dec_io(page, handler) handler(page)

#endif

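/*
 * Metapage structures come from a kmem_cache fronted by a mempool, so
 * once metapage_init() has preallocated METAPOOL_MIN_PAGES objects,
 * an allocation can always make progress eventually, even when
 * GFP_NOFS reclaim cannot free memory.
 */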
static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
        struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask);

        if (mp) {
                mp->lid = 0;
                mp->lsn = 0;
                mp->data = NULL;
                mp->clsn = 0;
                mp->log = NULL;
                init_waitqueue_head(&mp->wait);
        }
        return mp;
}

static inline void free_metapage(struct metapage *mp)
{
        mempool_free(mp, metapage_mempool);
}

int __init metapage_init(void)
{
        /*
         * Allocate the metapage structures
         */
        metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
                                           0, 0, NULL);
        if (metapage_cache == NULL)
                return -ENOMEM;

        metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
                                                    metapage_cache);

        if (metapage_mempool == NULL) {
                kmem_cache_destroy(metapage_cache);
                return -ENOMEM;
        }

        return 0;
}

void metapage_exit(void)
{
        mempool_destroy(metapage_mempool);
        kmem_cache_destroy(metapage_cache);
}

static inline void drop_metapage(struct page *page, struct metapage *mp)
{
        if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
            test_bit(META_io, &mp->flag))
                return;
        remove_metapage(page, mp);
        INCREMENT(mpStat.pagefree);
        free_metapage(mp);
}

/*
 * Metapage address space operations
 */

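/*
 * Map up to *len filesystem blocks starting at lblock to a physical
 * block number, trimming *len at i_size and at the extent boundary
 * returned by xtLookup().  The superblock's direct_inode (i_ino == 0)
 * maps the block device one-to-one, so for it lblock is returned
 * untranslated.  Returns 0 for an unmapped block.
 */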
static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
                                    int *len)
{
        int rc = 0;
        int xflag;
        s64 xaddr;
        sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
                               inode->i_blkbits;

        if (lblock >= file_blocks)
                return 0;
        if (lblock + *len > file_blocks)
                *len = file_blocks - lblock;

        if (inode->i_ino) {
                rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
                if ((rc == 0) && *len)
                        lblock = (sector_t)xaddr;
                else
                        lblock = 0;
        } /* else no mapping */

        return lblock;
}

static void last_read_complete(struct page *page)
{
        if (!PageError(page))
                SetPageUptodate(page);
        unlock_page(page);
}

static void metapage_read_end_io(struct bio *bio)
{
        struct page *page = bio->bi_private;

        if (bio->bi_status) {
                printk(KERN_ERR "metapage_read_end_io: I/O error\n");
                SetPageError(page);
        }

        dec_io(page, last_read_complete);
        bio_put(bio);
}

static void remove_from_logsync(struct metapage *mp)
{
        struct jfs_log *log = mp->log;
        unsigned long flags;
/*
 * This can race.  Recheck that log hasn't been set to null, and after
 * acquiring logsync lock, recheck lsn
 */
        if (!log)
                return;

        LOGSYNC_LOCK(log, flags);
        if (mp->lsn) {
                mp->log = NULL;
                mp->lsn = 0;
                mp->clsn = 0;
                log->count--;
                list_del(&mp->synclist);
        }
        LOGSYNC_UNLOCK(log, flags);
}

static void last_write_complete(struct page *page)
{
        struct metapage *mp;
        unsigned int offset;

        for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
                mp = page_to_mp(page, offset);
                if (mp && test_bit(META_io, &mp->flag)) {
                        if (mp->lsn)
                                remove_from_logsync(mp);
                        clear_bit(META_io, &mp->flag);
                }
                /*
                 * I'd like to call drop_metapage here, but I don't think it's
                 * safe unless I have the page locked
                 */
        }
        end_page_writeback(page);
}

static void metapage_write_end_io(struct bio *bio)
{
        struct page *page = bio->bi_private;

        BUG_ON(!PagePrivate(page));

        if (bio->bi_status) {
                printk(KERN_ERR "metapage_write_end_io: I/O error\n");
                SetPageError(page);
        }
        dec_io(page, last_write_complete);
        bio_put(bio);
}

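/*
 * Write back the dirty metapages in a page.  Runs of metapages that
 * are contiguous both in the page and on disk are coalesced into a
 * single bio; a new bio is started whenever the run breaks.  Metapages
 * pinned by the journal (nohomeok) are skipped and the page is
 * redirtied instead, after nudging the log so the pin is eventually
 * released.
 */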
static int metapage_writepage(struct page *page, struct writeback_control *wbc)
{
        struct bio *bio = NULL;
        int block_offset;       /* block offset of mp within page */
        struct inode *inode = page->mapping->host;
        int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
        int len;
        int xlen;
        struct metapage *mp;
        int redirty = 0;
        sector_t lblock;
        int nr_underway = 0;
        sector_t pblock;
        sector_t next_block = 0;
        sector_t page_start;
        unsigned long bio_bytes = 0;
        unsigned long bio_offset = 0;
        int offset;
        int bad_blocks = 0;

        page_start = (sector_t)page->index <<
                     (PAGE_SHIFT - inode->i_blkbits);
        BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));
        set_page_writeback(page);

        for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
                mp = page_to_mp(page, offset);

                if (!mp || !test_bit(META_dirty, &mp->flag))
                        continue;

                if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
                        redirty = 1;
                        /*
                         * Make sure this page isn't blocked indefinitely.
                         * If the journal isn't undergoing I/O, push it
                         */
                        if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
                                jfs_flush_journal(mp->log, 0);
                        continue;
                }

                clear_bit(META_dirty, &mp->flag);
                set_bit(META_io, &mp->flag);
                block_offset = offset >> inode->i_blkbits;
                lblock = page_start + block_offset;
                if (bio) {
                        if (xlen && lblock == next_block) {
                                /* Contiguous, in memory & on disk */
                                len = min(xlen, blocks_per_mp);
                                xlen -= len;
                                bio_bytes += len << inode->i_blkbits;
                                continue;
                        }
                        /* Not contiguous */
                        if (bio_add_page(bio, page, bio_bytes, bio_offset) <
                            bio_bytes)
                                goto add_failed;
                        /*
                         * Increment counter before submitting i/o to keep
                         * count from hitting zero before we're through
                         */
                        inc_io(page);
                        if (!bio->bi_iter.bi_size)
                                goto dump_bio;
                        submit_bio(bio);
                        nr_underway++;
                        bio = NULL;
                } else
                        inc_io(page);
                xlen = (PAGE_SIZE - offset) >> inode->i_blkbits;
                pblock = metapage_get_blocks(inode, lblock, &xlen);
                if (!pblock) {
                        printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
                        /*
                         * We already called inc_io(), but can't cancel it
                         * with dec_io() until we're done with the page
                         */
                        bad_blocks++;
                        continue;
                }
                len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);

                bio = bio_alloc(GFP_NOFS, 1);
                bio_set_dev(bio, inode->i_sb->s_bdev);
                bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
                bio->bi_end_io = metapage_write_end_io;
                bio->bi_private = page;
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

                /* Don't call bio_add_page yet, we may add to this vec */
                bio_offset = offset;
                bio_bytes = len << inode->i_blkbits;

                xlen -= len;
                next_block = lblock + len;
        }
        if (bio) {
                if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
                        goto add_failed;
                if (!bio->bi_iter.bi_size)
                        goto dump_bio;

                submit_bio(bio);
                nr_underway++;
        }
        if (redirty)
                redirty_page_for_writepage(wbc, page);

        unlock_page(page);

        if (bad_blocks)
                goto err_out;

        if (nr_underway == 0)
                end_page_writeback(page);

        return 0;
add_failed:
        /* We should never reach here, since we're only adding one vec */
        printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
        goto skip;
dump_bio:
        print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
                       4, bio, sizeof(*bio), 0);
skip:
        bio_put(bio);
        unlock_page(page);
        dec_io(page, last_write_complete);
err_out:
        while (bad_blocks--)
                dec_io(page, last_write_complete);
        return -EIO;
}

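/*
 * Read the blocks backing a metapage page.  Each physically contiguous
 * extent gets its own bio; unmapped blocks simply advance the offset.
 * io_count tracks the outstanding bios, so the page is unlocked (and
 * marked uptodate) only when the last read completes.
 */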
static int metapage_readpage(struct file *fp, struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct bio *bio = NULL;
        int block_offset;
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        sector_t page_start;    /* address of page in fs blocks */
        sector_t pblock;
        int xlen;
        unsigned int len;
        int offset;

        BUG_ON(!PageLocked(page));
        page_start = (sector_t)page->index <<
                     (PAGE_SHIFT - inode->i_blkbits);

        block_offset = 0;
        while (block_offset < blocks_per_page) {
                xlen = blocks_per_page - block_offset;
                pblock = metapage_get_blocks(inode, page_start + block_offset,
                                             &xlen);
                if (pblock) {
                        if (!PagePrivate(page))
                                insert_metapage(page, NULL);
                        inc_io(page);
                        if (bio)
                                submit_bio(bio);

                        bio = bio_alloc(GFP_NOFS, 1);
                        bio_set_dev(bio, inode->i_sb->s_bdev);
                        bio->bi_iter.bi_sector =
                                pblock << (inode->i_blkbits - 9);
                        bio->bi_end_io = metapage_read_end_io;
                        bio->bi_private = page;
                        bio_set_op_attrs(bio, REQ_OP_READ, 0);
                        len = xlen << inode->i_blkbits;
                        offset = block_offset << inode->i_blkbits;
                        if (bio_add_page(bio, page, len, offset) < len)
                                goto add_failed;
                        block_offset += xlen;
                } else
                        block_offset++;
        }
        if (bio)
                submit_bio(bio);
        else
                unlock_page(page);

        return 0;

add_failed:
        printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
        bio_put(bio);
        dec_io(page, last_read_complete);
        return -EIO;
}

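/*
 * Called by the VM to ask whether the page's private data can be torn
 * down.  Refuses (returns 0) if any metapage in the page is still
 * referenced, pinned by the journal, or dirty.
 */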
static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
{
        struct metapage *mp;
        int ret = 1;
        int offset;

        for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
                mp = page_to_mp(page, offset);

                if (!mp)
                        continue;

                jfs_info("metapage_releasepage: mp = 0x%p", mp);
                if (mp->count || mp->nohomeok ||
                    test_bit(META_dirty, &mp->flag)) {
                        jfs_info("count = %ld, nohomeok = %d", mp->count,
                                 mp->nohomeok);
                        ret = 0;
                        continue;
                }
                if (mp->lsn)
                        remove_from_logsync(mp);
                remove_metapage(page, mp);
                INCREMENT(mpStat.pagefree);
                free_metapage(mp);
        }
        return ret;
}

static void metapage_invalidatepage(struct page *page, unsigned int offset,
                                    unsigned int length)
{
        BUG_ON(offset || length < PAGE_SIZE);

        BUG_ON(PageWriteback(page));

        metapage_releasepage(page, 0);
}

const struct address_space_operations jfs_metapage_aops = {
        .readpage       = metapage_readpage,
        .writepage      = metapage_writepage,
        .releasepage    = metapage_releasepage,
        .invalidatepage = metapage_invalidatepage,
        .set_page_dirty = __set_page_dirty_nobuffers,
};

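/*
 * Look up, or create when 'new' is set, the metapage covering lblock.
 * 'absolute' selects the block device's direct_inode mapping instead
 * of the file's own.  On success the metapage is returned locked with
 * its reference count raised; the caller pairs this with
 * release_metapage().
 */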
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
                                unsigned int size, int absolute,
                                unsigned long new)
{
        int l2BlocksPerPage;
        int l2bsize;
        struct address_space *mapping;
        struct metapage *mp = NULL;
        struct page *page;
        unsigned long page_index;
        unsigned long page_offset;

        jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
                 inode->i_ino, lblock, absolute);

        l2bsize = inode->i_blkbits;
        l2BlocksPerPage = PAGE_SHIFT - l2bsize;
        page_index = lblock >> l2BlocksPerPage;
        page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
        if ((page_offset + size) > PAGE_SIZE) {
                jfs_err("MetaData crosses page boundary!!");
                jfs_err("lblock = %lx, size  = %d", lblock, size);
                dump_stack();
                return NULL;
        }
        if (absolute)
                mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
        else {
                /*
                 * If an nfs client tries to read an inode that is larger
                 * than any existing inodes, we may try to read past the
                 * end of the inode map
                 */
                if ((lblock << inode->i_blkbits) >= inode->i_size)
                        return NULL;
                mapping = inode->i_mapping;
        }

        if (new && (PSIZE == PAGE_SIZE)) {
                page = grab_cache_page(mapping, page_index);
                if (!page) {
                        jfs_err("grab_cache_page failed!");
                        return NULL;
                }
                SetPageUptodate(page);
        } else {
                page = read_mapping_page(mapping, page_index, NULL);
                if (IS_ERR(page) || !PageUptodate(page)) {
                        jfs_err("read_mapping_page failed!");
                        return NULL;
                }
                lock_page(page);
        }

        mp = page_to_mp(page, page_offset);
        if (mp) {
                if (mp->logical_size != size) {
                        jfs_error(inode->i_sb,
                                  "get_mp->logical_size != size\n");
                        jfs_err("logical_size = %d, size = %d",
                                mp->logical_size, size);
                        dump_stack();
                        goto unlock;
                }
                mp->count++;
                lock_metapage(mp);
                if (test_bit(META_discard, &mp->flag)) {
                        if (!new) {
                                jfs_error(inode->i_sb,
                                          "using a discarded metapage\n");
                                discard_metapage(mp);
                                goto unlock;
                        }
                        clear_bit(META_discard, &mp->flag);
                }
        } else {
                INCREMENT(mpStat.pagealloc);
                mp = alloc_metapage(GFP_NOFS);
                if (!mp)
                        goto unlock;
                mp->page = page;
                mp->sb = inode->i_sb;
                mp->flag = 0;
                mp->xflag = COMMIT_PAGE;
                mp->count = 1;
                mp->nohomeok = 0;
                mp->logical_size = size;
                mp->data = page_address(page) + page_offset;
                mp->index = lblock;
                if (unlikely(insert_metapage(page, mp))) {
                        free_metapage(mp);
                        goto unlock;
                }
                lock_metapage(mp);
        }

        if (new) {
                jfs_info("zeroing mp = 0x%p", mp);
                memset(mp->data, 0, PSIZE);
        }

        unlock_page(page);
        jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
        return mp;

unlock:
        unlock_page(page);
        return NULL;
}

void grab_metapage(struct metapage * mp)
{
        jfs_info("grab_metapage: mp = 0x%p", mp);
        get_page(mp->page);
        lock_page(mp->page);
        mp->count++;
        lock_metapage(mp);
        unlock_page(mp->page);
}

void force_metapage(struct metapage *mp)
{
        struct page *page = mp->page;
        jfs_info("force_metapage: mp = 0x%p", mp);
        set_bit(META_forcewrite, &mp->flag);
        clear_bit(META_sync, &mp->flag);
        get_page(page);
        lock_page(page);
        set_page_dirty(page);
        if (write_one_page(page))
                jfs_error(mp->sb, "write_one_page() failed\n");
        clear_bit(META_forcewrite, &mp->flag);
        put_page(page);
}

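/*
 * hold_metapage() and put_metapage() bracket a short inspection of a
 * metapage under the page lock, e.g. by the transaction manager.
 * put_metapage() only tears the metapage down, via release_metapage(),
 * when no one else holds a reference or journal pin.
 */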
void hold_metapage(struct metapage *mp)
{
        lock_page(mp->page);
}

void put_metapage(struct metapage *mp)
{
        if (mp->count || mp->nohomeok) {
                /* Someone else will release this */
                unlock_page(mp->page);
                return;
        }
        get_page(mp->page);
        mp->count++;
        lock_metapage(mp);
        unlock_page(mp->page);
        release_metapage(mp);
}

void release_metapage(struct metapage * mp)
{
        struct page *page = mp->page;
        jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

        BUG_ON(!page);

        lock_page(page);
        unlock_metapage(mp);

        assert(mp->count);
        if (--mp->count || mp->nohomeok) {
                unlock_page(page);
                put_page(page);
                return;
        }

        if (test_bit(META_dirty, &mp->flag)) {
                set_page_dirty(page);
                if (test_bit(META_sync, &mp->flag)) {
                        clear_bit(META_sync, &mp->flag);
                        if (write_one_page(page))
                                jfs_error(mp->sb, "write_one_page() failed\n");
                        lock_page(page); /* write_one_page unlocks the page */
                }
        } else if (mp->lsn)     /* discard_metapage doesn't remove it */
                remove_from_logsync(mp);

        /* Try to keep metapages from using up too much memory */
        drop_metapage(page, mp);

        unlock_page(page);
        put_page(page);
}

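/*
 * Discard any metapages backing the block range [addr, addr + len),
 * typically because the blocks are being freed: dirty bits are cleared
 * so the stale contents never reach disk, and the metapages are pulled
 * off the log's sync list.
 */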
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
        sector_t lblock;
        int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
        int BlocksPerPage = 1 << l2BlocksPerPage;
        /* All callers are interested in block device's mapping */
        struct address_space *mapping =
                JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
        struct metapage *mp;
        struct page *page;
        unsigned int offset;

        /*
         * Mark metapages to discard.  They will eventually be
         * released, but should not be written.
         */
        for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
             lblock += BlocksPerPage) {
                page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
                if (!page)
                        continue;
                for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
                        mp = page_to_mp(page, offset);
                        if (!mp)
                                continue;
                        if (mp->index < addr)
                                continue;
                        if (mp->index >= addr + len)
                                break;

                        clear_bit(META_dirty, &mp->flag);
                        set_bit(META_discard, &mp->flag);
                        if (mp->lsn)
                                remove_from_logsync(mp);
                }
                unlock_page(page);
                put_page(page);
        }
}

#ifdef CONFIG_JFS_STATISTICS
int jfs_mpstat_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m,
                       "JFS Metapage statistics\n"
                       "=======================\n"
                       "page allocations = %d\n"
                       "page frees = %d\n"
                       "lock waits = %d\n",
                       mpStat.pagealloc,
                       mpStat.pagefree,
                       mpStat.lockwait);
        return 0;
}
#endif