// SPDX-License-Identifier: GPL-2.0+
/*
 * btnode.c - NILFS B-tree node cache
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Originally written by Seiji Kihara.
 * Fully revised by Ryusuke Konishi for stabilization and simplification.
 *
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "mdt.h"
#include "dat.h"
#include "page.h"
#include "btnode.h"

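/**
 * nilfs_btnode_cache_clear - clear the B-tree node cache
 * @btnc: page cache (address_space) of the B-tree node blocks
 *
 * Invalidates and then truncates every page in the cache.
 */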
void nilfs_btnode_cache_clear(struct address_space *btnc)
{
        invalidate_mapping_pages(btnc, 0, -1);
        truncate_inode_pages(btnc, 0);
}

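/**
 * nilfs_btnode_create_block - create a new B-tree node block
 * @btnc: page cache of the B-tree node blocks
 * @blocknr: block number (cache index) of the new node block
 *
 * Grabs a buffer for @blocknr in @btnc, zero-fills it, and marks it
 * mapped and up to date.  Returns the buffer head with an elevated
 * reference count, or NULL if the buffer could not be obtained.
 */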
struct buffer_head *
nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
{
        struct inode *inode = NILFS_BTNC_I(btnc);
        struct buffer_head *bh;

        bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
        if (unlikely(!bh))
                return NULL;

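        /*
         * A newly created node block must not already have a mapped,
         * up-to-date, or dirty buffer; hitting this check indicates a
         * cache inconsistency.
         */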
        if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
                     buffer_dirty(bh))) {
                brelse(bh);
                BUG();
        }
        memset(bh->b_data, 0, i_blocksize(inode));
        bh->b_bdev = inode->i_sb->s_bdev;
        bh->b_blocknr = blocknr;
        set_buffer_mapped(bh);
        set_buffer_uptodate(bh);

        unlock_page(bh->b_page);
        put_page(bh->b_page);
        return bh;
}

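/**
 * nilfs_btnode_submit_block - submit a read request for a B-tree node block
 * @btnc: page cache of the B-tree node blocks
 * @blocknr: block number (cache index) of the node block
 * @pblocknr: physical block number to read from; if 0, @blocknr is used
 *	directly for the DAT inode and is translated through the DAT for
 *	any other inode
 * @mode: request operation passed to submit_bh()
 * @mode_flags: request flags; REQ_RAHEAD selects the non-blocking
 *	readahead path
 * @pbh: place to store the buffer head of the block
 * @submit_ptr: physical block number of the last submitted read, used to
 *	restrict readahead to physically consecutive blocks
 *
 * Returns 0 when a read has been submitted, -EEXIST (internal code) when
 * the buffer is already up to date or dirty, -EBUSY (internal code) when
 * a readahead request is skipped, -ENOMEM if the buffer could not be
 * obtained, or a negative error code from the DAT lookup.
 */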
int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
                              sector_t pblocknr, int mode, int mode_flags,
                              struct buffer_head **pbh, sector_t *submit_ptr)
{
        struct buffer_head *bh;
        struct inode *inode = NILFS_BTNC_I(btnc);
        struct page *page;
        int err;

        bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
        if (unlikely(!bh))
                return -ENOMEM;

        err = -EEXIST; /* internal code */
        page = bh->b_page;

        if (buffer_uptodate(bh) || buffer_dirty(bh))
                goto found;

        if (pblocknr == 0) {
                pblocknr = blocknr;
                if (inode->i_ino != NILFS_DAT_INO) {
                        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

                        /* blocknr is a virtual block number */
                        err = nilfs_dat_translate(nilfs->ns_dat, blocknr,
                                                  &pblocknr);
                        if (unlikely(err)) {
                                brelse(bh);
                                goto out_locked;
                        }
                }
        }

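        /*
         * Readahead requests are only submitted when the target is
         * physically contiguous with the previously submitted block and
         * the buffer lock can be taken without blocking; otherwise the
         * request is skipped with -EBUSY.
         */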
        if (mode_flags & REQ_RAHEAD) {
                if (pblocknr != *submit_ptr + 1 || !trylock_buffer(bh)) {
                        err = -EBUSY; /* internal code */
                        brelse(bh);
                        goto out_locked;
                }
        } else { /* mode == READ */
                lock_buffer(bh);
        }
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                err = -EEXIST; /* internal code */
                goto found;
        }
        set_buffer_mapped(bh);
        bh->b_bdev = inode->i_sb->s_bdev;
        bh->b_blocknr = pblocknr; /* set block address for read */
        bh->b_end_io = end_buffer_read_sync;
        get_bh(bh);
        submit_bh(mode, mode_flags, bh);
        bh->b_blocknr = blocknr; /* set back to the given block address */
        *submit_ptr = pblocknr;
        err = 0;
found:
        *pbh = bh;

out_locked:
        unlock_page(page);
        put_page(page);
        return err;
}

/**
 * nilfs_btnode_delete - delete B-tree node buffer
 * @bh: buffer to be deleted
 *
 * nilfs_btnode_delete() invalidates the specified buffer and deletes the
 * page containing the buffer once the page becomes unbusy.
 */
void nilfs_btnode_delete(struct buffer_head *bh)
{
        struct address_space *mapping;
        struct page *page = bh->b_page;
        pgoff_t index = page_index(page);
        int still_dirty;

        get_page(page);
        lock_page(page);
        wait_on_page_writeback(page);

        nilfs_forget_buffer(bh);
        still_dirty = PageDirty(page);
        mapping = page->mapping;
        unlock_page(page);
        put_page(page);

        if (!still_dirty && mapping)
                invalidate_inode_pages2_range(mapping, index, index);
}

/**
 * nilfs_btnode_prepare_change_key
 *  Prepare to move the contents of the block for the old key to the block
 *  for the new key.  The old buffer will not be removed, but it might be
 *  reused for the new buffer.  This may return -ENOMEM on memory
 *  allocation failure, or -EIO on a disk read error.
 */
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
                                    struct nilfs_btnode_chkey_ctxt *ctxt)
{
        struct buffer_head *obh, *nbh;
        struct inode *inode = NILFS_BTNC_I(btnc);
        __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
        int err;

        if (oldkey == newkey)
                return 0;

        obh = ctxt->bh;
        ctxt->newbh = NULL;

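        /*
         * When the block size equals the page size, try to move the page
         * itself to the new index in the page cache.  Otherwise, or if
         * that move cannot be done, fall back to copying the contents
         * into a new buffer created for the new key (copy mode).
         */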
        if (inode->i_blkbits == PAGE_SHIFT) {
                struct page *opage = obh->b_page;
                lock_page(opage);
retry:
                /* BUG_ON(oldkey != obh->b_page->index); */
                if (unlikely(oldkey != opage->index))
                        NILFS_PAGE_BUG(opage,
                                       "invalid oldkey %lld (newkey=%lld)",
                                       (unsigned long long)oldkey,
                                       (unsigned long long)newkey);

                xa_lock_irq(&btnc->i_pages);
                err = __xa_insert(&btnc->i_pages, newkey, opage, GFP_NOFS);
                xa_unlock_irq(&btnc->i_pages);
                /*
                 * Note: page->index will not change to newkey until
                 * nilfs_btnode_commit_change_key() is called.  The page
                 * lock is held to protect the page in this intermediate
                 * state.
                 */
                if (!err)
                        return 0;
                else if (err != -EBUSY)
                        goto failed_unlock;

                err = invalidate_inode_pages2_range(btnc, newkey, newkey);
                if (!err)
                        goto retry;
                /* fallback to copy mode */
                unlock_page(opage);
        }

        nbh = nilfs_btnode_create_block(btnc, newkey);
        if (!nbh)
                return -ENOMEM;

        BUG_ON(nbh == obh);
        ctxt->newbh = nbh;
        return 0;

 failed_unlock:
        unlock_page(obh->b_page);
        return err;
}

/**
 * nilfs_btnode_commit_change_key
 *  Commit the change_key operation prepared by prepare_change_key().
 */
void nilfs_btnode_commit_change_key(struct address_space *btnc,
                                    struct nilfs_btnode_chkey_ctxt *ctxt)
{
        struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;
        __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
        struct page *opage;

        if (oldkey == newkey)
                return;

        if (nbh == NULL) {      /* blocksize == pagesize */
                opage = obh->b_page;
                if (unlikely(oldkey != opage->index))
                        NILFS_PAGE_BUG(opage,
                                       "invalid oldkey %lld (newkey=%lld)",
                                       (unsigned long long)oldkey,
                                       (unsigned long long)newkey);
                mark_buffer_dirty(obh);

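                /*
                 * Move the page to the new index: drop the old entry and
                 * set the dirty tag on the entry that was inserted at the
                 * new key by nilfs_btnode_prepare_change_key().
                 */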
                xa_lock_irq(&btnc->i_pages);
                __xa_erase(&btnc->i_pages, oldkey);
                __xa_set_mark(&btnc->i_pages, newkey, PAGECACHE_TAG_DIRTY);
                xa_unlock_irq(&btnc->i_pages);

                opage->index = obh->b_blocknr = newkey;
                unlock_page(opage);
        } else {
                nilfs_copy_buffer(nbh, obh);
                mark_buffer_dirty(nbh);

                nbh->b_blocknr = newkey;
                ctxt->bh = nbh;
                nilfs_btnode_delete(obh); /* will decrement bh->b_count */
        }
}

/**
 * nilfs_btnode_abort_change_key
 *  Abort the change_key operation prepared by prepare_change_key().
 */
void nilfs_btnode_abort_change_key(struct address_space *btnc,
                                   struct nilfs_btnode_chkey_ctxt *ctxt)
{
        struct buffer_head *nbh = ctxt->newbh;
        __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;

        if (oldkey == newkey)
                return;

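        /*
         * Undo the preparation: either remove the page cache entry that
         * was inserted at the new key and unlock the page, or release
         * the buffer that was created for the new key.
         */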
        if (nbh == NULL) {      /* blocksize == pagesize */
                xa_erase_irq(&btnc->i_pages, newkey);
                unlock_page(ctxt->bh->b_page);
        } else
                brelse(nbh);
}