linux/fs/nilfs2/btnode.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS B-tree node cache
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Originally written by Seiji Kihara.
 * Fully revised by Ryusuke Konishi for stabilization and simplification.
 *
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "mdt.h"
#include "dat.h"
#include "page.h"
#include "btnode.h"


/**
 * nilfs_init_btnc_inode - initialize B-tree node cache inode
 * @btnc_inode: inode to be initialized
 *
 * nilfs_init_btnc_inode() sets up an inode for use as a B-tree node cache:
 * the embedded bmap work area is cleared and the mapping is restricted to
 * GFP_NOFS allocations so that page allocation cannot recurse into the
 * filesystem.
 */
void nilfs_init_btnc_inode(struct inode *btnc_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(btnc_inode);

	btnc_inode->i_mode = S_IFREG;
	ii->i_flags = 0;
	memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap));
	mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS);
}

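/*
 * Illustrative sketch (not from the original file): a hypothetical caller
 * would allocate a separate in-core inode, initialize it with
 * nilfs_init_btnc_inode(), and then pass its i_mapping around as the "btnc"
 * address space that the helpers below operate on.
 *
 *	struct inode *btnc_inode = new_inode(sb);	// assumed allocation path
 *
 *	if (!btnc_inode)
 *		return -ENOMEM;
 *	nilfs_init_btnc_inode(btnc_inode);
 *	btnc = btnc_inode->i_mapping;	// handed to the nilfs_btnode_*() helpers
 */
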
/**
 * nilfs_btnode_cache_clear - drop all pages from a B-tree node cache
 * @btnc: B-tree node cache (address space) to be cleared
 */
void nilfs_btnode_cache_clear(struct address_space *btnc)
{
	invalidate_mapping_pages(btnc, 0, -1);
	truncate_inode_pages(btnc, 0);
}

/**
 * nilfs_btnode_create_block - create a new, zero-filled B-tree node buffer
 * @btnc: B-tree node cache (address space)
 * @blocknr: block number (key) of the new node block
 *
 * Return: a referenced buffer head on success, or NULL if the page or
 * buffer could not be allocated.  The buffer is returned mapped, up to date
 * and zero-filled; the page lock is dropped before returning.
 */
struct buffer_head *
nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
{
	struct inode *inode = btnc->host;
	struct buffer_head *bh;

	bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
	if (unlikely(!bh))
		return NULL;

	if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
		     buffer_dirty(bh))) {
		/*
		 * A buffer that is already mapped, up to date, or dirty at
		 * this address means the node cache is inconsistent.
		 */
		brelse(bh);
		BUG();
	}
	memset(bh->b_data, 0, i_blocksize(inode));
	bh->b_bdev = inode->i_sb->s_bdev;
	bh->b_blocknr = blocknr;
	set_buffer_mapped(bh);
	set_buffer_uptodate(bh);

	unlock_page(bh->b_page);
	put_page(bh->b_page);
	return bh;
}

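/*
 * Illustrative sketch (assumption, not from the original file): a B-tree
 * implementation that allocates a fresh node block would typically fill the
 * zeroed buffer, mark it dirty, and drop its reference:
 *
 *	bh = nilfs_btnode_create_block(btnc, newkey);
 *	if (!bh)
 *		return -ENOMEM;
 *	// ... initialize the node header and entries in bh->b_data ...
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */
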
/**
 * nilfs_btnode_submit_block - read in a B-tree node block if necessary
 * @btnc: B-tree node cache (address space)
 * @blocknr: block number (key) of the node block; treated as a virtual
 *           block number unless @btnc belongs to the DAT inode
 * @pblocknr: physical (device) block number, or 0 to have it looked up
 *            through the DAT
 * @mode: request operation used for the read (e.g. REQ_OP_READ)
 * @mode_flags: request flags; REQ_RAHEAD makes the submission best effort
 * @pbh: place to store the resulting buffer head
 * @submit_ptr: physical block number of the previously submitted read,
 *              used to keep readahead sequential
 *
 * Return: 0 if a read was submitted, -EEXIST (internal code) if the buffer
 * is already up to date or dirty, -EBUSY (internal code) if a readahead
 * request was skipped, -ENOMEM on allocation failure, or a negative error
 * code from the DAT lookup.  When 0 is returned, the caller is expected to
 * wait on the buffer and check its uptodate flag.
 */
int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
			      sector_t pblocknr, int mode, int mode_flags,
			      struct buffer_head **pbh, sector_t *submit_ptr)
{
	struct buffer_head *bh;
	struct inode *inode = btnc->host;
	struct page *page;
	int err;

	bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
	if (unlikely(!bh))
		return -ENOMEM;

	err = -EEXIST; /* internal code */
	page = bh->b_page;

	if (buffer_uptodate(bh) || buffer_dirty(bh))
		goto found;

	if (pblocknr == 0) {
		pblocknr = blocknr;
		if (inode->i_ino != NILFS_DAT_INO) {
			struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

			/* blocknr is a virtual block number */
			err = nilfs_dat_translate(nilfs->ns_dat, blocknr,
						  &pblocknr);
			if (unlikely(err)) {
				brelse(bh);
				goto out_locked;
			}
		}
	}

	if (mode_flags & REQ_RAHEAD) {
		if (pblocknr != *submit_ptr + 1 || !trylock_buffer(bh)) {
			err = -EBUSY; /* internal code */
			brelse(bh);
			goto out_locked;
		}
	} else { /* mode == READ */
		lock_buffer(bh);
	}
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		err = -EEXIST; /* internal code */
		goto found;
	}
	set_buffer_mapped(bh);
	bh->b_bdev = inode->i_sb->s_bdev;
	bh->b_blocknr = pblocknr; /* set block address for read */
	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(mode, mode_flags, bh);
	bh->b_blocknr = blocknr; /* set back to the given block address */
	*submit_ptr = pblocknr;
	err = 0;
found:
	*pbh = bh;

out_locked:
	unlock_page(page);
	put_page(page);
	return err;
}

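/*
 * Illustrative sketch (assumption, not from the original file): a caller
 * reading node block "key" and opportunistically reading ahead "next_key"
 * might drive this interface roughly as follows; both keys are hypothetical
 * variables.
 *
 *	sector_t submit_ptr = 0;
 *	struct buffer_head *bh, *ra_bh;
 *	int err, ra_err;
 *
 *	err = nilfs_btnode_submit_block(btnc, key, 0, REQ_OP_READ, 0,
 *					&bh, &submit_ptr);
 *	if (err && err != -EEXIST)
 *		return err;		// -ENOMEM or DAT lookup failure
 *
 *	// Best-effort readahead; -EBUSY just means it was skipped.
 *	ra_err = nilfs_btnode_submit_block(btnc, next_key, 0, REQ_OP_READ,
 *					   REQ_RAHEAD, &ra_bh, &submit_ptr);
 *	if (!ra_err || ra_err == -EEXIST)
 *		brelse(ra_bh);		// readahead buffer is not used here
 *
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh)) {
 *		brelse(bh);
 *		return -EIO;
 *	}
 *	// bh now holds the up-to-date node block; brelse() it when done.
 */
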
/**
 * nilfs_btnode_delete - delete B-tree node buffer
 * @bh: buffer to be deleted
 *
 * nilfs_btnode_delete() invalidates the specified buffer and deletes the
 * page containing the buffer if the page is no longer busy.
 */
void nilfs_btnode_delete(struct buffer_head *bh)
{
	struct address_space *mapping;
	struct page *page = bh->b_page;
	pgoff_t index = page_index(page);
	int still_dirty;

	get_page(page);
	lock_page(page);
	wait_on_page_writeback(page);

	nilfs_forget_buffer(bh);
	still_dirty = PageDirty(page);
	mapping = page->mapping;
	unlock_page(page);
	put_page(page);

	if (!still_dirty && mapping)
		invalidate_inode_pages2_range(mapping, index, index);
}

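/*
 * Illustrative note (assumption, not from the original file): as the comment
 * in nilfs_btnode_commit_change_key() below points out, this call decrements
 * bh->b_count, so a hypothetical caller hands its reference over:
 *
 *	nilfs_btnode_delete(bh);	// no brelse(bh) afterwards
 */
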
/**
 * nilfs_btnode_prepare_change_key - prepare to change the key of a node block
 * @btnc: B-tree node cache (address space)
 * @ctxt: change-key context holding the old key, the new key, and the buffers
 *
 * Prepare to move the contents of the block at the old key to the new key.
 * The old buffer is not removed, but it may be reused as the new buffer.
 * This may return -ENOMEM because of memory allocation errors, and may
 * return -EIO because of disk read errors.  See the illustrative sketch at
 * the end of this file for how the prepare/commit/abort calls fit together.
 */
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh, *nbh;
	struct inode *inode = btnc->host;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	int err;

	if (oldkey == newkey)
		return 0;

	obh = ctxt->bh;
	ctxt->newbh = NULL;

	if (inode->i_blkbits == PAGE_SHIFT) {
		struct page *opage = obh->b_page;

		lock_page(opage);
retry:
		/* BUG_ON(oldkey != obh->b_page->index); */
		if (unlikely(oldkey != opage->index))
			NILFS_PAGE_BUG(opage,
				       "invalid oldkey %lld (newkey=%lld)",
				       (unsigned long long)oldkey,
				       (unsigned long long)newkey);

		xa_lock_irq(&btnc->i_pages);
		err = __xa_insert(&btnc->i_pages, newkey, opage, GFP_NOFS);
		xa_unlock_irq(&btnc->i_pages);
		/*
		 * Note: page->index will not change to newkey until
		 * nilfs_btnode_commit_change_key() is called.  To protect
		 * the page in this intermediate state, the page lock
		 * is held.
		 */
		if (!err)
			return 0;
		else if (err != -EBUSY)
			goto failed_unlock;

		err = invalidate_inode_pages2_range(btnc, newkey, newkey);
		if (!err)
			goto retry;
		/* fallback to copy mode */
		unlock_page(opage);
	}

	nbh = nilfs_btnode_create_block(btnc, newkey);
	if (!nbh)
		return -ENOMEM;

	BUG_ON(nbh == obh);
	ctxt->newbh = nbh;
	return 0;

 failed_unlock:
	unlock_page(obh->b_page);
	return err;
}

/**
 * nilfs_btnode_commit_change_key - commit the change_key operation
 * @btnc: B-tree node cache (address space)
 * @ctxt: change-key context prepared by nilfs_btnode_prepare_change_key()
 *
 * Commit the change_key operation prepared by
 * nilfs_btnode_prepare_change_key().
 */
void nilfs_btnode_commit_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	struct page *opage;

	if (oldkey == newkey)
		return;

	if (nbh == NULL) {	/* blocksize == pagesize */
		opage = obh->b_page;
		if (unlikely(oldkey != opage->index))
			NILFS_PAGE_BUG(opage,
				       "invalid oldkey %lld (newkey=%lld)",
				       (unsigned long long)oldkey,
				       (unsigned long long)newkey);
		mark_buffer_dirty(obh);

		xa_lock_irq(&btnc->i_pages);
		__xa_erase(&btnc->i_pages, oldkey);
		__xa_set_mark(&btnc->i_pages, newkey, PAGECACHE_TAG_DIRTY);
		xa_unlock_irq(&btnc->i_pages);

		opage->index = obh->b_blocknr = newkey;
		unlock_page(opage);
	} else {
		nilfs_copy_buffer(nbh, obh);
		mark_buffer_dirty(nbh);

		nbh->b_blocknr = newkey;
		ctxt->bh = nbh;
		nilfs_btnode_delete(obh); /* will decrement bh->b_count */
	}
}

/**
 * nilfs_btnode_abort_change_key - abort the change_key operation
 * @btnc: B-tree node cache (address space)
 * @ctxt: change-key context prepared by nilfs_btnode_prepare_change_key()
 *
 * Abort the change_key operation prepared by
 * nilfs_btnode_prepare_change_key().
 */
void nilfs_btnode_abort_change_key(struct address_space *btnc,
				   struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *nbh = ctxt->newbh;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;

	if (oldkey == newkey)
		return;

	if (nbh == NULL) {	/* blocksize == pagesize */
		xa_erase_irq(&btnc->i_pages, newkey);
		unlock_page(ctxt->bh->b_page);
	} else
		brelse(nbh);
}

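/*
 * Illustrative sketch (assumption, not from the original file): a
 * hypothetical caller moving a node block from "oldkey" to "newkey" drives
 * the three helpers above as a prepare/commit/abort sequence.
 *
 *	struct nilfs_btnode_chkey_ctxt ctxt = {
 *		.oldkey = oldkey,
 *		.newkey = newkey,
 *		.bh = bh,		// buffer currently holding the block
 *	};
 *	int err;
 *
 *	err = nilfs_btnode_prepare_change_key(btnc, &ctxt);
 *	if (err)
 *		return err;
 *
 *	if (some_further_step_fails)	// hypothetical condition
 *		nilfs_btnode_abort_change_key(btnc, &ctxt);
 *	else
 *		nilfs_btnode_commit_change_key(btnc, &ctxt);
 *
 *	// After commit, ctxt.bh refers to the buffer now holding the block
 *	// at newkey (in copy mode it differs from the original bh).
 */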