/*
 * dat.c - NILFS disk address translation.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
#include "dat.h"

#define NILFS_CNO_MIN   ((__u64)1)
#define NILFS_CNO_MAX   (~(__u64)0)

/**
 * struct nilfs_dat_info - on-memory private data of DAT file
 * @mi: on-memory private data of metadata file
 * @palloc_cache: persistent object allocator cache of DAT file
 * @shadow: shadow map of DAT file
 */
struct nilfs_dat_info {
        struct nilfs_mdt_info mi;
        struct nilfs_palloc_cache palloc_cache;
        struct nilfs_shadow_map shadow;
};
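
/*
 * The cast in NILFS_DAT_I() is valid only because @mi is the first
 * member of struct nilfs_dat_info: the nilfs_mdt_info pointer returned
 * by NILFS_MDT() therefore also points at the enclosing structure.
 */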
static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
{
        return (struct nilfs_dat_info *)NILFS_MDT(dat);
}

static int nilfs_dat_prepare_entry(struct inode *dat,
                                   struct nilfs_palloc_req *req, int create)
{
        return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
                                            create, &req->pr_entry_bh);
}

static void nilfs_dat_commit_entry(struct inode *dat,
                                   struct nilfs_palloc_req *req)
{
        mark_buffer_dirty(req->pr_entry_bh);
        nilfs_mdt_mark_dirty(dat);
        brelse(req->pr_entry_bh);
}

static void nilfs_dat_abort_entry(struct inode *dat,
                                  struct nilfs_palloc_req *req)
{
        brelse(req->pr_entry_bh);
}
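
/*
 * All DAT entry updates follow a prepare/commit/abort protocol:
 * "prepare" pins the buffer holding the entry and may fail, "commit"
 * applies the staged change and releases the buffer, and "abort"
 * releases it unchanged.  A minimal sketch of the pattern, with a
 * hypothetical caller for illustration only:
 *
 *      struct nilfs_palloc_req req = { .pr_entry_nr = vblocknr };
 *      int err;
 *
 *      err = nilfs_dat_prepare_entry(dat, &req, 0);
 *      if (err < 0)
 *              return err;             // nothing to undo
 *      ...                             // stage the change
 *      nilfs_dat_commit_entry(dat, &req);  // or nilfs_dat_abort_entry()
 */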

int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
        int ret;

        ret = nilfs_palloc_prepare_alloc_entry(dat, req);
        if (ret < 0)
                return ret;

        ret = nilfs_dat_prepare_entry(dat, req, 1);
        if (ret < 0)
                nilfs_palloc_abort_alloc_entry(dat, req);

        return ret;
}

void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
        struct nilfs_dat_entry *entry;
        void *kaddr;

        kaddr = kmap_atomic(req->pr_entry_bh->b_page);
        entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
                                             req->pr_entry_bh, kaddr);
        entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
        entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
        entry->de_blocknr = cpu_to_le64(0);
        kunmap_atomic(kaddr);

        nilfs_palloc_commit_alloc_entry(dat, req);
        nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
        nilfs_dat_abort_entry(dat, req);
        nilfs_palloc_abort_alloc_entry(dat, req);
}
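
/*
 * Example (sketch): allocating a new virtual block number with the
 * triplet above.  On success, req.pr_entry_nr holds the new vblocknr
 * and the entry is initialized as unwritten ([NILFS_CNO_MIN,
 * NILFS_CNO_MAX), block number 0).  Hypothetical caller, for
 * illustration only:
 *
 *      struct nilfs_palloc_req req = { .pr_entry_nr = 0 };
 *      int err;
 *
 *      err = nilfs_dat_prepare_alloc(dat, &req);
 *      if (err < 0)
 *              return err;
 *      ...                             // later steps may fail:
 *      if (later_step_failed)
 *              nilfs_dat_abort_alloc(dat, &req);
 *      else
 *              nilfs_dat_commit_alloc(dat, &req);
 */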

static void nilfs_dat_commit_free(struct inode *dat,
                                  struct nilfs_palloc_req *req)
{
        struct nilfs_dat_entry *entry;
        void *kaddr;

        kaddr = kmap_atomic(req->pr_entry_bh->b_page);
        entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
                                             req->pr_entry_bh, kaddr);
        entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
        entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
        entry->de_blocknr = cpu_to_le64(0);
        kunmap_atomic(kaddr);

        nilfs_dat_commit_entry(dat, req);
        nilfs_palloc_commit_free_entry(dat, req);
}

int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
        int ret;

        ret = nilfs_dat_prepare_entry(dat, req, 0);
        WARN_ON(ret == -ENOENT);
        return ret;
}

void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
                            sector_t blocknr)
{
        struct nilfs_dat_entry *entry;
        void *kaddr;

        kaddr = kmap_atomic(req->pr_entry_bh->b_page);
        entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
                                             req->pr_entry_bh, kaddr);
        entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
        entry->de_blocknr = cpu_to_le64(blocknr);
        kunmap_atomic(kaddr);

        nilfs_dat_commit_entry(dat, req);
}

int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
{
        struct nilfs_dat_entry *entry;
        sector_t blocknr;
        void *kaddr;
        int ret;

        ret = nilfs_dat_prepare_entry(dat, req, 0);
        if (ret < 0) {
                WARN_ON(ret == -ENOENT);
                return ret;
        }

        kaddr = kmap_atomic(req->pr_entry_bh->b_page);
        entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
                                             req->pr_entry_bh, kaddr);
        blocknr = le64_to_cpu(entry->de_blocknr);
        kunmap_atomic(kaddr);

        if (blocknr == 0) {
                ret = nilfs_palloc_prepare_free_entry(dat, req);
                if (ret < 0) {
                        nilfs_dat_abort_entry(dat, req);
                        return ret;
                }
        }

        return 0;
}

void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
                          int dead)
{
        struct nilfs_dat_entry *entry;
        __u64 start, end;
        sector_t blocknr;
        void *kaddr;

        kaddr = kmap_atomic(req->pr_entry_bh->b_page);
        entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
                                             req->pr_entry_bh, kaddr);
        end = start = le64_to_cpu(entry->de_start);
        if (!dead) {
                end = nilfs_mdt_cno(dat);
                WARN_ON(start > end);
        }
        entry->de_end = cpu_to_le64(end);
        blocknr = le64_to_cpu(entry->de_blocknr);
        kunmap_atomic(kaddr);

        if (blocknr == 0)
                nilfs_dat_commit_free(dat, req);
        else
                nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
        struct nilfs_dat_entry *entry;
        __u64 start;
        sector_t blocknr;
        void *kaddr;

        kaddr = kmap_atomic(req->pr_entry_bh->b_page);
        entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
                                             req->pr_entry_bh, kaddr);
        start = le64_to_cpu(entry->de_start);
        blocknr = le64_to_cpu(entry->de_blocknr);
        kunmap_atomic(kaddr);

        if (start == nilfs_mdt_cno(dat) && blocknr == 0)
                nilfs_palloc_abort_free_entry(dat, req);
        nilfs_dat_abort_entry(dat, req);
}
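
/*
 * A DAT entry's lifetime is bracketed by checkpoint numbers:
 * nilfs_dat_commit_start() stamps the current checkpoint number and
 * the disk block address into the entry, and the "end" helpers record
 * the checkpoint at which the virtual block is retired.  An entry
 * whose block was never written (de_blocknr == 0) is freed outright
 * when it is ended.  A minimal sketch of retiring an entry, with
 * @dead a caller-supplied flag (when clear, the current checkpoint
 * number is recorded as the end of the entry's lifetime):
 *
 *      struct nilfs_palloc_req req = { .pr_entry_nr = vblocknr };
 *      int err;
 *
 *      err = nilfs_dat_prepare_end(dat, &req);
 *      if (!err)
 *              nilfs_dat_commit_end(dat, &req, dead);
 */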

int nilfs_dat_prepare_update(struct inode *dat,
                             struct nilfs_palloc_req *oldreq,
                             struct nilfs_palloc_req *newreq)
{
        int ret;

        ret = nilfs_dat_prepare_end(dat, oldreq);
        if (!ret) {
                ret = nilfs_dat_prepare_alloc(dat, newreq);
                if (ret < 0)
                        nilfs_dat_abort_end(dat, oldreq);
        }
        return ret;
}

void nilfs_dat_commit_update(struct inode *dat,
                             struct nilfs_palloc_req *oldreq,
                             struct nilfs_palloc_req *newreq, int dead)
{
        nilfs_dat_commit_end(dat, oldreq, dead);
        nilfs_dat_commit_alloc(dat, newreq);
}

void nilfs_dat_abort_update(struct inode *dat,
                            struct nilfs_palloc_req *oldreq,
                            struct nilfs_palloc_req *newreq)
{
        nilfs_dat_abort_end(dat, oldreq);
        nilfs_dat_abort_alloc(dat, newreq);
}
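
/*
 * Example (sketch): the update triplet retires @oldreq's entry and
 * allocates a replacement in one prepare/commit unit.  Hypothetical
 * caller, for illustration only:
 *
 *      struct nilfs_palloc_req oldreq = { .pr_entry_nr = old_vbn };
 *      struct nilfs_palloc_req newreq = { .pr_entry_nr = 0 };
 *      int err;
 *
 *      err = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
 *      if (!err)
 *              nilfs_dat_commit_update(dat, &oldreq, &newreq, dead);
 *      // on a later failure: nilfs_dat_abort_update(dat, &oldreq, &newreq);
 */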

/**
 * nilfs_dat_mark_dirty - mark the entry block of a virtual block number dirty
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 *
 * Description: nilfs_dat_mark_dirty() marks the entry block that holds the
 * DAT entry of @vblocknr dirty so that it is written out in the next
 * segment construction.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
        struct nilfs_palloc_req req;
        int ret;

        req.pr_entry_nr = vblocknr;
        ret = nilfs_dat_prepare_entry(dat, &req, 0);
        if (ret == 0)
                nilfs_dat_commit_entry(dat, &req);
        return ret;
}
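
/*
 * Example (sketch): forcing the entry block that holds @vblocknr out
 * with the next segment write.  For illustration only:
 *
 *      err = nilfs_dat_mark_dirty(dat, vblocknr);
 *      if (err)
 *              return err;     // -EIO or -ENOMEM
 */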

/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers
 * @nitems: number of virtual block numbers
 *
 * Description: nilfs_dat_freev() frees the virtual block numbers specified
 * by @vblocknrs and @nitems.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The virtual block numbers have not been allocated.
 */
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
        return nilfs_palloc_freev(dat, vblocknrs, nitems);
}
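
/*
 * Example (sketch): releasing a batch of virtual block numbers that
 * are no longer referenced by any checkpoint.  The array contents are
 * hypothetical, for illustration only:
 *
 *      __u64 dead_vbns[3] = { vbn1, vbn2, vbn3 };
 *      int err;
 *
 *      err = nilfs_dat_freev(dat, dead_vbns, 3);
 */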

/**
 * nilfs_dat_move - change a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: block number
 *
 * Description: nilfs_dat_move() changes the block number associated with
 * @vblocknr to @blocknr.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
        struct buffer_head *entry_bh;
        struct nilfs_dat_entry *entry;
        void *kaddr;
        int ret;

        ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
        if (ret < 0)
                return ret;

        /*
         * The given disk block number (blocknr) is not yet written to
         * the device at this point.
         *
         * To prevent nilfs_dat_translate() from returning the
         * uncommitted block number, this makes a copy of the entry
         * buffer and redirects nilfs_dat_translate() to the copy.
         */
        if (!buffer_nilfs_redirected(entry_bh)) {
                ret = nilfs_mdt_freeze_buffer(dat, entry_bh);
                if (ret) {
                        brelse(entry_bh);
                        return ret;
                }
        }

        kaddr = kmap_atomic(entry_bh->b_page);
        entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
        if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
                printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
                       (unsigned long long)vblocknr,
                       (unsigned long long)le64_to_cpu(entry->de_start),
                       (unsigned long long)le64_to_cpu(entry->de_end));
                kunmap_atomic(kaddr);
                brelse(entry_bh);
                return -EINVAL;
        }
        WARN_ON(blocknr == 0);
        entry->de_blocknr = cpu_to_le64(blocknr);
        kunmap_atomic(kaddr);

        mark_buffer_dirty(entry_bh);
        nilfs_mdt_mark_dirty(dat);

        brelse(entry_bh);

        return 0;
}
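
/*
 * Example (sketch): repointing a live virtual block number after its
 * data has been copied to a new on-disk location, as happens when
 * blocks are relocated during segment construction or garbage
 * collection.  new_blocknr is hypothetical, for illustration only:
 *
 *      sector_t new_blocknr = ...;     // destination block
 *      int err;
 *
 *      err = nilfs_dat_move(dat, vblocknr, new_blocknr);
 */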

/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: pointer to a block number
 *
 * Description: nilfs_dat_translate() maps the virtual block number @vblocknr
 * to the corresponding block number.
 *
 * Return Value: On success, 0 is returned and the block number associated
 * with @vblocknr is stored in the place pointed by @blocknrp. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A block number associated with @vblocknr does not exist.
 */
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
        struct buffer_head *entry_bh, *bh;
        struct nilfs_dat_entry *entry;
        sector_t blocknr;
        void *kaddr;
        int ret;

        ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
        if (ret < 0)
                return ret;

        if (!nilfs_doing_gc() && buffer_nilfs_redirected(entry_bh)) {
                bh = nilfs_mdt_get_frozen_buffer(dat, entry_bh);
                if (bh) {
                        WARN_ON(!buffer_uptodate(bh));
                        brelse(entry_bh);
                        entry_bh = bh;
                }
        }

        kaddr = kmap_atomic(entry_bh->b_page);
        entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
        blocknr = le64_to_cpu(entry->de_blocknr);
        if (blocknr == 0) {
                ret = -ENOENT;
                goto out;
        }
        *blocknrp = blocknr;

 out:
        kunmap_atomic(kaddr);
        brelse(entry_bh);
        return ret;
}
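
/*
 * Example (sketch): resolving a virtual block number to its current
 * on-disk address before issuing a read.  For illustration only:
 *
 *      sector_t pbn;
 *      int err;
 *
 *      err = nilfs_dat_translate(dat, vblocknr, &pbn);
 *      if (err == -ENOENT)
 *              ...     // vblocknr has no block assigned yet
 */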

/**
 * nilfs_dat_get_vinfo - get information on a set of virtual block numbers
 * @dat: DAT file inode
 * @buf: array of nilfs_vinfo structures with vi_vblocknr filled in by the
 *	caller
 * @visz: byte size of a single entry in @buf
 * @nvi: number of entries in @buf
 *
 * Description: nilfs_dat_get_vinfo() looks up the DAT entry of each virtual
 * block number in @buf and fills in its start checkpoint, end checkpoint,
 * and block number.  Entries sharing an entry block are processed with a
 * single block lookup, so the call is cheapest when @buf is sorted by
 * virtual block number.
 *
 * Return Value: On success, the number of entries (@nvi) is returned. On
 * error, one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
                            size_t nvi)
{
        struct buffer_head *entry_bh;
        struct nilfs_dat_entry *entry;
        struct nilfs_vinfo *vinfo = buf;
        __u64 first, last;
        void *kaddr;
        unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
        int i, j, n, ret;

        for (i = 0; i < nvi; i += n) {
                ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
                                                   0, &entry_bh);
                if (ret < 0)
                        return ret;
                kaddr = kmap_atomic(entry_bh->b_page);
                /* first and last virtual block numbers in this entry block */
                first = vinfo->vi_vblocknr;
                do_div(first, entries_per_block);
                first *= entries_per_block;
                last = first + entries_per_block - 1;
                for (j = i, n = 0;
                     j < nvi && vinfo->vi_vblocknr >= first &&
                             vinfo->vi_vblocknr <= last;
                     j++, n++, vinfo = (void *)vinfo + visz) {
                        entry = nilfs_palloc_block_get_entry(
                                dat, vinfo->vi_vblocknr, entry_bh, kaddr);
                        vinfo->vi_start = le64_to_cpu(entry->de_start);
                        vinfo->vi_end = le64_to_cpu(entry->de_end);
                        vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
                }
                kunmap_atomic(kaddr);
                brelse(entry_bh);
        }

        return nvi;
}
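
/*
 * Example (sketch): querying two virtual block numbers at once.  The
 * vbn values are hypothetical, for illustration only:
 *
 *      struct nilfs_vinfo vinfo[2] = {
 *              { .vi_vblocknr = vbn1 },
 *              { .vi_vblocknr = vbn2 },
 *      };
 *      ssize_t n;
 *
 *      n = nilfs_dat_get_vinfo(dat, vinfo, sizeof(vinfo[0]), 2);
 *      if (n == 2)
 *              ...     // vi_start, vi_end, vi_blocknr are filled in
 */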

/**
 * nilfs_dat_read - read or get dat inode
 * @sb: super block instance
 * @entry_size: size of a dat entry
 * @raw_inode: on-disk dat inode
 * @inodep: buffer to store the inode
 */
int nilfs_dat_read(struct super_block *sb, size_t entry_size,
                   struct nilfs_inode *raw_inode, struct inode **inodep)
{
        static struct lock_class_key dat_lock_key;
        struct inode *dat;
        struct nilfs_dat_info *di;
        int err;

        if (entry_size > sb->s_blocksize) {
                printk(KERN_ERR
                       "NILFS: too large DAT entry size: %zu bytes.\n",
                       entry_size);
                return -EINVAL;
        } else if (entry_size < NILFS_MIN_DAT_ENTRY_SIZE) {
                printk(KERN_ERR
                       "NILFS: too small DAT entry size: %zu bytes.\n",
                       entry_size);
                return -EINVAL;
        }

        dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO);
        if (unlikely(!dat))
                return -ENOMEM;
        if (!(dat->i_state & I_NEW))
                goto out;

        err = nilfs_mdt_init(dat, NILFS_MDT_GFP, sizeof(*di));
        if (err)
                goto failed;

        err = nilfs_palloc_init_blockgroup(dat, entry_size);
        if (err)
                goto failed;

        di = NILFS_DAT_I(dat);
        lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
        nilfs_palloc_setup_cache(dat, &di->palloc_cache);
        nilfs_mdt_setup_shadow_map(dat, &di->shadow);

        err = nilfs_read_inode_common(dat, raw_inode);
        if (err)
                goto failed;

        unlock_new_inode(dat);
 out:
        *inodep = dat;
        return 0;
 failed:
        iget_failed(dat);
        return err;
}
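
/*
 * Example (sketch): at mount time the DAT inode is instantiated from
 * the raw inode kept in the on-disk metadata; roughly (the entry size
 * and raw inode location come from the super block in practice):
 *
 *      struct inode *dat;
 *      int err;
 *
 *      err = nilfs_dat_read(sb, dat_entry_size, raw_inode, &dat);
 *      if (!err)
 *              ...     // dat can now serve nilfs_dat_translate() etc.
 */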