linux/fs/ntfs/mft.c
   1/**
   2 * mft.c - NTFS kernel mft record operations. Part of the Linux-NTFS project.
   3 *
   4 * Copyright (c) 2001-2006 Anton Altaparmakov
   5 * Copyright (c) 2002 Richard Russon
   6 *
   7 * This program/include file is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU General Public License as published
   9 * by the Free Software Foundation; either version 2 of the License, or
  10 * (at your option) any later version.
  11 *
  12 * This program/include file is distributed in the hope that it will be
  13 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
  14 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 * GNU General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU General Public License
  18 * along with this program (in the main directory of the Linux-NTFS
  19 * distribution in the file COPYING); if not, write to the Free Software
  20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  21 */
  22
  23#include <linux/buffer_head.h>
  24#include <linux/swap.h>
  25
  26#include "attrib.h"
  27#include "aops.h"
  28#include "bitmap.h"
  29#include "debug.h"
  30#include "dir.h"
  31#include "lcnalloc.h"
  32#include "malloc.h"
  33#include "mft.h"
  34#include "ntfs.h"
  35
  36/**
  37 * map_mft_record_page - map the page in which a specific mft record resides
  38 * @ni:         ntfs inode whose mft record page to map
  39 *
  40 * This maps the page in which the mft record of the ntfs inode @ni is situated
  41 * and returns a pointer to the mft record within the mapped page.
  42 *
  43 * Return value needs to be checked with IS_ERR() and if that is true PTR_ERR()
  44 * contains the negative error code returned.
  45 */
  46static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni)
  47{
  48        loff_t i_size;
  49        ntfs_volume *vol = ni->vol;
  50        struct inode *mft_vi = vol->mft_ino;
  51        struct page *page;
  52        unsigned long index, end_index;
  53        unsigned ofs;
  54
  55        BUG_ON(ni->page);
  56        /*
  57         * The index into the page cache and the offset within the page cache
  58         * page of the wanted mft record. FIXME: We need to check for
  59         * overflowing the unsigned long, but I don't think we would ever get
  60         * here if the volume was that big...
  61         */
  62        index = (u64)ni->mft_no << vol->mft_record_size_bits >>
  63                        PAGE_CACHE_SHIFT;
  64        ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
  65
  66        i_size = i_size_read(mft_vi);
  67        /* The maximum valid index into the page cache for $MFT's data. */
  68        end_index = i_size >> PAGE_CACHE_SHIFT;
  69
  70        /* If the wanted index is out of bounds the mft record doesn't exist. */
  71        if (unlikely(index >= end_index)) {
  72                if (index > end_index || (i_size & ~PAGE_CACHE_MASK) < ofs +
  73                                vol->mft_record_size) {
  74                        page = ERR_PTR(-ENOENT);
  75                        ntfs_error(vol->sb, "Attempt to read mft record 0x%lx, "
  76                                        "which is beyond the end of the mft.  "
  77                                        "This is probably a bug in the ntfs "
  78                                        "driver.", ni->mft_no);
  79                        goto err_out;
  80                }
  81        }
  82        /* Read, map, and pin the page. */
  83        page = ntfs_map_page(mft_vi->i_mapping, index);
  84        if (likely(!IS_ERR(page))) {
  85                /* Catch multi sector transfer fixup errors. */
  86                if (likely(ntfs_is_mft_recordp((le32*)(page_address(page) +
  87                                ofs)))) {
  88                        ni->page = page;
  89                        ni->page_ofs = ofs;
  90                        return page_address(page) + ofs;
  91                }
  92                ntfs_error(vol->sb, "Mft record 0x%lx is corrupt.  "
  93                                "Run chkdsk.", ni->mft_no);
  94                ntfs_unmap_page(page);
  95                page = ERR_PTR(-EIO);
  96                NVolSetErrors(vol);
  97        }
  98err_out:
  99        ni->page = NULL;
 100        ni->page_ofs = 0;
 101        return (void*)page;
 102}
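
/*
 * Worked example of the index/offset arithmetic in map_mft_record_page()
 * above (an illustrative sketch only, assuming the common case of 1024-byte
 * mft records, i.e. vol->mft_record_size_bits == 10, and 4096-byte pages,
 * i.e. PAGE_CACHE_SHIFT == 12):
 *
 *      mft_no = 21
 *      byte offset into $MFT data = 21 << 10          = 21504
 *      index = 21504 >> PAGE_CACHE_SHIFT              = 5
 *      ofs   = 21504 & ~PAGE_CACHE_MASK               = 1024
 *
 * I.e. with these sizes each page cache page of $MFT holds four mft records
 * and mft record 21 is the second record in page 5.
 */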
 103
 104/**
 105 * map_mft_record - map, pin and lock an mft record
 106 * @ni:         ntfs inode whose MFT record to map
 107 *
 108 * First, take the mrec_lock mutex.  We might now be sleeping while waiting
 109 * for the mutex if someone else already holds it.
 110 *
 111 * The page of the record is mapped using map_mft_record_page() before being
 112 * returned to the caller.
 113 *
 114 * This in turn uses ntfs_map_page() to get the page containing the wanted mft
 115 * record (it in turn calls read_cache_page() which reads it in from disk if
 116 * necessary, increments the use count on the page so that it cannot disappear
 117 * under us and returns a reference to the page cache page).
 118 *
 119 * If read_cache_page() invokes ntfs_readpage() to load the page from disk, it
 120 * sets PG_locked and clears PG_uptodate on the page. Once I/O has completed
 121 * and the post-read mst fixups on each mft record in the page have been
 122 * performed, the page gets PG_uptodate set and PG_locked cleared (this is done
 123 * in our asynchronous I/O completion handler end_buffer_read_mft_async()).
 124 * ntfs_map_page() waits for PG_locked to become clear and checks if
 125 * PG_uptodate is set and returns an error code if not. This provides
 126 * sufficient protection against races when reading/using the page.
 127 *
 128 * However there is the write mapping to think about. Doing the above described
 129 * checking here will be fine, because when initiating the write we will set
 130 * PG_locked and clear PG_uptodate making sure nobody is touching the page
 131 * contents. Doing the locking this way means that the commit to disk code in
 132 * the page cache code paths is automatically sufficiently locked with us as
 133 * we will not touch a page that has been locked or is not uptodate.  The only
 134 * remaining problem is the write path locking the page while we access it.
 135 *
 136 * So that code will end up having to own the mrec_lock of all mft
 137 * records/inodes present in the page before I/O can proceed. In that case we
 138 * wouldn't need to bother with PG_locked and PG_uptodate as nobody will be
 139 * accessing anything without owning the mrec_lock mutex.  But we do need to
 140 * use them because of the read_cache_page() invocation and the code becomes so
 141 * much simpler this way that it is well worth it.
 142 *
 143 * The mft record is now ours and we return a pointer to it. You need to check
 144 * the returned pointer with IS_ERR() and if that is true, PTR_ERR() will return
 145 * the error code.
 146 *
 147 * NOTE: Caller is responsible for setting the mft record dirty before calling
 148 * unmap_mft_record(). This is obviously only necessary if the caller really
 149 * modified the mft record...
 150 * Q: Do we want to recycle one of the VFS inode state bits instead?
 151 * A: No, the inode ones mean we want to change the mft record, not we want to
 152 * write it out.
 153 */
 154MFT_RECORD *map_mft_record(ntfs_inode *ni)
 155{
 156        MFT_RECORD *m;
 157
 158        ntfs_debug("Entering for mft_no 0x%lx.", ni->mft_no);
 159
 160        /* Make sure the ntfs inode doesn't go away. */
 161        atomic_inc(&ni->count);
 162
 163        /* Serialize access to this mft record. */
 164        mutex_lock(&ni->mrec_lock);
 165
 166        m = map_mft_record_page(ni);
 167        if (likely(!IS_ERR(m)))
 168                return m;
 169
 170        mutex_unlock(&ni->mrec_lock);
 171        atomic_dec(&ni->count);
 172        ntfs_error(ni->vol->sb, "Failed with error code %lu.", -PTR_ERR(m));
 173        return m;
 174}
 175
 176/**
 177 * unmap_mft_record_page - unmap the page in which a specific mft record resides
 178 * @ni:         ntfs inode whose mft record page to unmap
 179 *
 180 * This unmaps the page in which the mft record of the ntfs inode @ni is
 181 * situated and returns. This is a NOOP if highmem is not configured.
 182 *
 183 * The unmap happens via ntfs_unmap_page() which in turn decrements the use
 184 * count on the page thus releasing it from the pinned state.
 185 *
 186 * We do not actually unmap the page from memory of course, as that will be
 187 * done by the page cache code itself when memory pressure increases or
 188 * whatever.
 189 */
 190static inline void unmap_mft_record_page(ntfs_inode *ni)
 191{
 192        BUG_ON(!ni->page);
 193
 194        // TODO: If dirty, blah...
 195        ntfs_unmap_page(ni->page);
 196        ni->page = NULL;
 197        ni->page_ofs = 0;
 198        return;
 199}
 200
 201/**
 202 * unmap_mft_record - release a mapped mft record
 203 * @ni:         ntfs inode whose MFT record to unmap
 204 *
 205 * We release the page mapping and the mrec_lock mutex which unmaps the mft
 206 * record and releases it for others to get hold of. We also release the ntfs
 207 * inode by decrementing the ntfs inode reference count.
 208 *
 209 * NOTE: If caller has modified the mft record, it is imperative to set the mft
 210 * record dirty BEFORE calling unmap_mft_record().
 211 */
 212void unmap_mft_record(ntfs_inode *ni)
 213{
 214        struct page *page = ni->page;
 215
 216        BUG_ON(!page);
 217
 218        ntfs_debug("Entering for mft_no 0x%lx.", ni->mft_no);
 219
 220        unmap_mft_record_page(ni);
 221        mutex_unlock(&ni->mrec_lock);
 222        atomic_dec(&ni->count);
 223        /*
 224         * If pure ntfs_inode, i.e. no vfs inode attached, we leave it to
 225         * ntfs_clear_extent_inode() in the extent inode case, and to the
 226         * caller in the non-extent, yet pure ntfs inode case, to do the actual
 227         * tear down of all structures and freeing of all allocated memory.
 228         */
 229        return;
 230}
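
/*
 * Typical caller pattern for map_mft_record()/unmap_mft_record() (an
 * illustrative sketch only, not code taken from the driver; error handling
 * is abbreviated):
 *
 *      MFT_RECORD *m;
 *
 *      m = map_mft_record(ni);
 *      if (IS_ERR(m))
 *              return PTR_ERR(m);
 *      // ... read and/or modify the mft record @m ...
 *      mark_mft_record_dirty(ni);      // only if @m was actually modified
 *      unmap_mft_record(ni);
 *
 * As noted above, the record must be marked dirty before it is unmapped.
 */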
 231
 232/**
 233 * map_extent_mft_record - load an extent inode and attach it to its base
 234 * @base_ni:    base ntfs inode
 235 * @mref:       mft reference of the extent inode to load
 236 * @ntfs_ino:   on successful return, pointer to the ntfs_inode structure
 237 *
 238 * Load the extent mft record @mref and attach it to its base inode @base_ni.
 239 * Return the mapped extent mft record if IS_ERR(result) is false.  Otherwise
 240 * PTR_ERR(result) gives the negative error code.
 241 *
 242 * On successful return, @ntfs_ino contains a pointer to the ntfs_inode
 243 * structure of the mapped extent inode.
 244 */
 245MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref,
 246                ntfs_inode **ntfs_ino)
 247{
 248        MFT_RECORD *m;
 249        ntfs_inode *ni = NULL;
 250        ntfs_inode **extent_nis = NULL;
 251        int i;
 252        unsigned long mft_no = MREF(mref);
 253        u16 seq_no = MSEQNO(mref);
 254        bool destroy_ni = false;
 255
 256        ntfs_debug("Mapping extent mft record 0x%lx (base mft record 0x%lx).",
 257                        mft_no, base_ni->mft_no);
 258        /* Make sure the base ntfs inode doesn't go away. */
 259        atomic_inc(&base_ni->count);
 260        /*
 261         * Check if this extent inode has already been added to the base inode,
 262         * in which case just return it. If not found, add it to the base
 263         * inode before returning it.
 264         */
 265        mutex_lock(&base_ni->extent_lock);
 266        if (base_ni->nr_extents > 0) {
 267                extent_nis = base_ni->ext.extent_ntfs_inos;
 268                for (i = 0; i < base_ni->nr_extents; i++) {
 269                        if (mft_no != extent_nis[i]->mft_no)
 270                                continue;
 271                        ni = extent_nis[i];
 272                        /* Make sure the ntfs inode doesn't go away. */
 273                        atomic_inc(&ni->count);
 274                        break;
 275                }
 276        }
 277        if (likely(ni != NULL)) {
 278                mutex_unlock(&base_ni->extent_lock);
 279                atomic_dec(&base_ni->count);
 280                /* We found the record; just have to map and return it. */
 281                m = map_mft_record(ni);
 282                /* map_mft_record() has incremented this on success. */
 283                atomic_dec(&ni->count);
 284                if (likely(!IS_ERR(m))) {
 285                        /* Verify the sequence number. */
 286                        if (likely(le16_to_cpu(m->sequence_number) == seq_no)) {
 287                                ntfs_debug("Done 1.");
 288                                *ntfs_ino = ni;
 289                                return m;
 290                        }
 291                        unmap_mft_record(ni);
 292                        ntfs_error(base_ni->vol->sb, "Found stale extent mft "
 293                                        "reference! Corrupt filesystem. "
 294                                        "Run chkdsk.");
 295                        return ERR_PTR(-EIO);
 296                }
 297map_err_out:
 298                ntfs_error(base_ni->vol->sb, "Failed to map extent "
 299                                "mft record, error code %ld.", -PTR_ERR(m));
 300                return m;
 301        }
 302        /* Record wasn't there. Get a new ntfs inode and initialize it. */
 303        ni = ntfs_new_extent_inode(base_ni->vol->sb, mft_no);
 304        if (unlikely(!ni)) {
 305                mutex_unlock(&base_ni->extent_lock);
 306                atomic_dec(&base_ni->count);
 307                return ERR_PTR(-ENOMEM);
 308        }
 309        ni->vol = base_ni->vol;
 310        ni->seq_no = seq_no;
 311        ni->nr_extents = -1;
 312        ni->ext.base_ntfs_ino = base_ni;
 313        /* Now map the record. */
 314        m = map_mft_record(ni);
 315        if (IS_ERR(m)) {
 316                mutex_unlock(&base_ni->extent_lock);
 317                atomic_dec(&base_ni->count);
 318                ntfs_clear_extent_inode(ni);
 319                goto map_err_out;
 320        }
 321        /* Verify the sequence number if it is present. */
 322        if (seq_no && (le16_to_cpu(m->sequence_number) != seq_no)) {
 323                ntfs_error(base_ni->vol->sb, "Found stale extent mft "
 324                                "reference! Corrupt filesystem. Run chkdsk.");
 325                destroy_ni = true;
 326                m = ERR_PTR(-EIO);
 327                goto unm_err_out;
 328        }
 329        /* Attach extent inode to base inode, reallocating memory if needed. */
 330        if (!(base_ni->nr_extents & 3)) {
 331                ntfs_inode **tmp;
 332                int new_size = (base_ni->nr_extents + 4) * sizeof(ntfs_inode *);
 333
 334                tmp = kmalloc(new_size, GFP_NOFS);
 335                if (unlikely(!tmp)) {
 336                        ntfs_error(base_ni->vol->sb, "Failed to allocate "
 337                                        "internal buffer.");
 338                        destroy_ni = true;
 339                        m = ERR_PTR(-ENOMEM);
 340                        goto unm_err_out;
 341                }
 342                if (base_ni->nr_extents) {
 343                        BUG_ON(!base_ni->ext.extent_ntfs_inos);
 344                        memcpy(tmp, base_ni->ext.extent_ntfs_inos, new_size -
 345                                        4 * sizeof(ntfs_inode *));
 346                        kfree(base_ni->ext.extent_ntfs_inos);
 347                }
 348                base_ni->ext.extent_ntfs_inos = tmp;
 349        }
 350        base_ni->ext.extent_ntfs_inos[base_ni->nr_extents++] = ni;
 351        mutex_unlock(&base_ni->extent_lock);
 352        atomic_dec(&base_ni->count);
 353        ntfs_debug("Done 2.");
 354        *ntfs_ino = ni;
 355        return m;
 356unm_err_out:
 357        unmap_mft_record(ni);
 358        mutex_unlock(&base_ni->extent_lock);
 359        atomic_dec(&base_ni->count);
 360        /*
 361         * If the extent inode was not attached to the base inode we need to
 362         * release it or we will leak memory.
 363         */
 364        if (destroy_ni)
 365                ntfs_clear_extent_inode(ni);
 366        return m;
 367}
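
/*
 * Illustrative sketch of mapping an extent mft record from its base inode
 * (not code taken from the driver; @mref would typically come from an
 * attribute list entry and error handling is abbreviated):
 *
 *      ntfs_inode *eni;
 *      MFT_RECORD *em;
 *
 *      em = map_extent_mft_record(base_ni, mref, &eni);
 *      if (IS_ERR(em))
 *              return PTR_ERR(em);
 *      // ... work with the extent mft record @em and its ntfs inode @eni ...
 *      unmap_mft_record(eni);
 */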
 368
 369#ifdef NTFS_RW
 370
 371/**
 372 * __mark_mft_record_dirty - set the mft record and the page containing it dirty
 373 * @ni:         ntfs inode describing the mapped mft record
 374 *
 375 * Internal function.  Users should call mark_mft_record_dirty() instead.
 376 *
 377 * Set the mapped (extent) mft record of the (base or extent) ntfs inode @ni,
 378 * as well as the page containing the mft record, dirty.  Also, mark the base
 379 * vfs inode dirty.  This ensures that any changes to the mft record are
 380 * written out to disk.
 381 *
 382 * NOTE:  We only set I_DIRTY_SYNC and I_DIRTY_DATASYNC (and not I_DIRTY_PAGES)
 383 * on the base vfs inode, because even though file data may have been modified,
 384 * it is dirty in the inode meta data rather than the data page cache of the
 385 * inode, and thus there are no data pages that need writing out.  Therefore, a
 386 * full mark_inode_dirty() is overkill.  A mark_inode_dirty_sync(), on the
 387 * other hand, is not sufficient, because ->write_inode needs to be called even
 388 * in case of fdatasync. This needs to happen or the file data would not
 389 * necessarily hit the device synchronously, even though the vfs inode has the
 390 * O_SYNC flag set.  Also, I_DIRTY_DATASYNC simply "feels" better than just
 391 * I_DIRTY_SYNC, since the file data has not actually hit the block device yet,
 392 * which is not what I_DIRTY_SYNC on its own would suggest.
 393 */
 394void __mark_mft_record_dirty(ntfs_inode *ni)
 395{
 396        ntfs_inode *base_ni;
 397
 398        ntfs_debug("Entering for inode 0x%lx.", ni->mft_no);
 399        BUG_ON(NInoAttr(ni));
 400        mark_ntfs_record_dirty(ni->page, ni->page_ofs);
 401        /* Determine the base vfs inode and mark it dirty, too. */
 402        mutex_lock(&ni->extent_lock);
 403        if (likely(ni->nr_extents >= 0))
 404                base_ni = ni;
 405        else
 406                base_ni = ni->ext.base_ntfs_ino;
 407        mutex_unlock(&ni->extent_lock);
 408        __mark_inode_dirty(VFS_I(base_ni), I_DIRTY_SYNC | I_DIRTY_DATASYNC);
 409}
 410
 411static const char *ntfs_please_email = "Please email "
 412                "linux-ntfs-dev@lists.sourceforge.net and say that you saw "
 413                "this message.  Thank you.";
 414
 415/**
 416 * ntfs_sync_mft_mirror_umount - synchronise an mft record to the mft mirror
 417 * @vol:        ntfs volume on which the mft record to synchronize resides
 418 * @mft_no:     mft record number of mft record to synchronize
 419 * @m:          mapped, mst protected (extent) mft record to synchronize
 420 *
 421 * Write the mapped, mst protected (extent) mft record @m with mft record
 422 * number @mft_no to the mft mirror ($MFTMirr) of the ntfs volume @vol,
 423 * bypassing the page cache and the $MFTMirr inode itself.
 424 *
 425 * This function is only for use at umount time when the mft mirror inode has
 426 * already been disposed of.  We BUG() if we are called while the mft mirror
 427 * inode is still attached to the volume.
 428 *
 429 * On success return 0.  On error return -errno.
 430 *
 431 * NOTE:  This function is not implemented yet as I am not convinced it can
 432 * actually be triggered considering the sequence of commits we do in super.c::
 433 * ntfs_put_super().  But just in case we provide this place holder as the
 434 * alternative would be either to BUG() or to get a NULL pointer dereference
 435 * and Oops.
 436 */
 437static int ntfs_sync_mft_mirror_umount(ntfs_volume *vol,
 438                const unsigned long mft_no, MFT_RECORD *m)
 439{
 440        BUG_ON(vol->mftmirr_ino);
 441        ntfs_error(vol->sb, "Umount time mft mirror syncing is not "
 442                        "implemented yet.  %s", ntfs_please_email);
 443        return -EOPNOTSUPP;
 444}
 445
 446/**
 447 * ntfs_sync_mft_mirror - synchronize an mft record to the mft mirror
 448 * @vol:        ntfs volume on which the mft record to synchronize resides
 449 * @mft_no:     mft record number of mft record to synchronize
 450 * @m:          mapped, mst protected (extent) mft record to synchronize
 451 * @sync:       if true, wait for i/o completion
 452 *
 453 * Write the mapped, mst protected (extent) mft record @m with mft record
 454 * number @mft_no to the mft mirror ($MFTMirr) of the ntfs volume @vol.
 455 *
 456 * On success return 0.  On error return -errno and set the volume errors flag
 457 * in the ntfs volume @vol.
 458 *
 459 * NOTE:  We always perform synchronous i/o and ignore the @sync parameter.
 460 *
 461 * TODO:  If @sync is false, want to do truly asynchronous i/o, i.e. just
 462 * schedule i/o via ->writepage or do it via kntfsd or whatever.
 463 */
 464int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
 465                MFT_RECORD *m, int sync)
 466{
 467        struct page *page;
 468        unsigned int blocksize = vol->sb->s_blocksize;
 469        int max_bhs = vol->mft_record_size / blocksize;
 470        struct buffer_head *bhs[max_bhs];
 471        struct buffer_head *bh, *head;
 472        u8 *kmirr;
 473        runlist_element *rl;
 474        unsigned int block_start, block_end, m_start, m_end, page_ofs;
 475        int i_bhs, nr_bhs, err = 0;
 476        unsigned char blocksize_bits = vol->sb->s_blocksize_bits;
 477
 478        ntfs_debug("Entering for inode 0x%lx.", mft_no);
 479        BUG_ON(!max_bhs);
 480        if (unlikely(!vol->mftmirr_ino)) {
 481                /* This could happen during umount... */
 482                err = ntfs_sync_mft_mirror_umount(vol, mft_no, m);
 483                if (likely(!err))
 484                        return err;
 485                goto err_out;
 486        }
 487        /* Get the page containing the mirror copy of the mft record @m. */
 488        page = ntfs_map_page(vol->mftmirr_ino->i_mapping, mft_no >>
 489                        (PAGE_CACHE_SHIFT - vol->mft_record_size_bits));
 490        if (IS_ERR(page)) {
 491                ntfs_error(vol->sb, "Failed to map mft mirror page.");
 492                err = PTR_ERR(page);
 493                goto err_out;
 494        }
 495        lock_page(page);
 496        BUG_ON(!PageUptodate(page));
 497        ClearPageUptodate(page);
 498        /* Offset of the mft mirror record inside the page. */
 499        page_ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
 500        /* The address in the page of the mirror copy of the mft record @m. */
 501        kmirr = page_address(page) + page_ofs;
 502        /* Copy the mst protected mft record to the mirror. */
 503        memcpy(kmirr, m, vol->mft_record_size);
 504        /* Create uptodate buffers if not present. */
 505        if (unlikely(!page_has_buffers(page))) {
 506                struct buffer_head *tail;
 507
 508                bh = head = alloc_page_buffers(page, blocksize, 1);
 509                do {
 510                        set_buffer_uptodate(bh);
 511                        tail = bh;
 512                        bh = bh->b_this_page;
 513                } while (bh);
 514                tail->b_this_page = head;
 515                attach_page_buffers(page, head);
 516        }
 517        bh = head = page_buffers(page);
 518        BUG_ON(!bh);
 519        rl = NULL;
 520        nr_bhs = 0;
 521        block_start = 0;
 522        m_start = kmirr - (u8*)page_address(page);
 523        m_end = m_start + vol->mft_record_size;
 524        do {
 525                block_end = block_start + blocksize;
 526                /* If the buffer is outside the mft record, skip it. */
 527                if (block_end <= m_start)
 528                        continue;
 529                if (unlikely(block_start >= m_end))
 530                        break;
 531                /* Need to map the buffer if it is not mapped already. */
 532                if (unlikely(!buffer_mapped(bh))) {
 533                        VCN vcn;
 534                        LCN lcn;
 535                        unsigned int vcn_ofs;
 536
 537                        bh->b_bdev = vol->sb->s_bdev;
 538                        /* Obtain the vcn and offset of the current block. */
 539                        vcn = ((VCN)mft_no << vol->mft_record_size_bits) +
 540                                        (block_start - m_start);
 541                        vcn_ofs = vcn & vol->cluster_size_mask;
 542                        vcn >>= vol->cluster_size_bits;
 543                        if (!rl) {
 544                                down_read(&NTFS_I(vol->mftmirr_ino)->
 545                                                runlist.lock);
 546                                rl = NTFS_I(vol->mftmirr_ino)->runlist.rl;
 547                                /*
 548                                 * $MFTMirr always has the whole of its runlist
 549                                 * in memory.
 550                                 */
 551                                BUG_ON(!rl);
 552                        }
 553                        /* Seek to element containing target vcn. */
 554                        while (rl->length && rl[1].vcn <= vcn)
 555                                rl++;
 556                        lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
 557                        /* For $MFTMirr, only lcn >= 0 is a successful remap. */
 558                        if (likely(lcn >= 0)) {
 559                                /* Setup buffer head to correct block. */
 560                                bh->b_blocknr = ((lcn <<
 561                                                vol->cluster_size_bits) +
 562                                                vcn_ofs) >> blocksize_bits;
 563                                set_buffer_mapped(bh);
 564                        } else {
 565                                bh->b_blocknr = -1;
 566                                ntfs_error(vol->sb, "Cannot write mft mirror "
 567                                                "record 0x%lx because its "
 568                                                "location on disk could not "
 569                                                "be determined (error code "
 570                                                "%lli).", mft_no,
 571                                                (long long)lcn);
 572                                err = -EIO;
 573                        }
 574                }
 575                BUG_ON(!buffer_uptodate(bh));
 576                BUG_ON(!nr_bhs && (m_start != block_start));
 577                BUG_ON(nr_bhs >= max_bhs);
 578                bhs[nr_bhs++] = bh;
 579                BUG_ON((nr_bhs >= max_bhs) && (m_end != block_end));
 580        } while (block_start = block_end, (bh = bh->b_this_page) != head);
 581        if (unlikely(rl))
 582                up_read(&NTFS_I(vol->mftmirr_ino)->runlist.lock);
 583        if (likely(!err)) {
 584                /* Lock buffers and start synchronous write i/o on them. */
 585                for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
 586                        struct buffer_head *tbh = bhs[i_bhs];
 587
 588                        if (!trylock_buffer(tbh))
 589                                BUG();
 590                        BUG_ON(!buffer_uptodate(tbh));
 591                        clear_buffer_dirty(tbh);
 592                        get_bh(tbh);
 593                        tbh->b_end_io = end_buffer_write_sync;
 594                        submit_bh(WRITE, tbh);
 595                }
 596                /* Wait on i/o completion of buffers. */
 597                for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
 598                        struct buffer_head *tbh = bhs[i_bhs];
 599
 600                        wait_on_buffer(tbh);
 601                        if (unlikely(!buffer_uptodate(tbh))) {
 602                                err = -EIO;
 603                                /*
 604                                 * Set the buffer uptodate so the page and
 605                                 * buffer states do not become out of sync.
 606                                 */
 607                                set_buffer_uptodate(tbh);
 608                        }
 609                }
 610        } else /* if (unlikely(err)) */ {
 611                /* Clean the buffers. */
 612                for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++)
 613                        clear_buffer_dirty(bhs[i_bhs]);
 614        }
 615        /* Current state: all buffers are clean, unlocked, and uptodate. */
 616        /* Remove the mst protection fixups again. */
 617        post_write_mst_fixup((NTFS_RECORD*)kmirr);
 618        flush_dcache_page(page);
 619        SetPageUptodate(page);
 620        unlock_page(page);
 621        ntfs_unmap_page(page);
 622        if (likely(!err)) {
 623                ntfs_debug("Done.");
 624        } else {
 625                ntfs_error(vol->sb, "I/O error while writing mft mirror "
 626                                "record 0x%lx!", mft_no);
 627err_out:
 628                ntfs_error(vol->sb, "Failed to synchronize $MFTMirr (error "
 629                                "code %i).  Volume will be left marked dirty "
 630                                "on umount.  Run ntfsfix on the partition "
 631                                "after umounting to correct this.", -err);
 632                NVolSetErrors(vol);
 633        }
 634        return err;
 635}
 636
 637/**
 638 * write_mft_record_nolock - write out a mapped (extent) mft record
 639 * @ni:         ntfs inode describing the mapped (extent) mft record
 640 * @m:          mapped (extent) mft record to write
 641 * @sync:       if true, wait for i/o completion
 642 *
 643 * Write the mapped (extent) mft record @m described by the (regular or extent)
 644 * ntfs inode @ni to backing store.  If the mft record @m has a counterpart in
 645 * the mft mirror, that is also updated.
 646 *
 647 * We only write the mft record if the ntfs inode @ni is dirty and the first
 648 * buffer belonging to its mft record is dirty, too.  We ignore the dirty state
 649 * of subsequent buffers because we could have raced with
 650 * fs/ntfs/aops.c::mark_ntfs_record_dirty().
 651 *
 652 * On success, clean the mft record and return 0.  On error, leave the mft
 653 * record dirty and return -errno.
 654 *
 655 * NOTE:  We always perform synchronous i/o and ignore the @sync parameter.
 656 * However, if the mft record has a counterpart in the mft mirror and @sync is
 657 * true, we write the mft record, wait for i/o completion, and only then write
 658 * the mft mirror copy.  This ensures that if the system crashes either the mft
 659 * or the mft mirror will contain a self-consistent mft record @m.  If @sync is
 660 * false on the other hand, we start i/o on both and then wait for completion
 661 * on them.  This provides a speedup but no longer guarantees that you will end
 662 * up with a self-consistent mft record in the case of a crash but if you asked
 663 * for asynchronous writing you probably do not care about that anyway.
 664 *
 665 * TODO:  If @sync is false, want to do truly asynchronous i/o, i.e. just
 666 * schedule i/o via ->writepage or do it via kntfsd or whatever.
 667 */
 668int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
 669{
 670        ntfs_volume *vol = ni->vol;
 671        struct page *page = ni->page;
 672        unsigned int blocksize = vol->sb->s_blocksize;
 673        unsigned char blocksize_bits = vol->sb->s_blocksize_bits;
 674        int max_bhs = vol->mft_record_size / blocksize;
 675        struct buffer_head *bhs[max_bhs];
 676        struct buffer_head *bh, *head;
 677        runlist_element *rl;
 678        unsigned int block_start, block_end, m_start, m_end;
 679        int i_bhs, nr_bhs, err = 0;
 680
 681        ntfs_debug("Entering for inode 0x%lx.", ni->mft_no);
 682        BUG_ON(NInoAttr(ni));
 683        BUG_ON(!max_bhs);
 684        BUG_ON(!PageLocked(page));
 685        /*
 686         * If the ntfs_inode is clean no need to do anything.  If it is dirty,
 687         * mark it as clean now so that it can be redirtied later on if needed.
 688         * There is no danger of races since the caller is holding the locks
 689         * for the mft record @m and the page it is in.
 690         */
 691        if (!NInoTestClearDirty(ni))
 692                goto done;
 693        bh = head = page_buffers(page);
 694        BUG_ON(!bh);
 695        rl = NULL;
 696        nr_bhs = 0;
 697        block_start = 0;
 698        m_start = ni->page_ofs;
 699        m_end = m_start + vol->mft_record_size;
 700        do {
 701                block_end = block_start + blocksize;
 702                /* If the buffer is outside the mft record, skip it. */
 703                if (block_end <= m_start)
 704                        continue;
 705                if (unlikely(block_start >= m_end))
 706                        break;
 707                /*
 708                 * If this block is not the first one in the record, we ignore
 709                 * the buffer's dirty state because we could have raced with a
 710                 * parallel mark_ntfs_record_dirty().
 711                 */
 712                if (block_start == m_start) {
 713                        /* This block is the first one in the record. */
 714                        if (!buffer_dirty(bh)) {
 715                                BUG_ON(nr_bhs);
 716                                /* Clean records are not written out. */
 717                                break;
 718                        }
 719                }
 720                /* Need to map the buffer if it is not mapped already. */
 721                if (unlikely(!buffer_mapped(bh))) {
 722                        VCN vcn;
 723                        LCN lcn;
 724                        unsigned int vcn_ofs;
 725
 726                        bh->b_bdev = vol->sb->s_bdev;
 727                        /* Obtain the vcn and offset of the current block. */
 728                        vcn = ((VCN)ni->mft_no << vol->mft_record_size_bits) +
 729                                        (block_start - m_start);
 730                        vcn_ofs = vcn & vol->cluster_size_mask;
 731                        vcn >>= vol->cluster_size_bits;
 732                        if (!rl) {
 733                                down_read(&NTFS_I(vol->mft_ino)->runlist.lock);
 734                                rl = NTFS_I(vol->mft_ino)->runlist.rl;
 735                                BUG_ON(!rl);
 736                        }
 737                        /* Seek to element containing target vcn. */
 738                        while (rl->length && rl[1].vcn <= vcn)
 739                                rl++;
 740                        lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
 741                        /* For $MFT, only lcn >= 0 is a successful remap. */
 742                        if (likely(lcn >= 0)) {
 743                                /* Setup buffer head to correct block. */
 744                                bh->b_blocknr = ((lcn <<
 745                                                vol->cluster_size_bits) +
 746                                                vcn_ofs) >> blocksize_bits;
 747                                set_buffer_mapped(bh);
 748                        } else {
 749                                bh->b_blocknr = -1;
 750                                ntfs_error(vol->sb, "Cannot write mft record "
 751                                                "0x%lx because its location "
 752                                                "on disk could not be "
 753                                                "determined (error code %lli).",
 754                                                ni->mft_no, (long long)lcn);
 755                                err = -EIO;
 756                        }
 757                }
 758                BUG_ON(!buffer_uptodate(bh));
 759                BUG_ON(!nr_bhs && (m_start != block_start));
 760                BUG_ON(nr_bhs >= max_bhs);
 761                bhs[nr_bhs++] = bh;
 762                BUG_ON((nr_bhs >= max_bhs) && (m_end != block_end));
 763        } while (block_start = block_end, (bh = bh->b_this_page) != head);
 764        if (unlikely(rl))
 765                up_read(&NTFS_I(vol->mft_ino)->runlist.lock);
 766        if (!nr_bhs)
 767                goto done;
 768        if (unlikely(err))
 769                goto cleanup_out;
 770        /* Apply the mst protection fixups. */
 771        err = pre_write_mst_fixup((NTFS_RECORD*)m, vol->mft_record_size);
 772        if (err) {
 773                ntfs_error(vol->sb, "Failed to apply mst fixups!");
 774                goto cleanup_out;
 775        }
 776        flush_dcache_mft_record_page(ni);
 777        /* Lock buffers and start synchronous write i/o on them. */
 778        for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
 779                struct buffer_head *tbh = bhs[i_bhs];
 780
 781                if (!trylock_buffer(tbh))
 782                        BUG();
 783                BUG_ON(!buffer_uptodate(tbh));
 784                clear_buffer_dirty(tbh);
 785                get_bh(tbh);
 786                tbh->b_end_io = end_buffer_write_sync;
 787                submit_bh(WRITE, tbh);
 788        }
 789        /* Synchronize the mft mirror now if not @sync. */
 790        if (!sync && ni->mft_no < vol->mftmirr_size)
 791                ntfs_sync_mft_mirror(vol, ni->mft_no, m, sync);
 792        /* Wait on i/o completion of buffers. */
 793        for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
 794                struct buffer_head *tbh = bhs[i_bhs];
 795
 796                wait_on_buffer(tbh);
 797                if (unlikely(!buffer_uptodate(tbh))) {
 798                        err = -EIO;
 799                        /*
 800                         * Set the buffer uptodate so the page and buffer
 801                         * states do not become out of sync.
 802                         */
 803                        if (PageUptodate(page))
 804                                set_buffer_uptodate(tbh);
 805                }
 806        }
 807        /* If @sync, now synchronize the mft mirror. */
 808        if (sync && ni->mft_no < vol->mftmirr_size)
 809                ntfs_sync_mft_mirror(vol, ni->mft_no, m, sync);
 810        /* Remove the mst protection fixups again. */
 811        post_write_mst_fixup((NTFS_RECORD*)m);
 812        flush_dcache_mft_record_page(ni);
 813        if (unlikely(err)) {
 814                /* I/O error during writing.  This is really bad! */
 815                ntfs_error(vol->sb, "I/O error while writing mft record "
 816                                "0x%lx!  Marking base inode as bad.  You "
 817                                "should unmount the volume and run chkdsk.",
 818                                ni->mft_no);
 819                goto err_out;
 820        }
 821done:
 822        ntfs_debug("Done.");
 823        return 0;
 824cleanup_out:
 825        /* Clean the buffers. */
 826        for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++)
 827                clear_buffer_dirty(bhs[i_bhs]);
 828err_out:
 829        /*
 830         * Current state: all buffers are clean, unlocked, and uptodate.
 831         * The caller should mark the base inode as bad so that no more i/o
 832         * happens.  ->clear_inode() will still be invoked so all extent inodes
 833         * and other allocated memory will be freed.
 834         */
 835        if (err == -ENOMEM) {
 836                ntfs_error(vol->sb, "Not enough memory to write mft record.  "
 837                                "Redirtying so the write is retried later.");
 838                mark_mft_record_dirty(ni);
 839                err = 0;
 840        } else
 841                NVolSetErrors(vol);
 842        return err;
 843}
 844
 845/**
 846 * ntfs_may_write_mft_record - check if an mft record may be written out
 847 * @vol:        [IN]  ntfs volume on which the mft record to check resides
 848 * @mft_no:     [IN]  mft record number of the mft record to check
 849 * @m:          [IN]  mapped mft record to check
 850 * @locked_ni:  [OUT] caller has to unlock this ntfs inode if one is returned
 851 *
 852 * Check if the mapped (base or extent) mft record @m with mft record number
 853 * @mft_no belonging to the ntfs volume @vol may be written out.  If necessary
 854 * and possible the ntfs inode of the mft record is locked and the base vfs
 855 * inode is pinned.  The locked ntfs inode is then returned in @locked_ni.  The
 856 * caller is responsible for unlocking the ntfs inode and unpinning the base
 857 * vfs inode.
 858 *
 859 * Return 'true' if the mft record may be written out and 'false' if not.
 860 *
 861 * The caller has locked the page and cleared the uptodate flag on it which
 862 * means that we can safely write out any dirty mft records that do not have
 863 * their inodes in icache as determined by ilookup5() as anyone
 864 * opening/creating such an inode would block when attempting to map the mft
 865 * record in read_cache_page() until we are finished with the write out.
 866 *
 867 * Here is a description of the tests we perform:
 868 *
 869 * If the inode is found in icache we know the mft record must be a base mft
 870 * record.  If it is dirty, we do not write it and return 'false' as the vfs
 871 * inode write paths will result in the access times being updated which would
 872 * cause the base mft record to be redirtied and written out again.  (We know
 873 * the access time update will modify the base mft record because Windows
 874 * chkdsk complains if the standard information attribute is not in the base
 875 * mft record.)
 876 *
 877 * If the inode is in icache and not dirty, we attempt to lock the mft record
 878 * and if we find the lock was already taken, it is not safe to write the mft
 879 * record and we return 'false'.
 880 *
 881 * If we manage to obtain the lock we have exclusive access to the mft record,
 882 * which also allows us safe writeout of the mft record.  We then set
 883 * @locked_ni to the locked ntfs inode and return 'true'.
 884 *
 885 * Note we cannot just lock the mft record and sleep while waiting for the lock
 886 * because this would deadlock due to lock reversal (normally the mft record is
 887 * locked before the page is locked but we already have the page locked here
 888 * when we try to lock the mft record).
 889 *
 890 * If the inode is not in icache we need to perform further checks.
 891 *
 892 * If the mft record is not a FILE record or it is a base mft record, we can
 893 * safely write it and return 'true'.
 894 *
 895 * We now know the mft record is an extent mft record.  We check if the inode
 896 * corresponding to its base mft record is in icache and obtain a reference to
 897 * it if it is.  If it is not, we can safely write it and return 'true'.
 898 *
 899 * We now have the base inode for the extent mft record.  We check if it has an
 900 * ntfs inode for the extent mft record attached and if not it is safe to write
 901 * the extent mft record and we return 'true'.
 902 *
 903 * The ntfs inode for the extent mft record is attached to the base inode so we
 904 * attempt to lock the extent mft record and if we find the lock was already
 905 * taken, it is not safe to write the extent mft record and we return 'false'.
 906 *
 907 * If we manage to obtain the lock we have exclusive access to the extent mft
 908 * record, which also allows us safe writeout of the extent mft record.  We
 909 * set the ntfs inode of the extent mft record clean and then set @locked_ni to
 910 * the now locked ntfs inode and return 'true'.
 911 *
 912 * Note, the reason for actually writing dirty mft records here and not just
 913 * relying on the vfs inode dirty code paths is that we can have mft records
 914 * modified without them ever having actual inodes in memory.  Also we can have
 915 * dirty mft records with clean ntfs inodes in memory.  None of the described
 916 * cases would result in the dirty mft records being written out if we only
 917 * relied on the vfs inode dirty code paths.  And these cases can really occur
 918 * during allocation of new mft records and in particular when the
 919 * initialized_size of the $MFT/$DATA attribute is extended and the new space
 920 * is initialized using ntfs_mft_record_format().  The clean inode can then
 921 * appear if the mft record is reused for a new inode before it got written
 922 * out.
 923 */
 924bool ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no,
 925                const MFT_RECORD *m, ntfs_inode **locked_ni)
 926{
 927        struct super_block *sb = vol->sb;
 928        struct inode *mft_vi = vol->mft_ino;
 929        struct inode *vi;
 930        ntfs_inode *ni, *eni, **extent_nis;
 931        int i;
 932        ntfs_attr na;
 933
 934        ntfs_debug("Entering for inode 0x%lx.", mft_no);
 935        /*
 936         * Normally we do not return a locked inode so set @locked_ni to NULL.
 937         */
 938        BUG_ON(!locked_ni);
 939        *locked_ni = NULL;
 940        /*
 941         * Check if the inode corresponding to this mft record is in the VFS
 942         * inode cache and obtain a reference to it if it is.
 943         */
 944        ntfs_debug("Looking for inode 0x%lx in icache.", mft_no);
 945        na.mft_no = mft_no;
 946        na.name = NULL;
 947        na.name_len = 0;
 948        na.type = AT_UNUSED;
 949        /*
 950         * Optimize inode 0, i.e. $MFT itself, since we have it in memory and
 951         * we get here for it rather often.
 952         */
 953        if (!mft_no) {
 954                /* Balance the below iput(). */
 955                vi = igrab(mft_vi);
 956                BUG_ON(vi != mft_vi);
 957        } else {
 958                /*
 959                  * Have to use ilookup5_nowait() since ilookup5() waits for the
 960                  * inode lock, which would deadlock ntfs when a concurrent inode
 961                  * write via the inode dirty code paths races with the page
 962                  * dirty code path that is writing out $MFT, as is the case
 963                  * here.
 964                 */
 965                vi = ilookup5_nowait(sb, mft_no, (test_t)ntfs_test_inode, &na);
 966        }
 967        if (vi) {
 968                ntfs_debug("Base inode 0x%lx is in icache.", mft_no);
 969                /* The inode is in icache. */
 970                ni = NTFS_I(vi);
 971                /* Take a reference to the ntfs inode. */
 972                atomic_inc(&ni->count);
 973                /* If the inode is dirty, do not write this record. */
 974                if (NInoDirty(ni)) {
 975                        ntfs_debug("Inode 0x%lx is dirty, do not write it.",
 976                                        mft_no);
 977                        atomic_dec(&ni->count);
 978                        iput(vi);
 979                        return false;
 980                }
 981                ntfs_debug("Inode 0x%lx is not dirty.", mft_no);
 982                /* The inode is not dirty, try to take the mft record lock. */
 983                if (unlikely(!mutex_trylock(&ni->mrec_lock))) {
 984                        ntfs_debug("Mft record 0x%lx is already locked, do "
 985                                        "not write it.", mft_no);
 986                        atomic_dec(&ni->count);
 987                        iput(vi);
 988                        return false;
 989                }
 990                ntfs_debug("Managed to lock mft record 0x%lx, write it.",
 991                                mft_no);
 992                /*
 993                 * The write has to occur while we hold the mft record lock so
 994                 * return the locked ntfs inode.
 995                 */
 996                *locked_ni = ni;
 997                return true;
 998        }
 999        ntfs_debug("Inode 0x%lx is not in icache.", mft_no);
1000        /* The inode is not in icache. */
1001        /* Write the record if it is not a mft record (type "FILE"). */
1002        if (!ntfs_is_mft_record(m->magic)) {
1003                ntfs_debug("Mft record 0x%lx is not a FILE record, write it.",
1004                                mft_no);
1005                return true;
1006        }
1007        /* Write the mft record if it is a base inode. */
1008        if (!m->base_mft_record) {
1009                ntfs_debug("Mft record 0x%lx is a base record, write it.",
1010                                mft_no);
1011                return true;
1012        }
1013        /*
1014         * This is an extent mft record.  Check if the inode corresponding to
1015         * its base mft record is in icache and obtain a reference to it if it
1016         * is.
1017         */
1018        na.mft_no = MREF_LE(m->base_mft_record);
1019        ntfs_debug("Mft record 0x%lx is an extent record.  Looking for base "
1020                        "inode 0x%lx in icache.", mft_no, na.mft_no);
1021        if (!na.mft_no) {
1022                /* Balance the below iput(). */
1023                vi = igrab(mft_vi);
1024                BUG_ON(vi != mft_vi);
1025        } else
1026                vi = ilookup5_nowait(sb, na.mft_no, (test_t)ntfs_test_inode,
1027                                &na);
1028        if (!vi) {
1029                /*
1030                 * The base inode is not in icache, write this extent mft
1031                 * record.
1032                 */
1033                ntfs_debug("Base inode 0x%lx is not in icache, write the "
1034                                "extent record.", na.mft_no);
1035                return true;
1036        }
1037        ntfs_debug("Base inode 0x%lx is in icache.", na.mft_no);
1038        /*
1039         * The base inode is in icache.  Check if it has the extent inode
1040         * corresponding to this extent mft record attached.
1041         */
1042        ni = NTFS_I(vi);
1043        mutex_lock(&ni->extent_lock);
1044        if (ni->nr_extents <= 0) {
1045                /*
1046                 * The base inode has no attached extent inodes, write this
1047                 * extent mft record.
1048                 */
1049                mutex_unlock(&ni->extent_lock);
1050                iput(vi);
1051                ntfs_debug("Base inode 0x%lx has no attached extent inodes, "
1052                                "write the extent record.", na.mft_no);
1053                return true;
1054        }
1055        /* Iterate over the attached extent inodes. */
1056        extent_nis = ni->ext.extent_ntfs_inos;
1057        for (eni = NULL, i = 0; i < ni->nr_extents; ++i) {
1058                if (mft_no == extent_nis[i]->mft_no) {
1059                        /*
1060                         * Found the extent inode corresponding to this extent
1061                         * mft record.
1062                         */
1063                        eni = extent_nis[i];
1064                        break;
1065                }
1066        }
1067        /*
1068         * If the extent inode was not attached to the base inode, write this
1069         * extent mft record.
1070         */
1071        if (!eni) {
1072                mutex_unlock(&ni->extent_lock);
1073                iput(vi);
1074                ntfs_debug("Extent inode 0x%lx is not attached to its base "
1075                                "inode 0x%lx, write the extent record.",
1076                                mft_no, na.mft_no);
1077                return true;
1078        }
1079        ntfs_debug("Extent inode 0x%lx is attached to its base inode 0x%lx.",
1080                        mft_no, na.mft_no);
1081        /* Take a reference to the extent ntfs inode. */
1082        atomic_inc(&eni->count);
1083        mutex_unlock(&ni->extent_lock);
1084        /*
1085         * Found the extent inode corresponding to this extent mft record.
1086         * Try to take the mft record lock.
1087         */
1088        if (unlikely(!mutex_trylock(&eni->mrec_lock))) {
1089                atomic_dec(&eni->count);
1090                iput(vi);
1091                ntfs_debug("Extent mft record 0x%lx is already locked, do "
1092                                "not write it.", mft_no);
1093                return false;
1094        }
1095        ntfs_debug("Managed to lock extent mft record 0x%lx, write it.",
1096                        mft_no);
1097        if (NInoTestClearDirty(eni))
1098                ntfs_debug("Extent inode 0x%lx is dirty, marking it clean.",
1099                                mft_no);
1100        /*
1101         * The write has to occur while we hold the mft record lock so return
1102         * the locked extent ntfs inode.
1103         */
1104        *locked_ni = eni;
1105        return true;
1106}
1107
1108static const char *es = "  Leaving inconsistent metadata.  Unmount and run "
1109                "chkdsk.";
1110
1111/**
1112 * ntfs_mft_bitmap_find_and_alloc_free_rec_nolock - see name
1113 * @vol:        volume on which to search for a free mft record
1114 * @base_ni:    open base inode if allocating an extent mft record or NULL
1115 *
1116 * Search for a free mft record in the mft bitmap attribute on the ntfs volume
1117 * @vol.
1118 *
1119 * If @base_ni is NULL start the search at the default allocator position.
1120 *
1121 * If @base_ni is not NULL start the search at the mft record after the base
1122 * mft record @base_ni.
1123 *
1124 * Return the free mft record on success and -errno on error.  An error code of
1125 * -ENOSPC means that there are no free mft records in the currently
1126 * initialized mft bitmap.
1127 *
1128 * Locking: Caller must hold vol->mftbmp_lock for writing.
1129 */
1130static int ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(ntfs_volume *vol,
1131                ntfs_inode *base_ni)
1132{
1133        s64 pass_end, ll, data_pos, pass_start, ofs, bit;
1134        unsigned long flags;
1135        struct address_space *mftbmp_mapping;
1136        u8 *buf, *byte;
1137        struct page *page;
1138        unsigned int page_ofs, size;
1139        u8 pass, b;
1140
1141        ntfs_debug("Searching for free mft record in the currently "
1142                        "initialized mft bitmap.");
1143        mftbmp_mapping = vol->mftbmp_ino->i_mapping;
1144        /*
1145         * Set the end of the pass making sure we do not overflow the mft
1146         * bitmap.
1147         */
1148        read_lock_irqsave(&NTFS_I(vol->mft_ino)->size_lock, flags);
1149        pass_end = NTFS_I(vol->mft_ino)->allocated_size >>
1150                        vol->mft_record_size_bits;
1151        read_unlock_irqrestore(&NTFS_I(vol->mft_ino)->size_lock, flags);
1152        read_lock_irqsave(&NTFS_I(vol->mftbmp_ino)->size_lock, flags);
1153        ll = NTFS_I(vol->mftbmp_ino)->initialized_size << 3;
1154        read_unlock_irqrestore(&NTFS_I(vol->mftbmp_ino)->size_lock, flags);
1155        if (pass_end > ll)
1156                pass_end = ll;
1157        pass = 1;
1158        if (!base_ni)
1159                data_pos = vol->mft_data_pos;
1160        else
1161                data_pos = base_ni->mft_no + 1;
1162        if (data_pos < 24)
1163                data_pos = 24;
1164        if (data_pos >= pass_end) {
1165                data_pos = 24;
1166                pass = 2;
1167                /* This happens on a freshly formatted volume. */
1168                if (data_pos >= pass_end)
1169                        return -ENOSPC;
1170        }
1171        pass_start = data_pos;
1172        ntfs_debug("Starting bitmap search: pass %u, pass_start 0x%llx, "
1173                        "pass_end 0x%llx, data_pos 0x%llx.", pass,
1174                        (long long)pass_start, (long long)pass_end,
1175                        (long long)data_pos);
1176        /* Loop until a free mft record is found. */
1177        for (; pass <= 2;) {
1178                /* Cap size to pass_end. */
1179                ofs = data_pos >> 3;
1180                page_ofs = ofs & ~PAGE_CACHE_MASK;
1181                size = PAGE_CACHE_SIZE - page_ofs;
1182                ll = ((pass_end + 7) >> 3) - ofs;
1183                if (size > ll)
1184                        size = ll;
1185                size <<= 3;
1186                /*
1187                 * If we are still within the active pass, search the next page
1188                 * for a zero bit.
1189                 */
1190                if (size) {
1191                        page = ntfs_map_page(mftbmp_mapping,
1192                                        ofs >> PAGE_CACHE_SHIFT);
1193                        if (IS_ERR(page)) {
1194                                ntfs_error(vol->sb, "Failed to read mft "
1195                                                "bitmap, aborting.");
1196                                return PTR_ERR(page);
1197                        }
1198                        buf = (u8*)page_address(page) + page_ofs;
1199                        bit = data_pos & 7;
1200                        data_pos &= ~7ull;
1201                        ntfs_debug("Before inner for loop: size 0x%x, "
1202                                        "data_pos 0x%llx, bit 0x%llx", size,
1203                                        (long long)data_pos, (long long)bit);
1204                        for (; bit < size && data_pos + bit < pass_end;
1205                                        bit &= ~7ull, bit += 8) {
1206                                byte = buf + (bit >> 3);
1207                                if (*byte == 0xff)
1208                                        continue;
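                                /*
                                 * Find the first zero bit in this byte and
                                 * only accept it if it lies at or after the
                                 * starting bit within this byte.
                                 */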
1209                                b = ffz((unsigned long)*byte);
1210                                if (b < 8 && b >= (bit & 7)) {
1211                                        ll = data_pos + (bit & ~7ull) + b;
1212                                        if (unlikely(ll > (1ll << 32))) {
1213                                                ntfs_unmap_page(page);
1214                                                return -ENOSPC;
1215                                        }
1216                                        *byte |= 1 << b;
1217                                        flush_dcache_page(page);
1218                                        set_page_dirty(page);
1219                                        ntfs_unmap_page(page);
1220                                        ntfs_debug("Done.  (Found and "
1221                                                        "allocated mft record "
1222                                                        "0x%llx.)",
1223                                                        (long long)ll);
1224                                        return ll;
1225                                }
1226                        }
1227                        ntfs_debug("After inner for loop: size 0x%x, "
1228                                        "data_pos 0x%llx, bit 0x%llx", size,
1229                                        (long long)data_pos, (long long)bit);
1230                        data_pos += size;
1231                        ntfs_unmap_page(page);
1232                        /*
1233                         * If the end of the pass has not been reached yet,
1234                         * continue searching the mft bitmap for a zero bit.
1235                         */
1236                        if (data_pos < pass_end)
1237                                continue;
1238                }
1239                /* Do the next pass. */
1240                if (++pass == 2) {
1241                        /*
1242                         * Starting the second pass, in which we scan the first
1243                         * part of the zone which we omitted earlier.
1244                         */
1245                        pass_end = pass_start;
1246                        data_pos = pass_start = 24;
1247                        ntfs_debug("pass %i, pass_start 0x%llx, pass_end "
1248                                        "0x%llx.", pass, (long long)pass_start,
1249                                        (long long)pass_end);
1250                        if (data_pos >= pass_end)
1251                                break;
1252                }
1253        }
1254        /* No free mft records in currently initialized mft bitmap. */
1255        ntfs_debug("Done.  (No free mft records left in currently initialized "
1256                        "mft bitmap.)");
1257        return -ENOSPC;
1258}
1259
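/*
 * Illustrative sketch only, not part of the driver: the byte-wise zero bit
 * scan performed by ntfs_mft_bitmap_find_and_alloc_free_rec_nolock() above,
 * reduced to a plain buffer.  The helper and its name are hypothetical; it
 * returns the absolute index of the first zero bit in @buf at or after bit
 * @start, or -1 if all @nr_bits bits are set.
 */
static inline long example_find_zero_bit(const u8 *buf, long nr_bits,
                long start)
{
        long bit;

        for (bit = start; bit < nr_bits; bit = (bit & ~7) + 8) {
                u8 byte = buf[bit >> 3];
                unsigned int b;

                if (byte == 0xff)
                        continue;       /* All eight bits in use. */
                b = ffz((unsigned long)byte);
                if (b < 8 && b >= (bit & 7))
                        return (bit & ~7) + b;  /* Absolute bit index. */
        }
        return -1;
}
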
1260/**
1261 * ntfs_mft_bitmap_extend_allocation_nolock - extend mft bitmap by a cluster
1262 * @vol:        volume on which to extend the mft bitmap attribute
1263 *
1264 * Extend the mft bitmap attribute on the ntfs volume @vol by one cluster.
1265 *
1266 * Note: Only changes allocated_size, i.e. does not touch initialized_size or
1267 * data_size.
1268 *
1269 * Return 0 on success and -errno on error.
1270 *
1271 * Locking: - Caller must hold vol->mftbmp_lock for writing.
1272 *          - This function takes NTFS_I(vol->mftbmp_ino)->runlist.lock for
1273 *            writing and releases it before returning.
1274 *          - This function takes vol->lcnbmp_lock for writing and releases it
1275 *            before returning.
1276 */
1277static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol)
1278{
1279        LCN lcn;
1280        s64 ll;
1281        unsigned long flags;
1282        struct page *page;
1283        ntfs_inode *mft_ni, *mftbmp_ni;
1284        runlist_element *rl, *rl2 = NULL;
1285        ntfs_attr_search_ctx *ctx = NULL;
1286        MFT_RECORD *mrec;
1287        ATTR_RECORD *a = NULL;
1288        int ret, mp_size;
1289        u32 old_alen = 0;
1290        u8 *b, tb;
1291        struct {
1292                u8 added_cluster:1;
1293                u8 added_run:1;
1294                u8 mp_rebuilt:1;
1295        } status = { 0, 0, 0 };
1296
1297        ntfs_debug("Extending mft bitmap allocation.");
1298        mft_ni = NTFS_I(vol->mft_ino);
1299        mftbmp_ni = NTFS_I(vol->mftbmp_ino);
1300        /*
1301         * Determine the last lcn of the mft bitmap.  The allocated size of the
1302         * mft bitmap cannot be zero so we are ok to do this.
1303         */
1304        down_write(&mftbmp_ni->runlist.lock);
1305        read_lock_irqsave(&mftbmp_ni->size_lock, flags);
1306        ll = mftbmp_ni->allocated_size;
1307        read_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
1308        rl = ntfs_attr_find_vcn_nolock(mftbmp_ni,
1309                        (ll - 1) >> vol->cluster_size_bits, NULL);
1310        if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) {
1311                up_write(&mftbmp_ni->runlist.lock);
1312                ntfs_error(vol->sb, "Failed to determine last allocated "
1313                                "cluster of mft bitmap attribute.");
1314                if (!IS_ERR(rl))
1315                        ret = -EIO;
1316                else
1317                        ret = PTR_ERR(rl);
1318                return ret;
1319        }
1320        lcn = rl->lcn + rl->length;
1321        ntfs_debug("Last lcn of mft bitmap attribute is 0x%llx.",
1322                        (long long)lcn);
1323        /*
1324         * Attempt to get the cluster following the last allocated cluster by
1325         * hand as it may be in the MFT zone so the allocator would not give it
1326         * to us.
1327         */
1328        ll = lcn >> 3;
1329        page = ntfs_map_page(vol->lcnbmp_ino->i_mapping,
1330                        ll >> PAGE_CACHE_SHIFT);
1331        if (IS_ERR(page)) {
1332                up_write(&mftbmp_ni->runlist.lock);
1333                ntfs_error(vol->sb, "Failed to read from lcn bitmap.");
1334                return PTR_ERR(page);
1335        }
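        /*
         * b points to the lcn bitmap byte containing the candidate cluster
         * and tb is the mask for that cluster's bit within the byte.
         */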
1336        b = (u8*)page_address(page) + (ll & ~PAGE_CACHE_MASK);
1337        tb = 1 << (lcn & 7ull);
1338        down_write(&vol->lcnbmp_lock);
1339        if (*b != 0xff && !(*b & tb)) {
1340                /* Next cluster is free, allocate it. */
1341                *b |= tb;
1342                flush_dcache_page(page);
1343                set_page_dirty(page);
1344                up_write(&vol->lcnbmp_lock);
1345                ntfs_unmap_page(page);
1346                /* Update the mft bitmap runlist. */
1347                rl->length++;
1348                rl[1].vcn++;
1349                status.added_cluster = 1;
1350                ntfs_debug("Appending one cluster to mft bitmap.");
1351        } else {
1352                up_write(&vol->lcnbmp_lock);
1353                ntfs_unmap_page(page);
1354                /* Allocate a cluster from the DATA_ZONE. */
1355                rl2 = ntfs_cluster_alloc(vol, rl[1].vcn, 1, lcn, DATA_ZONE,
1356                                true);
1357                if (IS_ERR(rl2)) {
1358                        up_write(&mftbmp_ni->runlist.lock);
1359                        ntfs_error(vol->sb, "Failed to allocate a cluster for "
1360                                        "the mft bitmap.");
1361                        return PTR_ERR(rl2);
1362                }
1363                rl = ntfs_runlists_merge(mftbmp_ni->runlist.rl, rl2);
1364                if (IS_ERR(rl)) {
1365                        up_write(&mftbmp_ni->runlist.lock);
1366                        ntfs_error(vol->sb, "Failed to merge runlists for mft "
1367                                        "bitmap.");
1368                        if (ntfs_cluster_free_from_rl(vol, rl2)) {
1369                                ntfs_error(vol->sb, "Failed to dealocate "
1370                                                "allocated cluster.%s", es);
1371                                NVolSetErrors(vol);
1372                        }
1373                        ntfs_free(rl2);
1374                        return PTR_ERR(rl);
1375                }
1376                mftbmp_ni->runlist.rl = rl;
1377                status.added_run = 1;
1378                ntfs_debug("Adding one run to mft bitmap.");
1379                /* Find the last run in the new runlist. */
1380                for (; rl[1].length; rl++)
1381                        ;
1382        }
1383        /*
1384         * Update the attribute record as well.  Note: @rl is the last
1385         * (non-terminator) runlist element of mft bitmap.
1386         */
1387        mrec = map_mft_record(mft_ni);
1388        if (IS_ERR(mrec)) {
1389                ntfs_error(vol->sb, "Failed to map mft record.");
1390                ret = PTR_ERR(mrec);
1391                goto undo_alloc;
1392        }
1393        ctx = ntfs_attr_get_search_ctx(mft_ni, mrec);
1394        if (unlikely(!ctx)) {
1395                ntfs_error(vol->sb, "Failed to get search context.");
1396                ret = -ENOMEM;
1397                goto undo_alloc;
1398        }
1399        ret = ntfs_attr_lookup(mftbmp_ni->type, mftbmp_ni->name,
1400                        mftbmp_ni->name_len, CASE_SENSITIVE, rl[1].vcn, NULL,
1401                        0, ctx);
1402        if (unlikely(ret)) {
1403                ntfs_error(vol->sb, "Failed to find last attribute extent of "
1404                                "mft bitmap attribute.");
1405                if (ret == -ENOENT)
1406                        ret = -EIO;
1407                goto undo_alloc;
1408        }
1409        a = ctx->attr;
1410        ll = sle64_to_cpu(a->data.non_resident.lowest_vcn);
1411        /* Search back for the previous last allocated cluster of mft bitmap. */
1412        for (rl2 = rl; rl2 > mftbmp_ni->runlist.rl; rl2--) {
1413                if (ll >= rl2->vcn)
1414                        break;
1415        }
1416        BUG_ON(ll < rl2->vcn);
1417        BUG_ON(ll >= rl2->vcn + rl2->length);
1418        /* Get the size for the new mapping pairs array for this extent. */
1419        mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, ll, -1);
1420        if (unlikely(mp_size <= 0)) {
1421                ntfs_error(vol->sb, "Get size for mapping pairs failed for "
1422                                "mft bitmap attribute extent.");
1423                ret = mp_size;
1424                if (!ret)
1425                        ret = -EIO;
1426                goto undo_alloc;
1427        }
1428        /* Expand the attribute record if necessary. */
1429        old_alen = le32_to_cpu(a->length);
1430        ret = ntfs_attr_record_resize(ctx->mrec, a, mp_size +
1431                        le16_to_cpu(a->data.non_resident.mapping_pairs_offset));
1432        if (unlikely(ret)) {
1433                if (ret != -ENOSPC) {
1434                        ntfs_error(vol->sb, "Failed to resize attribute "
1435                                        "record for mft bitmap attribute.");
1436                        goto undo_alloc;
1437                }
1438                // TODO: Deal with this by moving this extent to a new mft
1439                // record or by starting a new extent in a new mft record or by
1440                // moving other attributes out of this mft record.
1441                // Note: It will need to be a special mft record and if none of
1442                // those are available it gets rather complicated...
1443                ntfs_error(vol->sb, "Not enough space in this mft record to "
1444                                "accomodate extended mft bitmap attribute "
1445                                "extent.  Cannot handle this yet.");
1446                ret = -EOPNOTSUPP;
1447                goto undo_alloc;
1448        }
1449        status.mp_rebuilt = 1;
1450        /* Generate the mapping pairs array directly into the attr record. */
1451        ret = ntfs_mapping_pairs_build(vol, (u8*)a +
1452                        le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
1453                        mp_size, rl2, ll, -1, NULL);
1454        if (unlikely(ret)) {
1455                ntfs_error(vol->sb, "Failed to build mapping pairs array for "
1456                                "mft bitmap attribute.");
1457                goto undo_alloc;
1458        }
1459        /* Update the highest_vcn. */
1460        a->data.non_resident.highest_vcn = cpu_to_sle64(rl[1].vcn - 1);
1461        /*
1462         * We now have extended the mft bitmap allocated_size by one cluster.
1463         * Reflect this in the ntfs_inode structure and the attribute record.
1464         */
1465        if (a->data.non_resident.lowest_vcn) {
1466                /*
1467                 * We are not in the first attribute extent, switch to it, but
1468                 * first ensure the changes will make it to disk later.
1469                 */
1470                flush_dcache_mft_record_page(ctx->ntfs_ino);
1471                mark_mft_record_dirty(ctx->ntfs_ino);
1472                ntfs_attr_reinit_search_ctx(ctx);
1473                ret = ntfs_attr_lookup(mftbmp_ni->type, mftbmp_ni->name,
1474                                mftbmp_ni->name_len, CASE_SENSITIVE, 0, NULL,
1475                                0, ctx);
1476                if (unlikely(ret)) {
1477                        ntfs_error(vol->sb, "Failed to find first attribute "
1478                                        "extent of mft bitmap attribute.");
1479                        goto restore_undo_alloc;
1480                }
1481                a = ctx->attr;
1482        }
1483        write_lock_irqsave(&mftbmp_ni->size_lock, flags);
1484        mftbmp_ni->allocated_size += vol->cluster_size;
1485        a->data.non_resident.allocated_size =
1486                        cpu_to_sle64(mftbmp_ni->allocated_size);
1487        write_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
1488        /* Ensure the changes make it to disk. */
1489        flush_dcache_mft_record_page(ctx->ntfs_ino);
1490        mark_mft_record_dirty(ctx->ntfs_ino);
1491        ntfs_attr_put_search_ctx(ctx);
1492        unmap_mft_record(mft_ni);
1493        up_write(&mftbmp_ni->runlist.lock);
1494        ntfs_debug("Done.");
1495        return 0;
1496restore_undo_alloc:
1497        ntfs_attr_reinit_search_ctx(ctx);
1498        if (ntfs_attr_lookup(mftbmp_ni->type, mftbmp_ni->name,
1499                        mftbmp_ni->name_len, CASE_SENSITIVE, rl[1].vcn, NULL,
1500                        0, ctx)) {
1501                ntfs_error(vol->sb, "Failed to find last attribute extent of "
1502                                "mft bitmap attribute.%s", es);
1503                write_lock_irqsave(&mftbmp_ni->size_lock, flags);
1504                mftbmp_ni->allocated_size += vol->cluster_size;
1505                write_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
1506                ntfs_attr_put_search_ctx(ctx);
1507                unmap_mft_record(mft_ni);
1508                up_write(&mftbmp_ni->runlist.lock);
1509                /*
1510                 * The only thing that is now wrong is ->allocated_size of the
1511                 * base attribute extent which chkdsk should be able to fix.
1512                 */
1513                NVolSetErrors(vol);
1514                return ret;
1515        }
1516        a = ctx->attr;
1517        a->data.non_resident.highest_vcn = cpu_to_sle64(rl[1].vcn - 2);
1518undo_alloc:
1519        if (status.added_cluster) {
1520                /* Truncate the last run in the runlist by one cluster. */
1521                rl->length--;
1522                rl[1].vcn--;
1523        } else if (status.added_run) {
1524                lcn = rl->lcn;
1525                /* Remove the last run from the runlist. */
1526                rl->lcn = rl[1].lcn;
1527                rl->length = 0;
1528        }
1529        /* Deallocate the cluster. */
1530        down_write(&vol->lcnbmp_lock);
1531        if (ntfs_bitmap_clear_bit(vol->lcnbmp_ino, lcn)) {
1532                ntfs_error(vol->sb, "Failed to free allocated cluster.%s", es);
1533                NVolSetErrors(vol);
1534        }
1535        up_write(&vol->lcnbmp_lock);
1536        if (status.mp_rebuilt) {
1537                if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
1538                                a->data.non_resident.mapping_pairs_offset),
1539                                old_alen - le16_to_cpu(
1540                                a->data.non_resident.mapping_pairs_offset),
1541                                rl2, ll, -1, NULL)) {
1542                        ntfs_error(vol->sb, "Failed to restore mapping pairs "
1543                                        "array.%s", es);
1544                        NVolSetErrors(vol);
1545                }
1546                if (ntfs_attr_record_resize(ctx->mrec, a, old_alen)) {
1547                        ntfs_error(vol->sb, "Failed to restore attribute "
1548                                        "record.%s", es);
1549                        NVolSetErrors(vol);
1550                }
1551                flush_dcache_mft_record_page(ctx->ntfs_ino);
1552                mark_mft_record_dirty(ctx->ntfs_ino);
1553        }
1554        if (ctx)
1555                ntfs_attr_put_search_ctx(ctx);
1556        if (!IS_ERR(mrec))
1557                unmap_mft_record(mft_ni);
1558        up_write(&mftbmp_ni->runlist.lock);
1559        return ret;
1560}
1561
1562/**
1563 * ntfs_mft_bitmap_extend_initialized_nolock - extend mftbmp initialized data
1564 * @vol:        volume on which to extend the mft bitmap attribute
1565 *
1566 * Extend the initialized portion of the mft bitmap attribute on the ntfs
1567 * volume @vol by 8 bytes.
1568 *
1569 * Note:  Only changes initialized_size and data_size, i.e. requires that
1570 * allocated_size is big enough to fit the new initialized_size.
1571 *
1572 * Return 0 on success and -errno on error.
1573 *
1574 * Locking: Caller must hold vol->mftbmp_lock for writing.
1575 */
1576static int ntfs_mft_bitmap_extend_initialized_nolock(ntfs_volume *vol)
1577{
1578        s64 old_data_size, old_initialized_size;
1579        unsigned long flags;
1580        struct inode *mftbmp_vi;
1581        ntfs_inode *mft_ni, *mftbmp_ni;
1582        ntfs_attr_search_ctx *ctx;
1583        MFT_RECORD *mrec;
1584        ATTR_RECORD *a;
1585        int ret;
1586
1587        ntfs_debug("Extending mft bitmap initiailized (and data) size.");
1588        mft_ni = NTFS_I(vol->mft_ino);
1589        mftbmp_vi = vol->mftbmp_ino;
1590        mftbmp_ni = NTFS_I(mftbmp_vi);
1591        /* Get the attribute record. */
1592        mrec = map_mft_record(mft_ni);
1593        if (IS_ERR(mrec)) {
1594                ntfs_error(vol->sb, "Failed to map mft record.");
1595                return PTR_ERR(mrec);
1596        }
1597        ctx = ntfs_attr_get_search_ctx(mft_ni, mrec);
1598        if (unlikely(!ctx)) {
1599                ntfs_error(vol->sb, "Failed to get search context.");
1600                ret = -ENOMEM;
1601                goto unm_err_out;
1602        }
1603        ret = ntfs_attr_lookup(mftbmp_ni->type, mftbmp_ni->name,
1604                        mftbmp_ni->name_len, CASE_SENSITIVE, 0, NULL, 0, ctx);
1605        if (unlikely(ret)) {
1606                ntfs_error(vol->sb, "Failed to find first attribute extent of "
1607                                "mft bitmap attribute.");
1608                if (ret == -ENOENT)
1609                        ret = -EIO;
1610                goto put_err_out;
1611        }
1612        a = ctx->attr;
1613        write_lock_irqsave(&mftbmp_ni->size_lock, flags);
1614        old_data_size = i_size_read(mftbmp_vi);
1615        old_initialized_size = mftbmp_ni->initialized_size;
1616        /*
1617         * We can simply update the initialized_size before filling the space
1618         * with zeroes because the caller is holding the mft bitmap lock for
1619         * writing which ensures that no one else is trying to access the data.
1620         */
1621        mftbmp_ni->initialized_size += 8;
1622        a->data.non_resident.initialized_size =
1623                        cpu_to_sle64(mftbmp_ni->initialized_size);
1624        if (mftbmp_ni->initialized_size > old_data_size) {
1625                i_size_write(mftbmp_vi, mftbmp_ni->initialized_size);
1626                a->data.non_resident.data_size =
1627                                cpu_to_sle64(mftbmp_ni->initialized_size);
1628        }
1629        write_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
1630        /* Ensure the changes make it to disk. */
1631        flush_dcache_mft_record_page(ctx->ntfs_ino);
1632        mark_mft_record_dirty(ctx->ntfs_ino);
1633        ntfs_attr_put_search_ctx(ctx);
1634        unmap_mft_record(mft_ni);
1635        /* Initialize the mft bitmap attribute value with zeroes. */
1636        ret = ntfs_attr_set(mftbmp_ni, old_initialized_size, 8, 0);
1637        if (likely(!ret)) {
1638                ntfs_debug("Done.  (Wrote eight initialized bytes to mft "
1639                                "bitmap.");
1640                return 0;
1641        }
1642        ntfs_error(vol->sb, "Failed to write to mft bitmap.");
1643        /* Try to recover from the error. */
1644        mrec = map_mft_record(mft_ni);
1645        if (IS_ERR(mrec)) {
1646                ntfs_error(vol->sb, "Failed to map mft record.%s", es);
1647                NVolSetErrors(vol);
1648                return ret;
1649        }
1650        ctx = ntfs_attr_get_search_ctx(mft_ni, mrec);
1651        if (unlikely(!ctx)) {
1652                ntfs_error(vol->sb, "Failed to get search context.%s", es);
1653                NVolSetErrors(vol);
1654                goto unm_err_out;
1655        }
1656        if (ntfs_attr_lookup(mftbmp_ni->type, mftbmp_ni->name,
1657                        mftbmp_ni->name_len, CASE_SENSITIVE, 0, NULL, 0, ctx)) {
1658                ntfs_error(vol->sb, "Failed to find first attribute extent of "
1659                                "mft bitmap attribute.%s", es);
1660                NVolSetErrors(vol);
1661put_err_out:
1662                ntfs_attr_put_search_ctx(ctx);
1663unm_err_out:
1664                unmap_mft_record(mft_ni);
1665                goto err_out;
1666        }
1667        a = ctx->attr;
1668        write_lock_irqsave(&mftbmp_ni->size_lock, flags);
1669        mftbmp_ni->initialized_size = old_initialized_size;
1670        a->data.non_resident.initialized_size =
1671                        cpu_to_sle64(old_initialized_size);
1672        if (i_size_read(mftbmp_vi) != old_data_size) {
1673                i_size_write(mftbmp_vi, old_data_size);
1674                a->data.non_resident.data_size = cpu_to_sle64(old_data_size);
1675        }
1676        write_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
1677        flush_dcache_mft_record_page(ctx->ntfs_ino);
1678        mark_mft_record_dirty(ctx->ntfs_ino);
1679        ntfs_attr_put_search_ctx(ctx);
1680        unmap_mft_record(mft_ni);
1681#ifdef DEBUG
1682        read_lock_irqsave(&mftbmp_ni->size_lock, flags);
1683        ntfs_debug("Restored status of mftbmp: allocated_size 0x%llx, "
1684                        "data_size 0x%llx, initialized_size 0x%llx.",
1685                        (long long)mftbmp_ni->allocated_size,
1686                        (long long)i_size_read(mftbmp_vi),
1687                        (long long)mftbmp_ni->initialized_size);
1688        read_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
1689#endif /* DEBUG */
1690err_out:
1691        return ret;
1692}
1693
1694/**
1695 * ntfs_mft_data_extend_allocation_nolock - extend mft data attribute
1696 * @vol:        volume on which to extend the mft data attribute
1697 *
1698 * Extend the mft data attribute on the ntfs volume @vol by 16 mft records
1699 * worth of clusters or if not enough space for this by one mft record worth
1700 * of clusters.
1701 *
1702 * Note:  Only changes allocated_size, i.e. does not touch initialized_size or
1703 * data_size.
1704 *
1705 * Return 0 on success and -errno on error.
1706 *
1707 * Locking: - Caller must hold vol->mftbmp_lock for writing.
1708 *          - This function takes NTFS_I(vol->mft_ino)->runlist.lock for
1709 *            writing and releases it before returning.
1710 *          - This function calls functions which take vol->lcnbmp_lock for
1711 *            writing and release it before returning.
1712 */
1713static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
1714{
1715        LCN lcn;
1716        VCN old_last_vcn;
1717        s64 min_nr, nr, ll;
1718        unsigned long flags;
1719        ntfs_inode *mft_ni;
1720        runlist_element *rl, *rl2;
1721        ntfs_attr_search_ctx *ctx = NULL;
1722        MFT_RECORD *mrec;
1723        ATTR_RECORD *a = NULL;
1724        int ret, mp_size;
1725        u32 old_alen = 0;
1726        bool mp_rebuilt = false;
1727
1728        ntfs_debug("Extending mft data allocation.");
1729        mft_ni = NTFS_I(vol->mft_ino);
1730        /*
1731         * Determine the preferred allocation location, i.e. the last lcn of
1732         * the mft data attribute.  The allocated size of the mft data
1733         * attribute cannot be zero so we are ok to do this.
1734         */
1735        down_write(&mft_ni->runlist.lock);
1736        read_lock_irqsave(&mft_ni->size_lock, flags);
1737        ll = mft_ni->allocated_size;
1738        read_unlock_irqrestore(&mft_ni->size_lock, flags);
1739        rl = ntfs_attr_find_vcn_nolock(mft_ni,
1740                        (ll - 1) >> vol->cluster_size_bits, NULL);
1741        if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) {
1742                up_write(&mft_ni->runlist.lock);
1743                ntfs_error(vol->sb, "Failed to determine last allocated "
1744                                "cluster of mft data attribute.");
1745                if (!IS_ERR(rl))
1746                        ret = -EIO;
1747                else
1748                        ret = PTR_ERR(rl);
1749                return ret;
1750        }
1751        lcn = rl->lcn + rl->length;
1752        ntfs_debug("Last lcn of mft data attribute is 0x%llx.", (long long)lcn);
1753        /* Minimum allocation is one mft record worth of clusters. */
1754        min_nr = vol->mft_record_size >> vol->cluster_size_bits;
1755        if (!min_nr)
1756                min_nr = 1;
1757        /* Want to allocate 16 mft records worth of clusters. */
1758        nr = vol->mft_record_size << 4 >> vol->cluster_size_bits;
1759        if (!nr)
1760                nr = min_nr;
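        /*
         * Worked example (illustration only, the values are assumptions):
         * with 1024-byte mft records and 4096-byte clusters, min_nr is
         * max(1024 >> 12, 1) = 1 and nr is (1024 << 4) >> 12 = 4 clusters,
         * i.e. sixteen new mft records per extension.
         */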
1761        /* Ensure we do not go above 2^32-1 mft records. */
1762        read_lock_irqsave(&mft_ni->size_lock, flags);
1763        ll = mft_ni->allocated_size;
1764        read_unlock_irqrestore(&mft_ni->size_lock, flags);
1765        if (unlikely((ll + (nr << vol->cluster_size_bits)) >>
1766                        vol->mft_record_size_bits >= (1ll << 32))) {
1767                nr = min_nr;
1768                if (unlikely((ll + (nr << vol->cluster_size_bits)) >>
1769                                vol->mft_record_size_bits >= (1ll << 32))) {
1770                        ntfs_warning(vol->sb, "Cannot allocate mft record "
1771                                        "because the maximum number of inodes "
1772                                        "(2^32) has already been reached.");
1773                        up_write(&mft_ni->runlist.lock);
1774                        return -ENOSPC;
1775                }
1776        }
1777        ntfs_debug("Trying mft data allocation with %s cluster count %lli.",
1778                        nr > min_nr ? "default" : "minimal", (long long)nr);
1779        old_last_vcn = rl[1].vcn;
1780        do {
1781                rl2 = ntfs_cluster_alloc(vol, old_last_vcn, nr, lcn, MFT_ZONE,
1782                                true);
1783                if (likely(!IS_ERR(rl2)))
1784                        break;
1785                if (PTR_ERR(rl2) != -ENOSPC || nr == min_nr) {
1786                        ntfs_error(vol->sb, "Failed to allocate the minimal "
1787                                        "number of clusters (%lli) for the "
1788                                        "mft data attribute.", (long long)nr);
1789                        up_write(&mft_ni->runlist.lock);
1790                        return PTR_ERR(rl2);
1791                }
1792                /*
1793                 * There is not enough space to do the allocation, but there
1794                 * might be enough space to do a minimal allocation so try that
1795                 * before failing.
1796                 */
1797                nr = min_nr;
1798                ntfs_debug("Retrying mft data allocation with minimal cluster "
1799                                "count %lli.", (long long)nr);
1800        } while (1);
1801        rl = ntfs_runlists_merge(mft_ni->runlist.rl, rl2);
1802        if (IS_ERR(rl)) {
1803                up_write(&mft_ni->runlist.lock);
1804                ntfs_error(vol->sb, "Failed to merge runlists for mft data "
1805                                "attribute.");
1806                if (ntfs_cluster_free_from_rl(vol, rl2)) {
1807                        ntfs_error(vol->sb, "Failed to dealocate clusters "
1808                                        "from the mft data attribute.%s", es);
1809                        NVolSetErrors(vol);
1810                }
1811                ntfs_free(rl2);
1812                return PTR_ERR(rl);
1813        }
1814        mft_ni->runlist.rl = rl;
1815        ntfs_debug("Allocated %lli clusters.", (long long)nr);
1816        /* Find the last run in the new runlist. */
1817        for (; rl[1].length; rl++)
1818                ;
1819        /* Update the attribute record as well. */
1820        mrec = map_mft_record(mft_ni);
1821        if (IS_ERR(mrec)) {
1822                ntfs_error(vol->sb, "Failed to map mft record.");
1823                ret = PTR_ERR(mrec);
1824                goto undo_alloc;
1825        }
1826        ctx = ntfs_attr_get_search_ctx(mft_ni, mrec);
1827        if (unlikely(!ctx)) {
1828                ntfs_error(vol->sb, "Failed to get search context.");
1829                ret = -ENOMEM;
1830                goto undo_alloc;
1831        }
1832        ret = ntfs_attr_lookup(mft_ni->type, mft_ni->name, mft_ni->name_len,
1833                        CASE_SENSITIVE, rl[1].vcn, NULL, 0, ctx);
1834        if (unlikely(ret)) {
1835                ntfs_error(vol->sb, "Failed to find last attribute extent of "
1836                                "mft data attribute.");
1837                if (ret == -ENOENT)
1838                        ret = -EIO;
1839                goto undo_alloc;
1840        }
1841        a = ctx->attr;
1842        ll = sle64_to_cpu(a->data.non_resident.lowest_vcn);
1843        /* Search back for the previous last allocated cluster of mft data. */
1844        for (rl2 = rl; rl2 > mft_ni->runlist.rl; rl2--) {
1845                if (ll >= rl2->vcn)
1846                        break;
1847        }
1848        BUG_ON(ll < rl2->vcn);
1849        BUG_ON(ll >= rl2->vcn + rl2->length);
1850        /* Get the size for the new mapping pairs array for this extent. */
1851        mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, ll, -1);
1852        if (unlikely(mp_size <= 0)) {
1853                ntfs_error(vol->sb, "Get size for mapping pairs failed for "
1854                                "mft data attribute extent.");
1855                ret = mp_size;
1856                if (!ret)
1857                        ret = -EIO;
1858                goto undo_alloc;
1859        }
1860        /* Expand the attribute record if necessary. */
1861        old_alen = le32_to_cpu(a->length);
1862        ret = ntfs_attr_record_resize(ctx->mrec, a, mp_size +
1863                        le16_to_cpu(a->data.non_resident.mapping_pairs_offset));
1864        if (unlikely(ret)) {
1865                if (ret != -ENOSPC) {
1866                        ntfs_error(vol->sb, "Failed to resize attribute "
1867                                        "record for mft data attribute.");
1868                        goto undo_alloc;
1869                }
1870                // TODO: Deal with this by moving this extent to a new mft
1871                // record or by starting a new extent in a new mft record or by
1872                // moving other attributes out of this mft record.
1873                // Note: Use the special reserved mft records and ensure that
1874                // this extent is not required to find the mft record in
1875                // question.  If no free special records left we would need to
1876                // move an existing record away, insert ours in its place, and
1877                // then place the moved record into the newly allocated space
1878                // and we would then need to update all references to this mft
1879                // record appropriately.  This is rather complicated...
1880                ntfs_error(vol->sb, "Not enough space in this mft record to "
1881                                "accomodate extended mft data attribute "
1882                                "extent.  Cannot handle this yet.");
1883                ret = -EOPNOTSUPP;
1884                goto undo_alloc;
1885        }
1886        mp_rebuilt = true;
1887        /* Generate the mapping pairs array directly into the attr record. */
1888        ret = ntfs_mapping_pairs_build(vol, (u8*)a +
1889                        le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
1890                        mp_size, rl2, ll, -1, NULL);
1891        if (unlikely(ret)) {
1892                ntfs_error(vol->sb, "Failed to build mapping pairs array of "
1893                                "mft data attribute.");
1894                goto undo_alloc;
1895        }
1896        /* Update the highest_vcn. */
1897        a->data.non_resident.highest_vcn = cpu_to_sle64(rl[1].vcn - 1);
1898        /*
1899         * We now have extended the mft data allocated_size by nr clusters.
1900         * Reflect this in the ntfs_inode structure and the attribute record.
1901         * @rl is the last (non-terminator) runlist element of mft data
1902         * attribute.
1903         */
1904        if (a->data.non_resident.lowest_vcn) {
1905                /*
1906                 * We are not in the first attribute extent, switch to it, but
1907                 * first ensure the changes will make it to disk later.
1908                 */
1909                flush_dcache_mft_record_page(ctx->ntfs_ino);
1910                mark_mft_record_dirty(ctx->ntfs_ino);
1911                ntfs_attr_reinit_search_ctx(ctx);
1912                ret = ntfs_attr_lookup(mft_ni->type, mft_ni->name,
1913                                mft_ni->name_len, CASE_SENSITIVE, 0, NULL, 0,
1914                                ctx);
1915                if (unlikely(ret)) {
1916                        ntfs_error(vol->sb, "Failed to find first attribute "
1917                                        "extent of mft data attribute.");
1918                        goto restore_undo_alloc;
1919                }
1920                a = ctx->attr;
1921        }
1922        write_lock_irqsave(&mft_ni->size_lock, flags);
1923        mft_ni->allocated_size += nr << vol->cluster_size_bits;
1924        a->data.non_resident.allocated_size =
1925                        cpu_to_sle64(mft_ni->allocated_size);
1926        write_unlock_irqrestore(&mft_ni->size_lock, flags);
1927        /* Ensure the changes make it to disk. */
1928        flush_dcache_mft_record_page(ctx->ntfs_ino);
1929        mark_mft_record_dirty(ctx->ntfs_ino);
1930        ntfs_attr_put_search_ctx(ctx);
1931        unmap_mft_record(mft_ni);
1932        up_write(&mft_ni->runlist.lock);
1933        ntfs_debug("Done.");
1934        return 0;
1935restore_undo_alloc:
1936        ntfs_attr_reinit_search_ctx(ctx);
1937        if (ntfs_attr_lookup(mft_ni->type, mft_ni->name, mft_ni->name_len,
1938                        CASE_SENSITIVE, rl[1].vcn, NULL, 0, ctx)) {
1939                ntfs_error(vol->sb, "Failed to find last attribute extent of "
1940                                "mft data attribute.%s", es);
1941                write_lock_irqsave(&mft_ni->size_lock, flags);
1942                mft_ni->allocated_size += nr << vol->cluster_size_bits;
1943                write_unlock_irqrestore(&mft_ni->size_lock, flags);
1944                ntfs_attr_put_search_ctx(ctx);
1945                unmap_mft_record(mft_ni);
1946                up_write(&mft_ni->runlist.lock);
1947                /*
1948                 * The only thing that is now wrong is ->allocated_size of the
1949                 * base attribute extent which chkdsk should be able to fix.
1950                 */
1951                NVolSetErrors(vol);
1952                return ret;
1953        }
1954        ctx->attr->data.non_resident.highest_vcn =
1955                        cpu_to_sle64(old_last_vcn - 1);
1956undo_alloc:
1957        if (ntfs_cluster_free(mft_ni, old_last_vcn, -1, ctx) < 0) {
1958                ntfs_error(vol->sb, "Failed to free clusters from mft data "
1959                                "attribute.%s", es);
1960                NVolSetErrors(vol);
1961        }
1962        a = ctx->attr;
1963        if (ntfs_rl_truncate_nolock(vol, &mft_ni->runlist, old_last_vcn)) {
1964                ntfs_error(vol->sb, "Failed to truncate mft data attribute "
1965                                "runlist.%s", es);
1966                NVolSetErrors(vol);
1967        }
1968        if (mp_rebuilt && !IS_ERR(ctx->mrec)) {
1969                if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
1970                                a->data.non_resident.mapping_pairs_offset),
1971                                old_alen - le16_to_cpu(
1972                                a->data.non_resident.mapping_pairs_offset),
1973                                rl2, ll, -1, NULL)) {
1974                        ntfs_error(vol->sb, "Failed to restore mapping pairs "
1975                                        "array.%s", es);
1976                        NVolSetErrors(vol);
1977                }
1978                if (ntfs_attr_record_resize(ctx->mrec, a, old_alen)) {
1979                        ntfs_error(vol->sb, "Failed to restore attribute "
1980                                        "record.%s", es);
1981                        NVolSetErrors(vol);
1982                }
1983                flush_dcache_mft_record_page(ctx->ntfs_ino);
1984                mark_mft_record_dirty(ctx->ntfs_ino);
1985        } else if (IS_ERR(ctx->mrec)) {
1986                ntfs_error(vol->sb, "Failed to restore attribute search "
1987                                "context.%s", es);
1988                NVolSetErrors(vol);
1989        }
1990        if (ctx)
1991                ntfs_attr_put_search_ctx(ctx);
1992        if (!IS_ERR(mrec))
1993                unmap_mft_record(mft_ni);
1994        up_write(&mft_ni->runlist.lock);
1995        return ret;
1996}
1997
1998/**
1999 * ntfs_mft_record_layout - layout an mft record into a memory buffer
2000 * @vol:        volume to which the mft record will belong
2001 * @mft_no:     mft reference specifying the mft record number
2002 * @m:          destination buffer of size >= @vol->mft_record_size bytes
2003 *
2004 * Layout an empty, unused mft record with the mft record number @mft_no into
2005 * the buffer @m.  The volume @vol is needed because the mft record structure
2006 * was modified in NTFS 3.1 so we need to know which volume version this mft
2007 * record will be used on.
2008 *
2009 * Return 0 on success and -errno on error.
2010 */
2011static int ntfs_mft_record_layout(const ntfs_volume *vol, const s64 mft_no,
2012                MFT_RECORD *m)
2013{
2014        ATTR_RECORD *a;
2015
2016        ntfs_debug("Entering for mft record 0x%llx.", (long long)mft_no);
2017        if (mft_no >= (1ll << 32)) {
2018                ntfs_error(vol->sb, "Mft record number 0x%llx exceeds "
2019                                "maximum of 2^32.", (long long)mft_no);
2020                return -ERANGE;
2021        }
2022        /* Start by clearing the whole mft record to give us a clean slate. */
2023        memset(m, 0, vol->mft_record_size);
2024        /* Aligned to 2-byte boundary. */
2025        if (vol->major_ver < 3 || (vol->major_ver == 3 && !vol->minor_ver))
2026                m->usa_ofs = cpu_to_le16((sizeof(MFT_RECORD_OLD) + 1) & ~1);
2027        else {
2028                m->usa_ofs = cpu_to_le16((sizeof(MFT_RECORD) + 1) & ~1);
2029                /*
2030                 * Set the NTFS 3.1+ specific fields while we know that the
2031                 * volume version is 3.1+.
2032                 */
2033                m->reserved = 0;
2034                m->mft_record_number = cpu_to_le32((u32)mft_no);
2035        }
2036        m->magic = magic_FILE;
2037        if (vol->mft_record_size >= NTFS_BLOCK_SIZE)
2038                m->usa_count = cpu_to_le16(vol->mft_record_size /
2039                                NTFS_BLOCK_SIZE + 1);
2040        else {
2041                m->usa_count = cpu_to_le16(1);
2042                ntfs_warning(vol->sb, "Sector size is bigger than mft record "
2043                                "size.  Setting usa_count to 1.  If chkdsk "
2044                                "reports this as corruption, please email "
2045                                "linux-ntfs-dev@lists.sourceforge.net stating "
2046                                "that you saw this message and that the "
2047                                "modified filesystem created was corrupt.  "
2048                                "Thank you.");
2049        }
2050        /* Set the update sequence number to 1. */
2051        *(le16*)((u8*)m + le16_to_cpu(m->usa_ofs)) = cpu_to_le16(1);
2052        m->lsn = 0;
2053        m->sequence_number = cpu_to_le16(1);
2054        m->link_count = 0;
2055        /*
2056         * Place the attributes straight after the update sequence array,
2057         * aligned to 8-byte boundary.
2058         */
2059        m->attrs_offset = cpu_to_le16((le16_to_cpu(m->usa_ofs) +
2060                        (le16_to_cpu(m->usa_count) << 1) + 7) & ~7);
2061        m->flags = 0;
2062        /*
2063         * Using attrs_offset plus eight bytes (for the termination attribute).
2064         * attrs_offset is already aligned to 8-byte boundary, so no need to
2065         * align again.
2066         */
2067        m->bytes_in_use = cpu_to_le32(le16_to_cpu(m->attrs_offset) + 8);
2068        m->bytes_allocated = cpu_to_le32(vol->mft_record_size);
2069        m->base_mft_record = 0;
2070        m->next_attr_instance = 0;
2071        /* Add the termination attribute. */
2072        a = (ATTR_RECORD*)((u8*)m + le16_to_cpu(m->attrs_offset));
2073        a->type = AT_END;
2074        a->length = 0;
2075        ntfs_debug("Done.");
2076        return 0;
2077}
2078
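/*
 * Worked example for ntfs_mft_record_layout() (illustration only; assumes a
 * 1024-byte mft record, a 512-byte NTFS_BLOCK_SIZE, and an NTFS 3.1+ volume
 * where sizeof(MFT_RECORD) is 48 bytes):
 *
 *      usa_ofs      = (48 + 1) & ~1            = 48 (0x30)
 *      usa_count    = 1024 / 512 + 1           = 3
 *      attrs_offset = (48 + (3 << 1) + 7) & ~7 = 56 (0x38)
 *      bytes_in_use = 56 + 8                   = 64 (0x40)
 */
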
2079/**
2080 * ntfs_mft_record_format - format an mft record on an ntfs volume
2081 * @vol:        volume on which to format the mft record
2082 * @mft_no:     mft record number to format
2083 *
2084 * Format the mft record @mft_no in $MFT/$DATA, i.e. lay out an empty, unused
2085 * mft record into the appropriate place of the mft data attribute.  This is
2086 * used when extending the mft data attribute.
2087 *
2088 * Return 0 on success and -errno on error.
2089 */
2090static int ntfs_mft_record_format(const ntfs_volume *vol, const s64 mft_no)
2091{
2092        loff_t i_size;
2093        struct inode *mft_vi = vol->mft_ino;
2094        struct page *page;
2095        MFT_RECORD *m;
2096        pgoff_t index, end_index;
2097        unsigned int ofs;
2098        int err;
2099
2100        ntfs_debug("Entering for mft record 0x%llx.", (long long)mft_no);
2101        /*
2102         * The index into the page cache and the offset within the page cache
2103         * page of the wanted mft record.
2104         */
2105        index = mft_no << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT;
2106        ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
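        /*
         * For illustration (assuming 1024-byte mft records and 4096-byte
         * pages): mft record 0x23 starts at byte 0x8c00 of $MFT/$DATA, i.e.
         * page cache index 8 at offset 0xc00 within that page.
         */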
2107        /* The maximum valid index into the page cache for $MFT's data. */
2108        i_size = i_size_read(mft_vi);
2109        end_index = i_size >> PAGE_CACHE_SHIFT;
2110        if (unlikely(index >= end_index)) {
2111                if (unlikely(index > end_index || ofs + vol->mft_record_size >=
2112                                (i_size & ~PAGE_CACHE_MASK))) {
2113                        ntfs_error(vol->sb, "Tried to format non-existing mft "
2114                                        "record 0x%llx.", (long long)mft_no);
2115                        return -ENOENT;
2116                }
2117        }
2118        /* Read, map, and pin the page containing the mft record. */
2119        page = ntfs_map_page(mft_vi->i_mapping, index);
2120        if (IS_ERR(page)) {
2121                ntfs_error(vol->sb, "Failed to map page containing mft record "
2122                                "to format 0x%llx.", (long long)mft_no);
2123                return PTR_ERR(page);
2124        }
2125        lock_page(page);
2126        BUG_ON(!PageUptodate(page));
2127        ClearPageUptodate(page);
2128        m = (MFT_RECORD*)((u8*)page_address(page) + ofs);
2129        err = ntfs_mft_record_layout(vol, mft_no, m);
2130        if (unlikely(err)) {
2131                ntfs_error(vol->sb, "Failed to layout mft record 0x%llx.",
2132                                (long long)mft_no);
2133                SetPageUptodate(page);
2134                unlock_page(page);
2135                ntfs_unmap_page(page);
2136                return err;
2137        }
2138        flush_dcache_page(page);
2139        SetPageUptodate(page);
2140        unlock_page(page);
2141        /*
2142         * Make sure the mft record is written out to disk.  We could use
2143         * ilookup5() to check if an inode is in icache and so on but this is
2144         * unnecessary as ntfs_writepage() will write the dirty record anyway.
2145         */
2146        mark_ntfs_record_dirty(page, ofs);
2147        ntfs_unmap_page(page);
2148        ntfs_debug("Done.");
2149        return 0;
2150}
2151
2152/**
2153 * ntfs_mft_record_alloc - allocate an mft record on an ntfs volume
2154 * @vol:        [IN]  volume on which to allocate the mft record
2155 * @mode:       [IN]  file mode when allocating a base inode, or 0 for an extent
2156 * @base_ni:    [IN]  open base inode if allocating an extent mft record or NULL
2157 * @mrec:       [OUT] on successful return this is the mapped mft record
2158 *
2159 * Allocate an mft record in $MFT/$DATA of an open ntfs volume @vol.
2160 *
2161 * If @base_ni is NULL make the mft record a base mft record, i.e. a file or
2162 * directory inode, and allocate it at the default allocator position.  In
2163 * this case @mode is the file mode as given to us by the caller.  We in
2164 * particular use @mode to distinguish whether a file or a directory is being
2165 * created (S_ISREG(mode) and S_ISDIR(mode), respectively).
2166 *
2167 * If @base_ni is not NULL make the allocated mft record an extent record,
2168 * allocate it starting at the mft record after the base mft record and attach
2169 * the allocated and opened ntfs inode to the base inode @base_ni.  In this
2170 * case @mode must be 0 as it is meaningless for extent inodes.
2171 *
2172 * You need to check the return value with IS_ERR().  If false, the function
2173 * was successful and the return value is the now opened ntfs inode of the
2174 * allocated mft record.  *@mrec is then set to the allocated, mapped, pinned,
2175 * and locked mft record.  If IS_ERR() is true, the function failed and the
2176 * error code is obtained from PTR_ERR(return value).  *@mrec is undefined in
2177 * this case.
2178 *
2179 * Allocation strategy:
2180 *
2181 * To find a free mft record, we scan the mft bitmap for a zero bit.  To
2182 * optimize this we start scanning at the place specified by @base_ni or if
2183 * @base_ni is NULL we start where we last stopped and we perform wrap around
2184 * when we reach the end.  Note, we do not try to allocate mft records below
2185 * number 24 because numbers 0 to 15 are the defined system files anyway and 16
2186 * to 23 are special in that they are used for storing extension mft records
2187 * for the $DATA attribute of $MFT.  This is required to avoid the possibility
2188 * of creating a runlist with a circular dependency which once written to disk
2189 * can never be read in again.  Windows will only use records 16 to 23 for
2190 * normal files if the volume is completely out of space.  We never use them
2191 * which means that when the volume is really out of space we cannot create any
2192 * more files while Windows can still create up to 8 small files.  We can start
2193 * doing this at some later time, it does not matter much for now.
2194 *
2195 * When scanning the mft bitmap, we only search up to the last allocated mft
2196 * record.  If there are no free records left in the range 24 to number of
2197 * allocated mft records, then we extend the $MFT/$DATA attribute in order to
2198 * create free mft records.  We extend the allocated size of $MFT/$DATA by 16
2199 * records at a time or one cluster, if cluster size is above 16kiB.  If there
2200 * is not sufficient space to do this, we try to extend by a single mft record
2201 * or one cluster, if cluster size is above the mft record size.
2202 *
2203 * No matter how many mft records we allocate, we initialize only the first
2204 * allocated mft record, incrementing mft data size and initialized size
2205 * accordingly, open an ntfs_inode for it and return it to the caller, unless
2206 * there are less than 24 mft records, in which case we allocate and initialize
2207 * mft records until we reach record 24 which we consider as the first free mft
2208 * record for use by normal files.
2209 *
2210 * If during any stage we overflow the initialized data in the mft bitmap, we
2211 * extend the initialized size (and data size) by 8 bytes, allocating another
2212 * cluster if required.  The bitmap data size has to be at least equal to the
2213 * number of mft records in the mft, but it can be bigger, in which case the
2214 * superfluous bits are padded with zeroes.
2215 *
2216 * Thus, when we return successfully (IS_ERR() is false), we will have:
2217 *      - initialized / extended the mft bitmap if necessary,
2218 *      - initialized / extended the mft data if necessary,
2219 *      - set the bit corresponding to the mft record being allocated in the
2220 *        mft bitmap,
2221 *      - opened an ntfs_inode for the allocated mft record, and we will have
2222 *      - returned the ntfs_inode as well as the allocated mapped, pinned, and
2223 *        locked mft record.
2224 *
2225 * On error, the volume will be left in a consistent state and no record will
2226 * be allocated.  If rolling back a partial operation fails, we may leave some
2227 * inconsistent metadata in which case we call NVolSetErrors() so the volume is
2228 * left dirty when unmounted.
2229 *
2230 * Note, this function cannot make use of most of the normal functions, like
2231 * for example for attribute resizing, etc, because when the run list overflows
2232 * the base mft record and an attribute list is used, it is very important that
2233 * the extension mft records used to store the $DATA attribute of $MFT can be
2234 * reached without having to read the information contained inside them, as
2235 * this would make it impossible to find them in the first place after the
2236 * volume is unmounted.  $MFT/$BITMAP probably does not need to follow this
2237 * rule because the bitmap is not essential for finding the mft records, but on
2238 * the other hand, handling the bitmap in this special way would make life
2239 * easier because otherwise there might be circular invocations of functions
2240 * when reading the bitmap.
2241 */
2242ntfs_inode *ntfs_mft_record_alloc(ntfs_volume *vol, const int mode,
2243                ntfs_inode *base_ni, MFT_RECORD **mrec)
2244{
2245        s64 ll, bit, old_data_initialized, old_data_size;
2246        unsigned long flags;
2247        struct inode *vi;
2248        struct page *page;
2249        ntfs_inode *mft_ni, *mftbmp_ni, *ni;
2250        ntfs_attr_search_ctx *ctx;
2251        MFT_RECORD *m;
2252        ATTR_RECORD *a;
2253        pgoff_t index;
2254        unsigned int ofs;
2255        int err;
2256        le16 seq_no, usn;
2257        bool record_formatted = false;
2258
2259        if (base_ni) {
2260                ntfs_debug("Entering (allocating an extent mft record for "
2261                                "base mft record 0x%llx).",
2262                                (long long)base_ni->mft_no);
2263                /* @mode and @base_ni are mutually exclusive. */
2264                BUG_ON(mode);
2265        } else
2266                ntfs_debug("Entering (allocating a base mft record).");
2267        if (mode) {
2268                /* @mode and @base_ni are mutually exclusive. */
2269                BUG_ON(base_ni);
2270                /* We only support creation of normal files and directories. */
2271                if (!S_ISREG(mode) && !S_ISDIR(mode))
2272                        return ERR_PTR(-EOPNOTSUPP);
2273        }
2274        BUG_ON(!mrec);
2275        mft_ni = NTFS_I(vol->mft_ino);
2276        mftbmp_ni = NTFS_I(vol->mftbmp_ino);
2277        down_write(&vol->mftbmp_lock);
2278        bit = ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(vol, base_ni);
2279        if (bit >= 0) {
2280                ntfs_debug("Found and allocated free record (#1), bit 0x%llx.",
2281                                (long long)bit);
2282                goto have_alloc_rec;
2283        }
2284        if (bit != -ENOSPC) {
2285                up_write(&vol->mftbmp_lock);
2286                return ERR_PTR(bit);
2287        }
2288        /*
2289         * No free mft records left.  If the mft bitmap already covers more
2290         * than the currently used mft records, the next records are all free,
2291         * so we can simply allocate the first unused mft record.
2292         * Note: We also have to make sure that the mft bitmap at least covers
2293         * the first 24 mft records as they are special and whilst they may not
2294         * be in use, we do not allocate from them.
2295         */
2296        read_lock_irqsave(&mft_ni->size_lock, flags);
2297        ll = mft_ni->initialized_size >> vol->mft_record_size_bits;
2298        read_unlock_irqrestore(&mft_ni->size_lock, flags);
2299        read_lock_irqsave(&mftbmp_ni->size_lock, flags);
2300        old_data_initialized = mftbmp_ni->initialized_size;
2301        read_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
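        /*
         * Note, old_data_initialized is in bytes, so requiring it to be
         * greater than 3 means at least 32 bits of the bitmap are
         * initialized, which covers the reserved records 0 to 23.
         */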
2302        if (old_data_initialized << 3 > ll && old_data_initialized > 3) {
2303                bit = ll;
2304                if (bit < 24)
2305                        bit = 24;
2306                if (unlikely(bit >= (1ll << 32)))
2307                        goto max_err_out;
2308                ntfs_debug("Found free record (#2), bit 0x%llx.",
2309                                (long long)bit);
2310                goto found_free_rec;
2311        }
2312        /*
2313         * The mft bitmap needs to be expanded until it covers the first unused
2314         * mft record that we can allocate.
2315         * Note: The smallest mft record we allocate is mft record 24.
2316         */
2317        bit = old_data_initialized << 3;
2318        if (unlikely(bit >= (1ll << 32)))
2319                goto max_err_out;
2320        read_lock_irqsave(&mftbmp_ni->size_lock, flags);
2321        old_data_size = mftbmp_ni->allocated_size;
2322        ntfs_debug("Status of mftbmp before extension: allocated_size 0x%llx, "
2323                        "data_size 0x%llx, initialized_size 0x%llx.",
2324                        (long long)old_data_size,
2325                        (long long)i_size_read(vol->mftbmp_ino),
2326                        (long long)old_data_initialized);
2327        read_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
2328        if (old_data_initialized + 8 > old_data_size) {
2329                /* Need to extend bitmap by one more cluster. */
2330                ntfs_debug("mftbmp: initialized_size + 8 > allocated_size.");
2331                err = ntfs_mft_bitmap_extend_allocation_nolock(vol);
2332                if (unlikely(err)) {
2333                        up_write(&vol->mftbmp_lock);
2334                        goto err_out;
2335                }
2336#ifdef DEBUG
2337                read_lock_irqsave(&mftbmp_ni->size_lock, flags);
2338                ntfs_debug("Status of mftbmp after allocation extension: "
2339                                "allocated_size 0x%llx, data_size 0x%llx, "
2340                                "initialized_size 0x%llx.",
2341                                (long long)mftbmp_ni->allocated_size,
2342                                (long long)i_size_read(vol->mftbmp_ino),
2343                                (long long)mftbmp_ni->initialized_size);
2344                read_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
2345#endif /* DEBUG */
2346        }
2347        /*
2348         * We now have sufficient allocated space, extend the initialized_size
2349         * as well as the data_size if necessary and fill the new space with
2350         * zeroes.
2351         */
2352        err = ntfs_mft_bitmap_extend_initialized_nolock(vol);
2353        if (unlikely(err)) {
2354                up_write(&vol->mftbmp_lock);
2355                goto err_out;
2356        }
2357#ifdef DEBUG
2358        read_lock_irqsave(&mftbmp_ni->size_lock, flags);
2359        ntfs_debug("Status of mftbmp after initialized extension: "
2360                        "allocated_size 0x%llx, data_size 0x%llx, "
2361                        "initialized_size 0x%llx.",
2362                        (long long)mftbmp_ni->allocated_size,
2363                        (long long)i_size_read(vol->mftbmp_ino),
2364                        (long long)mftbmp_ni->initialized_size);
2365        read_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
2366#endif /* DEBUG */
2367        ntfs_debug("Found free record (#3), bit 0x%llx.", (long long)bit);
2368found_free_rec:
2369        /* @bit is the found free mft record, allocate it in the mft bitmap. */
2370        ntfs_debug("At found_free_rec.");
2371        err = ntfs_bitmap_set_bit(vol->mftbmp_ino, bit);
2372        if (unlikely(err)) {
2373                ntfs_error(vol->sb, "Failed to allocate bit in mft bitmap.");
2374                up_write(&vol->mftbmp_lock);
2375                goto err_out;
2376        }
2377        ntfs_debug("Set bit 0x%llx in mft bitmap.", (long long)bit);
2378have_alloc_rec:
2379        /*
2380         * The mft bitmap is now uptodate.  Deal with mft data attribute now.
2381         * Note, we keep hold of the mft bitmap lock for writing until all
2382         * modifications to the mft data attribute are complete, too, as they
2383         * will impact decisions for mft bitmap and mft record allocation done
2384         * by a parallel allocation and, if the lock were not held, a
2385         * parallel allocation could allocate the same mft record as this one.
2386         */
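            /* Byte offset just past the end of the allocated mft record. */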
2387        ll = (bit + 1) << vol->mft_record_size_bits;
2388        read_lock_irqsave(&mft_ni->size_lock, flags);
2389        old_data_initialized = mft_ni->initialized_size;
2390        read_unlock_irqrestore(&mft_ni->size_lock, flags);
2391        if (ll <= old_data_initialized) {
2392                ntfs_debug("Allocated mft record already initialized.");
2393                goto mft_rec_already_initialized;
2394        }
2395        ntfs_debug("Initializing allocated mft record.");
2396        /*
2397         * The mft record is outside the initialized data.  Extend the mft data
2398         * attribute until it covers the allocated record.  The loop is only
2399         * actually traversed more than once when a freshly formatted volume is
2400 * first written to, so it optimizes away nicely in the common case.
2401         */
2402        read_lock_irqsave(&mft_ni->size_lock, flags);
2403        ntfs_debug("Status of mft data before extension: "
2404                        "allocated_size 0x%llx, data_size 0x%llx, "
2405                        "initialized_size 0x%llx.",
2406                        (long long)mft_ni->allocated_size,
2407                        (long long)i_size_read(vol->mft_ino),
2408                        (long long)mft_ni->initialized_size);
2409        while (ll > mft_ni->allocated_size) {
2410                read_unlock_irqrestore(&mft_ni->size_lock, flags);
2411                err = ntfs_mft_data_extend_allocation_nolock(vol);
2412                if (unlikely(err)) {
2413                        ntfs_error(vol->sb, "Failed to extend mft data "
2414                                        "allocation.");
2415                        goto undo_mftbmp_alloc_nolock;
2416                }
2417                read_lock_irqsave(&mft_ni->size_lock, flags);
2418                ntfs_debug("Status of mft data after allocation extension: "
2419                                "allocated_size 0x%llx, data_size 0x%llx, "
2420                                "initialized_size 0x%llx.",
2421                                (long long)mft_ni->allocated_size,
2422                                (long long)i_size_read(vol->mft_ino),
2423                                (long long)mft_ni->initialized_size);
2424        }
2425        read_unlock_irqrestore(&mft_ni->size_lock, flags);
2426        /*
2427         * Extend mft data initialized size (and data size of course) to reach
2428 * the allocated mft record, formatting the mft records along the way.
2429         * Note: We only modify the ntfs_inode structure as that is all that is
2430         * needed by ntfs_mft_record_format().  We will update the attribute
2431         * record itself in one fell swoop later on.
2432         */
2433        write_lock_irqsave(&mft_ni->size_lock, flags);
2434        old_data_initialized = mft_ni->initialized_size;
2435        old_data_size = vol->mft_ino->i_size;
2436        while (ll > mft_ni->initialized_size) {
2437                s64 new_initialized_size, mft_no;
2438                
2439                new_initialized_size = mft_ni->initialized_size +
2440                                vol->mft_record_size;
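                    /* Number of the first not yet initialized mft record. */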
2441                mft_no = mft_ni->initialized_size >> vol->mft_record_size_bits;
2442                if (new_initialized_size > i_size_read(vol->mft_ino))
2443                        i_size_write(vol->mft_ino, new_initialized_size);
2444                write_unlock_irqrestore(&mft_ni->size_lock, flags);
2445                ntfs_debug("Initializing mft record 0x%llx.",
2446                                (long long)mft_no);
2447                err = ntfs_mft_record_format(vol, mft_no);
2448                if (unlikely(err)) {
2449                        ntfs_error(vol->sb, "Failed to format mft record.");
2450                        goto undo_data_init;
2451                }
2452                write_lock_irqsave(&mft_ni->size_lock, flags);
2453                mft_ni->initialized_size = new_initialized_size;
2454        }
2455        write_unlock_irqrestore(&mft_ni->size_lock, flags);
2456        record_formatted = true;
2457        /* Update the mft data attribute record to reflect the new sizes. */
2458        m = map_mft_record(mft_ni);
2459        if (IS_ERR(m)) {
2460                ntfs_error(vol->sb, "Failed to map mft record.");
2461                err = PTR_ERR(m);
2462                goto undo_data_init;
2463        }
2464        ctx = ntfs_attr_get_search_ctx(mft_ni, m);
2465        if (unlikely(!ctx)) {
2466                ntfs_error(vol->sb, "Failed to get search context.");
2467                err = -ENOMEM;
2468                unmap_mft_record(mft_ni);
2469                goto undo_data_init;
2470        }
2471        err = ntfs_attr_lookup(mft_ni->type, mft_ni->name, mft_ni->name_len,
2472                        CASE_SENSITIVE, 0, NULL, 0, ctx);
2473        if (unlikely(err)) {
2474                ntfs_error(vol->sb, "Failed to find first attribute extent of "
2475                                "mft data attribute.");
2476                ntfs_attr_put_search_ctx(ctx);
2477                unmap_mft_record(mft_ni);
2478                goto undo_data_init;
2479        }
2480        a = ctx->attr;
2481        read_lock_irqsave(&mft_ni->size_lock, flags);
2482        a->data.non_resident.initialized_size =
2483                        cpu_to_sle64(mft_ni->initialized_size);
2484        a->data.non_resident.data_size =
2485                        cpu_to_sle64(i_size_read(vol->mft_ino));
2486        read_unlock_irqrestore(&mft_ni->size_lock, flags);
2487        /* Ensure the changes make it to disk. */
2488        flush_dcache_mft_record_page(ctx->ntfs_ino);
2489        mark_mft_record_dirty(ctx->ntfs_ino);
2490        ntfs_attr_put_search_ctx(ctx);
2491        unmap_mft_record(mft_ni);
2492        read_lock_irqsave(&mft_ni->size_lock, flags);
2493        ntfs_debug("Status of mft data after mft record initialization: "
2494                        "allocated_size 0x%llx, data_size 0x%llx, "
2495                        "initialized_size 0x%llx.",
2496                        (long long)mft_ni->allocated_size,
2497                        (long long)i_size_read(vol->mft_ino),
2498                        (long long)mft_ni->initialized_size);
2499        BUG_ON(i_size_read(vol->mft_ino) > mft_ni->allocated_size);
2500        BUG_ON(mft_ni->initialized_size > i_size_read(vol->mft_ino));
2501        read_unlock_irqrestore(&mft_ni->size_lock, flags);
2502mft_rec_already_initialized:
2503        /*
2504         * We can finally drop the mft bitmap lock as the mft data attribute
2505         * has been fully updated.  The only disparity left is that the
2506         * allocated mft record still needs to be marked as in use to match the
2507         * set bit in the mft bitmap but this is actually not a problem since
2508         * this mft record is not referenced from anywhere yet and the fact
2509         * that it is allocated in the mft bitmap means that no-one will try to
2510         * allocate it either.
2511         */
2512        up_write(&vol->mftbmp_lock);
2513        /*
2514         * We now have allocated and initialized the mft record.  Calculate the
2515         * index of and the offset within the page cache page the record is in.
2516         */
2517        index = bit << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT;
2518        ofs = (bit << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
2519        /* Read, map, and pin the page containing the mft record. */
2520        page = ntfs_map_page(vol->mft_ino->i_mapping, index);
2521        if (IS_ERR(page)) {
2522                ntfs_error(vol->sb, "Failed to map page containing allocated "
2523                                "mft record 0x%llx.", (long long)bit);
2524                err = PTR_ERR(page);
2525                goto undo_mftbmp_alloc;
2526        }
2527        lock_page(page);
2528        BUG_ON(!PageUptodate(page));
2529        ClearPageUptodate(page);
2530        m = (MFT_RECORD*)((u8*)page_address(page) + ofs);
2531        /* If we just formatted the mft record no need to do it again. */
2532        if (!record_formatted) {
2533                /* Sanity check that the mft record is really not in use. */
2534                if (ntfs_is_file_record(m->magic) &&
2535                                (m->flags & MFT_RECORD_IN_USE)) {
2536                        ntfs_error(vol->sb, "Mft record 0x%llx was marked "
2537                                        "free in mft bitmap but is marked "
2538                                        "used itself.  Corrupt filesystem.  "
2539                                        "Unmount and run chkdsk.",
2540                                        (long long)bit);
2541                        err = -EIO;
2542                        SetPageUptodate(page);
2543                        unlock_page(page);
2544                        ntfs_unmap_page(page);
2545                        NVolSetErrors(vol);
2546                        goto undo_mftbmp_alloc;
2547                }
2548                /*
2549                 * We need to (re-)format the mft record, preserving the
2550                 * sequence number if it is not zero as well as the update
2551                 * sequence number if it is not zero or -1 (0xffff).  This
2552                 * means we do not need to care whether or not something went
2553                 * wrong with the previous mft record.
2554                 */
2555                seq_no = m->sequence_number;
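                    /* The usn is the first le16 of the update sequence array. */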
2556                usn = *(le16*)((u8*)m + le16_to_cpu(m->usa_ofs));
2557                err = ntfs_mft_record_layout(vol, bit, m);
2558                if (unlikely(err)) {
2559                        ntfs_error(vol->sb, "Failed to layout allocated mft "
2560                                        "record 0x%llx.", (long long)bit);
2561                        SetPageUptodate(page);
2562                        unlock_page(page);
2563                        ntfs_unmap_page(page);
2564                        goto undo_mftbmp_alloc;
2565                }
2566                if (seq_no)
2567                        m->sequence_number = seq_no;
2568                if (usn && le16_to_cpu(usn) != 0xffff)
2569                        *(le16*)((u8*)m + le16_to_cpu(m->usa_ofs)) = usn;
2570        }
2571        /* Set the mft record itself in use. */
2572        m->flags |= MFT_RECORD_IN_USE;
2573        if (S_ISDIR(mode))
2574                m->flags |= MFT_RECORD_IS_DIRECTORY;
2575        flush_dcache_page(page);
2576        SetPageUptodate(page);
2577        if (base_ni) {
2578                /*
2579                 * Setup the base mft record in the extent mft record.  This
2580                 * completes initialization of the allocated extent mft record
2581                 * and we can simply use it with map_extent_mft_record().
2582                 */
2583                m->base_mft_record = MK_LE_MREF(base_ni->mft_no,
2584                                base_ni->seq_no);
2585                /*
2586                 * Allocate an extent inode structure for the new mft record,
2587                 * attach it to the base inode @base_ni and map, pin, and lock
2588                 * its mft record, i.e. the one we just allocated.
2589                 */
2590                m = map_extent_mft_record(base_ni, bit, &ni);
2591                if (IS_ERR(m)) {
2592                        ntfs_error(vol->sb, "Failed to map allocated extent "
2593                                        "mft record 0x%llx.", (long long)bit);
2594                        err = PTR_ERR(m);
2595                        /* Set the mft record itself not in use. */
2596                        m = (MFT_RECORD*)((u8*)page_address(page) + ofs);
2597                        m->flags &= ~MFT_RECORD_IN_USE;
2598                        flush_dcache_page(page);
2599                        /* Make sure the mft record is written out to disk. */
2600                        mark_ntfs_record_dirty(page, ofs);
2601                        unlock_page(page);
2602                        ntfs_unmap_page(page);
2603                        goto undo_mftbmp_alloc;
2604                }
2605                /*
2606                 * Make sure the allocated mft record is written out to disk.
2607                 * No need to set the inode dirty because the caller is going
2608                 * to do that anyway after finishing with the new extent mft
2609                 * record (e.g. at a minimum a new attribute will be added to
2610                 * the mft record).
2611                 */
2612                mark_ntfs_record_dirty(page, ofs);
2613                unlock_page(page);
2614                /*
2615                 * Need to unmap the page since map_extent_mft_record() mapped
2616                 * it as well so we have it mapped twice at the moment.
2617                 */
2618                ntfs_unmap_page(page);
2619        } else {
2620                /*
2621                 * Allocate a new VFS inode and set it up.  NOTE: @vi->i_nlink
2622                 * is set to 1 but the mft record->link_count is 0.  The caller
2623                 * needs to bear this in mind.
2624                 */
2625                vi = new_inode(vol->sb);
2626                if (unlikely(!vi)) {
2627                        err = -ENOMEM;
2628                        /* Set the mft record itself not in use. */
2629                        m->flags &= cpu_to_le16(
2630                                        ~le16_to_cpu(MFT_RECORD_IN_USE));
2631                        flush_dcache_page(page);
2632                        /* Make sure the mft record is written out to disk. */
2633                        mark_ntfs_record_dirty(page, ofs);
2634                        unlock_page(page);
2635                        ntfs_unmap_page(page);
2636                        goto undo_mftbmp_alloc;
2637                }
2638                vi->i_ino = bit;
2639                /*
2640                 * This is for checking whether an inode has changed w.r.t. a
2641                 * file so that the file can be updated if necessary (compare
2642                 * with f_version).
2643                 */
2644                vi->i_version = 1;
2645
2646                /* The owner and group come from the ntfs volume. */
2647                vi->i_uid = vol->uid;
2648                vi->i_gid = vol->gid;
2649
2650                /* Initialize the ntfs specific part of @vi. */
2651                ntfs_init_big_inode(vi);
2652                ni = NTFS_I(vi);
2653                /*
2654                 * Set the appropriate mode, attribute type, and name.  For
2655                 * directories, also setup the index values to the defaults.
2656                 */
2657                if (S_ISDIR(mode)) {
2658                        vi->i_mode = S_IFDIR | S_IRWXUGO;
2659                        vi->i_mode &= ~vol->dmask;
2660
2661                        NInoSetMstProtected(ni);
2662                        ni->type = AT_INDEX_ALLOCATION;
2663                        ni->name = I30;
2664                        ni->name_len = 4;
2665
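                            /*
                             * New directory indexes use 4096-byte index blocks
                             * here; index vcns are expressed in clusters when a
                             * whole cluster fits inside an index block and in
                             * sectors otherwise.
                             */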
2666                        ni->itype.index.block_size = 4096;
2667                        ni->itype.index.block_size_bits = ntfs_ffs(4096) - 1;
2668                        ni->itype.index.collation_rule = COLLATION_FILE_NAME;
2669                        if (vol->cluster_size <= ni->itype.index.block_size) {
2670                                ni->itype.index.vcn_size = vol->cluster_size;
2671                                ni->itype.index.vcn_size_bits =
2672                                                vol->cluster_size_bits;
2673                        } else {
2674                                ni->itype.index.vcn_size = vol->sector_size;
2675                                ni->itype.index.vcn_size_bits =
2676                                                vol->sector_size_bits;
2677                        }
2678                } else {
2679                        vi->i_mode = S_IFREG | S_IRWXUGO;
2680                        vi->i_mode &= ~vol->fmask;
2681
2682                        ni->type = AT_DATA;
2683                        ni->name = NULL;
2684                        ni->name_len = 0;
2685                }
2686                if (IS_RDONLY(vi))
2687                        vi->i_mode &= ~S_IWUGO;
2688
2689                /* Set the inode times to the current time. */
2690                vi->i_atime = vi->i_mtime = vi->i_ctime =
2691                        current_fs_time(vi->i_sb);
2692                /*
2693                 * Set the file size to 0; the ntfs inode sizes are set to 0
2694                 * by the call to ntfs_init_big_inode() above.
2695                 */
2696                vi->i_size = 0;
2697                vi->i_blocks = 0;
2698
2699                /* Set the sequence number. */
2700                vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number);
2701                /*
2702                 * Manually map, pin, and lock the mft record as we already
2703                 * have its page mapped and it is very easy to do.
2704                 */
2705                atomic_inc(&ni->count);
2706                mutex_lock(&ni->mrec_lock);
2707                ni->page = page;
2708                ni->page_ofs = ofs;
2709                /*
2710                 * Make sure the allocated mft record is written out to disk.
2711                 * NOTE: We do not set the ntfs inode dirty because this would
2712                 * fail in ntfs_write_inode() because the inode does not have a
2713                 * standard information attribute yet.  Also, there is no need
2714                 * to set the inode dirty because the caller is going to do
2715                 * that anyway after finishing with the new mft record (e.g. at
2716                 * a minimum some new attributes will be added to the mft
2717                 * record).
2718                 */
2719                mark_ntfs_record_dirty(page, ofs);
2720                unlock_page(page);
2721
2722                /* Add the inode to the inode hash for the superblock. */
2723                insert_inode_hash(vi);
2724
2725                /* Update the default mft allocation position. */
2726                vol->mft_data_pos = bit + 1;
2727        }
2728        /*
2729         * Return the opened, allocated inode of the allocated mft record as
2730         * well as the mapped, pinned, and locked mft record.
2731         */
2732        ntfs_debug("Returning opened, allocated %sinode 0x%llx.",
2733                        base_ni ? "extent " : "", (long long)bit);
2734        *mrec = m;
2735        return ni;
2736undo_data_init:
2737        write_lock_irqsave(&mft_ni->size_lock, flags);
2738        mft_ni->initialized_size = old_data_initialized;
2739        i_size_write(vol->mft_ino, old_data_size);
2740        write_unlock_irqrestore(&mft_ni->size_lock, flags);
2741        goto undo_mftbmp_alloc_nolock;
2742undo_mftbmp_alloc:
2743        down_write(&vol->mftbmp_lock);
2744undo_mftbmp_alloc_nolock:
2745        if (ntfs_bitmap_clear_bit(vol->mftbmp_ino, bit)) {
2746                ntfs_error(vol->sb, "Failed to clear bit in mft bitmap.%s", es);
2747                NVolSetErrors(vol);
2748        }
2749        up_write(&vol->mftbmp_lock);
2750err_out:
2751        return ERR_PTR(err);
2752max_err_out:
2753        ntfs_warning(vol->sb, "Cannot allocate mft record because the maximum "
2754                        "number of inodes (2^32) has already been reached.");
2755        up_write(&vol->mftbmp_lock);
2756        return ERR_PTR(-ENOSPC);
2757}
2758
2759/**
2760 * ntfs_extent_mft_record_free - free an extent mft record on an ntfs volume
2761 * @ni:         ntfs inode of the mapped extent mft record to free
2762 * @m:          mapped extent mft record of the ntfs inode @ni
2763 *
2764 * Free the mapped extent mft record @m of the extent ntfs inode @ni.
2765 *
2766 * Note that this function unmaps the mft record and closes and destroys @ni
2767 * internally and hence you can use neither @ni nor @m any more after this
2768 * function returns success.
2769 *
2770 * On success return 0 and on error return -errno.  @ni and @m are still valid
2771 * in this case and have not been freed.
2772 *
2773 * For some errors an error message is displayed, the success code 0 is
2774 * returned, and the volume is left dirty on umount.  This makes sense when we
2775 * could not roll back the changes that were already done: the caller no longer
2776 * wants to reference this mft record, so it does not matter to the caller if
2777 * something is wrong with it as long as it is properly detached from the base
2778 * inode.
2779 */
2780int ntfs_extent_mft_record_free(ntfs_inode *ni, MFT_RECORD *m)
2781{
2782        unsigned long mft_no = ni->mft_no;
2783        ntfs_volume *vol = ni->vol;
2784        ntfs_inode *base_ni;
2785        ntfs_inode **extent_nis;
2786        int i, err;
2787        le16 old_seq_no;
2788        u16 seq_no;
2789        
2790        BUG_ON(NInoAttr(ni));
2791        BUG_ON(ni->nr_extents != -1);
2792
2793        mutex_lock(&ni->extent_lock);
2794        base_ni = ni->ext.base_ntfs_ino;
2795        mutex_unlock(&ni->extent_lock);
2796
2797        BUG_ON(base_ni->nr_extents <= 0);
2798
2799        ntfs_debug("Entering for extent inode 0x%lx, base inode 0x%lx.",
2800                        mft_no, base_ni->mft_no);
2801
2802        mutex_lock(&base_ni->extent_lock);
2803
2804        /* Make sure we are holding the only reference to the extent inode. */
2805        if (atomic_read(&ni->count) > 2) {
2806                ntfs_error(vol->sb, "Tried to free busy extent inode 0x%lx, "
2807                                "not freeing.", mft_no);
2808                mutex_unlock(&base_ni->extent_lock);
2809                return -EBUSY;
2810        }
2811
2812        /* Dissociate the ntfs inode from the base inode. */
2813        extent_nis = base_ni->ext.extent_ntfs_inos;
2814        err = -ENOENT;
2815        for (i = 0; i < base_ni->nr_extents; i++) {
2816                if (ni != extent_nis[i])
2817                        continue;
2818                extent_nis += i;
2819                base_ni->nr_extents--;
2820                memmove(extent_nis, extent_nis + 1, (base_ni->nr_extents - i) *
2821                                sizeof(ntfs_inode*));
2822                err = 0;
2823                break;
2824        }
2825
2826        mutex_unlock(&base_ni->extent_lock);
2827
2828        if (unlikely(err)) {
2829                ntfs_error(vol->sb, "Extent inode 0x%lx is not attached to "
2830                                "its base inode 0x%lx.", mft_no,
2831                                base_ni->mft_no);
2832                BUG();
2833        }
2834
2835        /*
2836         * The extent inode is no longer attached to the base inode so no one
2837         * can get a reference to it any more.
2838         */
2839
2840        /* Mark the mft record as not in use. */
2841        m->flags &= ~MFT_RECORD_IN_USE;
2842
2843        /* Increment the sequence number, skipping zero, if it is not zero. */
2844        old_seq_no = m->sequence_number;
2845        seq_no = le16_to_cpu(old_seq_no);
2846        if (seq_no == 0xffff)
2847                seq_no = 1;
2848        else if (seq_no)
2849                seq_no++;
2850        m->sequence_number = cpu_to_le16(seq_no);
2851
2852        /*
2853         * Set the ntfs inode dirty and write it out.  We do not need to worry
2854         * about the base inode here since whatever caused the extent mft
2855         * record to be freed is guaranteed to do it already.
2856         */
2857        NInoSetDirty(ni);
2858        err = write_mft_record(ni, m, 0);
2859        if (unlikely(err)) {
2860                ntfs_error(vol->sb, "Failed to write mft record 0x%lx, not "
2861                                "freeing.", mft_no);
2862                goto rollback;
2863        }
2864rollback_error:
2865        /* Unmap and throw away the now freed extent inode. */
2866        unmap_extent_mft_record(ni);
2867        ntfs_clear_extent_inode(ni);
2868
2869        /* Clear the bit in the $MFT/$BITMAP corresponding to this record. */
2870        down_write(&vol->mftbmp_lock);
2871        err = ntfs_bitmap_clear_bit(vol->mftbmp_ino, mft_no);
2872        up_write(&vol->mftbmp_lock);
2873        if (unlikely(err)) {
2874                /*
2875                 * The extent inode is gone but we failed to deallocate it in
2876                 * the mft bitmap.  Just emit a warning and leave the volume
2877                 * dirty on umount.
2878                 */
2879                ntfs_error(vol->sb, "Failed to clear bit in mft bitmap.%s", es);
2880                NVolSetErrors(vol);
2881        }
2882        return 0;
2883rollback:
2884        /* Rollback what we did... */
2885        mutex_lock(&base_ni->extent_lock);
2886        extent_nis = base_ni->ext.extent_ntfs_inos;
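            /*
             * The extent inode array grows in chunks of four pointers, so a
             * multiple-of-four nr_extents means the array is full and needs
             * to be reallocated before @ni can be re-added to it.
             */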
2887        if (!(base_ni->nr_extents & 3)) {
2888                int new_size = (base_ni->nr_extents + 4) * sizeof(ntfs_inode*);
2889
2890                extent_nis = kmalloc(new_size, GFP_NOFS);
2891                if (unlikely(!extent_nis)) {
2892                        ntfs_error(vol->sb, "Failed to allocate internal "
2893                                        "buffer during rollback.%s", es);
2894                        mutex_unlock(&base_ni->extent_lock);
2895                        NVolSetErrors(vol);
2896                        goto rollback_error;
2897                }
2898                if (base_ni->nr_extents) {
2899                        BUG_ON(!base_ni->ext.extent_ntfs_inos);
2900                        memcpy(extent_nis, base_ni->ext.extent_ntfs_inos,
2901                                        new_size - 4 * sizeof(ntfs_inode*));
2902                        kfree(base_ni->ext.extent_ntfs_inos);
2903                }
2904                base_ni->ext.extent_ntfs_inos = extent_nis;
2905        }
2906        m->flags |= MFT_RECORD_IN_USE;
2907        m->sequence_number = old_seq_no;
2908        extent_nis[base_ni->nr_extents++] = ni;
2909        mutex_unlock(&base_ni->extent_lock);
2910        mark_mft_record_dirty(ni);
2911        return err;
2912}
2913#endif /* NTFS_RW */
2914