linux/fs/xfs/xfs_pnfs.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Christoph Hellwig.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_iomap.h"
#include "xfs_pnfs.h"

/*
 * Ensure that we do not have any outstanding pNFS layouts that can be used by
 * clients to directly read from or write to this inode.  This must be called
 * before every operation that can remove blocks from the extent map.
 * Additionally we call it during the write operation, where we aren't concerned
 * about exposing unallocated blocks but just want to provide basic
 * synchronization between a local writer and pNFS clients.  mmap writes would
 * also benefit from this sort of synchronization, but due to the tricky locking
 * rules in the page fault path we don't bother.
 */
int
xfs_break_leased_layouts(
        struct inode            *inode,
        uint                    *iolock,
        bool                    *did_unlock)
{
        struct xfs_inode        *ip = XFS_I(inode);
        int                     error;

        while ((error = break_layout(inode, false)) == -EWOULDBLOCK) {
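                /*
                 * A non-blocking break_layout() found an outstanding layout.
                 * Drop the iolock so the lease break can make progress (the
                 * layout return may need to do I/O against this inode), then
                 * wait for the recall to finish before retrying.
                 */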
                xfs_iunlock(ip, *iolock);
                *did_unlock = true;
                error = break_layout(inode, true);
                *iolock &= ~XFS_IOLOCK_SHARED;
                *iolock |= XFS_IOLOCK_EXCL;
                xfs_ilock(ip, *iolock);
        }

        return error;
}
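
/*
 * Caller-side usage sketch (illustrative only, not part of this file; the
 * wrapper around this helper lives in xfs_file.c and differs between kernel
 * versions).  The iolock must already be held when calling in:
 *
 *      uint    iolock = XFS_IOLOCK_SHARED;
 *      bool    did_unlock = false;
 *      int     error;
 *
 *      xfs_ilock(ip, iolock);
 *      error = xfs_break_leased_layouts(VFS_I(ip), &iolock, &did_unlock);
 *      if (error) {
 *              xfs_iunlock(ip, iolock);
 *              return error;
 *      }
 *
 * On success the iolock is still held but may have been upgraded to
 * XFS_IOLOCK_EXCL; did_unlock reports whether it was dropped along the way.
 */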

/*
 * Get a unique ID including its location so that the client can identify
 * the exported device.
 */
int
xfs_fs_get_uuid(
        struct super_block      *sb,
        u8                      *buf,
        u32                     *len,
        u64                     *offset)
{
        struct xfs_mount        *mp = XFS_M(sb);

        printk_once(KERN_NOTICE
"XFS (%s): using experimental pNFS feature, use at your own risk!\n",
                mp->m_super->s_id);

        if (*len < sizeof(uuid_t))
                return -EINVAL;

        memcpy(buf, &mp->m_sb.sb_uuid, sizeof(uuid_t));
        *len = sizeof(uuid_t);
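        /*
         * Also report where the uuid sits in the on-disk superblock, so the
         * client can read it straight off the device and verify it has found
         * the right one.
         */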
        *offset = offsetof(struct xfs_dsb, sb_uuid);
        return 0;
}

/*
 * Get a layout for the pNFS client.
 */
int
xfs_fs_map_blocks(
        struct inode            *inode,
        loff_t                  offset,
        u64                     length,
        struct iomap            *iomap,
        bool                    write,
        u32                     *device_generation)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_bmbt_irec    imap;
        xfs_fileoff_t           offset_fsb, end_fsb;
        loff_t                  limit;
        int                     bmapi_flags = XFS_BMAPI_ENTIRE;
        int                     nimaps = 1;
        uint                    lock_flags;
        int                     error = 0;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        /*
         * We can't export inodes residing on the realtime device.  The realtime
         * device doesn't have a UUID to identify it, so the client has no way
         * to find it.
         */
        if (XFS_IS_REALTIME_INODE(ip))
                return -ENXIO;

        /*
         * The pNFS block layout spec actually supports reflink like
         * functionality, but the Linux pNFS server doesn't implement it yet.
         */
        if (xfs_is_reflink_inode(ip))
                return -ENXIO;

        /*
         * Lock out any other I/O before we flush and invalidate the pagecache,
         * and then hand out a layout to the remote system.  This is very
         * similar to direct I/O, except that the synchronization is much more
         * complicated.  See the comment near xfs_break_leased_layouts
         * for a detailed explanation.
         */
        xfs_ilock(ip, XFS_IOLOCK_EXCL);

        error = -EINVAL;
        limit = mp->m_super->s_maxbytes;
        if (!write)
                limit = max(limit, round_up(i_size_read(inode),
                                     inode->i_sb->s_blocksize));
        if (offset > limit)
                goto out_unlock;
        if (offset > limit - length)
                length = limit - offset;

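        /*
         * Push out dirty pagecache and toss the cached pages, so that the
         * client (which reads and writes the block device directly) and the
         * local pagecache cannot disagree about the file contents.
         */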
        error = filemap_write_and_wait(inode->i_mapping);
        if (error)
                goto out_unlock;
        error = invalidate_inode_pages2(inode->i_mapping);
        if (WARN_ON_ONCE(error))
                goto out_unlock;

        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length);
        offset_fsb = XFS_B_TO_FSBT(mp, offset);

        lock_flags = xfs_ilock_data_map_shared(ip);
        error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
                                &imap, &nimaps, bmapi_flags);

        ASSERT(!nimaps || imap.br_startblock != DELAYSTARTBLOCK);

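        /*
         * For a write layout the client writes straight to the block device
         * and cannot allocate blocks itself, so back any hole in the
         * requested range with a real allocation before handing out the
         * mapping.
         */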
        if (!error && write &&
            (!nimaps || imap.br_startblock == HOLESTARTBLOCK)) {
                if (offset + length > XFS_ISIZE(ip))
                        end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
                else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
                        end_fsb = min(end_fsb, imap.br_startoff +
                                               imap.br_blockcount);
                xfs_iunlock(ip, lock_flags);

                error = xfs_iomap_write_direct(ip, offset_fsb,
                                end_fsb - offset_fsb, &imap);
                if (error)
                        goto out_unlock;

                /*
                 * Ensure the next transaction is committed synchronously so
                 * that the blocks allocated and handed out to the client are
                 * guaranteed to be present even after a server crash.
                 */
                error = xfs_update_prealloc_flags(ip,
                                XFS_PREALLOC_SET | XFS_PREALLOC_SYNC);
                if (error)
                        goto out_unlock;
        } else {
                xfs_iunlock(ip, lock_flags);
        }
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);

        error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
        *device_generation = mp->m_generation;
        return error;
out_unlock:
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        return error;
}

/*
 * Ensure the size update falls into a valid allocated block.
 */
static int
xfs_pnfs_validate_isize(
        struct xfs_inode        *ip,
        xfs_off_t               isize)
{
        struct xfs_bmbt_irec    imap;
        int                     nimaps = 1;
        int                     error = 0;

        xfs_ilock(ip, XFS_ILOCK_SHARED);
        error = xfs_bmapi_read(ip, XFS_B_TO_FSBT(ip->i_mount, isize - 1), 1,
                                &imap, &nimaps, 0);
        xfs_iunlock(ip, XFS_ILOCK_SHARED);
        if (error)
                return error;

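        /*
         * The block containing the new EOF must be a real, written
         * allocation; anything else means the size update doesn't match what
         * the client actually committed.
         */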
        if (imap.br_startblock == HOLESTARTBLOCK ||
            imap.br_startblock == DELAYSTARTBLOCK ||
            imap.br_state == XFS_EXT_UNWRITTEN)
                return -EIO;
        return 0;
}

/*
 * Make sure the blocks described by maps are stable on disk.  This includes
 * converting any unwritten extents, flushing the disk cache and updating the
 * time stamps.
 *
 * Note that we rely on the caller to always send us a timestamp update so that
 * we always commit a transaction here.  If that stops being true we will have
 * to manually flush the cache here similar to what the fsync code path does
 * for datasyncs on files that have no dirty metadata.
 */
int
xfs_fs_commit_blocks(
        struct inode            *inode,
        struct iomap            *maps,
        int                     nr_maps,
        struct iattr            *iattr)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        bool                    update_isize = false;
        int                     error, i;
        loff_t                  size;

        ASSERT(iattr->ia_valid & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME));

        xfs_ilock(ip, XFS_IOLOCK_EXCL);

        size = i_size_read(inode);
        if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size > size) {
                update_isize = true;
                size = iattr->ia_size;
        }

        for (i = 0; i < nr_maps; i++) {
                u64 start, length, end;

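                /*
                 * Clamp each mapping to the (possibly updated) file size;
                 * ranges entirely beyond EOF need no unwritten conversion.
                 */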
                start = maps[i].offset;
                if (start > size)
                        continue;

                end = start + maps[i].length;
                if (end > size)
                        end = size;

                length = end - start;
                if (!length)
                        continue;

                /*
                 * Make sure reads through the pagecache see the new data.
                 */
                error = invalidate_inode_pages2_range(inode->i_mapping,
                                        start >> PAGE_SHIFT,
                                        (end - 1) >> PAGE_SHIFT);
                WARN_ON_ONCE(error);

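                /*
                 * Convert the committed range out of the unwritten state so
                 * that reads return the client's data rather than zeroes.
                 */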
                error = xfs_iomap_write_unwritten(ip, start, length, false);
                if (error)
                        goto out_drop_iolock;
        }

        if (update_isize) {
                error = xfs_pnfs_validate_isize(ip, size);
                if (error)
                        goto out_drop_iolock;
        }

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
        if (error)
                goto out_drop_iolock;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

        xfs_setattr_time(ip, iattr);
        if (update_isize) {
                i_size_write(inode, iattr->ia_size);
                ip->i_d.di_size = iattr->ia_size;
        }

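        /*
         * Commit synchronously (see the comment above the function) so that
         * this update, and the extent conversions logged before it, are on
         * stable storage before we reply to the client.
         */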
        xfs_trans_set_sync(tp);
        error = xfs_trans_commit(tp);

out_drop_iolock:
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        return error;
}