linux/fs/ceph/file.c
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/ceph/ceph_debug.h>
   3#include <linux/ceph/striper.h>
   4
   5#include <linux/module.h>
   6#include <linux/sched.h>
   7#include <linux/slab.h>
   8#include <linux/file.h>
   9#include <linux/mount.h>
  10#include <linux/namei.h>
  11#include <linux/writeback.h>
  12#include <linux/falloc.h>
  13#include <linux/iversion.h>
  14
  15#include "super.h"
  16#include "mds_client.h"
  17#include "cache.h"
  18
  19static __le32 ceph_flags_sys2wire(u32 flags)
  20{
  21        u32 wire_flags = 0;
  22
  23        switch (flags & O_ACCMODE) {
  24        case O_RDONLY:
  25                wire_flags |= CEPH_O_RDONLY;
  26                break;
  27        case O_WRONLY:
  28                wire_flags |= CEPH_O_WRONLY;
  29                break;
  30        case O_RDWR:
  31                wire_flags |= CEPH_O_RDWR;
  32                break;
  33        }
  34
  35        flags &= ~O_ACCMODE;
  36
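     /*
      * map each supported flag to its CEPH_O_* wire bit and clear it from
      * flags, so anything left over can be reported below
      */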
  37#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }
  38
  39        ceph_sys2wire(O_CREAT);
  40        ceph_sys2wire(O_EXCL);
  41        ceph_sys2wire(O_TRUNC);
  42        ceph_sys2wire(O_DIRECTORY);
  43        ceph_sys2wire(O_NOFOLLOW);
  44
  45#undef ceph_sys2wire
  46
  47        if (flags)
  48                dout("unused open flags: %x\n", flags);
  49
  50        return cpu_to_le32(wire_flags);
  51}
  52
  53/*
  54 * Ceph file operations
  55 *
  56 * Implement basic open/close functionality, and implement
  57 * read/write.
  58 *
  59 * We implement three modes of file I/O:
   60 *  - buffered uses the generic_file_read_iter / generic_perform_write helpers
  61 *
  62 *  - synchronous is used when there is multi-client read/write
  63 *    sharing, avoids the page cache, and synchronously waits for an
  64 *    ack from the OSD.
  65 *
  66 *  - direct io takes the variant of the sync path that references
  67 *    user pages directly.
  68 *
  69 * fsync() flushes and waits on dirty pages, but just queues metadata
  70 * for writeback: since the MDS can recover size and mtime there is no
  71 * need to wait for MDS acknowledgement.
  72 */
  73
  74/*
  75 * How many pages to get in one call to iov_iter_get_pages().  This
  76 * determines the size of the on-stack array used as a buffer.
  77 */
  78#define ITER_GET_BVECS_PAGES    64
  79
  80static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
  81                                struct bio_vec *bvecs)
  82{
  83        size_t size = 0;
  84        int bvec_idx = 0;
  85
  86        if (maxsize > iov_iter_count(iter))
  87                maxsize = iov_iter_count(iter);
  88
  89        while (size < maxsize) {
  90                struct page *pages[ITER_GET_BVECS_PAGES];
  91                ssize_t bytes;
  92                size_t start;
  93                int idx = 0;
  94
  95                bytes = iov_iter_get_pages(iter, pages, maxsize - size,
  96                                           ITER_GET_BVECS_PAGES, &start);
  97                if (bytes < 0)
  98                        return size ?: bytes;
  99
 100                iov_iter_advance(iter, bytes);
 101                size += bytes;
 102
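                     /*
                      * convert the pinned pages into bio_vecs; only the first
                      * one carries the sub-page offset returned by
                      * iov_iter_get_pages()
                      */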
 103                for ( ; bytes; idx++, bvec_idx++) {
 104                        struct bio_vec bv = {
 105                                .bv_page = pages[idx],
 106                                .bv_len = min_t(int, bytes, PAGE_SIZE - start),
 107                                .bv_offset = start,
 108                        };
 109
 110                        bvecs[bvec_idx] = bv;
 111                        bytes -= bv.bv_len;
 112                        start = 0;
 113                }
 114        }
 115
 116        return size;
 117}
 118
 119/*
 120 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 121 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 122 * page.
 123 *
 124 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 125 * Return the number of bytes in the created bio_vec array, or an error.
 126 */
 127static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
 128                                    struct bio_vec **bvecs, int *num_bvecs)
 129{
 130        struct bio_vec *bv;
 131        size_t orig_count = iov_iter_count(iter);
 132        ssize_t bytes;
 133        int npages;
 134
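             /*
              * temporarily clamp the iterator to maxsize so iov_iter_npages()
              * counts only the pages we can actually use, then restore the
              * original count
              */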
 135        iov_iter_truncate(iter, maxsize);
 136        npages = iov_iter_npages(iter, INT_MAX);
 137        iov_iter_reexpand(iter, orig_count);
 138
 139        /*
 140         * __iter_get_bvecs() may populate only part of the array -- zero it
 141         * out.
 142         */
 143        bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
 144        if (!bv)
 145                return -ENOMEM;
 146
 147        bytes = __iter_get_bvecs(iter, maxsize, bv);
 148        if (bytes < 0) {
 149                /*
 150                 * No pages were pinned -- just free the array.
 151                 */
 152                kvfree(bv);
 153                return bytes;
 154        }
 155
 156        *bvecs = bv;
 157        *num_bvecs = npages;
 158        return bytes;
 159}
 160
 161static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
 162{
 163        int i;
 164
 165        for (i = 0; i < num_bvecs; i++) {
 166                if (bvecs[i].bv_page) {
 167                        if (should_dirty)
 168                                set_page_dirty_lock(bvecs[i].bv_page);
 169                        put_page(bvecs[i].bv_page);
 170                }
 171        }
 172        kvfree(bvecs);
 173}
 174
 175/*
 176 * Prepare an open request.  Preallocate ceph_cap to avoid an
 177 * inopportune ENOMEM later.
 178 */
 179static struct ceph_mds_request *
 180prepare_open_request(struct super_block *sb, int flags, int create_mode)
 181{
 182        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
 183        struct ceph_mds_client *mdsc = fsc->mdsc;
 184        struct ceph_mds_request *req;
 185        int want_auth = USE_ANY_MDS;
 186        int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
 187
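             /*
              * opens that may modify the inode must go to the auth MDS;
              * read-only opens can be handled by any MDS
              */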
 188        if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
 189                want_auth = USE_AUTH_MDS;
 190
 191        req = ceph_mdsc_create_request(mdsc, op, want_auth);
 192        if (IS_ERR(req))
 193                goto out;
 194        req->r_fmode = ceph_flags_to_mode(flags);
 195        req->r_args.open.flags = ceph_flags_sys2wire(flags);
 196        req->r_args.open.mode = cpu_to_le32(create_mode);
 197out:
 198        return req;
 199}
 200
 201static int ceph_init_file_info(struct inode *inode, struct file *file,
 202                                        int fmode, bool isdir)
 203{
 204        struct ceph_file_info *fi;
 205
 206        dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
 207                        inode->i_mode, isdir ? "dir" : "regular");
 208        BUG_ON(inode->i_fop->release != ceph_release);
 209
 210        if (isdir) {
 211                struct ceph_dir_file_info *dfi =
 212                        kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
 213                if (!dfi) {
 214                        ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
 215                        return -ENOMEM;
 216                }
 217
 218                file->private_data = dfi;
 219                fi = &dfi->file_info;
 220                dfi->next_offset = 2;
 221                dfi->readdir_cache_idx = -1;
 222        } else {
 223                fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
 224                if (!fi) {
 225                        ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
 226                        return -ENOMEM;
 227                }
 228
 229                file->private_data = fi;
 230        }
 231
 232        fi->fmode = fmode;
 233        spin_lock_init(&fi->rw_contexts_lock);
 234        INIT_LIST_HEAD(&fi->rw_contexts);
 235
 236        return 0;
 237}
 238
 239/*
 240 * initialize private struct file data.
 241 * if we fail, clean up by dropping fmode reference on the ceph_inode
 242 */
 243static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
 244{
 245        int ret = 0;
 246
 247        switch (inode->i_mode & S_IFMT) {
 248        case S_IFREG:
 249                ceph_fscache_register_inode_cookie(inode);
 250                ceph_fscache_file_set_cookie(inode, file);
 251                /* fall through */
 252        case S_IFDIR:
 253                ret = ceph_init_file_info(inode, file, fmode,
 254                                                S_ISDIR(inode->i_mode));
 255                if (ret)
 256                        return ret;
 257                break;
 258
 259        case S_IFLNK:
 260                dout("init_file %p %p 0%o (symlink)\n", inode, file,
 261                     inode->i_mode);
 262                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
 263                break;
 264
 265        default:
 266                dout("init_file %p %p 0%o (special)\n", inode, file,
 267                     inode->i_mode);
 268                /*
 269                 * we need to drop the open ref now, since we don't
 270                 * have .release set to ceph_release.
 271                 */
 272                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
 273                BUG_ON(inode->i_fop->release == ceph_release);
 274
 275                /* call the proper open fop */
 276                ret = inode->i_fop->open(inode, file);
 277        }
 278        return ret;
 279}
 280
 281/*
  282 * try to renew caps after the session gets killed.
 283 */
 284int ceph_renew_caps(struct inode *inode)
 285{
 286        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
 287        struct ceph_inode_info *ci = ceph_inode(inode);
 288        struct ceph_mds_request *req;
 289        int err, flags, wanted;
 290
 291        spin_lock(&ci->i_ceph_lock);
 292        wanted = __ceph_caps_file_wanted(ci);
 293        if (__ceph_is_any_real_caps(ci) &&
 294            (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
 295                int issued = __ceph_caps_issued(ci, NULL);
 296                spin_unlock(&ci->i_ceph_lock);
 297                dout("renew caps %p want %s issued %s updating mds_wanted\n",
 298                     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
 299                ceph_check_caps(ci, 0, NULL);
 300                return 0;
 301        }
 302        spin_unlock(&ci->i_ceph_lock);
 303
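             /* translate the wanted caps into open flags for a fresh MDS open */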
 304        flags = 0;
 305        if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
 306                flags = O_RDWR;
 307        else if (wanted & CEPH_CAP_FILE_RD)
 308                flags = O_RDONLY;
 309        else if (wanted & CEPH_CAP_FILE_WR)
 310                flags = O_WRONLY;
 311#ifdef O_LAZY
 312        if (wanted & CEPH_CAP_FILE_LAZYIO)
 313                flags |= O_LAZY;
 314#endif
 315
 316        req = prepare_open_request(inode->i_sb, flags, 0);
 317        if (IS_ERR(req)) {
 318                err = PTR_ERR(req);
 319                goto out;
 320        }
 321
 322        req->r_inode = inode;
 323        ihold(inode);
 324        req->r_num_caps = 1;
 325        req->r_fmode = -1;
 326
 327        err = ceph_mdsc_do_request(mdsc, NULL, req);
 328        ceph_mdsc_put_request(req);
 329out:
 330        dout("renew caps %p open result=%d\n", inode, err);
 331        return err < 0 ? err : 0;
 332}
 333
 334/*
 335 * If we already have the requisite capabilities, we can satisfy
 336 * the open request locally (no need to request new caps from the
 337 * MDS).  We do, however, need to inform the MDS (asynchronously)
 338 * if our wanted caps set expands.
 339 */
 340int ceph_open(struct inode *inode, struct file *file)
 341{
 342        struct ceph_inode_info *ci = ceph_inode(inode);
 343        struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
 344        struct ceph_mds_client *mdsc = fsc->mdsc;
 345        struct ceph_mds_request *req;
 346        struct ceph_file_info *fi = file->private_data;
 347        int err;
 348        int flags, fmode, wanted;
 349
 350        if (fi) {
 351                dout("open file %p is already opened\n", file);
 352                return 0;
 353        }
 354
 355        /* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
 356        flags = file->f_flags & ~(O_CREAT|O_EXCL);
 357        if (S_ISDIR(inode->i_mode))
 358                flags = O_DIRECTORY;  /* mds likes to know */
 359
 360        dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
 361             ceph_vinop(inode), file, flags, file->f_flags);
 362        fmode = ceph_flags_to_mode(flags);
 363        wanted = ceph_caps_for_mode(fmode);
 364
 365        /* snapped files are read-only */
 366        if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
 367                return -EROFS;
 368
 369        /* trivially open snapdir */
 370        if (ceph_snap(inode) == CEPH_SNAPDIR) {
 371                spin_lock(&ci->i_ceph_lock);
 372                __ceph_get_fmode(ci, fmode);
 373                spin_unlock(&ci->i_ceph_lock);
 374                return ceph_init_file(inode, file, fmode);
 375        }
 376
 377        /*
 378         * No need to block if we have caps on the auth MDS (for
 379         * write) or any MDS (for read).  Update wanted set
 380         * asynchronously.
 381         */
 382        spin_lock(&ci->i_ceph_lock);
 383        if (__ceph_is_any_real_caps(ci) &&
 384            (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
 385                int mds_wanted = __ceph_caps_mds_wanted(ci, true);
 386                int issued = __ceph_caps_issued(ci, NULL);
 387
 388                dout("open %p fmode %d want %s issued %s using existing\n",
 389                     inode, fmode, ceph_cap_string(wanted),
 390                     ceph_cap_string(issued));
 391                __ceph_get_fmode(ci, fmode);
 392                spin_unlock(&ci->i_ceph_lock);
 393
 394                /* adjust wanted? */
 395                if ((issued & wanted) != wanted &&
 396                    (mds_wanted & wanted) != wanted &&
 397                    ceph_snap(inode) != CEPH_SNAPDIR)
 398                        ceph_check_caps(ci, 0, NULL);
 399
 400                return ceph_init_file(inode, file, fmode);
 401        } else if (ceph_snap(inode) != CEPH_NOSNAP &&
 402                   (ci->i_snap_caps & wanted) == wanted) {
 403                __ceph_get_fmode(ci, fmode);
 404                spin_unlock(&ci->i_ceph_lock);
 405                return ceph_init_file(inode, file, fmode);
 406        }
 407
 408        spin_unlock(&ci->i_ceph_lock);
 409
 410        dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
 411        req = prepare_open_request(inode->i_sb, flags, 0);
 412        if (IS_ERR(req)) {
 413                err = PTR_ERR(req);
 414                goto out;
 415        }
 416        req->r_inode = inode;
 417        ihold(inode);
 418
 419        req->r_num_caps = 1;
 420        err = ceph_mdsc_do_request(mdsc, NULL, req);
 421        if (!err)
 422                err = ceph_init_file(inode, file, req->r_fmode);
 423        ceph_mdsc_put_request(req);
 424        dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
 425out:
 426        return err;
 427}
 428
 429
 430/*
 431 * Do a lookup + open with a single request.  If we get a non-existent
 432 * file or symlink, return 1 so the VFS can retry.
 433 */
 434int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
 435                     struct file *file, unsigned flags, umode_t mode)
 436{
 437        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
 438        struct ceph_mds_client *mdsc = fsc->mdsc;
 439        struct ceph_mds_request *req;
 440        struct dentry *dn;
 441        struct ceph_acl_sec_ctx as_ctx = {};
 442        int mask;
 443        int err;
 444
 445        dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
 446             dir, dentry, dentry,
 447             d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
 448
 449        if (dentry->d_name.len > NAME_MAX)
 450                return -ENAMETOOLONG;
 451
 452        if (flags & O_CREAT) {
 453                if (ceph_quota_is_max_files_exceeded(dir))
 454                        return -EDQUOT;
 455                err = ceph_pre_init_acls(dir, &mode, &as_ctx);
 456                if (err < 0)
 457                        return err;
 458                err = ceph_security_init_secctx(dentry, mode, &as_ctx);
 459                if (err < 0)
 460                        goto out_ctx;
 461        }
 462
 463        /* do the open */
 464        req = prepare_open_request(dir->i_sb, flags, mode);
 465        if (IS_ERR(req)) {
 466                err = PTR_ERR(req);
 467                goto out_ctx;
 468        }
 469        req->r_dentry = dget(dentry);
 470        req->r_num_caps = 2;
 471        if (flags & O_CREAT) {
 472                req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
 473                req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
 474                if (as_ctx.pagelist) {
 475                        req->r_pagelist = as_ctx.pagelist;
 476                        as_ctx.pagelist = NULL;
 477                }
 478        }
 479
  480        mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
  481        if (ceph_security_xattr_wanted(dir))
  482                mask |= CEPH_CAP_XATTR_SHARED;
  483        req->r_args.open.mask = cpu_to_le32(mask);
 484
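             /*
              * the VFS holds the parent directory locked across ->atomic_open(),
              * so tell the MDS client it can rely on r_parent
              */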
 485        req->r_parent = dir;
 486        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
 487        err = ceph_mdsc_do_request(mdsc,
 488                                   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
 489                                   req);
 490        err = ceph_handle_snapdir(req, dentry, err);
 491        if (err)
 492                goto out_req;
 493
 494        if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
 495                err = ceph_handle_notrace_create(dir, dentry);
 496
 497        if (d_in_lookup(dentry)) {
 498                dn = ceph_finish_lookup(req, dentry, err);
 499                if (IS_ERR(dn))
 500                        err = PTR_ERR(dn);
 501        } else {
 502                /* we were given a hashed negative dentry */
 503                dn = NULL;
 504        }
 505        if (err)
 506                goto out_req;
 507        if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
 508                /* make vfs retry on splice, ENOENT, or symlink */
 509                dout("atomic_open finish_no_open on dn %p\n", dn);
 510                err = finish_no_open(file, dn);
 511        } else {
 512                dout("atomic_open finish_open on dn %p\n", dn);
 513                if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
 514                        ceph_init_inode_acls(d_inode(dentry), &as_ctx);
 515                        file->f_mode |= FMODE_CREATED;
 516                }
 517                err = finish_open(file, dentry, ceph_open);
 518        }
 519out_req:
 520        if (!req->r_err && req->r_target_inode)
 521                ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
 522        ceph_mdsc_put_request(req);
 523out_ctx:
 524        ceph_release_acl_sec_ctx(&as_ctx);
 525        dout("atomic_open result=%d\n", err);
 526        return err;
 527}
 528
 529int ceph_release(struct inode *inode, struct file *file)
 530{
 531        struct ceph_inode_info *ci = ceph_inode(inode);
 532
 533        if (S_ISDIR(inode->i_mode)) {
 534                struct ceph_dir_file_info *dfi = file->private_data;
 535                dout("release inode %p dir file %p\n", inode, file);
 536                WARN_ON(!list_empty(&dfi->file_info.rw_contexts));
 537
 538                ceph_put_fmode(ci, dfi->file_info.fmode);
 539
 540                if (dfi->last_readdir)
 541                        ceph_mdsc_put_request(dfi->last_readdir);
 542                kfree(dfi->last_name);
 543                kfree(dfi->dir_info);
 544                kmem_cache_free(ceph_dir_file_cachep, dfi);
 545        } else {
 546                struct ceph_file_info *fi = file->private_data;
 547                dout("release inode %p regular file %p\n", inode, file);
 548                WARN_ON(!list_empty(&fi->rw_contexts));
 549
 550                ceph_put_fmode(ci, fi->fmode);
 551                kmem_cache_free(ceph_file_cachep, fi);
 552        }
 553
 554        /* wake up anyone waiting for caps on this inode */
 555        wake_up_all(&ci->i_cap_wq);
 556        return 0;
 557}
 558
 559enum {
 560        HAVE_RETRIED = 1,
 561        CHECK_EOF =    2,
 562        READ_INLINE =  3,
 563};
 564
 565/*
 566 * Completely synchronous read and write methods.  Direct from __user
 567 * buffer to osd, or directly to user pages (if O_DIRECT).
 568 *
  569 * If the read spans an object boundary, just do multiple reads.  (That's not
  570 * atomic, but good enough for now.)
  571 *
  572 * If we get a short result from the OSD, check it against i_size; only
  573 * return a short read to the caller if we actually hit EOF.
 574 */
 575static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
 576                              int *retry_op)
 577{
 578        struct file *file = iocb->ki_filp;
 579        struct inode *inode = file_inode(file);
 580        struct ceph_inode_info *ci = ceph_inode(inode);
 581        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 582        struct ceph_osd_client *osdc = &fsc->client->osdc;
 583        ssize_t ret;
 584        u64 off = iocb->ki_pos;
 585        u64 len = iov_iter_count(to);
 586
 587        dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
 588             (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
 589
 590        if (!len)
 591                return 0;
 592        /*
 593         * flush any page cache pages in this range.  this
 594         * will make concurrent normal and sync io slow,
 595         * but it will at least behave sensibly when they are
 596         * in sequence.
 597         */
 598        ret = filemap_write_and_wait_range(inode->i_mapping,
 599                                           off, off + len - 1);
 600        if (ret < 0)
 601                return ret;
 602
 603        ret = 0;
 604        while ((len = iov_iter_count(to)) > 0) {
 605                struct ceph_osd_request *req;
 606                struct page **pages;
 607                int num_pages;
 608                size_t page_off;
 609                u64 i_size;
 610                bool more;
 611
 612                req = ceph_osdc_new_request(osdc, &ci->i_layout,
 613                                        ci->i_vino, off, &len, 0, 1,
 614                                        CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 615                                        NULL, ci->i_truncate_seq,
 616                                        ci->i_truncate_size, false);
 617                if (IS_ERR(req)) {
 618                        ret = PTR_ERR(req);
 619                        break;
 620                }
 621
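                     /*
                      * ceph_osdc_new_request() may have clipped len to the
                      * object boundary; note whether another request will be
                      * needed to finish this read
                      */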
 622                more = len < iov_iter_count(to);
 623
 624                if (unlikely(iov_iter_is_pipe(to))) {
 625                        ret = iov_iter_get_pages_alloc(to, &pages, len,
 626                                                       &page_off);
 627                        if (ret <= 0) {
 628                                ceph_osdc_put_request(req);
 629                                ret = -ENOMEM;
 630                                break;
 631                        }
 632                        num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);
 633                        if (ret < len) {
 634                                len = ret;
 635                                osd_req_op_extent_update(req, 0, len);
 636                                more = false;
 637                        }
 638                } else {
 639                        num_pages = calc_pages_for(off, len);
 640                        page_off = off & ~PAGE_MASK;
 641                        pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
 642                        if (IS_ERR(pages)) {
 643                                ceph_osdc_put_request(req);
 644                                ret = PTR_ERR(pages);
 645                                break;
 646                        }
 647                }
 648
 649                osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
 650                                                 false, false);
 651                ret = ceph_osdc_start_request(osdc, req, false);
 652                if (!ret)
 653                        ret = ceph_osdc_wait_request(osdc, req);
 654                ceph_osdc_put_request(req);
 655
 656                i_size = i_size_read(inode);
 657                dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
 658                     off, len, ret, i_size, (more ? " MORE" : ""));
 659
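                     /*
                      * a nonexistent object reads as a hole; a short read
                      * inside i_size is also a hole and must be zero-filled
                      */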
 660                if (ret == -ENOENT)
 661                        ret = 0;
 662                if (ret >= 0 && ret < len && (off + ret < i_size)) {
 663                        int zlen = min(len - ret, i_size - off - ret);
 664                        int zoff = page_off + ret;
 665                        dout("sync_read zero gap %llu~%llu\n",
 666                             off + ret, off + ret + zlen);
 667                        ceph_zero_page_vector_range(zoff, zlen, pages);
 668                        ret += zlen;
 669                }
 670
 671                if (unlikely(iov_iter_is_pipe(to))) {
 672                        if (ret > 0) {
 673                                iov_iter_advance(to, ret);
 674                                off += ret;
 675                        } else {
 676                                iov_iter_advance(to, 0);
 677                        }
 678                        ceph_put_page_vector(pages, num_pages, false);
 679                } else {
 680                        int idx = 0;
 681                        size_t left = ret > 0 ? ret : 0;
 682                        while (left > 0) {
 683                                size_t len, copied;
 684                                page_off = off & ~PAGE_MASK;
 685                                len = min_t(size_t, left, PAGE_SIZE - page_off);
 686                                copied = copy_page_to_iter(pages[idx++],
 687                                                           page_off, len, to);
 688                                off += copied;
 689                                left -= copied;
 690                                if (copied < len) {
 691                                        ret = -EFAULT;
 692                                        break;
 693                                }
 694                        }
 695                        ceph_release_page_vector(pages, num_pages);
 696                }
 697
 698                if (ret <= 0 || off >= i_size || !more)
 699                        break;
 700        }
 701
 702        if (off > iocb->ki_pos) {
 703                if (ret >= 0 &&
 704                    iov_iter_count(to) > 0 && off >= i_size_read(inode))
 705                        *retry_op = CHECK_EOF;
 706                ret = off - iocb->ki_pos;
 707                iocb->ki_pos = off;
 708        }
 709
 710        dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
 711        return ret;
 712}
 713
 714struct ceph_aio_request {
 715        struct kiocb *iocb;
 716        size_t total_len;
 717        bool write;
 718        bool should_dirty;
 719        int error;
 720        struct list_head osd_reqs;
 721        unsigned num_reqs;
 722        atomic_t pending_reqs;
 723        struct timespec64 mtime;
 724        struct ceph_cap_flush *prealloc_cf;
 725};
 726
 727struct ceph_aio_work {
 728        struct work_struct work;
 729        struct ceph_osd_request *req;
 730};
 731
 732static void ceph_aio_retry_work(struct work_struct *work);
 733
 734static void ceph_aio_complete(struct inode *inode,
 735                              struct ceph_aio_request *aio_req)
 736{
 737        struct ceph_inode_info *ci = ceph_inode(inode);
 738        int ret;
 739
 740        if (!atomic_dec_and_test(&aio_req->pending_reqs))
 741                return;
 742
 743        ret = aio_req->error;
 744        if (!ret)
 745                ret = aio_req->total_len;
 746
 747        dout("ceph_aio_complete %p rc %d\n", inode, ret);
 748
 749        if (ret >= 0 && aio_req->write) {
 750                int dirty;
 751
 752                loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
 753                if (endoff > i_size_read(inode)) {
 754                        if (ceph_inode_set_size(inode, endoff))
 755                                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
 756                }
 757
 758                spin_lock(&ci->i_ceph_lock);
 759                ci->i_inline_version = CEPH_INLINE_NONE;
 760                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
 761                                               &aio_req->prealloc_cf);
 762                spin_unlock(&ci->i_ceph_lock);
 763                if (dirty)
 764                        __mark_inode_dirty(inode, dirty);
 765
 766        }
 767
 768        ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
 769                                                CEPH_CAP_FILE_RD));
 770
 771        aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);
 772
 773        ceph_free_cap_flush(aio_req->prealloc_cf);
 774        kfree(aio_req);
 775}
 776
 777static void ceph_aio_complete_req(struct ceph_osd_request *req)
 778{
 779        int rc = req->r_result;
 780        struct inode *inode = req->r_inode;
 781        struct ceph_aio_request *aio_req = req->r_priv;
 782        struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
 783
 784        BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
 785        BUG_ON(!osd_data->num_bvecs);
 786
 787        dout("ceph_aio_complete_req %p rc %d bytes %u\n",
 788             inode, rc, osd_data->bvec_pos.iter.bi_size);
 789
 790        if (rc == -EOLDSNAPC) {
 791                struct ceph_aio_work *aio_work;
 792                BUG_ON(!aio_req->write);
 793
 794                aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
 795                if (aio_work) {
 796                        INIT_WORK(&aio_work->work, ceph_aio_retry_work);
 797                        aio_work->req = req;
 798                        queue_work(ceph_inode_to_client(inode)->inode_wq,
 799                                   &aio_work->work);
 800                        return;
 801                }
 802                rc = -ENOMEM;
 803        } else if (!aio_req->write) {
 804                if (rc == -ENOENT)
 805                        rc = 0;
 806                if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
 807                        struct iov_iter i;
 808                        int zlen = osd_data->bvec_pos.iter.bi_size - rc;
 809
 810                        /*
  811                         * If the read is satisfied by a single OSD request,
  812                         * it can pass EOF. Otherwise the read is within
  813                         * i_size.
 814                         */
 815                        if (aio_req->num_reqs == 1) {
 816                                loff_t i_size = i_size_read(inode);
 817                                loff_t endoff = aio_req->iocb->ki_pos + rc;
 818                                if (endoff < i_size)
 819                                        zlen = min_t(size_t, zlen,
 820                                                     i_size - endoff);
 821                                aio_req->total_len = rc + zlen;
 822                        }
 823
 824                        iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
 825                                      osd_data->num_bvecs,
 826                                      osd_data->bvec_pos.iter.bi_size);
 827                        iov_iter_advance(&i, rc);
 828                        iov_iter_zero(zlen, &i);
 829                }
 830        }
 831
 832        put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
 833                  aio_req->should_dirty);
 834        ceph_osdc_put_request(req);
 835
 836        if (rc < 0)
 837                cmpxchg(&aio_req->error, 0, rc);
 838
 839        ceph_aio_complete(inode, aio_req);
 840        return;
 841}
 842
 843static void ceph_aio_retry_work(struct work_struct *work)
 844{
 845        struct ceph_aio_work *aio_work =
 846                container_of(work, struct ceph_aio_work, work);
 847        struct ceph_osd_request *orig_req = aio_work->req;
 848        struct ceph_aio_request *aio_req = orig_req->r_priv;
 849        struct inode *inode = orig_req->r_inode;
 850        struct ceph_inode_info *ci = ceph_inode(inode);
 851        struct ceph_snap_context *snapc;
 852        struct ceph_osd_request *req;
 853        int ret;
 854
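             /*
              * the write came back with -EOLDSNAPC, so resend it with the most
              * recent snap context: the latest pending cap snap if there is
              * one, otherwise the head context
              */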
 855        spin_lock(&ci->i_ceph_lock);
 856        if (__ceph_have_pending_cap_snap(ci)) {
 857                struct ceph_cap_snap *capsnap =
 858                        list_last_entry(&ci->i_cap_snaps,
 859                                        struct ceph_cap_snap,
 860                                        ci_item);
 861                snapc = ceph_get_snap_context(capsnap->context);
 862        } else {
 863                BUG_ON(!ci->i_head_snapc);
 864                snapc = ceph_get_snap_context(ci->i_head_snapc);
 865        }
 866        spin_unlock(&ci->i_ceph_lock);
 867
 868        req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
 869                        false, GFP_NOFS);
 870        if (!req) {
 871                ret = -ENOMEM;
 872                req = orig_req;
 873                goto out;
 874        }
 875
 876        req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
 877        ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
 878        ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);
 879
 880        req->r_ops[0] = orig_req->r_ops[0];
 881
 882        req->r_mtime = aio_req->mtime;
 883        req->r_data_offset = req->r_ops[0].extent.offset;
 884
 885        ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
 886        if (ret) {
 887                ceph_osdc_put_request(req);
 888                req = orig_req;
 889                goto out;
 890        }
 891
 892        ceph_osdc_put_request(orig_req);
 893
 894        req->r_callback = ceph_aio_complete_req;
 895        req->r_inode = inode;
 896        req->r_priv = aio_req;
 897
 898        ret = ceph_osdc_start_request(req->r_osdc, req, false);
 899out:
 900        if (ret < 0) {
 901                req->r_result = ret;
 902                ceph_aio_complete_req(req);
 903        }
 904
 905        ceph_put_snap_context(snapc);
 906        kfree(aio_work);
 907}
 908
 909static ssize_t
 910ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 911                       struct ceph_snap_context *snapc,
 912                       struct ceph_cap_flush **pcf)
 913{
 914        struct file *file = iocb->ki_filp;
 915        struct inode *inode = file_inode(file);
 916        struct ceph_inode_info *ci = ceph_inode(inode);
 917        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 918        struct ceph_vino vino;
 919        struct ceph_osd_request *req;
 920        struct bio_vec *bvecs;
 921        struct ceph_aio_request *aio_req = NULL;
 922        int num_pages = 0;
 923        int flags;
 924        int ret;
 925        struct timespec64 mtime = current_time(inode);
 926        size_t count = iov_iter_count(iter);
 927        loff_t pos = iocb->ki_pos;
 928        bool write = iov_iter_rw(iter) == WRITE;
 929        bool should_dirty = !write && iter_is_iovec(iter);
 930
 931        if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
 932                return -EROFS;
 933
 934        dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
 935             (write ? "write" : "read"), file, pos, (unsigned)count,
 936             snapc, snapc ? snapc->seq : 0);
 937
 938        ret = filemap_write_and_wait_range(inode->i_mapping,
 939                                           pos, pos + count - 1);
 940        if (ret < 0)
 941                return ret;
 942
 943        if (write) {
 944                int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
 945                                        pos >> PAGE_SHIFT,
 946                                        (pos + count - 1) >> PAGE_SHIFT);
 947                if (ret2 < 0)
 948                        dout("invalidate_inode_pages2_range returned %d\n", ret2);
 949
 950                flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
 951        } else {
 952                flags = CEPH_OSD_FLAG_READ;
 953        }
 954
 955        while (iov_iter_count(iter) > 0) {
 956                u64 size = iov_iter_count(iter);
 957                ssize_t len;
 958
 959                if (write)
 960                        size = min_t(u64, size, fsc->mount_options->wsize);
 961                else
 962                        size = min_t(u64, size, fsc->mount_options->rsize);
 963
 964                vino = ceph_vino(inode);
 965                req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
 966                                            vino, pos, &size, 0,
 967                                            1,
 968                                            write ? CEPH_OSD_OP_WRITE :
 969                                                    CEPH_OSD_OP_READ,
 970                                            flags, snapc,
 971                                            ci->i_truncate_seq,
 972                                            ci->i_truncate_size,
 973                                            false);
 974                if (IS_ERR(req)) {
 975                        ret = PTR_ERR(req);
 976                        break;
 977                }
 978
 979                len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
 980                if (len < 0) {
 981                        ceph_osdc_put_request(req);
 982                        ret = len;
 983                        break;
 984                }
 985                if (len != size)
 986                        osd_req_op_extent_update(req, 0, len);
 987
 988                /*
  989                 * To simplify error handling, allow AIO only when the IO is
  990                 * within i_size or can be satisfied by a single OSD request.
 991                 */
 992                if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
 993                    (len == count || pos + count <= i_size_read(inode))) {
 994                        aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
 995                        if (aio_req) {
 996                                aio_req->iocb = iocb;
 997                                aio_req->write = write;
 998                                aio_req->should_dirty = should_dirty;
 999                                INIT_LIST_HEAD(&aio_req->osd_reqs);
1000                                if (write) {
1001                                        aio_req->mtime = mtime;
1002                                        swap(aio_req->prealloc_cf, *pcf);
1003                                }
1004                        }
1005                        /* ignore error */
1006                }
1007
1008                if (write) {
1009                        /*
1010                         * throw out any page cache pages in this range. this
1011                         * may block.
1012                         */
1013                        truncate_inode_pages_range(inode->i_mapping, pos,
1014                                                   PAGE_ALIGN(pos + len) - 1);
1015
1016                        req->r_mtime = mtime;
1017                }
1018
1019                osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
1020
1021                if (aio_req) {
1022                        aio_req->total_len += len;
1023                        aio_req->num_reqs++;
1024                        atomic_inc(&aio_req->pending_reqs);
1025
1026                        req->r_callback = ceph_aio_complete_req;
1027                        req->r_inode = inode;
1028                        req->r_priv = aio_req;
1029                        list_add_tail(&req->r_private_item, &aio_req->osd_reqs);
1030
1031                        pos += len;
1032                        continue;
1033                }
1034
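                     /*
                      * no AIO context: submit this request synchronously and
                      * wait for it before issuing the next one
                      */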
1035                ret = ceph_osdc_start_request(req->r_osdc, req, false);
1036                if (!ret)
1037                        ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1038
1039                size = i_size_read(inode);
1040                if (!write) {
1041                        if (ret == -ENOENT)
1042                                ret = 0;
1043                        if (ret >= 0 && ret < len && pos + ret < size) {
1044                                struct iov_iter i;
1045                                int zlen = min_t(size_t, len - ret,
1046                                                 size - pos - ret);
1047
1048                                iov_iter_bvec(&i, READ, bvecs, num_pages, len);
1049                                iov_iter_advance(&i, ret);
1050                                iov_iter_zero(zlen, &i);
1051                                ret += zlen;
1052                        }
1053                        if (ret >= 0)
1054                                len = ret;
1055                }
1056
1057                put_bvecs(bvecs, num_pages, should_dirty);
1058                ceph_osdc_put_request(req);
1059                if (ret < 0)
1060                        break;
1061
1062                pos += len;
1063                if (!write && pos >= size)
1064                        break;
1065
1066                if (write && pos > size) {
1067                        if (ceph_inode_set_size(inode, pos))
1068                                ceph_check_caps(ceph_inode(inode),
1069                                                CHECK_CAPS_AUTHONLY,
1070                                                NULL);
1071                }
1072        }
1073
1074        if (aio_req) {
1075                LIST_HEAD(osd_reqs);
1076
1077                if (aio_req->num_reqs == 0) {
1078                        kfree(aio_req);
1079                        return ret;
1080                }
1081
1082                ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
1083                                              CEPH_CAP_FILE_RD);
1084
1085                list_splice(&aio_req->osd_reqs, &osd_reqs);
1086                while (!list_empty(&osd_reqs)) {
1087                        req = list_first_entry(&osd_reqs,
1088                                               struct ceph_osd_request,
1089                                               r_private_item);
1090                        list_del_init(&req->r_private_item);
1091                        if (ret >= 0)
1092                                ret = ceph_osdc_start_request(req->r_osdc,
1093                                                              req, false);
1094                        if (ret < 0) {
1095                                req->r_result = ret;
1096                                ceph_aio_complete_req(req);
1097                        }
1098                }
1099                return -EIOCBQUEUED;
1100        }
1101
1102        if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
1103                ret = pos - iocb->ki_pos;
1104                iocb->ki_pos = pos;
1105        }
1106        return ret;
1107}
1108
1109/*
1110 * Synchronous write, straight from __user pointer or user pages.
1111 *
 1112 * If the write spans an object boundary, just do multiple writes.  (For a
1113 * correct atomic write, we should e.g. take write locks on all
1114 * objects, rollback on failure, etc.)
1115 */
1116static ssize_t
1117ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1118                struct ceph_snap_context *snapc)
1119{
1120        struct file *file = iocb->ki_filp;
1121        struct inode *inode = file_inode(file);
1122        struct ceph_inode_info *ci = ceph_inode(inode);
1123        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1124        struct ceph_vino vino;
1125        struct ceph_osd_request *req;
1126        struct page **pages;
1127        u64 len;
1128        int num_pages;
1129        int written = 0;
1130        int flags;
1131        int ret;
1132        bool check_caps = false;
1133        struct timespec64 mtime = current_time(inode);
1134        size_t count = iov_iter_count(from);
1135
1136        if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1137                return -EROFS;
1138
1139        dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
1140             file, pos, (unsigned)count, snapc, snapc->seq);
1141
1142        ret = filemap_write_and_wait_range(inode->i_mapping,
1143                                           pos, pos + count - 1);
1144        if (ret < 0)
1145                return ret;
1146
1147        ret = invalidate_inode_pages2_range(inode->i_mapping,
1148                                            pos >> PAGE_SHIFT,
1149                                            (pos + count - 1) >> PAGE_SHIFT);
1150        if (ret < 0)
1151                dout("invalidate_inode_pages2_range returned %d\n", ret);
1152
1153        flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1154
1155        while ((len = iov_iter_count(from)) > 0) {
1156                size_t left;
1157                int n;
1158
1159                vino = ceph_vino(inode);
1160                req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1161                                            vino, pos, &len, 0, 1,
1162                                            CEPH_OSD_OP_WRITE, flags, snapc,
1163                                            ci->i_truncate_seq,
1164                                            ci->i_truncate_size,
1165                                            false);
1166                if (IS_ERR(req)) {
1167                        ret = PTR_ERR(req);
1168                        break;
1169                }
1170
1171                /*
 1172                 * write from the beginning of the first page,
 1173                 * regardless of I/O alignment
1174                 */
1175                num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1176
1177                pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1178                if (IS_ERR(pages)) {
1179                        ret = PTR_ERR(pages);
1180                        goto out;
1181                }
1182
1183                left = len;
1184                for (n = 0; n < num_pages; n++) {
1185                        size_t plen = min_t(size_t, left, PAGE_SIZE);
1186                        ret = copy_page_from_iter(pages[n], 0, plen, from);
1187                        if (ret != plen) {
1188                                ret = -EFAULT;
1189                                break;
1190                        }
1191                        left -= ret;
1192                }
1193
1194                if (ret < 0) {
1195                        ceph_release_page_vector(pages, num_pages);
1196                        goto out;
1197                }
1198
1199                req->r_inode = inode;
1200
1201                osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
1202                                                false, true);
1203
1204                req->r_mtime = mtime;
1205                ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1206                if (!ret)
1207                        ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1208
1209out:
1210                ceph_osdc_put_request(req);
1211                if (ret != 0) {
1212                        ceph_set_error_write(ci);
1213                        break;
1214                }
1215
1216                ceph_clear_error_write(ci);
1217                pos += len;
1218                written += len;
1219                if (pos > i_size_read(inode)) {
1220                        check_caps = ceph_inode_set_size(inode, pos);
1221                        if (check_caps)
1222                                ceph_check_caps(ceph_inode(inode),
1223                                                CHECK_CAPS_AUTHONLY,
1224                                                NULL);
1225                }
1226
1227        }
1228
1229        if (ret != -EOLDSNAPC && written > 0) {
1230                ret = written;
1231                iocb->ki_pos = pos;
1232        }
1233        return ret;
1234}
1235
1236/*
 1237 * Wrap generic_file_read_iter with checks for cap bits on the inode.
1238 * Atomically grab references, so that those bits are not released
1239 * back to the MDS mid-read.
1240 *
1241 * Hmm, the sync read case isn't actually async... should it be?
1242 */
1243static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
1244{
1245        struct file *filp = iocb->ki_filp;
1246        struct ceph_file_info *fi = filp->private_data;
1247        size_t len = iov_iter_count(to);
1248        struct inode *inode = file_inode(filp);
1249        struct ceph_inode_info *ci = ceph_inode(inode);
1250        struct page *pinned_page = NULL;
1251        ssize_t ret;
1252        int want, got = 0;
1253        int retry_op = 0, read = 0;
1254
1255again:
1256        dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
1257             inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
1258
1259        if (fi->fmode & CEPH_FILE_MODE_LAZY)
1260                want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1261        else
1262                want = CEPH_CAP_FILE_CACHE;
1263        ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
1264        if (ret < 0)
1265                return ret;
1266
1267        if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1268            (iocb->ki_flags & IOCB_DIRECT) ||
1269            (fi->flags & CEPH_F_SYNC)) {
1270
1271                dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1272                     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1273                     ceph_cap_string(got));
1274
1275                if (ci->i_inline_version == CEPH_INLINE_NONE) {
1276                        if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
1277                                ret = ceph_direct_read_write(iocb, to,
1278                                                             NULL, NULL);
1279                                if (ret >= 0 && ret < len)
1280                                        retry_op = CHECK_EOF;
1281                        } else {
1282                                ret = ceph_sync_read(iocb, to, &retry_op);
1283                        }
1284                } else {
1285                        retry_op = READ_INLINE;
1286                }
1287        } else {
1288                CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
1289                dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1290                     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1291                     ceph_cap_string(got));
1292                ceph_add_rw_context(fi, &rw_ctx);
1293                ret = generic_file_read_iter(iocb, to);
1294                ceph_del_rw_context(fi, &rw_ctx);
1295        }
1296        dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
1297             inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
1298        if (pinned_page) {
1299                put_page(pinned_page);
1300                pinned_page = NULL;
1301        }
1302        ceph_put_cap_refs(ci, got);
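             /*
              * a second pass is needed for inline data (READ_INLINE) or for a
              * short read that may not have reached EOF (CHECK_EOF)
              */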
1303        if (retry_op > HAVE_RETRIED && ret >= 0) {
1304                int statret;
1305                struct page *page = NULL;
1306                loff_t i_size;
1307                if (retry_op == READ_INLINE) {
1308                        page = __page_cache_alloc(GFP_KERNEL);
1309                        if (!page)
1310                                return -ENOMEM;
1311                }
1312
1313                statret = __ceph_do_getattr(inode, page,
1314                                            CEPH_STAT_CAP_INLINE_DATA, !!page);
1315                if (statret < 0) {
1316                        if (page)
1317                                __free_page(page);
1318                        if (statret == -ENODATA) {
1319                                BUG_ON(retry_op != READ_INLINE);
1320                                goto again;
1321                        }
1322                        return statret;
1323                }
1324
1325                i_size = i_size_read(inode);
1326                if (retry_op == READ_INLINE) {
1327                        BUG_ON(ret > 0 || read > 0);
1328                        if (iocb->ki_pos < i_size &&
1329                            iocb->ki_pos < PAGE_SIZE) {
1330                                loff_t end = min_t(loff_t, i_size,
1331                                                   iocb->ki_pos + len);
1332                                end = min_t(loff_t, end, PAGE_SIZE);
1333                                if (statret < end)
1334                                        zero_user_segment(page, statret, end);
1335                                ret = copy_page_to_iter(page,
1336                                                iocb->ki_pos & ~PAGE_MASK,
1337                                                end - iocb->ki_pos, to);
1338                                iocb->ki_pos += ret;
1339                                read += ret;
1340                        }
1341                        if (iocb->ki_pos < i_size && read < len) {
1342                                size_t zlen = min_t(size_t, len - read,
1343                                                    i_size - iocb->ki_pos);
1344                                ret = iov_iter_zero(zlen, to);
1345                                iocb->ki_pos += ret;
1346                                read += ret;
1347                        }
1348                        __free_pages(page, 0);
1349                        return read;
1350                }
1351
1352                /* hit EOF or hole? */
1353                if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
1354                    ret < len) {
1355                        dout("sync_read hit hole, ppos %lld < size %lld"
1356                             ", reading more\n", iocb->ki_pos, i_size);
1357
1358                        read += ret;
1359                        len -= ret;
1360                        retry_op = HAVE_RETRIED;
1361                        goto again;
1362                }
1363        }
1364
1365        if (ret >= 0)
1366                ret += read;
1367
1368        return ret;
1369}
1370
1371/*
1372 * Take cap references to avoid releasing caps to MDS mid-write.
1373 *
 1374 * If we are synchronous and write with an old snap context, the OSD
 1375 * may return EOLDSNAPC.  In that case, retry the write _after_
1376 * dropping our cap refs and allowing the pending snap to logically
1377 * complete _before_ this write occurs.
1378 *
1379 * If we are near ENOSPC, write synchronously.
1380 */
1381static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1382{
1383        struct file *file = iocb->ki_filp;
1384        struct ceph_file_info *fi = file->private_data;
1385        struct inode *inode = file_inode(file);
1386        struct ceph_inode_info *ci = ceph_inode(inode);
1387        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1388        struct ceph_cap_flush *prealloc_cf;
1389        ssize_t count, written = 0;
1390        int err, want, got;
1391        loff_t pos;
1392        loff_t limit = max(i_size_read(inode), fsc->max_file_size);
1393
1394        if (ceph_snap(inode) != CEPH_NOSNAP)
1395                return -EROFS;
1396
1397        prealloc_cf = ceph_alloc_cap_flush();
1398        if (!prealloc_cf)
1399                return -ENOMEM;
1400
1401retry_snap:
1402        inode_lock(inode);
1403
1404        /* We can write back this queue in page reclaim */
1405        current->backing_dev_info = inode_to_bdi(inode);
1406
1407        if (iocb->ki_flags & IOCB_APPEND) {
1408                err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1409                if (err < 0)
1410                        goto out;
1411        }
1412
1413        err = generic_write_checks(iocb, from);
1414        if (err <= 0)
1415                goto out;
1416
1417        pos = iocb->ki_pos;
1418        if (unlikely(pos >= limit)) {
1419                err = -EFBIG;
1420                goto out;
1421        } else {
1422                iov_iter_truncate(from, limit - pos);
1423        }
1424
1425        count = iov_iter_count(from);
1426        if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
1427                err = -EDQUOT;
1428                goto out;
1429        }
1430
1431        err = file_remove_privs(file);
1432        if (err)
1433                goto out;
1434
1435        err = file_update_time(file);
1436        if (err)
1437                goto out;
1438
1439        inode_inc_iversion_raw(inode);
1440
1441        if (ci->i_inline_version != CEPH_INLINE_NONE) {
1442                err = ceph_uninline_data(file, NULL);
1443                if (err < 0)
1444                        goto out;
1445        }
1446
1447        /* FIXME: not complete since it doesn't account for being at quota */
1448        if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_FULL)) {
1449                err = -ENOSPC;
1450                goto out;
1451        }
1452
1453        dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1454             inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1455        if (fi->fmode & CEPH_FILE_MODE_LAZY)
1456                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1457        else
1458                want = CEPH_CAP_FILE_BUFFER;
1459        got = 0;
1460        err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
1461                            &got, NULL);
1462        if (err < 0)
1463                goto out;
1464
1465        dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1466             inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1467
1468        if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1469            (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
1470            (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
1471                struct ceph_snap_context *snapc;
1472                struct iov_iter data;
1473                inode_unlock(inode);
1474
1475                spin_lock(&ci->i_ceph_lock);
1476                if (__ceph_have_pending_cap_snap(ci)) {
1477                        struct ceph_cap_snap *capsnap =
1478                                        list_last_entry(&ci->i_cap_snaps,
1479                                                        struct ceph_cap_snap,
1480                                                        ci_item);
1481                        snapc = ceph_get_snap_context(capsnap->context);
1482                } else {
1483                        BUG_ON(!ci->i_head_snapc);
1484                        snapc = ceph_get_snap_context(ci->i_head_snapc);
1485                }
1486                spin_unlock(&ci->i_ceph_lock);
1487
1488                /* we might need to revert to that point */
1489                data = *from;
1490                if (iocb->ki_flags & IOCB_DIRECT)
1491                        written = ceph_direct_read_write(iocb, &data, snapc,
1492                                                         &prealloc_cf);
1493                else
1494                        written = ceph_sync_write(iocb, &data, pos, snapc);
1495                if (written > 0)
1496                        iov_iter_advance(from, written);
1497                ceph_put_snap_context(snapc);
1498        } else {
1499                /*
1500                 * No need to acquire the i_truncate_mutex: the MDS
1501                 * revokes Fwb caps before sending a truncate message
1502                 * to us, so we cannot hold the Fwb cap while a
1503                 * vmtruncate is pending.  Hence write and vmtruncate
1504                 * cannot run at the same time.
1505                 */
1506                written = generic_perform_write(file, from, pos);
1507                if (likely(written >= 0))
1508                        iocb->ki_pos = pos + written;
1509                inode_unlock(inode);
1510        }
1511
1512        if (written >= 0) {
1513                int dirty;
1514
1515                spin_lock(&ci->i_ceph_lock);
1516                ci->i_inline_version = CEPH_INLINE_NONE;
1517                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1518                                               &prealloc_cf);
1519                spin_unlock(&ci->i_ceph_lock);
1520                if (dirty)
1521                        __mark_inode_dirty(inode, dirty);
1522                if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
1523                        ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
1524        }
1525
1526        dout("aio_write %p %llx.%llx %llu~%u  dropping cap refs on %s\n",
1527             inode, ceph_vinop(inode), pos, (unsigned)count,
1528             ceph_cap_string(got));
1529        ceph_put_cap_refs(ci, got);
1530
1531        if (written == -EOLDSNAPC) {
1532                dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
1533                     inode, ceph_vinop(inode), pos, (unsigned)count);
1534                goto retry_snap;
1535        }
1536
1537        if (written >= 0) {
1538                if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_NEARFULL))
1539                        iocb->ki_flags |= IOCB_DSYNC;
1540                written = generic_write_sync(iocb, written);
1541        }
1542
1543        goto out_unlocked;
1544
1545out:
1546        inode_unlock(inode);
1547out_unlocked:
1548        ceph_free_cap_flush(prealloc_cf);
1549        current->backing_dev_info = NULL;
1550        return written ? written : err;
1551}
1552
1553/*
1554 * llseek.  be sure to verify file size on SEEK_END.
1555 */
1556static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1557{
1558        struct inode *inode = file->f_mapping->host;
1559        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1560        loff_t i_size;
1561        loff_t ret;
1562
1563        inode_lock(inode);
1564
1565        if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1566                ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1567                if (ret < 0)
1568                        goto out;
1569        }
1570
1571        i_size = i_size_read(inode);
1572        switch (whence) {
1573        case SEEK_END:
1574                offset += i_size;
1575                break;
1576        case SEEK_CUR:
1577                /*
1578                 * Here we special-case the lseek(fd, 0, SEEK_CUR)
1579                 * position-querying operation.  Avoid rewriting the "same"
1580                 * f_pos value back to the file because a concurrent read(),
1581                 * write() or lseek() might have altered it
1582                 */
1583                if (offset == 0) {
1584                        ret = file->f_pos;
1585                        goto out;
1586                }
1587                offset += file->f_pos;
1588                break;
1589        case SEEK_DATA:
1590                if (offset < 0 || offset >= i_size) {
1591                        ret = -ENXIO;
1592                        goto out;
1593                }
1594                break;
1595        case SEEK_HOLE:
1596                if (offset < 0 || offset >= i_size) {
1597                        ret = -ENXIO;
1598                        goto out;
1599                }
1600                offset = i_size;
1601                break;
1602        }
1603
1604        ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));
1605
1606out:
1607        inode_unlock(inode);
1608        return ret;
1609}
1610
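/*
 * Zero part of a single page in the page cache, if it is present.  Pages
 * that are not cached are skipped; the backing objects are zeroed
 * separately by the callers.
 */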
1611static inline void ceph_zero_partial_page(
1612        struct inode *inode, loff_t offset, unsigned size)
1613{
1614        struct page *page;
1615        pgoff_t index = offset >> PAGE_SHIFT;
1616
1617        page = find_lock_page(inode->i_mapping, index);
1618        if (page) {
1619                wait_on_page_writeback(page);
1620                zero_user(page, offset & (PAGE_SIZE - 1), size);
1621                unlock_page(page);
1622                put_page(page);
1623        }
1624}
1625
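/*
 * Zero the page cache over [offset, offset+length): partially zero the
 * head and tail pages in place and drop any whole pages in between via
 * truncate_pagecache_range().
 */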
1626static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1627                                      loff_t length)
1628{
1629        loff_t nearly = round_up(offset, PAGE_SIZE);
1630        if (offset < nearly) {
1631                loff_t size = nearly - offset;
1632                if (length < size)
1633                        size = length;
1634                ceph_zero_partial_page(inode, offset, size);
1635                offset += size;
1636                length -= size;
1637        }
1638        if (length >= PAGE_SIZE) {
1639                loff_t size = round_down(length, PAGE_SIZE);
1640                truncate_pagecache_range(inode, offset, offset + size - 1);
1641                offset += size;
1642                length -= size;
1643        }
1644        if (length)
1645                ceph_zero_partial_page(inode, offset, length);
1646}
1647
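/*
 * Zero a byte range within a single RADOS object.  When called with a
 * NULL length the whole object is truncated or deleted; otherwise an OSD
 * ZERO op is issued for *length bytes at offset.  -ENOENT from the OSD is
 * treated as success, since a missing object already reads back as zeros.
 */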
1648static int ceph_zero_partial_object(struct inode *inode,
1649                                    loff_t offset, loff_t *length)
1650{
1651        struct ceph_inode_info *ci = ceph_inode(inode);
1652        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1653        struct ceph_osd_request *req;
1654        int ret = 0;
1655        loff_t zero = 0;
1656        int op;
1657
1658        if (!length) {
1659                op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
1660                length = &zero;
1661        } else {
1662                op = CEPH_OSD_OP_ZERO;
1663        }
1664
1665        req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1666                                        ceph_vino(inode),
1667                                        offset, length,
1668                                        0, 1, op,
1669                                        CEPH_OSD_FLAG_WRITE,
1670                                        NULL, 0, 0, false);
1671        if (IS_ERR(req)) {
1672                ret = PTR_ERR(req);
1673                goto out;
1674        }
1675
1676        req->r_mtime = inode->i_mtime;
1677        ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1678        if (!ret) {
1679                ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1680                if (ret == -ENOENT)
1681                        ret = 0;
1682        }
1683        ceph_osdc_put_request(req);
1684
1685out:
1686        return ret;
1687}
1688
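/*
 * Zero the byte range [offset, offset+length) across the file's RADOS
 * objects.  The range is handled in three parts: a leading piece up to
 * the next object-set (period) boundary, whole object sets whose objects
 * are truncated or deleted outright, and a trailing partial piece.
 * E.g. with object_size = 4M and stripe_count = 2 the object set size is
 * 8M, so an offset of 5M is first advanced to the 8M boundary.
 */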
1689static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
1690{
1691        int ret = 0;
1692        struct ceph_inode_info *ci = ceph_inode(inode);
1693        s32 stripe_unit = ci->i_layout.stripe_unit;
1694        s32 stripe_count = ci->i_layout.stripe_count;
1695        s32 object_size = ci->i_layout.object_size;
1696        u64 object_set_size = (u64) object_size * stripe_count; /* avoid 32-bit overflow */
1697        u64 nearly, t;
1698
1699        /* round offset up to next period boundary */
1700        nearly = offset + object_set_size - 1;
1701        t = nearly;
1702        nearly -= do_div(t, object_set_size);
1703
1704        while (length && offset < nearly) {
1705                loff_t size = length;
1706                ret = ceph_zero_partial_object(inode, offset, &size);
1707                if (ret < 0)
1708                        return ret;
1709                offset += size;
1710                length -= size;
1711        }
1712        while (length >= object_set_size) {
1713                int i;
1714                loff_t pos = offset;
1715                for (i = 0; i < stripe_count; ++i) {
1716                        ret = ceph_zero_partial_object(inode, pos, NULL);
1717                        if (ret < 0)
1718                                return ret;
1719                        pos += stripe_unit;
1720                }
1721                offset += object_set_size;
1722                length -= object_set_size;
1723        }
1724        while (length) {
1725                loff_t size = length;
1726                ret = ceph_zero_partial_object(inode, offset, &size);
1727                if (ret < 0)
1728                        return ret;
1729                offset += size;
1730                length -= size;
1731        }
1732        return ret;
1733}
1734
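/*
 * fallocate handler.  Only hole punching with FALLOC_FL_KEEP_SIZE is
 * supported: the affected page cache range is zeroed locally and the
 * backing objects are zeroed, truncated or deleted on the OSDs.  Any
 * other mode returns -EOPNOTSUPP.
 */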
1735static long ceph_fallocate(struct file *file, int mode,
1736                                loff_t offset, loff_t length)
1737{
1738        struct ceph_file_info *fi = file->private_data;
1739        struct inode *inode = file_inode(file);
1740        struct ceph_inode_info *ci = ceph_inode(inode);
1741        struct ceph_cap_flush *prealloc_cf;
1742        int want, got = 0;
1743        int dirty;
1744        int ret = 0;
1745        loff_t endoff = 0;
1746        loff_t size;
1747
1748        if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
1749                return -EOPNOTSUPP;
1750
1751        if (!S_ISREG(inode->i_mode))
1752                return -EOPNOTSUPP;
1753
1754        prealloc_cf = ceph_alloc_cap_flush();
1755        if (!prealloc_cf)
1756                return -ENOMEM;
1757
1758        inode_lock(inode);
1759
1760        if (ceph_snap(inode) != CEPH_NOSNAP) {
1761                ret = -EROFS;
1762                goto unlock;
1763        }
1764
1765        if (ci->i_inline_version != CEPH_INLINE_NONE) {
1766                ret = ceph_uninline_data(file, NULL);
1767                if (ret < 0)
1768                        goto unlock;
1769        }
1770
1771        size = i_size_read(inode);
1772
1773        /* Are we punching a hole beyond EOF? */
1774        if (offset >= size)
1775                goto unlock;
1776        if ((offset + length) > size)
1777                length = size - offset;
1778
1779        if (fi->fmode & CEPH_FILE_MODE_LAZY)
1780                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1781        else
1782                want = CEPH_CAP_FILE_BUFFER;
1783
1784        ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
1785        if (ret < 0)
1786                goto unlock;
1787
1788        ceph_zero_pagecache_range(inode, offset, length);
1789        ret = ceph_zero_objects(inode, offset, length);
1790
1791        if (!ret) {
1792                spin_lock(&ci->i_ceph_lock);
1793                ci->i_inline_version = CEPH_INLINE_NONE;
1794                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1795                                               &prealloc_cf);
1796                spin_unlock(&ci->i_ceph_lock);
1797                if (dirty)
1798                        __mark_inode_dirty(inode, dirty);
1799        }
1800
1801        ceph_put_cap_refs(ci, got);
1802unlock:
1803        inode_unlock(inode);
1804        ceph_free_cap_flush(prealloc_cf);
1805        return ret;
1806}
1807
1808/*
1809 * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
1810 * src_ci.  Two attempts are made to obtain both caps, and a negative error
1811 * code is returned if this fails; a non-negative value is returned on success.
1812 */
1813static int get_rd_wr_caps(struct ceph_inode_info *src_ci,
1814                          loff_t src_endoff, int *src_got,
1815                          struct ceph_inode_info *dst_ci,
1816                          loff_t dst_endoff, int *dst_got)
1817{
1818        int ret = 0;
1819        bool retrying = false;
1820
1821retry_caps:
1822        ret = ceph_get_caps(dst_ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
1823                            dst_endoff, dst_got, NULL);
1824        if (ret < 0)
1825                return ret;
1826
1827        /*
1828         * Since we're already holding the FILE_WR capability for the dst file,
1829         * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do some
1830         * retry dance instead to try to get both capabilities.
1831         */
1832        ret = ceph_try_get_caps(src_ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
1833                                false, src_got);
1834        if (ret <= 0) {
1835                /* Start by dropping dst_ci caps and getting src_ci caps */
1836                ceph_put_cap_refs(dst_ci, *dst_got);
1837                if (retrying) {
1838                        if (!ret)
1839                                /* ceph_try_get_caps masks EAGAIN */
1840                                ret = -EAGAIN;
1841                        return ret;
1842                }
1843                ret = ceph_get_caps(src_ci, CEPH_CAP_FILE_RD,
1844                                    CEPH_CAP_FILE_SHARED, src_endoff,
1845                                    src_got, NULL);
1846                if (ret < 0)
1847                        return ret;
1848                /* ... drop src_ci caps too, and retry */
1849                ceph_put_cap_refs(src_ci, *src_got);
1850                retrying = true;
1851                goto retry_caps;
1852        }
1853        return ret;
1854}
1855
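/*
 * Drop the cap references taken by get_rd_wr_caps() on both inodes.
 */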
1856static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
1857                           struct ceph_inode_info *dst_ci, int dst_got)
1858{
1859        ceph_put_cap_refs(src_ci, src_got);
1860        ceph_put_cap_refs(dst_ci, dst_got);
1861}
1862
1863/*
1864 * This function does several size-related checks, returning an error if:
1865 *  - source file is smaller than off+len
1866 *  - destination file size is not OK (inode_newsize_ok())
1867 *  - max bytes quota is exceeded
1868 */
1869static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
1870                           loff_t src_off, loff_t dst_off, size_t len)
1871{
1872        loff_t size, endoff;
1873
1874        size = i_size_read(src_inode);
1875        /*
1876         * Don't copy beyond source file EOF.  Instead of simply setting length
1877         * to (size - src_off), just drop to VFS default implementation, as the
1878         * local i_size may be stale due to other clients writing to the source
1879         * inode.
1880         */
1881        if (src_off + len > size) {
1882                dout("Copy beyond EOF (%llu + %zu > %llu)\n",
1883                     src_off, len, size);
1884                return -EOPNOTSUPP;
1885        }
1886        size = i_size_read(dst_inode);
1887
1888        endoff = dst_off + len;
1889        if (inode_newsize_ok(dst_inode, endoff))
1890                return -EOPNOTSUPP;
1891
1892        if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
1893                return -EDQUOT;
1894
1895        return 0;
1896}
1897
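/*
 * Do a copy_file_range entirely within Ceph where possible.  Both files
 * are flushed first and FILE_RD/FILE_WR caps are taken on the source and
 * destination; object-aligned chunks are then copied server-side with
 * copy-from OSD requests, while any unaligned head (and trailing
 * remainder) is copied locally via do_splice_direct().  Conditions the
 * remote copy cannot handle return -EOPNOTSUPP so the caller can fall
 * back to the generic implementation.
 */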
1898static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
1899                                      struct file *dst_file, loff_t dst_off,
1900                                      size_t len, unsigned int flags)
1901{
1902        struct inode *src_inode = file_inode(src_file);
1903        struct inode *dst_inode = file_inode(dst_file);
1904        struct ceph_inode_info *src_ci = ceph_inode(src_inode);
1905        struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
1906        struct ceph_cap_flush *prealloc_cf;
1907        struct ceph_object_locator src_oloc, dst_oloc;
1908        struct ceph_object_id src_oid, dst_oid;
1909        loff_t endoff = 0, size;
1910        ssize_t ret = -EIO;
1911        u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
1912        u32 src_objlen, dst_objlen, object_size;
1913        int src_got = 0, dst_got = 0, err, dirty;
1914        bool do_final_copy = false;
1915
1916        if (src_inode == dst_inode)
1917                return -EINVAL;
1918        if (src_inode->i_sb != dst_inode->i_sb)
1919                return -EXDEV;
1920        if (ceph_snap(dst_inode) != CEPH_NOSNAP)
1921                return -EROFS;
1922
1923        /*
1924         * Some of the checks below will return -EOPNOTSUPP, which will force a
1925         * fallback to the default VFS copy_file_range implementation.  This is
1926 * desirable in several cases (for example, when 'len' is smaller than the
1927 * object size, or when the fallback would be more
1928 * efficient).
1929         */
1930
1931        if (ceph_test_mount_opt(ceph_inode_to_client(src_inode), NOCOPYFROM))
1932                return -EOPNOTSUPP;
1933
1934        if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
1935            (src_ci->i_layout.stripe_count != dst_ci->i_layout.stripe_count) ||
1936            (src_ci->i_layout.object_size != dst_ci->i_layout.object_size))
1937                return -EOPNOTSUPP;
1938
1939        if (len < src_ci->i_layout.object_size)
1940                return -EOPNOTSUPP; /* no remote copy will be done */
1941
1942        prealloc_cf = ceph_alloc_cap_flush();
1943        if (!prealloc_cf)
1944                return -ENOMEM;
1945
1946        /* Start by sync'ing the source and destination files */
1947        ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
1948        if (ret < 0) {
1949                dout("failed to write src file (%zd)\n", ret);
1950                goto out;
1951        }
1952        ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
1953        if (ret < 0) {
1954                dout("failed to write dst file (%zd)\n", ret);
1955                goto out;
1956        }
1957
1958        /*
1959         * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
1960         * clients may have dirty data in their caches.  And OSDs know nothing
1961         * about caps, so they can't safely do the remote object copies.
1962         */
1963        err = get_rd_wr_caps(src_ci, (src_off + len), &src_got,
1964                             dst_ci, (dst_off + len), &dst_got);
1965        if (err < 0) {
1966                dout("get_rd_wr_caps returned %d\n", err);
1967                ret = -EOPNOTSUPP;
1968                goto out;
1969        }
1970
1971        ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
1972        if (ret < 0)
1973                goto out_caps;
1974
1975        size = i_size_read(dst_inode);
1976        endoff = dst_off + len;
1977
1978        /* Drop dst file cached pages */
1979        ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
1980                                            dst_off >> PAGE_SHIFT,
1981                                            endoff >> PAGE_SHIFT);
1982        if (ret < 0) {
1983                dout("Failed to invalidate inode pages (%zd)\n", ret);
1984                ret = 0; /* XXX */
1985        }
1986        src_oloc.pool = src_ci->i_layout.pool_id;
1987        src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
1988        dst_oloc.pool = dst_ci->i_layout.pool_id;
1989        dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
1990
1991        ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
1992                                      src_ci->i_layout.object_size,
1993                                      &src_objnum, &src_objoff, &src_objlen);
1994        ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
1995                                      dst_ci->i_layout.object_size,
1996                                      &dst_objnum, &dst_objoff, &dst_objlen);
1997        /* object-level offsets need to be the same */
1998        if (src_objoff != dst_objoff) {
1999                ret = -EOPNOTSUPP;
2000                goto out_caps;
2001        }
2002
2003        /*
2004         * Do a manual copy if the object offset isn't object aligned.
2005         * 'src_objlen' contains the bytes left until the end of the object,
2006         * starting at src_off.
2007         */
2008        if (src_objoff) {
2009                /*
2010                 * we need to temporarily drop all caps as we'll be calling
2011                 * {read,write}_iter, which will get caps again.
2012                 */
2013                put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2014                ret = do_splice_direct(src_file, &src_off, dst_file,
2015                                       &dst_off, src_objlen, flags);
2016                if (ret < 0) {
2017                        dout("do_splice_direct returned %zd\n", ret);
2018                        goto out;
2019                }
2020                len -= ret;
2021                err = get_rd_wr_caps(src_ci, (src_off + len),
2022                                     &src_got, dst_ci,
2023                                     (dst_off + len), &dst_got);
2024                if (err < 0)
2025                        goto out;
2026                err = is_file_size_ok(src_inode, dst_inode,
2027                                      src_off, dst_off, len);
2028                if (err < 0)
2029                        goto out_caps;
2030        }
2031        object_size = src_ci->i_layout.object_size;
2032        while (len >= object_size) {
2033                ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2034                                              object_size, &src_objnum,
2035                                              &src_objoff, &src_objlen);
2036                ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2037                                              object_size, &dst_objnum,
2038                                              &dst_objoff, &dst_objlen);
2039                ceph_oid_init(&src_oid);
2040                ceph_oid_printf(&src_oid, "%llx.%08llx",
2041                                src_ci->i_vino.ino, src_objnum);
2042                ceph_oid_init(&dst_oid);
2043                ceph_oid_printf(&dst_oid, "%llx.%08llx",
2044                                dst_ci->i_vino.ino, dst_objnum);
2045                /* Do an object remote copy */
2046                err = ceph_osdc_copy_from(
2047                        &ceph_inode_to_client(src_inode)->client->osdc,
2048                        src_ci->i_vino.snap, 0,
2049                        &src_oid, &src_oloc,
2050                        CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2051                        CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
2052                        &dst_oid, &dst_oloc,
2053                        CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2054                        CEPH_OSD_OP_FLAG_FADVISE_DONTNEED, 0);
2055                if (err) {
2056                        dout("ceph_osdc_copy_from returned %d\n", err);
2057                        if (!ret)
2058                                ret = err;
2059                        goto out_caps;
2060                }
2061                len -= object_size;
2062                src_off += object_size;
2063                dst_off += object_size;
2064                ret += object_size;
2065        }
2066
2067        if (len)
2068                /* We still need one final local copy */
2069                do_final_copy = true;
2070
2071        file_update_time(dst_file);
2072        inode_inc_iversion_raw(dst_inode);
2073
2074        if (endoff > size) {
2075                int caps_flags = 0;
2076
2077                /* Let the MDS know about dst file size change */
2078                if (ceph_quota_is_max_bytes_approaching(dst_inode, endoff))
2079                        caps_flags |= CHECK_CAPS_NODELAY;
2080                if (ceph_inode_set_size(dst_inode, endoff))
2081                        caps_flags |= CHECK_CAPS_AUTHONLY;
2082                if (caps_flags)
2083                        ceph_check_caps(dst_ci, caps_flags, NULL);
2084        }
2085        /* Mark Fw dirty */
2086        spin_lock(&dst_ci->i_ceph_lock);
2087        dst_ci->i_inline_version = CEPH_INLINE_NONE;
2088        dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
2089        spin_unlock(&dst_ci->i_ceph_lock);
2090        if (dirty)
2091                __mark_inode_dirty(dst_inode, dirty);
2092
2093out_caps:
2094        put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2095
2096        if (do_final_copy) {
2097                err = do_splice_direct(src_file, &src_off, dst_file,
2098                                       &dst_off, len, flags);
2099                if (err < 0) {
2100                        dout("do_splice_direct returned %d\n", err);
2101                        goto out;
2102                }
2103                len -= err;
2104                ret += err;
2105        }
2106
2107out:
2108        ceph_free_cap_flush(prealloc_cf);
2109
2110        return ret;
2111}
2112
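/*
 * copy_file_range entry point: try the Ceph-specific remote copy first
 * and fall back to generic_copy_file_range() on -EOPNOTSUPP or -EXDEV.
 */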
2113static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
2114                                    struct file *dst_file, loff_t dst_off,
2115                                    size_t len, unsigned int flags)
2116{
2117        ssize_t ret;
2118
2119        ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
2120                                     len, flags);
2121
2122        if (ret == -EOPNOTSUPP || ret == -EXDEV)
2123                ret = generic_copy_file_range(src_file, src_off, dst_file,
2124                                              dst_off, len, flags);
2125        return ret;
2126}
2127
2128const struct file_operations ceph_file_fops = {
2129        .open = ceph_open,
2130        .release = ceph_release,
2131        .llseek = ceph_llseek,
2132        .read_iter = ceph_read_iter,
2133        .write_iter = ceph_write_iter,
2134        .mmap = ceph_mmap,
2135        .fsync = ceph_fsync,
2136        .lock = ceph_lock,
2137        .flock = ceph_flock,
2138        .splice_read = generic_file_splice_read,
2139        .splice_write = iter_file_splice_write,
2140        .unlocked_ioctl = ceph_ioctl,
2141        .compat_ioctl   = ceph_ioctl,
2142        .fallocate      = ceph_fallocate,
2143        .copy_file_range = ceph_copy_file_range,
2144};
2145