linux/fs/ceph/file.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/striper.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

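/*
 * Translate kernel open flags into the CEPH_O_* flags carried on the
 * wire: the access mode is mapped explicitly, the remaining flags the
 * MDS cares about via the ceph_sys2wire() helper macro.  A sketch of
 * the effect (assuming the CEPH_O_* values from ceph_fs.h):
 *
 *        ceph_flags_sys2wire(O_WRONLY | O_CREAT | O_TRUNC)
 *          == cpu_to_le32(CEPH_O_WRONLY | CEPH_O_CREAT | CEPH_O_TRUNC)
 */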
static __le32 ceph_flags_sys2wire(u32 flags)
{
        u32 wire_flags = 0;

        switch (flags & O_ACCMODE) {
        case O_RDONLY:
                wire_flags |= CEPH_O_RDONLY;
                break;
        case O_WRONLY:
                wire_flags |= CEPH_O_WRONLY;
                break;
        case O_RDWR:
                wire_flags |= CEPH_O_RDWR;
                break;
        }

        flags &= ~O_ACCMODE;

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

        ceph_sys2wire(O_CREAT);
        ceph_sys2wire(O_EXCL);
        ceph_sys2wire(O_TRUNC);
        ceph_sys2wire(O_DIRECTORY);
        ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

        if (flags)
                dout("unused open flags: %x\n", flags);

        return cpu_to_le32(wire_flags);
}

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES    64

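/*
 * Fill @bvecs with pages pinned from @iter, covering up to @maxsize
 * bytes.  Pages are acquired in batches of ITER_GET_BVECS_PAGES via
 * iov_iter_get_pages(); the caller owns the page references and must
 * release them with put_bvecs().  Returns the number of bytes
 * covered, or a negative error if nothing could be pinned.
 */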
static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
                                struct bio_vec *bvecs)
{
        size_t size = 0;
        int bvec_idx = 0;

        if (maxsize > iov_iter_count(iter))
                maxsize = iov_iter_count(iter);

        while (size < maxsize) {
                struct page *pages[ITER_GET_BVECS_PAGES];
                ssize_t bytes;
                size_t start;
                int idx = 0;

                bytes = iov_iter_get_pages(iter, pages, maxsize - size,
                                           ITER_GET_BVECS_PAGES, &start);
                if (bytes < 0)
                        return size ?: bytes;

                iov_iter_advance(iter, bytes);
                size += bytes;

                for ( ; bytes; idx++, bvec_idx++) {
                        struct bio_vec bv = {
                                .bv_page = pages[idx],
                                .bv_len = min_t(int, bytes, PAGE_SIZE - start),
                                .bv_offset = start,
                        };

                        bvecs[bvec_idx] = bv;
                        bytes -= bv.bv_len;
                        start = 0;
                }
        }

        return size;
}

/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
                                    struct bio_vec **bvecs, int *num_bvecs)
{
        struct bio_vec *bv;
        size_t orig_count = iov_iter_count(iter);
        ssize_t bytes;
        int npages;

        iov_iter_truncate(iter, maxsize);
        npages = iov_iter_npages(iter, INT_MAX);
        iov_iter_reexpand(iter, orig_count);

        /*
         * __iter_get_bvecs() may populate only part of the array -- zero it
         * out.
         */
        bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
        if (!bv)
                return -ENOMEM;

        bytes = __iter_get_bvecs(iter, maxsize, bv);
        if (bytes < 0) {
                /*
                 * No pages were pinned -- just free the array.
                 */
                kvfree(bv);
                return bytes;
        }

        *bvecs = bv;
        *num_bvecs = npages;
        return bytes;
}

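/*
 * Undo iter_get_bvecs_alloc(): drop the page references and free the
 * bvec array.  @should_dirty is set when the pages were written to
 * (i.e. a read into user memory), so they must be marked dirty before
 * being put.  Typical pairing (a sketch of how the direct I/O path
 * below uses it):
 *
 *        len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
 *        ... submit and wait for OSD request ...
 *        put_bvecs(bvecs, num_pages, should_dirty);
 */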
static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
        int i;

        for (i = 0; i < num_bvecs; i++) {
                if (bvecs[i].bv_page) {
                        if (should_dirty)
                                set_page_dirty_lock(bvecs[i].bv_page);
                        put_page(bvecs[i].bv_page);
                }
        }
        kvfree(bvecs);
}

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int want_auth = USE_ANY_MDS;
        int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

        if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
                want_auth = USE_AUTH_MDS;

        req = ceph_mdsc_create_request(mdsc, op, want_auth);
        if (IS_ERR(req))
                goto out;
        req->r_fmode = ceph_flags_to_mode(flags);
        req->r_args.open.flags = ceph_flags_sys2wire(flags);
        req->r_args.open.mode = cpu_to_le32(create_mode);
out:
        return req;
}

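/*
 * Allocate and initialize the per-file private data
 * (ceph_dir_file_info for directories, ceph_file_info for regular
 * files) and attach it to file->private_data.  On allocation failure
 * the fmode reference taken by the caller is dropped here.
 */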
static int ceph_init_file_info(struct inode *inode, struct file *file,
                                        int fmode, bool isdir)
{
        struct ceph_file_info *fi;

        dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
                        inode->i_mode, isdir ? "dir" : "regular");
        BUG_ON(inode->i_fop->release != ceph_release);

        if (isdir) {
                struct ceph_dir_file_info *dfi =
                        kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
                if (!dfi) {
                        ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                        return -ENOMEM;
                }

                file->private_data = dfi;
                fi = &dfi->file_info;
                dfi->next_offset = 2;
                dfi->readdir_cache_idx = -1;
        } else {
                fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
                if (!fi) {
                        ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                        return -ENOMEM;
                }

                file->private_data = fi;
        }

        fi->fmode = fmode;
        spin_lock_init(&fi->rw_contexts_lock);
        INIT_LIST_HEAD(&fi->rw_contexts);

        return 0;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
        int ret = 0;

        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
                ceph_fscache_register_inode_cookie(inode);
                ceph_fscache_file_set_cookie(inode, file);
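                /* fall through: regular files need ceph_file_info too */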
        case S_IFDIR:
                ret = ceph_init_file_info(inode, file, fmode,
                                                S_ISDIR(inode->i_mode));
                if (ret)
                        return ret;
                break;

        case S_IFLNK:
                dout("init_file %p %p 0%o (symlink)\n", inode, file,
                     inode->i_mode);
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                break;

        default:
                dout("init_file %p %p 0%o (special)\n", inode, file,
                     inode->i_mode);
                /*
                 * we need to drop the open ref now, since we don't
                 * have .release set to ceph_release.
                 */
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                BUG_ON(inode->i_fop->release == ceph_release);

                /* call the proper open fop */
                ret = inode->i_fop->open(inode, file);
        }
        return ret;
}

/*
 * Try to renew caps after the session gets killed.
 */
int ceph_renew_caps(struct inode *inode)
{
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_request *req;
        int err, flags, wanted;

        spin_lock(&ci->i_ceph_lock);
        wanted = __ceph_caps_file_wanted(ci);
        if (__ceph_is_any_real_caps(ci) &&
            (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
                int issued = __ceph_caps_issued(ci, NULL);
                spin_unlock(&ci->i_ceph_lock);
                dout("renew caps %p want %s issued %s updating mds_wanted\n",
                     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
                ceph_check_caps(ci, 0, NULL);
                return 0;
        }
        spin_unlock(&ci->i_ceph_lock);

        flags = 0;
        if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
                flags = O_RDWR;
        else if (wanted & CEPH_CAP_FILE_RD)
                flags = O_RDONLY;
        else if (wanted & CEPH_CAP_FILE_WR)
                flags = O_WRONLY;
#ifdef O_LAZY
        if (wanted & CEPH_CAP_FILE_LAZYIO)
                flags |= O_LAZY;
#endif

        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }

        req->r_inode = inode;
        ihold(inode);
        req->r_num_caps = 1;
        req->r_fmode = -1;

        err = ceph_mdsc_do_request(mdsc, NULL, req);
        ceph_mdsc_put_request(req);
out:
        dout("renew caps %p open result=%d\n", inode, err);
        return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct ceph_file_info *fi = file->private_data;
        int err;
        int flags, fmode, wanted;

        if (fi) {
                dout("open file %p is already opened\n", file);
                return 0;
        }

        /* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
        flags = file->f_flags & ~(O_CREAT|O_EXCL);
        if (S_ISDIR(inode->i_mode))
                flags = O_DIRECTORY;  /* mds likes to know */

        dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
             ceph_vinop(inode), file, flags, file->f_flags);
        fmode = ceph_flags_to_mode(flags);
        wanted = ceph_caps_for_mode(fmode);

        /* snapped files are read-only */
        if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
                return -EROFS;

        /* trivially open snapdir */
        if (ceph_snap(inode) == CEPH_SNAPDIR) {
                spin_lock(&ci->i_ceph_lock);
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }

        /*
         * No need to block if we have caps on the auth MDS (for
         * write) or any MDS (for read).  Update wanted set
         * asynchronously.
         */
        spin_lock(&ci->i_ceph_lock);
        if (__ceph_is_any_real_caps(ci) &&
            (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
                int mds_wanted = __ceph_caps_mds_wanted(ci, true);
                int issued = __ceph_caps_issued(ci, NULL);

                dout("open %p fmode %d want %s issued %s using existing\n",
                     inode, fmode, ceph_cap_string(wanted),
                     ceph_cap_string(issued));
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);

                /* adjust wanted? */
                if ((issued & wanted) != wanted &&
                    (mds_wanted & wanted) != wanted &&
                    ceph_snap(inode) != CEPH_SNAPDIR)
                        ceph_check_caps(ci, 0, NULL);

                return ceph_init_file(inode, file, fmode);
        } else if (ceph_snap(inode) != CEPH_NOSNAP &&
                   (ci->i_snap_caps & wanted) == wanted) {
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }

        spin_unlock(&ci->i_ceph_lock);

        dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_inode = inode;
        ihold(inode);

        req->r_num_caps = 1;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        if (!err)
                err = ceph_init_file(inode, file, req->r_fmode);
        ceph_mdsc_put_request(req);
        dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
        return err;
}


/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
                     struct file *file, unsigned flags, umode_t mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct dentry *dn;
        struct ceph_acls_info acls = {};
        int mask;
        int err;

        dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
             dir, dentry, dentry,
             d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

        if (dentry->d_name.len > NAME_MAX)
                return -ENAMETOOLONG;

        if (flags & O_CREAT) {
                if (ceph_quota_is_max_files_exceeded(dir))
                        return -EDQUOT;
                err = ceph_pre_init_acls(dir, &mode, &acls);
                if (err < 0)
                        return err;
        }

        /* do the open */
        req = prepare_open_request(dir->i_sb, flags, mode);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out_acl;
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        if (flags & O_CREAT) {
                req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
                req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
                if (acls.pagelist) {
                        req->r_pagelist = acls.pagelist;
                        acls.pagelist = NULL;
                }
        }

        mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
        if (ceph_security_xattr_wanted(dir))
                mask |= CEPH_CAP_XATTR_SHARED;
        req->r_args.open.mask = cpu_to_le32(mask);

        req->r_parent = dir;
        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
        err = ceph_mdsc_do_request(mdsc,
                                   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
                                   req);
        err = ceph_handle_snapdir(req, dentry, err);
        if (err)
                goto out_req;

        if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);

        if (d_in_lookup(dentry)) {
                dn = ceph_finish_lookup(req, dentry, err);
                if (IS_ERR(dn))
                        err = PTR_ERR(dn);
        } else {
                /* we were given a hashed negative dentry */
                dn = NULL;
        }
        if (err)
                goto out_req;
        if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
                /* make vfs retry on splice, ENOENT, or symlink */
                dout("atomic_open finish_no_open on dn %p\n", dn);
                err = finish_no_open(file, dn);
        } else {
                dout("atomic_open finish_open on dn %p\n", dn);
                if (req->r_op == CEPH_MDS_OP_CREATE &&
                    req->r_reply_info.has_create_ino) {
                        ceph_init_inode_acls(d_inode(dentry), &acls);
                        file->f_mode |= FMODE_CREATED;
                }
                err = finish_open(file, dentry, ceph_open);
        }
out_req:
        if (!req->r_err && req->r_target_inode)
                ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
        ceph_mdsc_put_request(req);
out_acl:
        ceph_release_acls_info(&acls);
        dout("atomic_open result=%d\n", err);
        return err;
}

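/*
 * Called on the last fput() of the struct file: drop the fmode
 * reference and free the private data allocated by
 * ceph_init_file_info(), then wake up anyone waiting for caps on
 * this inode.
 */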
int ceph_release(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);

        if (S_ISDIR(inode->i_mode)) {
                struct ceph_dir_file_info *dfi = file->private_data;
                dout("release inode %p dir file %p\n", inode, file);
                WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

                ceph_put_fmode(ci, dfi->file_info.fmode);

                if (dfi->last_readdir)
                        ceph_mdsc_put_request(dfi->last_readdir);
                kfree(dfi->last_name);
                kfree(dfi->dir_info);
                kmem_cache_free(ceph_dir_file_cachep, dfi);
        } else {
                struct ceph_file_info *fi = file->private_data;
                dout("release inode %p regular file %p\n", inode, file);
                WARN_ON(!list_empty(&fi->rw_contexts));

                ceph_put_fmode(ci, fi->fmode);
                kmem_cache_free(ceph_file_cachep, fi);
        }

        /* wake up anyone waiting for caps on this inode */
        wake_up_all(&ci->i_cap_wq);
        return 0;
}

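/*
 * retry_op values passed between ceph_read_iter() and the sync read
 * path: HAVE_RETRIED means a retry was already performed, CHECK_EOF
 * means a short read should be re-checked against i_size, and
 * READ_INLINE means the data must be fetched as inline data from the
 * MDS.
 */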
enum {
        HAVE_RETRIED = 1,
        CHECK_EOF =    2,
        READ_INLINE =  3,
};

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.  (That's not
 * atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
                              int *retry_op)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_osd_client *osdc = &fsc->client->osdc;
        ssize_t ret;
        u64 off = iocb->ki_pos;
        u64 len = iov_iter_count(to);

        dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
             (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

        if (!len)
                return 0;
        /*
         * flush any page cache pages in this range.  this
         * will make concurrent normal and sync io slow,
         * but it will at least behave sensibly when they are
         * in sequence.
         */
        ret = filemap_write_and_wait_range(inode->i_mapping,
                                           off, off + len - 1);
        if (ret < 0)
                return ret;

        ret = 0;
        while ((len = iov_iter_count(to)) > 0) {
                struct ceph_osd_request *req;
                struct page **pages;
                int num_pages;
                size_t page_off;
                u64 i_size;
                bool more;

                req = ceph_osdc_new_request(osdc, &ci->i_layout,
                                        ci->i_vino, off, &len, 0, 1,
                                        CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
                                        NULL, ci->i_truncate_seq,
                                        ci->i_truncate_size, false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        break;
                }

                more = len < iov_iter_count(to);

                if (unlikely(iov_iter_is_pipe(to))) {
                        ret = iov_iter_get_pages_alloc(to, &pages, len,
                                                       &page_off);
                        if (ret <= 0) {
                                ceph_osdc_put_request(req);
                                ret = -ENOMEM;
                                break;
                        }
                        num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);
                        if (ret < len) {
                                len = ret;
                                osd_req_op_extent_update(req, 0, len);
                                more = false;
                        }
                } else {
                        num_pages = calc_pages_for(off, len);
                        page_off = off & ~PAGE_MASK;
                        pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
                        if (IS_ERR(pages)) {
                                ceph_osdc_put_request(req);
                                ret = PTR_ERR(pages);
                                break;
                        }
                }

                osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
                                                 false, false);
                ret = ceph_osdc_start_request(osdc, req, false);
                if (!ret)
                        ret = ceph_osdc_wait_request(osdc, req);
                ceph_osdc_put_request(req);

                i_size = i_size_read(inode);
                dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
                     off, len, ret, i_size, (more ? " MORE" : ""));

                if (ret == -ENOENT)
                        ret = 0;
                if (ret >= 0 && ret < len && (off + ret < i_size)) {
                        int zlen = min(len - ret, i_size - off - ret);
                        int zoff = page_off + ret;
                        dout("sync_read zero gap %llu~%llu\n",
                             off + ret, off + ret + zlen);
                        ceph_zero_page_vector_range(zoff, zlen, pages);
                        ret += zlen;
                }

                if (unlikely(iov_iter_is_pipe(to))) {
                        if (ret > 0) {
                                iov_iter_advance(to, ret);
                                off += ret;
                        } else {
                                iov_iter_advance(to, 0);
                        }
                        ceph_put_page_vector(pages, num_pages, false);
                } else {
                        int idx = 0;
                        size_t left = ret > 0 ? ret : 0;
                        while (left > 0) {
                                size_t len, copied;
                                page_off = off & ~PAGE_MASK;
                                len = min_t(size_t, left, PAGE_SIZE - page_off);
                                copied = copy_page_to_iter(pages[idx++],
                                                           page_off, len, to);
                                off += copied;
                                left -= copied;
                                if (copied < len) {
                                        ret = -EFAULT;
                                        break;
                                }
                        }
                        ceph_release_page_vector(pages, num_pages);
                }

                if (ret <= 0 || off >= i_size || !more)
                        break;
        }

        if (off > iocb->ki_pos) {
                if (ret >= 0 &&
                    iov_iter_count(to) > 0 && off >= i_size_read(inode))
                        *retry_op = CHECK_EOF;
                ret = off - iocb->ki_pos;
                iocb->ki_pos = off;
        }

        dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
        return ret;
}

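/*
 * State shared by the OSD sub-requests of one asynchronous direct
 * I/O.  pending_reqs counts the sub-requests still in flight; the
 * last one to complete finishes the iocb via ceph_aio_complete().
 */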
struct ceph_aio_request {
        struct kiocb *iocb;
        size_t total_len;
        bool write;
        bool should_dirty;
        int error;
        struct list_head osd_reqs;
        unsigned num_reqs;
        atomic_t pending_reqs;
        struct timespec64 mtime;
        struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
        struct work_struct work;
        struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

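/*
 * Complete the iocb once the last pending OSD sub-request has
 * finished.  For writes this also updates i_size and marks Fw caps
 * dirty; the cap references taken in ceph_direct_read_write() are
 * dropped here.
 */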
static void ceph_aio_complete(struct inode *inode,
                              struct ceph_aio_request *aio_req)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret;

        if (!atomic_dec_and_test(&aio_req->pending_reqs))
                return;

        ret = aio_req->error;
        if (!ret)
                ret = aio_req->total_len;

        dout("ceph_aio_complete %p rc %d\n", inode, ret);

        if (ret >= 0 && aio_req->write) {
                int dirty;

                loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
                if (endoff > i_size_read(inode)) {
                        if (ceph_inode_set_size(inode, endoff))
                                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
                }

                spin_lock(&ci->i_ceph_lock);
                ci->i_inline_version = CEPH_INLINE_NONE;
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
                                               &aio_req->prealloc_cf);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);

        }

        ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
                                                CEPH_CAP_FILE_RD));

        aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

        ceph_free_cap_flush(aio_req->prealloc_cf);
        kfree(aio_req);
}

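/*
 * Per-sub-request completion callback.  A write that raced with a
 * snapshot (-EOLDSNAPC) is re-queued on a workqueue; a short read is
 * zero-filled up to the smaller of the request size and i_size.  The
 * pinned pages are released before the result is accounted.
 */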
static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
        int rc = req->r_result;
        struct inode *inode = req->r_inode;
        struct ceph_aio_request *aio_req = req->r_priv;
        struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);

        BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
        BUG_ON(!osd_data->num_bvecs);

        dout("ceph_aio_complete_req %p rc %d bytes %u\n",
             inode, rc, osd_data->bvec_pos.iter.bi_size);

        if (rc == -EOLDSNAPC) {
                struct ceph_aio_work *aio_work;
                BUG_ON(!aio_req->write);

                aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
                if (aio_work) {
                        INIT_WORK(&aio_work->work, ceph_aio_retry_work);
                        aio_work->req = req;
                        queue_work(ceph_inode_to_client(inode)->wb_wq,
                                   &aio_work->work);
                        return;
                }
                rc = -ENOMEM;
        } else if (!aio_req->write) {
                if (rc == -ENOENT)
                        rc = 0;
                if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
                        struct iov_iter i;
                        int zlen = osd_data->bvec_pos.iter.bi_size - rc;

                        /*
                         * If read is satisfied by single OSD request,
                         * it can pass EOF. Otherwise read is within
                         * i_size.
                         */
                        if (aio_req->num_reqs == 1) {
                                loff_t i_size = i_size_read(inode);
                                loff_t endoff = aio_req->iocb->ki_pos + rc;
                                if (endoff < i_size)
                                        zlen = min_t(size_t, zlen,
                                                     i_size - endoff);
                                aio_req->total_len = rc + zlen;
                        }

                        iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
                                      osd_data->num_bvecs,
                                      osd_data->bvec_pos.iter.bi_size);
                        iov_iter_advance(&i, rc);
                        iov_iter_zero(zlen, &i);
                }
        }

        put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
                  aio_req->should_dirty);
        ceph_osdc_put_request(req);

        if (rc < 0)
                cmpxchg(&aio_req->error, 0, rc);

        ceph_aio_complete(inode, aio_req);
        return;
}

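/*
 * Retry an AIO write that failed with -EOLDSNAPC: allocate a new OSD
 * request that reuses the original op, attach the most recent snap
 * context, and resubmit.  Runs from a workqueue because the OSD
 * completion context cannot block.
 */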
static void ceph_aio_retry_work(struct work_struct *work)
{
        struct ceph_aio_work *aio_work =
                container_of(work, struct ceph_aio_work, work);
        struct ceph_osd_request *orig_req = aio_work->req;
        struct ceph_aio_request *aio_req = orig_req->r_priv;
        struct inode *inode = orig_req->r_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_snap_context *snapc;
        struct ceph_osd_request *req;
        int ret;

        spin_lock(&ci->i_ceph_lock);
        if (__ceph_have_pending_cap_snap(ci)) {
                struct ceph_cap_snap *capsnap =
                        list_last_entry(&ci->i_cap_snaps,
                                        struct ceph_cap_snap,
                                        ci_item);
                snapc = ceph_get_snap_context(capsnap->context);
        } else {
                BUG_ON(!ci->i_head_snapc);
                snapc = ceph_get_snap_context(ci->i_head_snapc);
        }
        spin_unlock(&ci->i_ceph_lock);

        req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
                        false, GFP_NOFS);
        if (!req) {
                ret = -ENOMEM;
                req = orig_req;
                goto out;
        }

        req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
        ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
        ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

        req->r_ops[0] = orig_req->r_ops[0];

        req->r_mtime = aio_req->mtime;
        req->r_data_offset = req->r_ops[0].extent.offset;

        ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
        if (ret) {
                ceph_osdc_put_request(req);
                req = orig_req;
                goto out;
        }

        ceph_osdc_put_request(orig_req);

        req->r_callback = ceph_aio_complete_req;
        req->r_inode = inode;
        req->r_priv = aio_req;

        ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
        if (ret < 0) {
                req->r_result = ret;
                ceph_aio_complete_req(req);
        }

        ceph_put_snap_context(snapc);
        kfree(aio_work);
}

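/*
 * Direct I/O read/write path: user pages are pinned into bio_vec
 * arrays and handed to the OSD client, one request per object
 * stripe.  Requests are issued asynchronously (returning
 * -EIOCBQUEUED) when the kiocb allows it and the I/O either fits in
 * a single request or lies entirely within i_size; otherwise each
 * request is waited on synchronously.
 */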
static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
                       struct ceph_snap_context *snapc,
                       struct ceph_cap_flush **pcf)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_vino vino;
        struct ceph_osd_request *req;
        struct bio_vec *bvecs;
        struct ceph_aio_request *aio_req = NULL;
        int num_pages = 0;
        int flags;
        int ret;
        struct timespec64 mtime = current_time(inode);
        size_t count = iov_iter_count(iter);
        loff_t pos = iocb->ki_pos;
        bool write = iov_iter_rw(iter) == WRITE;
        bool should_dirty = !write && iter_is_iovec(iter);

        if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;

        dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
             (write ? "write" : "read"), file, pos, (unsigned)count,
             snapc, snapc->seq);

        ret = filemap_write_and_wait_range(inode->i_mapping,
                                           pos, pos + count - 1);
        if (ret < 0)
                return ret;

        if (write) {
                int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
                                        pos >> PAGE_SHIFT,
                                        (pos + count - 1) >> PAGE_SHIFT);
                if (ret2 < 0)
                        dout("invalidate_inode_pages2_range returned %d\n", ret2);

                flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
        } else {
                flags = CEPH_OSD_FLAG_READ;
        }

        while (iov_iter_count(iter) > 0) {
                u64 size = iov_iter_count(iter);
                ssize_t len;

                if (write)
                        size = min_t(u64, size, fsc->mount_options->wsize);
                else
                        size = min_t(u64, size, fsc->mount_options->rsize);

                vino = ceph_vino(inode);
                req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                            vino, pos, &size, 0,
                                            1,
                                            write ? CEPH_OSD_OP_WRITE :
                                                    CEPH_OSD_OP_READ,
                                            flags, snapc,
                                            ci->i_truncate_seq,
                                            ci->i_truncate_size,
                                            false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        break;
                }

                len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
                if (len < 0) {
                        ceph_osdc_put_request(req);
                        ret = len;
                        break;
                }
                if (len != size)
                        osd_req_op_extent_update(req, 0, len);

                /*
                 * To simplify error handling, allow AIO when IO within i_size
                 * or IO can be satisfied by single OSD request.
                 */
                if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
                    (len == count || pos + count <= i_size_read(inode))) {
                        aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
                        if (aio_req) {
                                aio_req->iocb = iocb;
                                aio_req->write = write;
                                aio_req->should_dirty = should_dirty;
                                INIT_LIST_HEAD(&aio_req->osd_reqs);
                                if (write) {
                                        aio_req->mtime = mtime;
                                        swap(aio_req->prealloc_cf, *pcf);
                                }
                        }
                        /* ignore error */
                }

                if (write) {
                        /*
                         * throw out any page cache pages in this range. this
                         * may block.
                         */
                        truncate_inode_pages_range(inode->i_mapping, pos,
                                        (pos+len) | (PAGE_SIZE - 1));

                        req->r_mtime = mtime;
                }

                osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);

                if (aio_req) {
                        aio_req->total_len += len;
                        aio_req->num_reqs++;
                        atomic_inc(&aio_req->pending_reqs);

                        req->r_callback = ceph_aio_complete_req;
                        req->r_inode = inode;
                        req->r_priv = aio_req;
                        list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);

                        pos += len;
                        continue;
                }

                ret = ceph_osdc_start_request(req->r_osdc, req, false);
                if (!ret)
                        ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

                size = i_size_read(inode);
                if (!write) {
                        if (ret == -ENOENT)
                                ret = 0;
                        if (ret >= 0 && ret < len && pos + ret < size) {
                                struct iov_iter i;
                                int zlen = min_t(size_t, len - ret,
                                                 size - pos - ret);

                                iov_iter_bvec(&i, READ, bvecs, num_pages, len);
                                iov_iter_advance(&i, ret);
                                iov_iter_zero(zlen, &i);
                                ret += zlen;
                        }
                        if (ret >= 0)
                                len = ret;
                }

                put_bvecs(bvecs, num_pages, should_dirty);
                ceph_osdc_put_request(req);
                if (ret < 0)
                        break;

                pos += len;
                if (!write && pos >= size)
                        break;

                if (write && pos > size) {
                        if (ceph_inode_set_size(inode, pos))
                                ceph_check_caps(ceph_inode(inode),
                                                CHECK_CAPS_AUTHONLY,
                                                NULL);
                }
        }

        if (aio_req) {
                LIST_HEAD(osd_reqs);

                if (aio_req->num_reqs == 0) {
                        kfree(aio_req);
                        return ret;
                }

                ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
                                              CEPH_CAP_FILE_RD);

                list_splice(&aio_req->osd_reqs, &osd_reqs);
                while (!list_empty(&osd_reqs)) {
                        req = list_first_entry(&osd_reqs,
                                               struct ceph_osd_request,
                                               r_unsafe_item);
                        list_del_init(&req->r_unsafe_item);
                        if (ret >= 0)
                                ret = ceph_osdc_start_request(req->r_osdc,
                                                              req, false);
                        if (ret < 0) {
                                req->r_result = ret;
                                ceph_aio_complete_req(req);
                        }
                }
                return -EIOCBQUEUED;
        }

        if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
                ret = pos - iocb->ki_pos;
                iocb->ki_pos = pos;
        }
        return ret;
}

/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
                struct ceph_snap_context *snapc)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_vino vino;
        struct ceph_osd_request *req;
        struct page **pages;
        u64 len;
        int num_pages;
        int written = 0;
        int flags;
        int ret;
        bool check_caps = false;
        struct timespec64 mtime = current_time(inode);
        size_t count = iov_iter_count(from);

        if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;

        dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
             file, pos, (unsigned)count, snapc, snapc->seq);

        ret = filemap_write_and_wait_range(inode->i_mapping,
                                           pos, pos + count - 1);
        if (ret < 0)
                return ret;

        ret = invalidate_inode_pages2_range(inode->i_mapping,
                                            pos >> PAGE_SHIFT,
                                            (pos + count - 1) >> PAGE_SHIFT);
        if (ret < 0)
                dout("invalidate_inode_pages2_range returned %d\n", ret);

        flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;

        while ((len = iov_iter_count(from)) > 0) {
                size_t left;
                int n;

                vino = ceph_vino(inode);
                req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                            vino, pos, &len, 0, 1,
                                            CEPH_OSD_OP_WRITE, flags, snapc,
                                            ci->i_truncate_seq,
                                            ci->i_truncate_size,
                                            false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        break;
                }

                /*
                 * write from beginning of first page,
                 * regardless of io alignment
                 */
                num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

                pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
                }

                left = len;
                for (n = 0; n < num_pages; n++) {
                        size_t plen = min_t(size_t, left, PAGE_SIZE);
                        ret = copy_page_from_iter(pages[n], 0, plen, from);
                        if (ret != plen) {
                                ret = -EFAULT;
                                break;
                        }
                        left -= ret;
                }

                if (ret < 0) {
                        ceph_release_page_vector(pages, num_pages);
                        goto out;
                }

                req->r_inode = inode;

                osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
                                                false, true);

                req->r_mtime = mtime;
                ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
                if (!ret)
                        ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
                ceph_osdc_put_request(req);
                if (ret != 0) {
                        ceph_set_error_write(ci);
                        break;
                }

                ceph_clear_error_write(ci);
                pos += len;
                written += len;
                if (pos > i_size_read(inode)) {
                        check_caps = ceph_inode_set_size(inode, pos);
                        if (check_caps)
                                ceph_check_caps(ceph_inode(inode),
                                                CHECK_CAPS_AUTHONLY,
                                                NULL);
                }

        }

        if (ret != -EOLDSNAPC && written > 0) {
                ret = written;
                iocb->ki_pos = pos;
        }
        return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *filp = iocb->ki_filp;
        struct ceph_file_info *fi = filp->private_data;
        size_t len = iov_iter_count(to);
        struct inode *inode = file_inode(filp);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct page *pinned_page = NULL;
        ssize_t ret;
        int want, got = 0;
        int retry_op = 0, read = 0;

again:
        dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
             inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_CACHE;
        ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
        if (ret < 0)
                return ret;

        if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_flags & IOCB_DIRECT) ||
            (fi->flags & CEPH_F_SYNC)) {

                dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
                     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
                     ceph_cap_string(got));

                if (ci->i_inline_version == CEPH_INLINE_NONE) {
                        if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
                                ret = ceph_direct_read_write(iocb, to,
                                                             NULL, NULL);
                                if (ret >= 0 && ret < len)
                                        retry_op = CHECK_EOF;
                        } else {
                                ret = ceph_sync_read(iocb, to, &retry_op);
                        }
                } else {
                        retry_op = READ_INLINE;
                }
        } else {
                CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
                dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
                     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
                     ceph_cap_string(got));
                ceph_add_rw_context(fi, &rw_ctx);
                ret = generic_file_read_iter(iocb, to);
                ceph_del_rw_context(fi, &rw_ctx);
        }
        dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
             inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
        if (pinned_page) {
                put_page(pinned_page);
                pinned_page = NULL;
        }
        ceph_put_cap_refs(ci, got);
        if (retry_op > HAVE_RETRIED && ret >= 0) {
                int statret;
                struct page *page = NULL;
                loff_t i_size;
                if (retry_op == READ_INLINE) {
                        page = __page_cache_alloc(GFP_KERNEL);
                        if (!page)
                                return -ENOMEM;
                }

                statret = __ceph_do_getattr(inode, page,
                                            CEPH_STAT_CAP_INLINE_DATA, !!page);
                if (statret < 0) {
                        if (page)
                                __free_page(page);
                        if (statret == -ENODATA) {
                                BUG_ON(retry_op != READ_INLINE);
                                goto again;
                        }
                        return statret;
                }

                i_size = i_size_read(inode);
                if (retry_op == READ_INLINE) {
                        BUG_ON(ret > 0 || read > 0);
                        if (iocb->ki_pos < i_size &&
                            iocb->ki_pos < PAGE_SIZE) {
                                loff_t end = min_t(loff_t, i_size,
                                                   iocb->ki_pos + len);
                                end = min_t(loff_t, end, PAGE_SIZE);
                                if (statret < end)
                                        zero_user_segment(page, statret, end);
                                ret = copy_page_to_iter(page,
                                                iocb->ki_pos & ~PAGE_MASK,
                                                end - iocb->ki_pos, to);
                                iocb->ki_pos += ret;
                                read += ret;
                        }
                        if (iocb->ki_pos < i_size && read < len) {
                                size_t zlen = min_t(size_t, len - read,
                                                    i_size - iocb->ki_pos);
                                ret = iov_iter_zero(zlen, to);
                                iocb->ki_pos += ret;
                                read += ret;
                        }
                        __free_pages(page, 0);
                        return read;
                }

                /* hit EOF or hole? */
                if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
                    ret < len) {
                        dout("sync_read hit hole, ppos %lld < size %lld"
                             ", reading more\n", iocb->ki_pos, i_size);

                        read += ret;
                        len -= ret;
                        retry_op = HAVE_RETRIED;
                        goto again;
                }
        }

        if (ret >= 0)
                ret += read;

        return ret;
}
1365
1366/*
1367 * Take cap references to avoid releasing caps to MDS mid-write.
1368 *
1369 * If we are synchronous, and write with an old snap context, the OSD
1370 * may return EOLDSNAPC.  In that case, retry the write.. _after_
1371 * dropping our cap refs and allowing the pending snap to logically
1372 * complete _before_ this write occurs.
1373 *
1374 * If we are near ENOSPC, write synchronously.
1375 */
1376static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1377{
1378        struct file *file = iocb->ki_filp;
1379        struct ceph_file_info *fi = file->private_data;
1380        struct inode *inode = file_inode(file);
1381        struct ceph_inode_info *ci = ceph_inode(inode);
1382        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1383        struct ceph_cap_flush *prealloc_cf;
1384        ssize_t count, written = 0;
1385        int err, want, got;
1386        loff_t pos;
1387        loff_t limit = max(i_size_read(inode), fsc->max_file_size);
1388
1389        if (ceph_snap(inode) != CEPH_NOSNAP)
1390                return -EROFS;
1391
1392        prealloc_cf = ceph_alloc_cap_flush();
1393        if (!prealloc_cf)
1394                return -ENOMEM;
1395
1396retry_snap:
1397        inode_lock(inode);
1398
1399        /* We can write back this queue in page reclaim */
1400        current->backing_dev_info = inode_to_bdi(inode);
1401
1402        if (iocb->ki_flags & IOCB_APPEND) {
1403                err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1404                if (err < 0)
1405                        goto out;
1406        }
1407
1408        err = generic_write_checks(iocb, from);
1409        if (err <= 0)
1410                goto out;
1411
1412        pos = iocb->ki_pos;
1413        if (unlikely(pos >= limit)) {
1414                err = -EFBIG;
1415                goto out;
1416        } else {
1417                iov_iter_truncate(from, limit - pos);
1418        }
1419
1420        count = iov_iter_count(from);
1421        if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
1422                err = -EDQUOT;
1423                goto out;
1424        }
1425
1426        err = file_remove_privs(file);
1427        if (err)
1428                goto out;
1429
1430        err = file_update_time(file);
1431        if (err)
1432                goto out;
1433
1434        if (ci->i_inline_version != CEPH_INLINE_NONE) {
1435                err = ceph_uninline_data(file, NULL);
1436                if (err < 0)
1437                        goto out;
1438        }
1439
1440        /* FIXME: not complete since it doesn't account for being at quota */
1441        if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_FULL)) {
1442                err = -ENOSPC;
1443                goto out;
1444        }
1445
1446        dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1447             inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1448        if (fi->fmode & CEPH_FILE_MODE_LAZY)
1449                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1450        else
1451                want = CEPH_CAP_FILE_BUFFER;
1452        got = 0;
1453        err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
1454                            &got, NULL);
1455        if (err < 0)
1456                goto out;
1457
1458        dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1459             inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1460
1461        if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1462            (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
1463            (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
1464                struct ceph_snap_context *snapc;
1465                struct iov_iter data;
1466                inode_unlock(inode);
1467
1468                spin_lock(&ci->i_ceph_lock);
1469                if (__ceph_have_pending_cap_snap(ci)) {
1470                        struct ceph_cap_snap *capsnap =
1471                                        list_last_entry(&ci->i_cap_snaps,
1472                                                        struct ceph_cap_snap,
1473                                                        ci_item);
1474                        snapc = ceph_get_snap_context(capsnap->context);
1475                } else {
1476                        BUG_ON(!ci->i_head_snapc);
1477                        snapc = ceph_get_snap_context(ci->i_head_snapc);
1478                }
1479                spin_unlock(&ci->i_ceph_lock);
1480
1481                /* we might need to revert to that point */
1482                data = *from;
1483                if (iocb->ki_flags & IOCB_DIRECT)
1484                        written = ceph_direct_read_write(iocb, &data, snapc,
1485                                                         &prealloc_cf);
1486                else
1487                        written = ceph_sync_write(iocb, &data, pos, snapc);
1488                if (written > 0)
1489                        iov_iter_advance(from, written);
1490                ceph_put_snap_context(snapc);
1491        } else {
1492                /*
1493                 * No need to acquire the i_truncate_mutex, because
1494                 * the MDS revokes Fwb caps before sending a truncate
1495                 * message to us.  We can't get the Fwb cap while a
1496                 * vmtruncate is pending, so write and vmtruncate
1497                 * cannot run at the same time.
1498                 */
1499                written = generic_perform_write(file, from, pos);
1500                if (likely(written >= 0))
1501                        iocb->ki_pos = pos + written;
1502                inode_unlock(inode);
1503        }
1504
1505        if (written >= 0) {
1506                int dirty;
1507
1508                spin_lock(&ci->i_ceph_lock);
1509                ci->i_inline_version = CEPH_INLINE_NONE;
1510                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1511                                               &prealloc_cf);
1512                spin_unlock(&ci->i_ceph_lock);
1513                if (dirty)
1514                        __mark_inode_dirty(inode, dirty);
1515                if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
1516                        ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
1517        }
1518
1519        dout("aio_write %p %llx.%llx %llu~%u  dropping cap refs on %s\n",
1520             inode, ceph_vinop(inode), pos, (unsigned)count,
1521             ceph_cap_string(got));
1522        ceph_put_cap_refs(ci, got);
1523
1524        if (written == -EOLDSNAPC) {
1525                dout("aio_write %p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n",
1526                     inode, ceph_vinop(inode), pos, (unsigned)count);
1527                goto retry_snap;
1528        }
1529
1530        if (written >= 0) {
1531                if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_NEARFULL))
1532                        iocb->ki_flags |= IOCB_DSYNC;
1533                written = generic_write_sync(iocb, written);
1534        }
1535
1536        goto out_unlocked;
1537
1538out:
1539        inode_unlock(inode);
1540out_unlocked:
1541        ceph_free_cap_flush(prealloc_cf);
1542        current->backing_dev_info = NULL;
1543        return written ? written : err;
1544}
1545
1546/*
1547 * llseek.  Be sure to verify the file size on SEEK_END.
1548 */
1549static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1550{
1551        struct inode *inode = file->f_mapping->host;
1552        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1553        loff_t i_size;
1554        loff_t ret;
1555
1556        inode_lock(inode);
1557
1558        if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1559                ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1560                if (ret < 0)
1561                        goto out;
1562        }
1563
1564        i_size = i_size_read(inode);
1565        switch (whence) {
1566        case SEEK_END:
1567                offset += i_size;
1568                break;
1569        case SEEK_CUR:
1570                /*
1571                 * Here we special-case the lseek(fd, 0, SEEK_CUR)
1572                 * position-querying operation.  Avoid rewriting the "same"
1573                 * f_pos value back to the file because a concurrent read(),
1574                 * write() or lseek() might have altered it.
1575                 */
1576                if (offset == 0) {
1577                        ret = file->f_pos;
1578                        goto out;
1579                }
1580                offset += file->f_pos;
1581                break;
1582        case SEEK_DATA:
1583                if (offset < 0 || offset >= i_size) {
1584                        ret = -ENXIO;
1585                        goto out;
1586                }
1587                break;
1588        case SEEK_HOLE:
1589                if (offset < 0 || offset >= i_size) {
1590                        ret = -ENXIO;
1591                        goto out;
1592                }
1593                offset = i_size;
1594                break;
1595        }
1596
1597        ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));
1598
1599out:
1600        inode_unlock(inode);
1601        return ret;
1602}
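/*
 * Userspace view, illustrative only (assumes a non-empty file):
 * SEEK_END, SEEK_DATA and SEEK_HOLE all refresh the size from the
 * MDS first, so they observe writes made by other clients.  The
 * hole model is deliberately simple: every offset below i_size is
 * "data", and the only hole reported is the implicit one at EOF.
 *
 *   off_t end = lseek(fd, 0, SEEK_END);    // cluster-wide i_size
 *   off_t d   = lseek(fd, 0, SEEK_DATA);   // 0: in-file offsets are data
 *   off_t h   = lseek(fd, 0, SEEK_HOLE);   // i_size
 *   lseek(fd, end, SEEK_DATA);             // fails, errno == ENXIO
 */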
1603
1604static inline void ceph_zero_partial_page(
1605        struct inode *inode, loff_t offset, unsigned size)
1606{
1607        struct page *page;
1608        pgoff_t index = offset >> PAGE_SHIFT;
1609
1610        page = find_lock_page(inode->i_mapping, index);
1611        if (page) {
1612                wait_on_page_writeback(page);
1613                zero_user(page, offset & (PAGE_SIZE - 1), size);
1614                unlock_page(page);
1615                put_page(page);
1616        }
1617}
1618
1619static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1620                                      loff_t length)
1621{
1622        loff_t nearly = round_up(offset, PAGE_SIZE);
1623        if (offset < nearly) {
1624                loff_t size = nearly - offset;
1625                if (length < size)
1626                        size = length;
1627                ceph_zero_partial_page(inode, offset, size);
1628                offset += size;
1629                length -= size;
1630        }
1631        if (length >= PAGE_SIZE) {
1632                loff_t size = round_down(length, PAGE_SIZE);
1633                truncate_pagecache_range(inode, offset, offset + size - 1);
1634                offset += size;
1635                length -= size;
1636        }
1637        if (length)
1638                ceph_zero_partial_page(inode, offset, length);
1639}
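/*
 * Worked example (PAGE_SIZE == 4096): a punch at offset=1000,
 * length=10000 is carried out as
 *   - ceph_zero_partial_page over [1000, 4096)    (3096-byte head)
 *   - truncate_pagecache_range over [4096, 8191]  (one whole page)
 *   - ceph_zero_partial_page over [8192, 11000)   (2808-byte tail)
 */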
1640
1641static int ceph_zero_partial_object(struct inode *inode,
1642                                    loff_t offset, loff_t *length)
1643{
1644        struct ceph_inode_info *ci = ceph_inode(inode);
1645        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1646        struct ceph_osd_request *req;
1647        int ret = 0;
1648        loff_t zero = 0;
1649        int op;
1650
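        /*
         * A NULL length means "affect the whole object": objects at a
         * nonzero file offset are deleted outright, while the first
         * object of the file is truncated to zero length instead.
         */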
1651        if (!length) {
1652                op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
1653                length = &zero;
1654        } else {
1655                op = CEPH_OSD_OP_ZERO;
1656        }
1657
1658        req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1659                                        ceph_vino(inode),
1660                                        offset, length,
1661                                        0, 1, op,
1662                                        CEPH_OSD_FLAG_WRITE,
1663                                        NULL, 0, 0, false);
1664        if (IS_ERR(req)) {
1665                ret = PTR_ERR(req);
1666                goto out;
1667        }
1668
1669        req->r_mtime = inode->i_mtime;
1670        ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1671        if (!ret) {
1672                ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1673                if (ret == -ENOENT)
1674                        ret = 0;
1675        }
1676        ceph_osdc_put_request(req);
1677
1678out:
1679        return ret;
1680}
1681
1682static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
1683{
1684        int ret = 0;
1685        struct ceph_inode_info *ci = ceph_inode(inode);
1686        s32 stripe_unit = ci->i_layout.stripe_unit;
1687        s32 stripe_count = ci->i_layout.stripe_count;
1688        s32 object_size = ci->i_layout.object_size;
1689        u64 object_set_size = (u64)object_size * stripe_count; /* avoid 32-bit overflow */
1690        u64 nearly, t;
1691
1692        /* round offset up to next period boundary */
1693        nearly = offset + object_set_size - 1;
1694        t = nearly;
1695        nearly -= do_div(t, object_set_size);
1696
1697        while (length && offset < nearly) {
1698                loff_t size = length;
1699                ret = ceph_zero_partial_object(inode, offset, &size);
1700                if (ret < 0)
1701                        return ret;
1702                offset += size;
1703                length -= size;
1704        }
1705        while (length >= object_set_size) {
1706                int i;
1707                loff_t pos = offset;
1708                for (i = 0; i < stripe_count; ++i) {
1709                        ret = ceph_zero_partial_object(inode, pos, NULL);
1710                        if (ret < 0)
1711                                return ret;
1712                        pos += stripe_unit;
1713                }
1714                offset += object_set_size;
1715                length -= object_set_size;
1716        }
1717        while (length) {
1718                loff_t size = length;
1719                ret = ceph_zero_partial_object(inode, offset, &size);
1720                if (ret < 0)
1721                        return ret;
1722                offset += size;
1723                length -= size;
1724        }
1725        return ret;
1726}
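/*
 * Worked example for the three loops above, assuming a layout of
 * stripe_unit=1M, stripe_count=4, object_size=4M, so one object set
 * (period) spans object_set_size = 4M * 4 = 16M of file data: a
 * punch at offset=5M, length=40M proceeds as
 *   - partial-object zeroing over [5M, 16M) up to the period boundary
 *   - one whole period [16M, 32M): four whole-object ops, one per
 *     stripe position at 16M, 17M, 18M and 19M
 *   - partial-object zeroing over the [32M, 45M) tail
 */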
1727
1728static long ceph_fallocate(struct file *file, int mode,
1729                                loff_t offset, loff_t length)
1730{
1731        struct ceph_file_info *fi = file->private_data;
1732        struct inode *inode = file_inode(file);
1733        struct ceph_inode_info *ci = ceph_inode(inode);
1734        struct ceph_cap_flush *prealloc_cf;
1735        int want, got = 0;
1736        int dirty;
1737        int ret = 0;
1738        loff_t endoff = 0;
1739        loff_t size;
1740
1741        if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
1742                return -EOPNOTSUPP;
1743
1744        if (!S_ISREG(inode->i_mode))
1745                return -EOPNOTSUPP;
1746
1747        prealloc_cf = ceph_alloc_cap_flush();
1748        if (!prealloc_cf)
1749                return -ENOMEM;
1750
1751        inode_lock(inode);
1752
1753        if (ceph_snap(inode) != CEPH_NOSNAP) {
1754                ret = -EROFS;
1755                goto unlock;
1756        }
1757
1758        if (ci->i_inline_version != CEPH_INLINE_NONE) {
1759                ret = ceph_uninline_data(file, NULL);
1760                if (ret < 0)
1761                        goto unlock;
1762        }
1763
1764        size = i_size_read(inode);
1765
1766        /* Are we punching a hole beyond EOF? */
1767        if (offset >= size)
1768                goto unlock;
1769        if ((offset + length) > size)
1770                length = size - offset;
1771
1772        if (fi->fmode & CEPH_FILE_MODE_LAZY)
1773                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1774        else
1775                want = CEPH_CAP_FILE_BUFFER;
1776
1777        ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
1778        if (ret < 0)
1779                goto unlock;
1780
1781        ceph_zero_pagecache_range(inode, offset, length);
1782        ret = ceph_zero_objects(inode, offset, length);
1783
1784        if (!ret) {
1785                spin_lock(&ci->i_ceph_lock);
1786                ci->i_inline_version = CEPH_INLINE_NONE;
1787                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1788                                               &prealloc_cf);
1789                spin_unlock(&ci->i_ceph_lock);
1790                if (dirty)
1791                        __mark_inode_dirty(inode, dirty);
1792        }
1793
1794        ceph_put_cap_refs(ci, got);
1795unlock:
1796        inode_unlock(inode);
1797        ceph_free_cap_flush(prealloc_cf);
1798        return ret;
1799}
1800
1801/*
1802 * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
1803 * src_ci.  Two attempts are made to obtain both caps, and an error is
1804 * returned if this fails; a non-negative value is returned on success.
1805 */
1806static int get_rd_wr_caps(struct ceph_inode_info *src_ci,
1807                          loff_t src_endoff, int *src_got,
1808                          struct ceph_inode_info *dst_ci,
1809                          loff_t dst_endoff, int *dst_got)
1810{
1811        int ret = 0;
1812        bool retrying = false;
1813
1814retry_caps:
1815        ret = ceph_get_caps(dst_ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
1816                            dst_endoff, dst_got, NULL);
1817        if (ret < 0)
1818                return ret;
1819
1820        /*
1821         * Since we're already holding the FILE_WR capability for the dst file,
1822         * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do some
1823         * retry dance instead to try to get both capabilities.
1824         */
1825        ret = ceph_try_get_caps(src_ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
1826                                false, src_got);
1827        if (ret <= 0) {
1828                /* Start by dropping dst_ci caps and getting src_ci caps */
1829                ceph_put_cap_refs(dst_ci, *dst_got);
1830                if (retrying) {
1831                        if (!ret)
1832                                /* ceph_try_get_caps masks EAGAIN */
1833                                ret = -EAGAIN;
1834                        return ret;
1835                }
1836                ret = ceph_get_caps(src_ci, CEPH_CAP_FILE_RD,
1837                                    CEPH_CAP_FILE_SHARED, src_endoff,
1838                                    src_got, NULL);
1839                if (ret < 0)
1840                        return ret;
1841                /* ... drop src_ci caps too, and retry */
1842                ceph_put_cap_refs(src_ci, *src_got);
1843                retrying = true;
1844                goto retry_caps;
1845        }
1846        return ret;
1847}
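/*
 * The retry dance above as a sequence (restating the code, not
 * adding to it):
 *
 *   1. block until FILE_WR caps are granted on dst
 *   2. try FILE_RD on src without blocking
 *      - success: return with both sets of cap refs held
 *      - failure: drop the dst refs, block until the src caps are
 *        granted, drop those as well, and retry from step 1
 *   3. a failure on the second pass gives up with -EAGAIN
 */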
1848
1849static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
1850                           struct ceph_inode_info *dst_ci, int dst_got)
1851{
1852        ceph_put_cap_refs(src_ci, src_got);
1853        ceph_put_cap_refs(dst_ci, dst_got);
1854}
1855
1856/*
1857 * This function does several size-related checks, returning an error if:
1858 *  - source file is smaller than off+len
1859 *  - destination file size is not OK (inode_newsize_ok())
1860 *  - the max bytes quota is exceeded
1861 */
1862static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
1863                           loff_t src_off, loff_t dst_off, size_t len)
1864{
1865        loff_t size, endoff;
1866
1867        size = i_size_read(src_inode);
1868        /*
1869         * Don't copy beyond source file EOF.  Instead of simply setting length
1870 * to (size - src_off), fall back to the default VFS implementation, as the
1871         * local i_size may be stale due to other clients writing to the source
1872         * inode.
1873         */
1874        if (src_off + len > size) {
1875                dout("Copy beyond EOF (%llu + %zu > %llu)\n",
1876                     src_off, len, size);
1877                return -EOPNOTSUPP;
1878        }
1879        size = i_size_read(dst_inode);
1880
1881        endoff = dst_off + len;
1882        if (inode_newsize_ok(dst_inode, endoff))
1883                return -EOPNOTSUPP;
1884
1885        if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
1886                return -EDQUOT;
1887
1888        return 0;
1889}
1890
1891static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
1892                                    struct file *dst_file, loff_t dst_off,
1893                                    size_t len, unsigned int flags)
1894{
1895        struct inode *src_inode = file_inode(src_file);
1896        struct inode *dst_inode = file_inode(dst_file);
1897        struct ceph_inode_info *src_ci = ceph_inode(src_inode);
1898        struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
1899        struct ceph_cap_flush *prealloc_cf;
1900        struct ceph_object_locator src_oloc, dst_oloc;
1901        struct ceph_object_id src_oid, dst_oid;
1902        loff_t endoff = 0, size;
1903        ssize_t ret = -EIO;
1904        u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
1905        u32 src_objlen, dst_objlen, object_size;
1906        int src_got = 0, dst_got = 0, err, dirty;
1907        bool do_final_copy = false;
1908
1909        if (src_inode == dst_inode)
1910                return -EINVAL;
1911        if (ceph_snap(dst_inode) != CEPH_NOSNAP)
1912                return -EROFS;
1913
1914        /*
1915         * Some of the checks below will return -EOPNOTSUPP, which will force a
1916         * fallback to the default VFS copy_file_range implementation.  This is
1917         * desirable in several cases (e.g. when 'len' is smaller than
1918         * the object size, or in cases where the fallback would be
1919         * more efficient).
1920         */
1921
1922        if (ceph_test_mount_opt(ceph_inode_to_client(src_inode), NOCOPYFROM))
1923                return -EOPNOTSUPP;
1924
1925        if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
1926            (src_ci->i_layout.stripe_count != dst_ci->i_layout.stripe_count) ||
1927            (src_ci->i_layout.object_size != dst_ci->i_layout.object_size))
1928                return -EOPNOTSUPP;
1929
1930        if (len < src_ci->i_layout.object_size)
1931                return -EOPNOTSUPP; /* no remote copy will be done */
1932
1933        prealloc_cf = ceph_alloc_cap_flush();
1934        if (!prealloc_cf)
1935                return -ENOMEM;
1936
1937        /* Start by sync'ing the source and destination files */
1938        ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
1939        if (ret < 0) {
1940                dout("failed to write src file (%zd)\n", ret);
1941                goto out;
1942        }
1943        ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
1944        if (ret < 0) {
1945                dout("failed to write dst file (%zd)\n", ret);
1946                goto out;
1947        }
1948
1949        /*
1950         * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
1951         * clients may have dirty data in their caches.  And OSDs know nothing
1952         * about caps, so they can't safely do the remote object copies.
1953         */
1954        err = get_rd_wr_caps(src_ci, (src_off + len), &src_got,
1955                             dst_ci, (dst_off + len), &dst_got);
1956        if (err < 0) {
1957                dout("get_rd_wr_caps returned %d\n", err);
1958                ret = -EOPNOTSUPP;
1959                goto out;
1960        }
1961
1962        ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
1963        if (ret < 0)
1964                goto out_caps;
1965
1966        size = i_size_read(dst_inode);
1967        endoff = dst_off + len;
1968
1969        /* Drop dst file cached pages */
1970        ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
1971                                            dst_off >> PAGE_SHIFT,
1972                                            endoff >> PAGE_SHIFT);
1973        if (ret < 0) {
1974                dout("Failed to invalidate inode pages (%zd)\n", ret);
1975                ret = 0; /* XXX */
1976        }
1977        src_oloc.pool = src_ci->i_layout.pool_id;
1978        src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
1979        dst_oloc.pool = dst_ci->i_layout.pool_id;
1980        dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
1981
1982        ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
1983                                      src_ci->i_layout.object_size,
1984                                      &src_objnum, &src_objoff, &src_objlen);
1985        ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
1986                                      dst_ci->i_layout.object_size,
1987                                      &dst_objnum, &dst_objoff, &dst_objlen);
1988        /* object-level offsets need to be the same */
1989        if (src_objoff != dst_objoff) {
1990                ret = -EOPNOTSUPP;
1991                goto out_caps;
1992        }
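        /*
         * Example with 4M objects: src_off=0x480000 and dst_off=0x880000
         * both yield an intra-object offset of 0x80000, so the remote
         * object-by-object copy can proceed; with dst_off=0x8a0000 the
         * offsets differ and we punt to the VFS via -EOPNOTSUPP.
         */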
1993
1994        /*
1995         * Do a manual copy if src_off isn't object-aligned.
1996         * 'src_objlen' contains the bytes left until the end of the object,
1997         * starting at src_off.
1998         */
1999        if (src_objoff) {
2000                /*
2001                 * we need to temporarily drop all caps as we'll be calling
2002                 * {read,write}_iter, which will get caps again.
2003                 */
2004                put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2005                ret = do_splice_direct(src_file, &src_off, dst_file,
2006                                       &dst_off, src_objlen, flags);
2007                if (ret < 0) {
2008                        dout("do_splice_direct returned %d\n", err);
2009                        goto out;
2010                }
2011                len -= ret;
2012                err = get_rd_wr_caps(src_ci, (src_off + len),
2013                                     &src_got, dst_ci,
2014                                     (dst_off + len), &dst_got);
2015                if (err < 0)
2016                        goto out;
2017                err = is_file_size_ok(src_inode, dst_inode,
2018                                      src_off, dst_off, len);
2019                if (err < 0)
2020                        goto out_caps;
2021        }
2022        object_size = src_ci->i_layout.object_size;
2023        while (len >= object_size) {
2024                ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2025                                              object_size, &src_objnum,
2026                                              &src_objoff, &src_objlen);
2027                ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2028                                              object_size, &dst_objnum,
2029                                              &dst_objoff, &dst_objlen);
2030                ceph_oid_init(&src_oid);
2031                ceph_oid_printf(&src_oid, "%llx.%08llx",
2032                                src_ci->i_vino.ino, src_objnum);
2033                ceph_oid_init(&dst_oid);
2034                ceph_oid_printf(&dst_oid, "%llx.%08llx",
2035                                dst_ci->i_vino.ino, dst_objnum);
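                /* e.g. ino 0x10000000000, object 3 -> "10000000000.00000003" */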
2036                /* Do an object remote copy */
2037                err = ceph_osdc_copy_from(
2038                        &ceph_inode_to_client(src_inode)->client->osdc,
2039                        src_ci->i_vino.snap, 0,
2040                        &src_oid, &src_oloc,
2041                        CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2042                        CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
2043                        &dst_oid, &dst_oloc,
2044                        CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2045                        CEPH_OSD_OP_FLAG_FADVISE_DONTNEED, 0);
2046                if (err) {
2047                        dout("ceph_osdc_copy_from returned %d\n", err);
2048                        if (!ret)
2049                                ret = err;
2050                        goto out_caps;
2051                }
2052                len -= object_size;
2053                src_off += object_size;
2054                dst_off += object_size;
2055                ret += object_size;
2056        }
2057
2058        if (len)
2059                /* We still need one final local copy */
2060                do_final_copy = true;
2061
2062        file_update_time(dst_file);
2063        if (endoff > size) {
2064                int caps_flags = 0;
2065
2066                /* Let the MDS know about dst file size change */
2067                if (ceph_quota_is_max_bytes_approaching(dst_inode, endoff))
2068                        caps_flags |= CHECK_CAPS_NODELAY;
2069                if (ceph_inode_set_size(dst_inode, endoff))
2070                        caps_flags |= CHECK_CAPS_AUTHONLY;
2071                if (caps_flags)
2072                        ceph_check_caps(dst_ci, caps_flags, NULL);
2073        }
2074        /* Mark Fw dirty */
2075        spin_lock(&dst_ci->i_ceph_lock);
2076        dst_ci->i_inline_version = CEPH_INLINE_NONE;
2077        dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
2078        spin_unlock(&dst_ci->i_ceph_lock);
2079        if (dirty)
2080                __mark_inode_dirty(dst_inode, dirty);
2081
2082out_caps:
2083        put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2084
2085        if (do_final_copy) {
2086                err = do_splice_direct(src_file, &src_off, dst_file,
2087                                       &dst_off, len, flags);
2088                if (err < 0) {
2089                        dout("do_splice_direct returned %d\n", err);
2090                        goto out;
2091                }
2092                len -= err;
2093                ret += err;
2094        }
2095
2096out:
2097        ceph_free_cap_flush(prealloc_cf);
2098
2099        return ret;
2100}
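/*
 * Illustrative userspace call (the 8M figure assumes the default 4M
 * object size and identical, object-aligned layouts on both files;
 * shorter lengths, mismatched layouts or misaligned intra-object
 * offsets all take the generic VFS copy path instead):
 *
 *   ssize_t n = copy_file_range(src_fd, NULL, dst_fd, NULL,
 *                               8 * 1024 * 1024, 0);
 */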
2101
2102const struct file_operations ceph_file_fops = {
2103        .open = ceph_open,
2104        .release = ceph_release,
2105        .llseek = ceph_llseek,
2106        .read_iter = ceph_read_iter,
2107        .write_iter = ceph_write_iter,
2108        .mmap = ceph_mmap,
2109        .fsync = ceph_fsync,
2110        .lock = ceph_lock,
2111        .flock = ceph_flock,
2112        .splice_read = generic_file_splice_read,
2113        .splice_write = iter_file_splice_write,
2114        .unlocked_ioctl = ceph_ioctl,
2115        .compat_ioctl   = ceph_ioctl,
2116        .fallocate      = ceph_fallocate,
2117        .copy_file_range = ceph_copy_file_range,
2118};
2119