linux/fs/ceph/caps.c
#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/writeback.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>
#include <linux/ceph/messenger.h>

/*
 * Capability management
 *
 * The Ceph metadata servers control client access to inode metadata
 * and file data by issuing capabilities, granting clients permission
 * to read and/or write both inode fields and file data to OSDs
 * (storage nodes).  Each capability consists of a set of bits
 * indicating which operations are allowed.
 *
 * If the client holds a *_SHARED cap, the client has a coherent value
 * that can be safely read from the cached inode.
 *
 * In the case of *_EXCL (exclusive) or FILE_WR capabilities, the
 * client is allowed to change inode attributes (e.g., file size,
 * mtime), note its dirty state in the ceph_cap, and asynchronously
 * flush that metadata change to the MDS.
 *
 * In the event of a conflicting operation (perhaps by another
 * client), the MDS will revoke the conflicting client capabilities.
 *
 * In order for a client to cache an inode, it must hold a capability
 * from at least one MDS.  When inodes are released, release
 * notifications are batched and periodically sent en masse to the MDS
 * cluster to release server state.
 */
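
/*
 * Example (illustrative): a cap mask combines a "pin" bit with
 * per-component (AUTH, LINK, XATTR, FILE) grants such as SHARED,
 * EXCL, CACHE, RD, WR.  An inode opened for read typically holds
 * something like "pAsLsXsFscr": pin, shared auth/link/xattr, and
 * shared+cache+read file caps.  ceph_cap_string() below renders
 * masks in exactly this notation.
 */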


/*
 * Generate readable cap strings for debugging output.
 */
#define MAX_CAP_STR 20
static char cap_str[MAX_CAP_STR][40];
static DEFINE_SPINLOCK(cap_str_lock);
static int last_cap_str;

static char *gcap_string(char *s, int c)
{
        if (c & CEPH_CAP_GSHARED)
                *s++ = 's';
        if (c & CEPH_CAP_GEXCL)
                *s++ = 'x';
        if (c & CEPH_CAP_GCACHE)
                *s++ = 'c';
        if (c & CEPH_CAP_GRD)
                *s++ = 'r';
        if (c & CEPH_CAP_GWR)
                *s++ = 'w';
        if (c & CEPH_CAP_GBUFFER)
                *s++ = 'b';
        if (c & CEPH_CAP_GLAZYIO)
                *s++ = 'l';
        return s;
}

const char *ceph_cap_string(int caps)
{
        int i;
        char *s;
        int c;

        spin_lock(&cap_str_lock);
        i = last_cap_str++;
        if (last_cap_str == MAX_CAP_STR)
                last_cap_str = 0;
        spin_unlock(&cap_str_lock);

        s = cap_str[i];

        if (caps & CEPH_CAP_PIN)
                *s++ = 'p';

        c = (caps >> CEPH_CAP_SAUTH) & 3;
        if (c) {
                *s++ = 'A';
                s = gcap_string(s, c);
        }

        c = (caps >> CEPH_CAP_SLINK) & 3;
        if (c) {
                *s++ = 'L';
                s = gcap_string(s, c);
        }

        c = (caps >> CEPH_CAP_SXATTR) & 3;
        if (c) {
                *s++ = 'X';
                s = gcap_string(s, c);
        }

        c = caps >> CEPH_CAP_SFILE;
        if (c) {
                *s++ = 'F';
                s = gcap_string(s, c);
        }

        if (s == cap_str[i])
                *s++ = '-';
        *s = 0;
        return cap_str[i];
}
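
/*
 * Usage sketch (illustrative only, hence the #if 0; the helper name is
 * hypothetical): ceph_cap_string() renders a mask for debug output,
 * e.g. CEPH_CAP_PIN | CEPH_CAP_FILE_SHARED comes back as "pFs" and an
 * empty mask as "-".  Note the result lives in a small rotating buffer
 * (MAX_CAP_STR entries), so it is only suitable for transient logging.
 */
#if 0
static void example_show_caps(int caps)
{
        pr_info("caps = %s\n", ceph_cap_string(caps));
}
#endif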

void ceph_caps_init(struct ceph_mds_client *mdsc)
{
        INIT_LIST_HEAD(&mdsc->caps_list);
        spin_lock_init(&mdsc->caps_list_lock);
}

void ceph_caps_finalize(struct ceph_mds_client *mdsc)
{
        struct ceph_cap *cap;

        spin_lock(&mdsc->caps_list_lock);
        while (!list_empty(&mdsc->caps_list)) {
                cap = list_first_entry(&mdsc->caps_list,
                                       struct ceph_cap, caps_item);
                list_del(&cap->caps_item);
                kmem_cache_free(ceph_cap_cachep, cap);
        }
        mdsc->caps_total_count = 0;
        mdsc->caps_avail_count = 0;
        mdsc->caps_use_count = 0;
        mdsc->caps_reserve_count = 0;
        mdsc->caps_min_count = 0;
        spin_unlock(&mdsc->caps_list_lock);
}

void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
{
        spin_lock(&mdsc->caps_list_lock);
        mdsc->caps_min_count += delta;
        BUG_ON(mdsc->caps_min_count < 0);
        spin_unlock(&mdsc->caps_list_lock);
}

void ceph_reserve_caps(struct ceph_mds_client *mdsc,
                      struct ceph_cap_reservation *ctx, int need)
{
        int i;
        struct ceph_cap *cap;
        int have;
        int alloc = 0;
        LIST_HEAD(newcaps);

        dout("reserve caps ctx=%p need=%d\n", ctx, need);

        /* first reserve any caps that are already allocated */
        spin_lock(&mdsc->caps_list_lock);
        if (mdsc->caps_avail_count >= need)
                have = need;
        else
                have = mdsc->caps_avail_count;
        mdsc->caps_avail_count -= have;
        mdsc->caps_reserve_count += have;
        BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
                                         mdsc->caps_reserve_count +
                                         mdsc->caps_avail_count);
        spin_unlock(&mdsc->caps_list_lock);

        for (i = have; i < need; i++) {
                cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
                if (!cap)
                        break;
                list_add(&cap->caps_item, &newcaps);
                alloc++;
        }
        /* we didn't manage to reserve as much as we needed */
        if (have + alloc != need)
                pr_warn("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
                        ctx, need, have + alloc);

        spin_lock(&mdsc->caps_list_lock);
        mdsc->caps_total_count += alloc;
        mdsc->caps_reserve_count += alloc;
        list_splice(&newcaps, &mdsc->caps_list);

        BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
                                         mdsc->caps_reserve_count +
                                         mdsc->caps_avail_count);
        spin_unlock(&mdsc->caps_list_lock);

        ctx->count = need;
        dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
             ctx, mdsc->caps_total_count, mdsc->caps_use_count,
             mdsc->caps_reserve_count, mdsc->caps_avail_count);
}

int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
                        struct ceph_cap_reservation *ctx)
{
        dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
        if (ctx->count) {
                spin_lock(&mdsc->caps_list_lock);
                BUG_ON(mdsc->caps_reserve_count < ctx->count);
                mdsc->caps_reserve_count -= ctx->count;
                mdsc->caps_avail_count += ctx->count;
                ctx->count = 0;
                dout("unreserve caps %d = %d used + %d resv + %d avail\n",
                     mdsc->caps_total_count, mdsc->caps_use_count,
                     mdsc->caps_reserve_count, mdsc->caps_avail_count);
                BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
                                                 mdsc->caps_reserve_count +
                                                 mdsc->caps_avail_count);
                spin_unlock(&mdsc->caps_list_lock);
        }
        return 0;
}

struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
                              struct ceph_cap_reservation *ctx)
{
        struct ceph_cap *cap = NULL;

        /* temporary, until we do something about cap import/export */
        if (!ctx) {
                cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
                if (cap) {
                        spin_lock(&mdsc->caps_list_lock);
                        mdsc->caps_use_count++;
                        mdsc->caps_total_count++;
                        spin_unlock(&mdsc->caps_list_lock);
                }
                return cap;
        }

        spin_lock(&mdsc->caps_list_lock);
        dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
             ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
             mdsc->caps_reserve_count, mdsc->caps_avail_count);
        BUG_ON(!ctx->count);
        BUG_ON(ctx->count > mdsc->caps_reserve_count);
        BUG_ON(list_empty(&mdsc->caps_list));

        ctx->count--;
        mdsc->caps_reserve_count--;
        mdsc->caps_use_count++;

        cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item);
        list_del(&cap->caps_item);

        BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
               mdsc->caps_reserve_count + mdsc->caps_avail_count);
        spin_unlock(&mdsc->caps_list_lock);
        return cap;
}

void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
{
        spin_lock(&mdsc->caps_list_lock);
        dout("put_cap %p %d = %d used + %d resv + %d avail\n",
             cap, mdsc->caps_total_count, mdsc->caps_use_count,
             mdsc->caps_reserve_count, mdsc->caps_avail_count);
        mdsc->caps_use_count--;
        /*
         * Keep some preallocated caps around (ceph_min_count), to
         * avoid lots of free/alloc churn.
         */
        if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
                                      mdsc->caps_min_count) {
                mdsc->caps_total_count--;
                kmem_cache_free(ceph_cap_cachep, cap);
        } else {
                mdsc->caps_avail_count++;
                list_add(&cap->caps_item, &mdsc->caps_list);
        }

        BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
               mdsc->caps_reserve_count + mdsc->caps_avail_count);
        spin_unlock(&mdsc->caps_list_lock);
}
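
/*
 * Lifecycle sketch (illustrative only; the function name is
 * hypothetical): a caller that may need N caps reserves them up front
 * so later allocation cannot fail, draws from the reservation, and
 * returns what it no longer needs to the pool.
 */
#if 0
static void example_cap_reservation(struct ceph_mds_client *mdsc)
{
        struct ceph_cap_reservation ctx = { 0 };
        struct ceph_cap *cap;

        ceph_reserve_caps(mdsc, &ctx, 2);       /* preallocate two caps */
        cap = ceph_get_cap(mdsc, &ctx);         /* take one; cannot fail */
        ceph_put_cap(mdsc, cap);                /* return it to the pool */
        ceph_unreserve_caps(mdsc, &ctx);        /* release the remainder */
}
#endif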

void ceph_reservation_status(struct ceph_fs_client *fsc,
                             int *total, int *avail, int *used, int *reserved,
                             int *min)
{
        struct ceph_mds_client *mdsc = fsc->mdsc;

        if (total)
                *total = mdsc->caps_total_count;
        if (avail)
                *avail = mdsc->caps_avail_count;
        if (used)
                *used = mdsc->caps_use_count;
        if (reserved)
                *reserved = mdsc->caps_reserve_count;
        if (min)
                *min = mdsc->caps_min_count;
}

/*
 * Find ceph_cap for given mds, if any.
 *
 * Called with i_ceph_lock held.
 */
static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
        struct ceph_cap *cap;
        struct rb_node *n = ci->i_caps.rb_node;

        while (n) {
                cap = rb_entry(n, struct ceph_cap, ci_node);
                if (mds < cap->mds)
                        n = n->rb_left;
                else if (mds > cap->mds)
                        n = n->rb_right;
                else
                        return cap;
        }
        return NULL;
}

struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
        struct ceph_cap *cap;

        spin_lock(&ci->i_ceph_lock);
        cap = __get_cap_for_mds(ci, mds);
        spin_unlock(&ci->i_ceph_lock);
        return cap;
}

/*
 * Return id of any MDS with a cap, preferably FILE_WR|BUFFER|EXCL, else -1.
 */
static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
{
        struct ceph_cap *cap;
        int mds = -1;
        struct rb_node *p;

        /* prefer mds with WR|BUFFER|EXCL caps */
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                mds = cap->mds;
                if (cap->issued & (CEPH_CAP_FILE_WR |
                                   CEPH_CAP_FILE_BUFFER |
                                   CEPH_CAP_FILE_EXCL))
                        break;
        }
        return mds;
}

int ceph_get_cap_mds(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int mds;
        spin_lock(&ci->i_ceph_lock);
        mds = __ceph_get_cap_mds(ceph_inode(inode));
        spin_unlock(&ci->i_ceph_lock);
        return mds;
}

/*
 * Called under i_ceph_lock.
 */
static void __insert_cap_node(struct ceph_inode_info *ci,
                              struct ceph_cap *new)
{
        struct rb_node **p = &ci->i_caps.rb_node;
        struct rb_node *parent = NULL;
        struct ceph_cap *cap = NULL;

        while (*p) {
                parent = *p;
                cap = rb_entry(parent, struct ceph_cap, ci_node);
                if (new->mds < cap->mds)
                        p = &(*p)->rb_left;
                else if (new->mds > cap->mds)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&new->ci_node, parent, p);
        rb_insert_color(&new->ci_node, &ci->i_caps);
}

/*
 * (re)set cap hold timeouts, which control the delayed release
 * of unused caps back to the MDS.  Should be called on cap use.
 */
static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
                               struct ceph_inode_info *ci)
{
        struct ceph_mount_options *ma = mdsc->fsc->mount_options;

        ci->i_hold_caps_min = round_jiffies(jiffies +
                                            ma->caps_wanted_delay_min * HZ);
        ci->i_hold_caps_max = round_jiffies(jiffies +
                                            ma->caps_wanted_delay_max * HZ);
        dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
             ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
}
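
/*
 * Worked example (assuming the usual mount defaults of
 * caps_wanted_delay_min=5 and caps_wanted_delay_max=60 seconds): a cap
 * used now will not have its wanted bits released for at least ~5s,
 * and becomes a candidate for delayed release within ~60s.
 */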

/*
 * (Re)queue cap at the end of the delayed cap release list.
 *
 * If I_FLUSH is set, leave the inode at the front of the list.
 *
 * Caller holds i_ceph_lock
 *    -> we take mdsc->cap_delay_lock
 */
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
                                struct ceph_inode_info *ci)
{
        __cap_set_timeouts(mdsc, ci);
        dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
             ci->i_ceph_flags, ci->i_hold_caps_max);
        if (!mdsc->stopping) {
                spin_lock(&mdsc->cap_delay_lock);
                if (!list_empty(&ci->i_cap_delay_list)) {
                        if (ci->i_ceph_flags & CEPH_I_FLUSH)
                                goto no_change;
                        list_del_init(&ci->i_cap_delay_list);
                }
                list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
no_change:
                spin_unlock(&mdsc->cap_delay_lock);
        }
}

/*
 * Queue an inode for immediate writeback.  Mark inode with I_FLUSH,
 * indicating we should send a cap message to flush dirty metadata
 * asap, and move to the front of the delayed cap list.
 */
static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
                                      struct ceph_inode_info *ci)
{
        dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
        spin_lock(&mdsc->cap_delay_lock);
        ci->i_ceph_flags |= CEPH_I_FLUSH;
        if (!list_empty(&ci->i_cap_delay_list))
                list_del_init(&ci->i_cap_delay_list);
        list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
        spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Cancel delayed work on cap.
 *
 * Caller must hold i_ceph_lock.
 */
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
                               struct ceph_inode_info *ci)
{
        dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
        if (list_empty(&ci->i_cap_delay_list))
                return;
        spin_lock(&mdsc->cap_delay_lock);
        list_del_init(&ci->i_cap_delay_list);
        spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Common issue checks for add_cap, handle_cap_grant.
 */
static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
                              unsigned issued)
{
        unsigned had = __ceph_caps_issued(ci, NULL);

        /*
         * Each time we receive FILE_CACHE anew, we increment
         * i_rdcache_gen.
         */
        if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
            (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0) {
                ci->i_rdcache_gen++;
        }

        /*
         * If we are newly issued FILE_SHARED, mark dir not complete; we
         * don't know what happened to this directory while we didn't
         * have the cap.
         */
        if ((issued & CEPH_CAP_FILE_SHARED) &&
            (had & CEPH_CAP_FILE_SHARED) == 0) {
                ci->i_shared_gen++;
                if (S_ISDIR(ci->vfs_inode.i_mode)) {
                        dout(" marking %p NOT complete\n", &ci->vfs_inode);
                        __ceph_dir_clear_complete(ci);
                }
        }
}

/*
 * Add a capability under the given MDS session.
 *
 * Caller should hold session snap_rwsem (read) and s_mutex.
 *
 * @fmode is the open file mode, if we are opening a file, otherwise
 * it is < 0.  (This is so we can atomically add the cap and add an
 * open file reference to it.)
 */
void ceph_add_cap(struct inode *inode,
                  struct ceph_mds_session *session, u64 cap_id,
                  int fmode, unsigned issued, unsigned wanted,
                  unsigned seq, unsigned mseq, u64 realmino, int flags,
                  struct ceph_cap **new_cap)
{
        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_cap *cap;
        int mds = session->s_mds;
        int actual_wanted;

        dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
             session->s_mds, cap_id, ceph_cap_string(issued), seq);

        /*
         * If we are opening the file, include file mode wanted bits
         * in wanted.
         */
        if (fmode >= 0)
                wanted |= ceph_caps_for_mode(fmode);

        cap = __get_cap_for_mds(ci, mds);
        if (!cap) {
                cap = *new_cap;
                *new_cap = NULL;

                cap->issued = 0;
                cap->implemented = 0;
                cap->mds = mds;
                cap->mds_wanted = 0;
                cap->mseq = 0;

                cap->ci = ci;
                __insert_cap_node(ci, cap);

                /* add to session cap list */
                cap->session = session;
                spin_lock(&session->s_cap_lock);
                list_add_tail(&cap->session_caps, &session->s_caps);
                session->s_nr_caps++;
                spin_unlock(&session->s_cap_lock);
        } else {
                /*
                 * The auth MDS of the inode changed. We received the cap
                 * export message, but still haven't received the cap import
                 * message. handle_cap_export() updated the new auth MDS' cap.
                 *
                 * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing
                 * a message that was sent before the cap import message. So
                 * don't remove caps.
                 */
                if (ceph_seq_cmp(seq, cap->seq) <= 0) {
                        WARN_ON(cap != ci->i_auth_cap);
                        WARN_ON(cap->cap_id != cap_id);
                        seq = cap->seq;
                        mseq = cap->mseq;
                        issued |= cap->issued;
                        flags |= CEPH_CAP_FLAG_AUTH;
                }
        }

        if (!ci->i_snap_realm) {
                /*
                 * add this inode to the appropriate snap realm
                 */
                struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
                                                               realmino);
                if (realm) {
                        spin_lock(&realm->inodes_with_caps_lock);
                        ci->i_snap_realm = realm;
                        list_add(&ci->i_snap_realm_item,
                                 &realm->inodes_with_caps);
                        spin_unlock(&realm->inodes_with_caps_lock);
                } else {
                        pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
                               realmino);
                        WARN_ON(!realm);
                }
        }

        __check_cap_issue(ci, cap, issued);

        /*
         * If we are issued caps we don't want, or the mds' wanted
         * value appears to be off, queue a check so we'll release
         * later and/or update the mds wanted value.
         */
        actual_wanted = __ceph_caps_wanted(ci);
        if ((wanted & ~actual_wanted) ||
            (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
                dout(" issued %s, mds wanted %s, actual %s, queueing\n",
                     ceph_cap_string(issued), ceph_cap_string(wanted),
                     ceph_cap_string(actual_wanted));
                __cap_delay_requeue(mdsc, ci);
        }

        if (flags & CEPH_CAP_FLAG_AUTH) {
                if (ci->i_auth_cap == NULL ||
                    ceph_seq_cmp(ci->i_auth_cap->mseq, mseq) < 0) {
                        ci->i_auth_cap = cap;
                        cap->mds_wanted = wanted;
                }
        } else {
                WARN_ON(ci->i_auth_cap == cap);
        }

        dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
             inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
             ceph_cap_string(issued|cap->issued), seq, mds);
        cap->cap_id = cap_id;
        cap->issued = issued;
        cap->implemented |= issued;
        if (ceph_seq_cmp(mseq, cap->mseq) > 0)
                cap->mds_wanted = wanted;
        else
                cap->mds_wanted |= wanted;
        cap->seq = seq;
        cap->issue_seq = seq;
        cap->mseq = mseq;
        cap->cap_gen = session->s_cap_gen;

        if (fmode >= 0)
                __ceph_get_fmode(ci, fmode);
}

/*
 * Return true if cap has not timed out and belongs to the current
 * generation of the MDS session (i.e. has not gone 'stale' due to
 * us losing touch with the mds).
 */
static int __cap_is_valid(struct ceph_cap *cap)
{
        unsigned long ttl;
        u32 gen;

        spin_lock(&cap->session->s_gen_ttl_lock);
        gen = cap->session->s_cap_gen;
        ttl = cap->session->s_cap_ttl;
        spin_unlock(&cap->session->s_gen_ttl_lock);

        if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
                dout("__cap_is_valid %p cap %p issued %s "
                     "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
                     cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
                return 0;
        }

        return 1;
}

/*
 * Return set of valid cap bits issued to us.  Note that caps time
 * out, and may be invalidated in bulk if the client session times out
 * and session->s_cap_gen is bumped.
 */
int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
{
        int have = ci->i_snap_caps;
        struct ceph_cap *cap;
        struct rb_node *p;

        if (implemented)
                *implemented = 0;
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (!__cap_is_valid(cap))
                        continue;
                dout("__ceph_caps_issued %p cap %p issued %s\n",
                     &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
                have |= cap->issued;
                if (implemented)
                        *implemented |= cap->implemented;
        }
        /*
         * Exclude caps issued by a non-auth MDS that are being revoked
         * by the auth MDS. The non-auth MDS should be revoking/exporting
         * these caps, but the message is delayed.
         */
        if (ci->i_auth_cap) {
                cap = ci->i_auth_cap;
                have &= ~cap->implemented | cap->issued;
        }
        return have;
}
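
/*
 * Worked example of the auth-cap masking above (illustrative): if the
 * auth cap has issued=Fs but implemented=Fsc (Fc is being revoked),
 * then "~implemented | issued" has the Fc bit clear, so Fc is removed
 * from 'have' even if a stale non-auth cap still claims it.
 */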

/*
 * Get cap bits issued by caps other than @ocap
 */
int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
{
        int have = ci->i_snap_caps;
        struct ceph_cap *cap;
        struct rb_node *p;

        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (cap == ocap)
                        continue;
                if (!__cap_is_valid(cap))
                        continue;
                have |= cap->issued;
        }
        return have;
}

/*
 * Move a cap to the end of the LRU (oldest caps at list head, newest
 * at list tail).
 */
static void __touch_cap(struct ceph_cap *cap)
{
        struct ceph_mds_session *s = cap->session;

        spin_lock(&s->s_cap_lock);
        if (s->s_cap_iterator == NULL) {
                dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
                     s->s_mds);
                list_move_tail(&cap->session_caps, &s->s_caps);
        } else {
                dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
                     &cap->ci->vfs_inode, cap, s->s_mds);
        }
        spin_unlock(&s->s_cap_lock);
}

/*
 * Check if we hold the given mask.  If so, move the cap(s) to the
 * end of their respective LRUs.  (This is the preferred way for
 * callers to check for caps they want.)
 */
int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
{
        struct ceph_cap *cap;
        struct rb_node *p;
        int have = ci->i_snap_caps;

        if ((have & mask) == mask) {
                dout("__ceph_caps_issued_mask %p snap issued %s"
                     " (mask %s)\n", &ci->vfs_inode,
                     ceph_cap_string(have),
                     ceph_cap_string(mask));
                return 1;
        }

        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (!__cap_is_valid(cap))
                        continue;
                if ((cap->issued & mask) == mask) {
                        dout("__ceph_caps_issued_mask %p cap %p issued %s"
                             " (mask %s)\n", &ci->vfs_inode, cap,
                             ceph_cap_string(cap->issued),
                             ceph_cap_string(mask));
                        if (touch)
                                __touch_cap(cap);
                        return 1;
                }

                /* does a combination of caps satisfy mask? */
                have |= cap->issued;
                if ((have & mask) == mask) {
                        dout("__ceph_caps_issued_mask %p combo issued %s"
                             " (mask %s)\n", &ci->vfs_inode,
                             ceph_cap_string(cap->issued),
                             ceph_cap_string(mask));
                        if (touch) {
                                struct rb_node *q;

                                /* touch this + preceding caps */
                                __touch_cap(cap);
                                for (q = rb_first(&ci->i_caps); q != p;
                                     q = rb_next(q)) {
                                        cap = rb_entry(q, struct ceph_cap,
                                                       ci_node);
                                        if (!__cap_is_valid(cap))
                                                continue;
                                        __touch_cap(cap);
                                }
                        }
                        return 1;
                }
        }

        return 0;
}
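
/*
 * Usage sketch (illustrative only; the wrapper name is hypothetical):
 * callers take i_ceph_lock and ask for the exact mask they need,
 * passing touch=1 so satisfying caps migrate to the young end of the
 * session LRU.
 */
#if 0
static int example_have_file_shared(struct ceph_inode_info *ci)
{
        int ret;

        spin_lock(&ci->i_ceph_lock);
        ret = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
        spin_unlock(&ci->i_ceph_lock);
        return ret;
}
#endif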

/*
 * Return true if mask caps are currently being revoked by an MDS.
 */
int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
                               struct ceph_cap *ocap, int mask)
{
        struct ceph_cap *cap;
        struct rb_node *p;

        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (cap != ocap &&
                    (cap->implemented & ~cap->issued & mask))
                        return 1;
        }
        return 0;
}

int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
{
        struct inode *inode = &ci->vfs_inode;
        int ret;

        spin_lock(&ci->i_ceph_lock);
        ret = __ceph_caps_revoking_other(ci, NULL, mask);
        spin_unlock(&ci->i_ceph_lock);
        dout("ceph_caps_revoking %p %s = %d\n", inode,
             ceph_cap_string(mask), ret);
        return ret;
}

int __ceph_caps_used(struct ceph_inode_info *ci)
{
        int used = 0;
        if (ci->i_pin_ref)
                used |= CEPH_CAP_PIN;
        if (ci->i_rd_ref)
                used |= CEPH_CAP_FILE_RD;
        if (ci->i_rdcache_ref ||
            (!S_ISDIR(ci->vfs_inode.i_mode) && /* ignore readdir cache */
             ci->vfs_inode.i_data.nrpages))
                used |= CEPH_CAP_FILE_CACHE;
        if (ci->i_wr_ref)
                used |= CEPH_CAP_FILE_WR;
        if (ci->i_wb_ref || ci->i_wrbuffer_ref)
                used |= CEPH_CAP_FILE_BUFFER;
        return used;
}

/*
 * wanted, by virtue of open file modes
 */
int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
{
        int want = 0;
        int mode;
        for (mode = 0; mode < CEPH_FILE_MODE_NUM; mode++)
                if (ci->i_nr_by_mode[mode])
                        want |= ceph_caps_for_mode(mode);
        return want;
}

/*
 * Return caps we have registered with the MDS(s) as 'wanted'.
 */
int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
{
        struct ceph_cap *cap;
        struct rb_node *p;
        int mds_wanted = 0;

        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (!__cap_is_valid(cap))
                        continue;
                if (cap == ci->i_auth_cap)
                        mds_wanted |= cap->mds_wanted;
                else
                        mds_wanted |= (cap->mds_wanted & ~CEPH_CAP_ANY_FILE_WR);
        }
        return mds_wanted;
}

/*
 * called under i_ceph_lock
 */
static int __ceph_is_any_caps(struct ceph_inode_info *ci)
{
        return !RB_EMPTY_ROOT(&ci->i_caps);
}

int ceph_is_any_caps(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret;

        spin_lock(&ci->i_ceph_lock);
        ret = __ceph_is_any_caps(ci);
        spin_unlock(&ci->i_ceph_lock);

        return ret;
}

static void drop_inode_snap_realm(struct ceph_inode_info *ci)
{
        struct ceph_snap_realm *realm = ci->i_snap_realm;
        spin_lock(&realm->inodes_with_caps_lock);
        list_del_init(&ci->i_snap_realm_item);
        ci->i_snap_realm_counter++;
        ci->i_snap_realm = NULL;
        spin_unlock(&realm->inodes_with_caps_lock);
        ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc,
                            realm);
}

/*
 * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
 *
 * caller should hold i_ceph_lock.
 * caller will not hold session s_mutex if called from destroy_inode.
 */
void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
{
        struct ceph_mds_session *session = cap->session;
        struct ceph_inode_info *ci = cap->ci;
        struct ceph_mds_client *mdsc =
                ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
        int removed = 0;

        dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);

        /* remove from session list */
        spin_lock(&session->s_cap_lock);
        if (session->s_cap_iterator == cap) {
                /* not yet, we are iterating over this very cap */
                dout("__ceph_remove_cap  delaying %p removal from session %p\n",
                     cap, cap->session);
        } else {
                list_del_init(&cap->session_caps);
                session->s_nr_caps--;
                cap->session = NULL;
                removed = 1;
        }
        /* protect backpointer with s_cap_lock: see iterate_session_caps */
        cap->ci = NULL;

        /*
         * s_cap_reconnect is protected by s_cap_lock. no one changes
         * s_cap_gen while session is in the reconnect state.
         */
        if (queue_release &&
            (!session->s_cap_reconnect || cap->cap_gen == session->s_cap_gen)) {
                cap->queue_release = 1;
                if (removed) {
                        list_add_tail(&cap->session_caps,
                                      &session->s_cap_releases);
                        session->s_num_cap_releases++;
                        removed = 0;
                }
        } else {
                cap->queue_release = 0;
        }
        cap->cap_ino = ci->i_vino.ino;

        spin_unlock(&session->s_cap_lock);

        /* remove from inode list */
        rb_erase(&cap->ci_node, &ci->i_caps);
        if (ci->i_auth_cap == cap)
                ci->i_auth_cap = NULL;

        if (removed)
                ceph_put_cap(mdsc, cap);

        /* When a reconnect is denied, we remove session caps forcibly;
         * i_wr_ref can then be non-zero. If there are ongoing writes,
         * keep i_snap_realm.
         */
        if (!__ceph_is_any_caps(ci) && ci->i_wr_ref == 0 && ci->i_snap_realm)
                drop_inode_snap_realm(ci);

        if (!__ceph_is_any_real_caps(ci))
                __cap_delay_cancel(mdsc, ci);
}

/*
 * Build and send a cap message to the given MDS.
 *
 * Caller should be holding s_mutex.
 */
static int send_cap_msg(struct ceph_mds_session *session,
                        u64 ino, u64 cid, int op,
                        int caps, int wanted, int dirty,
                        u32 seq, u64 flush_tid, u64 oldest_flush_tid,
                        u32 issue_seq, u32 mseq, u64 size, u64 max_size,
                        struct timespec *mtime, struct timespec *atime,
                        struct timespec *ctime, u64 time_warp_seq,
                        kuid_t uid, kgid_t gid, umode_t mode,
                        u64 xattr_version,
                        struct ceph_buffer *xattrs_buf,
                        u64 follows, bool inline_data)
{
        struct ceph_mds_caps *fc;
        struct ceph_msg *msg;
        void *p;
        size_t extra_len;

        dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
             " seq %u/%u tid %llu/%llu mseq %u follows %lld size %llu/%llu"
             " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
             cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
             ceph_cap_string(dirty),
             seq, issue_seq, flush_tid, oldest_flush_tid,
             mseq, follows, size, max_size,
             xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);

        /* flock buffer size + inline version + inline data size +
         * osd_epoch_barrier + oldest_flush_tid */
        extra_len = 4 + 8 + 4 + 4 + 8;
        msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc) + extra_len,
                           GFP_NOFS, false);
        if (!msg)
                return -ENOMEM;

        msg->hdr.version = cpu_to_le16(6);
        msg->hdr.tid = cpu_to_le64(flush_tid);

        fc = msg->front.iov_base;
        memset(fc, 0, sizeof(*fc));

        fc->cap_id = cpu_to_le64(cid);
        fc->op = cpu_to_le32(op);
        fc->seq = cpu_to_le32(seq);
        fc->issue_seq = cpu_to_le32(issue_seq);
        fc->migrate_seq = cpu_to_le32(mseq);
        fc->caps = cpu_to_le32(caps);
        fc->wanted = cpu_to_le32(wanted);
        fc->dirty = cpu_to_le32(dirty);
        fc->ino = cpu_to_le64(ino);
        fc->snap_follows = cpu_to_le64(follows);

        fc->size = cpu_to_le64(size);
        fc->max_size = cpu_to_le64(max_size);
        if (mtime)
                ceph_encode_timespec(&fc->mtime, mtime);
        if (atime)
                ceph_encode_timespec(&fc->atime, atime);
        if (ctime)
                ceph_encode_timespec(&fc->ctime, ctime);
        fc->time_warp_seq = cpu_to_le32(time_warp_seq);

        fc->uid = cpu_to_le32(from_kuid(&init_user_ns, uid));
        fc->gid = cpu_to_le32(from_kgid(&init_user_ns, gid));
        fc->mode = cpu_to_le32(mode);

        p = fc + 1;
        /* flock buffer size */
        ceph_encode_32(&p, 0);
        /* inline version */
        ceph_encode_64(&p, inline_data ? 0 : CEPH_INLINE_NONE);
        /* inline data size */
        ceph_encode_32(&p, 0);
        /* osd_epoch_barrier */
        ceph_encode_32(&p, 0);
        /* oldest_flush_tid */
        ceph_encode_64(&p, oldest_flush_tid);

        fc->xattr_version = cpu_to_le64(xattr_version);
        if (xattrs_buf) {
                msg->middle = ceph_buffer_get(xattrs_buf);
                fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
                msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
        }

        ceph_con_send(&session->s_con, msg);
        return 0;
}
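
/*
 * Layout sketch of the v6 message body built above (derived from the
 * encoding code in this file, not from a separate protocol spec):
 *
 *      struct ceph_mds_caps fc;
 *      u32 flock_len;          // 0: no flock data
 *      u64 inline_version;     // 0 if inline_data, else CEPH_INLINE_NONE
 *      u32 inline_len;         // 0
 *      u32 osd_epoch_barrier;  // 0
 *      u64 oldest_flush_tid;
 *
 * which is why extra_len above is 4 + 8 + 4 + 4 + 8.
 */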
1075
1076/*
1077 * Queue cap releases when an inode is dropped from our cache.  Since
1078 * inode is about to be destroyed, there is no need for i_ceph_lock.
1079 */
1080void ceph_queue_caps_release(struct inode *inode)
1081{
1082        struct ceph_inode_info *ci = ceph_inode(inode);
1083        struct rb_node *p;
1084
1085        p = rb_first(&ci->i_caps);
1086        while (p) {
1087                struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
1088                p = rb_next(p);
1089                __ceph_remove_cap(cap, true);
1090        }
1091}
1092
1093/*
1094 * Send a cap msg on the given inode.  Update our caps state, then
1095 * drop i_ceph_lock and send the message.
1096 *
1097 * Make note of max_size reported/requested from mds, revoked caps
1098 * that have now been implemented.
1099 *
1100 * Make half-hearted attempt ot to invalidate page cache if we are
1101 * dropping RDCACHE.  Note that this will leave behind locked pages
1102 * that we'll then need to deal with elsewhere.
1103 *
1104 * Return non-zero if delayed release, or we experienced an error
1105 * such that the caller should requeue + retry later.
1106 *
1107 * called with i_ceph_lock, then drops it.
1108 * caller should hold snap_rwsem (read), s_mutex.
1109 */
1110static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1111                      int op, int used, int want, int retain, int flushing,
1112                      u64 flush_tid, u64 oldest_flush_tid)
1113        __releases(cap->ci->i_ceph_lock)
1114{
1115        struct ceph_inode_info *ci = cap->ci;
1116        struct inode *inode = &ci->vfs_inode;
1117        u64 cap_id = cap->cap_id;
1118        int held, revoking, dropping, keep;
1119        u64 seq, issue_seq, mseq, time_warp_seq, follows;
1120        u64 size, max_size;
1121        struct timespec mtime, atime, ctime;
1122        int wake = 0;
1123        umode_t mode;
1124        kuid_t uid;
1125        kgid_t gid;
1126        struct ceph_mds_session *session;
1127        u64 xattr_version = 0;
1128        struct ceph_buffer *xattr_blob = NULL;
1129        int delayed = 0;
1130        int ret;
1131        bool inline_data;
1132
1133        held = cap->issued | cap->implemented;
1134        revoking = cap->implemented & ~cap->issued;
1135        retain &= ~revoking;
1136        dropping = cap->issued & ~retain;
1137
1138        dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
1139             inode, cap, cap->session,
1140             ceph_cap_string(held), ceph_cap_string(held & retain),
1141             ceph_cap_string(revoking));
1142        BUG_ON((retain & CEPH_CAP_PIN) == 0);
1143
1144        session = cap->session;
1145
1146        /* don't release wanted unless we've waited a bit. */
1147        if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
1148            time_before(jiffies, ci->i_hold_caps_min)) {
1149                dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
1150                     ceph_cap_string(cap->issued),
1151                     ceph_cap_string(cap->issued & retain),
1152                     ceph_cap_string(cap->mds_wanted),
1153                     ceph_cap_string(want));
1154                want |= cap->mds_wanted;
1155                retain |= cap->issued;
1156                delayed = 1;
1157        }
1158        ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);
1159
1160        cap->issued &= retain;  /* drop bits we don't want */
1161        if (cap->implemented & ~cap->issued) {
1162                /*
1163                 * Wake up any waiters on wanted -> needed transition.
1164                 * This is due to the weird transition from buffered
1165                 * to sync IO... we need to flush dirty pages _before_
1166                 * allowing sync writes to avoid reordering.
1167                 */
1168                wake = 1;
1169        }
1170        cap->implemented &= cap->issued | used;
1171        cap->mds_wanted = want;
1172
1173        follows = flushing ? ci->i_head_snapc->seq : 0;
1174
1175        keep = cap->implemented;
1176        seq = cap->seq;
1177        issue_seq = cap->issue_seq;
1178        mseq = cap->mseq;
1179        size = inode->i_size;
1180        ci->i_reported_size = size;
1181        max_size = ci->i_wanted_max_size;
1182        ci->i_requested_max_size = max_size;
1183        mtime = inode->i_mtime;
1184        atime = inode->i_atime;
1185        ctime = inode->i_ctime;
1186        time_warp_seq = ci->i_time_warp_seq;
1187        uid = inode->i_uid;
1188        gid = inode->i_gid;
1189        mode = inode->i_mode;
1190
1191        if (flushing & CEPH_CAP_XATTR_EXCL) {
1192                __ceph_build_xattrs_blob(ci);
1193                xattr_blob = ci->i_xattrs.blob;
1194                xattr_version = ci->i_xattrs.version;
1195        }
1196
1197        inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
1198
1199        spin_unlock(&ci->i_ceph_lock);
1200
1201        ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
1202                op, keep, want, flushing, seq,
1203                flush_tid, oldest_flush_tid, issue_seq, mseq,
1204                size, max_size, &mtime, &atime, &ctime, time_warp_seq,
1205                uid, gid, mode, xattr_version, xattr_blob,
1206                follows, inline_data);
1207        if (ret < 0) {
1208                dout("error sending cap msg, must requeue %p\n", inode);
1209                delayed = 1;
1210        }
1211
1212        if (wake)
1213                wake_up_all(&ci->i_cap_wq);
1214
1215        return delayed;
1216}
1217
1218/*
1219 * When a snapshot is taken, clients accumulate dirty metadata on
1220 * inodes with capabilities in ceph_cap_snaps to describe the file
1221 * state at the time the snapshot was taken.  This must be flushed
1222 * asynchronously back to the MDS once sync writes complete and dirty
1223 * data is written out.
1224 *
1225 * Unless @kick is true, skip cap_snaps that were already sent to
1226 * the MDS (i.e., during this session).
1227 *
1228 * Called under i_ceph_lock.  Takes s_mutex as needed.
1229 */
1230void __ceph_flush_snaps(struct ceph_inode_info *ci,
1231                        struct ceph_mds_session **psession,
1232                        int kick)
1233                __releases(ci->i_ceph_lock)
1234                __acquires(ci->i_ceph_lock)
1235{
1236        struct inode *inode = &ci->vfs_inode;
1237        int mds;
1238        struct ceph_cap_snap *capsnap;
1239        u32 mseq;
1240        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
1241        struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
1242                                                    session->s_mutex */
1243        u64 next_follows = 0;  /* keep track of how far we've gotten through the
1244                             i_cap_snaps list, and skip these entries next time
1245                             around to avoid an infinite loop */
1246
1247        if (psession)
1248                session = *psession;
1249
1250        dout("__flush_snaps %p\n", inode);
1251retry:
1252        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
1253                /* avoid an infiniute loop after retry */
1254                if (capsnap->follows < next_follows)
1255                        continue;
1256                /*
1257                 * we need to wait for sync writes to complete and for dirty
1258                 * pages to be written out.
1259                 */
1260                if (capsnap->dirty_pages || capsnap->writing)
1261                        break;
1262
1263                /* should be removed by ceph_try_drop_cap_snap() */
1264                BUG_ON(!capsnap->need_flush);
1265
1266                /* pick mds, take s_mutex */
1267                if (ci->i_auth_cap == NULL) {
1268                        dout("no auth cap (migrating?), doing nothing\n");
1269                        goto out;
1270                }
1271
1272                /* only flush each capsnap once */
1273                if (!kick && !list_empty(&capsnap->flushing_item)) {
1274                        dout("already flushed %p, skipping\n", capsnap);
1275                        continue;
1276                }
1277
1278                mds = ci->i_auth_cap->session->s_mds;
1279                mseq = ci->i_auth_cap->mseq;
1280
1281                if (session && session->s_mds != mds) {
1282                        dout("oops, wrong session %p mutex\n", session);
1283                        if (kick)
1284                                goto out;
1285
1286                        mutex_unlock(&session->s_mutex);
1287                        ceph_put_mds_session(session);
1288                        session = NULL;
1289                }
1290                if (!session) {
1291                        spin_unlock(&ci->i_ceph_lock);
1292                        mutex_lock(&mdsc->mutex);
1293                        session = __ceph_lookup_mds_session(mdsc, mds);
1294                        mutex_unlock(&mdsc->mutex);
1295                        if (session) {
1296                                dout("inverting session/ino locks on %p\n",
1297                                     session);
1298                                mutex_lock(&session->s_mutex);
1299                        }
1300                        /*
1301                         * if session == NULL, we raced against a cap
1302                         * deletion or migration.  retry, and we'll
1303                         * get a better @mds value next time.
1304                         */
1305                        spin_lock(&ci->i_ceph_lock);
1306                        goto retry;
1307                }
1308
1309                spin_lock(&mdsc->cap_dirty_lock);
1310                capsnap->flush_tid = ++mdsc->last_cap_flush_tid;
1311                spin_unlock(&mdsc->cap_dirty_lock);
1312
1313                atomic_inc(&capsnap->nref);
1314                if (list_empty(&capsnap->flushing_item))
1315                        list_add_tail(&capsnap->flushing_item,
1316                                      &session->s_cap_snaps_flushing);
1317                spin_unlock(&ci->i_ceph_lock);
1318
1319                dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
1320                     inode, capsnap, capsnap->follows, capsnap->flush_tid);
1321                send_cap_msg(session, ceph_vino(inode).ino, 0,
1322                             CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
1323                             capsnap->dirty, 0, capsnap->flush_tid, 0,
1324                             0, mseq, capsnap->size, 0,
1325                             &capsnap->mtime, &capsnap->atime,
1326                             &capsnap->ctime, capsnap->time_warp_seq,
1327                             capsnap->uid, capsnap->gid, capsnap->mode,
1328                             capsnap->xattr_version, capsnap->xattr_blob,
1329                             capsnap->follows, capsnap->inline_data);
1330
1331                next_follows = capsnap->follows + 1;
1332                ceph_put_cap_snap(capsnap);
1333
1334                spin_lock(&ci->i_ceph_lock);
1335                goto retry;
1336        }
1337
1338        /* we flushed them all; remove this inode from the queue */
1339        spin_lock(&mdsc->snap_flush_lock);
1340        list_del_init(&ci->i_snap_flush_item);
1341        spin_unlock(&mdsc->snap_flush_lock);
1342
1343out:
1344        if (psession)
1345                *psession = session;
1346        else if (session) {
1347                mutex_unlock(&session->s_mutex);
1348                ceph_put_mds_session(session);
1349        }
1350}
1351
1352static void ceph_flush_snaps(struct ceph_inode_info *ci)
1353{
1354        spin_lock(&ci->i_ceph_lock);
1355        __ceph_flush_snaps(ci, NULL, 0);
1356        spin_unlock(&ci->i_ceph_lock);
1357}
1358
1359/*
1360 * Mark caps dirty.  If inode is newly dirty, return the dirty flags.
1361 * Caller is then responsible for calling __mark_inode_dirty with the
1362 * returned flags value.
1363 */
1364int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
1365                           struct ceph_cap_flush **pcf)
1366{
1367        struct ceph_mds_client *mdsc =
1368                ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
1369        struct inode *inode = &ci->vfs_inode;
1370        int was = ci->i_dirty_caps;
1371        int dirty = 0;
1372
1373        if (!ci->i_auth_cap) {
1374                pr_warn("__mark_dirty_caps %p %llx mask %s, "
1375                        "but no auth cap (session was closed?)\n",
1376                        inode, ceph_ino(inode), ceph_cap_string(mask));
1377                return 0;
1378        }
1379
1380        dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
1381             ceph_cap_string(mask), ceph_cap_string(was),
1382             ceph_cap_string(was | mask));
1383        ci->i_dirty_caps |= mask;
1384        if (was == 0) {
1385                WARN_ON_ONCE(ci->i_prealloc_cap_flush);
1386                swap(ci->i_prealloc_cap_flush, *pcf);
1387
1388                if (!ci->i_head_snapc) {
1389                        WARN_ON_ONCE(!rwsem_is_locked(&mdsc->snap_rwsem));
1390                        ci->i_head_snapc = ceph_get_snap_context(
1391                                ci->i_snap_realm->cached_context);
1392                }
1393                dout(" inode %p now dirty snapc %p auth cap %p\n",
1394                     &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
1395                BUG_ON(!list_empty(&ci->i_dirty_item));
1396                spin_lock(&mdsc->cap_dirty_lock);
1397                list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
1398                spin_unlock(&mdsc->cap_dirty_lock);
1399                if (ci->i_flushing_caps == 0) {
1400                        ihold(inode);
1401                        dirty |= I_DIRTY_SYNC;
1402                }
1403        } else {
1404                WARN_ON_ONCE(!ci->i_prealloc_cap_flush);
1405        }
1406        BUG_ON(list_empty(&ci->i_dirty_item));
1407        if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
1408            (mask & CEPH_CAP_FILE_BUFFER))
1409                dirty |= I_DIRTY_DATASYNC;
1410        __cap_delay_requeue(mdsc, ci);
1411        return dirty;
1412}
1413
1414static void __add_cap_flushing_to_inode(struct ceph_inode_info *ci,
1415                                        struct ceph_cap_flush *cf)
1416{
1417        struct rb_node **p = &ci->i_cap_flush_tree.rb_node;
1418        struct rb_node *parent = NULL;
1419        struct ceph_cap_flush *other = NULL;
1420
1421        while (*p) {
1422                parent = *p;
1423                other = rb_entry(parent, struct ceph_cap_flush, i_node);
1424
1425                if (cf->tid < other->tid)
1426                        p = &(*p)->rb_left;
1427                else if (cf->tid > other->tid)
1428                        p = &(*p)->rb_right;
1429                else
1430                        BUG();
1431        }
1432
1433        rb_link_node(&cf->i_node, parent, p);
1434        rb_insert_color(&cf->i_node, &ci->i_cap_flush_tree);
1435}
1436
1437static void __add_cap_flushing_to_mdsc(struct ceph_mds_client *mdsc,
1438                                       struct ceph_cap_flush *cf)
1439{
1440        struct rb_node **p = &mdsc->cap_flush_tree.rb_node;
1441        struct rb_node *parent = NULL;
1442        struct ceph_cap_flush *other = NULL;
1443
1444        while (*p) {
1445                parent = *p;
1446                other = rb_entry(parent, struct ceph_cap_flush, g_node);
1447
1448                if (cf->tid < other->tid)
1449                        p = &(*p)->rb_left;
1450                else if (cf->tid > other->tid)
1451                        p = &(*p)->rb_right;
1452                else
1453                        BUG();
1454        }
1455
1456        rb_link_node(&cf->g_node, parent, p);
1457        rb_insert_color(&cf->g_node, &mdsc->cap_flush_tree);
1458}
1459
1460struct ceph_cap_flush *ceph_alloc_cap_flush(void)
1461{
1462        return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
1463}
1464
1465void ceph_free_cap_flush(struct ceph_cap_flush *cf)
1466{
1467        if (cf)
1468                kmem_cache_free(ceph_cap_flush_cachep, cf);
1469}
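
/*
 * A minimal caller sketch (hypothetical helper, not a call site in this
 * file; assumes the usual super.h declaration of __ceph_mark_dirty_caps()):
 * the allocation above may sleep, while __ceph_mark_dirty_caps() runs
 * under i_ceph_lock, so callers preallocate first, mark dirty under the
 * lock, and free the preallocation if the clean->dirty transition did
 * not consume it.
 */
static int example_mark_inode_dirty(struct inode *inode, int mask)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_cap_flush *prealloc_cf;
        int dirty;

        prealloc_cf = ceph_alloc_cap_flush();   /* GFP_KERNEL: may sleep */
        if (!prealloc_cf)
                return -ENOMEM;

        spin_lock(&ci->i_ceph_lock);
        dirty = __ceph_mark_dirty_caps(ci, mask, &prealloc_cf);
        spin_unlock(&ci->i_ceph_lock);

        /* NULL here if the i_prealloc_cap_flush swap consumed it */
        ceph_free_cap_flush(prealloc_cf);

        if (dirty)
                __mark_inode_dirty(inode, dirty);
        return 0;
}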
1470
1471static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc)
1472{
1473        struct rb_node *n = rb_first(&mdsc->cap_flush_tree);
1474        if (n) {
1475                struct ceph_cap_flush *cf =
1476                        rb_entry(n, struct ceph_cap_flush, g_node);
1477                return cf->tid;
1478        }
1479        return 0;
1480}
1481
1482/*
1483 * Add dirty inode to the flushing list.  Assign it a flush tid so we
1484 * can wait for caps to flush without starving.
1485 *
1486 * Called under i_ceph_lock.
1487 */
1488static int __mark_caps_flushing(struct inode *inode,
1489                                struct ceph_mds_session *session,
1490                                u64 *flush_tid, u64 *oldest_flush_tid)
1491{
1492        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1493        struct ceph_inode_info *ci = ceph_inode(inode);
1494        struct ceph_cap_flush *cf = NULL;
1495        int flushing;
1496
1497        BUG_ON(ci->i_dirty_caps == 0);
1498        BUG_ON(list_empty(&ci->i_dirty_item));
1499        BUG_ON(!ci->i_prealloc_cap_flush);
1500
1501        flushing = ci->i_dirty_caps;
1502        dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
1503             ceph_cap_string(flushing),
1504             ceph_cap_string(ci->i_flushing_caps),
1505             ceph_cap_string(ci->i_flushing_caps | flushing));
1506        ci->i_flushing_caps |= flushing;
1507        ci->i_dirty_caps = 0;
1508        dout(" inode %p now !dirty\n", inode);
1509
1510        swap(cf, ci->i_prealloc_cap_flush);
1511        cf->caps = flushing;
1512
1513        spin_lock(&mdsc->cap_dirty_lock);
1514        list_del_init(&ci->i_dirty_item);
1515
1516        cf->tid = ++mdsc->last_cap_flush_tid;
1517        __add_cap_flushing_to_mdsc(mdsc, cf);
1518        *oldest_flush_tid = __get_oldest_flush_tid(mdsc);
1519
1520        if (list_empty(&ci->i_flushing_item)) {
1521                list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
1522                mdsc->num_cap_flushing++;
1523                dout(" inode %p now flushing tid %llu\n", inode, cf->tid);
1524        } else {
1525                list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
1526                dout(" inode %p now flushing (more) tid %llu\n",
1527                     inode, cf->tid);
1528        }
1529        spin_unlock(&mdsc->cap_dirty_lock);
1530
1531        __add_cap_flushing_to_inode(ci, cf);
1532
1533        *flush_tid = cf->tid;
1534        return flushing;
1535}
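
/*
 * Note: each cap flush gets a tid from mdsc->last_cap_flush_tid and is
 * indexed by tid in both the per-inode and the global rb-trees, so
 * flushes are acked and waited on in tid order.  The oldest_flush_tid
 * reported here is passed along with the flush message, so the MDS
 * knows the oldest flush still pending on this client.
 */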
1536
1537/*
1538 * try to invalidate mapping pages without blocking.
1539 */
1540static int try_nonblocking_invalidate(struct inode *inode)
1541{
1542        struct ceph_inode_info *ci = ceph_inode(inode);
1543        u32 invalidating_gen = ci->i_rdcache_gen;
1544
1545        spin_unlock(&ci->i_ceph_lock);
1546        invalidate_mapping_pages(&inode->i_data, 0, -1);
1547        spin_lock(&ci->i_ceph_lock);
1548
1549        if (inode->i_data.nrpages == 0 &&
1550            invalidating_gen == ci->i_rdcache_gen) {
1551                /* success. */
1552                dout("try_nonblocking_invalidate %p success\n", inode);
1553                /* save any racing async invalidate some trouble */
1554                ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
1555                return 0;
1556        }
1557        dout("try_nonblocking_invalidate %p failed\n", inode);
1558        return -1;
1559}
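
/*
 * Note: i_ceph_lock is dropped and retaken above, so inode state may
 * have changed by the time we return.  The i_rdcache_gen comparison
 * catches a racing re-issue of the cache cap (which bumps the gen)
 * while the lock was not held; in that case the invalidation cannot
 * be trusted.
 */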
1560
1561/*
1562 * Swiss army knife function to examine currently used and wanted
1563 * versus held caps.  Release, flush, ack revoked caps to mds as
1564 * appropriate.
1565 *
1566 *  CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
1567 *    cap release further.
1568 *  CHECK_CAPS_AUTHONLY - we should only check the auth cap
1569 *  CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
1570 *    further delay.
1571 */
1572void ceph_check_caps(struct ceph_inode_info *ci, int flags,
1573                     struct ceph_mds_session *session)
1574{
1575        struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
1576        struct ceph_mds_client *mdsc = fsc->mdsc;
1577        struct inode *inode = &ci->vfs_inode;
1578        struct ceph_cap *cap;
1579        u64 flush_tid, oldest_flush_tid;
1580        int file_wanted, used, cap_used;
1581        int took_snap_rwsem = 0;             /* true if mdsc->snap_rwsem held */
1582        int issued, implemented, want, retain, revoking, flushing = 0;
1583        int mds = -1;   /* keep track of how far we've gone through i_caps list
1584                           to avoid an infinite loop on retry */
1585        struct rb_node *p;
1586        int tried_invalidate = 0;
1587        int delayed = 0, sent = 0, force_requeue = 0, num;
1588        int queue_invalidate = 0;
1589        int is_delayed = flags & CHECK_CAPS_NODELAY;
1590
1591        /* if we are unmounting, flush any unused caps immediately. */
1592        if (mdsc->stopping)
1593                is_delayed = 1;
1594
1595        spin_lock(&ci->i_ceph_lock);
1596
1597        if (ci->i_ceph_flags & CEPH_I_FLUSH)
1598                flags |= CHECK_CAPS_FLUSH;
1599
1600        /* flush snaps first time around only */
1601        if (!list_empty(&ci->i_cap_snaps))
1602                __ceph_flush_snaps(ci, &session, 0);
1603        goto retry_locked;
1604retry:
1605        spin_lock(&ci->i_ceph_lock);
1606retry_locked:
1607        file_wanted = __ceph_caps_file_wanted(ci);
1608        used = __ceph_caps_used(ci);
1609        issued = __ceph_caps_issued(ci, &implemented);
1610        revoking = implemented & ~issued;
1611
1612        want = file_wanted;
1613        retain = file_wanted | used | CEPH_CAP_PIN;
1614        if (!mdsc->stopping && inode->i_nlink > 0) {
1615                if (file_wanted) {
1616                        retain |= CEPH_CAP_ANY;       /* be greedy */
1617                } else if (S_ISDIR(inode->i_mode) &&
1618                           (issued & CEPH_CAP_FILE_SHARED) &&
1619                            __ceph_dir_is_complete(ci)) {
1620                        /*
1621                         * If a directory is complete, we want to keep
1622                         * the exclusive cap, so that the MDS does not end up
1623                         * revoking the shared cap on every create/unlink
1624                         * operation.
1625                         */
1626                        want = CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_EXCL;
1627                        retain |= want;
1628                } else {
1629
1630                        retain |= CEPH_CAP_ANY_SHARED;
1631                        /*
1632                         * keep RD only if we didn't have the file open RW,
1633                         * because then the mds would revoke it anyway to
1634                         * journal max_size=0.
1635                         */
1636                        if (ci->i_max_size == 0)
1637                                retain |= CEPH_CAP_ANY_RD;
1638                }
1639        }
1640
1641        dout("check_caps %p file_want %s used %s dirty %s flushing %s"
1642             " issued %s revoking %s retain %s %s%s%s\n", inode,
1643             ceph_cap_string(file_wanted),
1644             ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
1645             ceph_cap_string(ci->i_flushing_caps),
1646             ceph_cap_string(issued), ceph_cap_string(revoking),
1647             ceph_cap_string(retain),
1648             (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
1649             (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
1650             (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");
1651
1652        /*
1653         * If we no longer need to hold onto our old caps, and we may
1654         * have cached pages that we no longer want, then try to invalidate.
1655         * If we fail, it's because pages are locked... try again later.
1656         */
1657        if ((!is_delayed || mdsc->stopping) &&
1658            !S_ISDIR(inode->i_mode) &&          /* ignore readdir cache */
1659            ci->i_wrbuffer_ref == 0 &&          /* no dirty pages... */
1660            inode->i_data.nrpages &&            /* have cached pages */
1661            (revoking & (CEPH_CAP_FILE_CACHE|
1662                         CEPH_CAP_FILE_LAZYIO)) && /*  or revoking cache */
1663            !tried_invalidate) {
1664                dout("check_caps trying to invalidate on %p\n", inode);
1665                if (try_nonblocking_invalidate(inode) < 0) {
1666                        if (revoking & (CEPH_CAP_FILE_CACHE|
1667                                        CEPH_CAP_FILE_LAZYIO)) {
1668                                dout("check_caps queuing invalidate\n");
1669                                queue_invalidate = 1;
1670                                ci->i_rdcache_revoking = ci->i_rdcache_gen;
1671                        } else {
1672                                dout("check_caps failed to invalidate pages\n");
1673                                /* we failed to invalidate pages.  check these
1674                                   caps again later. */
1675                                force_requeue = 1;
1676                                __cap_set_timeouts(mdsc, ci);
1677                        }
1678                }
1679                tried_invalidate = 1;
1680                goto retry_locked;
1681        }
1682
1683        num = 0;
1684        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
1685                cap = rb_entry(p, struct ceph_cap, ci_node);
1686                num++;
1687
1688                /* avoid looping forever */
1689                if (mds >= cap->mds ||
1690                    ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
1691                        continue;
1692
1693                /* NOTE: no side-effects allowed, until we take s_mutex */
1694
1695                cap_used = used;
1696                if (ci->i_auth_cap && cap != ci->i_auth_cap)
1697                        cap_used &= ~ci->i_auth_cap->issued;
1698
1699                revoking = cap->implemented & ~cap->issued;
1700                dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
1701                     cap->mds, cap, ceph_cap_string(cap_used),
1702                     ceph_cap_string(cap->issued),
1703                     ceph_cap_string(cap->implemented),
1704                     ceph_cap_string(revoking));
1705
1706                if (cap == ci->i_auth_cap &&
1707                    (cap->issued & CEPH_CAP_FILE_WR)) {
1708                        /* request larger max_size from MDS? */
1709                        if (ci->i_wanted_max_size > ci->i_max_size &&
1710                            ci->i_wanted_max_size > ci->i_requested_max_size) {
1711                                dout("requesting new max_size\n");
1712                                goto ack;
1713                        }
1714
1715                        /* approaching file_max? */
1716                        if ((inode->i_size << 1) >= ci->i_max_size &&
1717                            (ci->i_reported_size << 1) < ci->i_max_size) {
1718                                dout("i_size approaching max_size\n");
1719                                goto ack;
1720                        }
1721                }
1722                /* flush anything dirty? */
1723                if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
1724                    ci->i_dirty_caps) {
1725                        dout("flushing dirty caps\n");
1726                        goto ack;
1727                }
1728
1729                /* completed revocation?  (none of the revoked caps still in use) */
1730                if (revoking && (revoking & cap_used) == 0) {
1731                        dout("completed revocation of %s\n",
1732                             ceph_cap_string(cap->implemented & ~cap->issued));
1733                        goto ack;
1734                }
1735
1736                /* want more caps from mds? */
1737                if (want & ~(cap->mds_wanted | cap->issued))
1738                        goto ack;
1739
1740                /* things we might delay */
1741                if ((cap->issued & ~retain) == 0 &&
1742                    cap->mds_wanted == want)
1743                        continue;     /* nope, all good */
1744
1745                if (is_delayed)
1746                        goto ack;
1747
1748                /* delay? */
1749                if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
1750                    time_before(jiffies, ci->i_hold_caps_max)) {
1751                        dout(" delaying issued %s -> %s, wanted %s -> %s\n",
1752                             ceph_cap_string(cap->issued),
1753                             ceph_cap_string(cap->issued & retain),
1754                             ceph_cap_string(cap->mds_wanted),
1755                             ceph_cap_string(want));
1756                        delayed++;
1757                        continue;
1758                }
1759
1760ack:
1761                if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1762                        dout(" skipping %p I_NOFLUSH set\n", inode);
1763                        continue;
1764                }
1765
1766                if (session && session != cap->session) {
1767                        dout("oops, wrong session %p mutex\n", session);
1768                        mutex_unlock(&session->s_mutex);
1769                        session = NULL;
1770                }
1771                if (!session) {
1772                        session = cap->session;
1773                        if (mutex_trylock(&session->s_mutex) == 0) {
1774                                dout("inverting session/ino locks on %p\n",
1775                                     session);
1776                                spin_unlock(&ci->i_ceph_lock);
1777                                if (took_snap_rwsem) {
1778                                        up_read(&mdsc->snap_rwsem);
1779                                        took_snap_rwsem = 0;
1780                                }
1781                                mutex_lock(&session->s_mutex);
1782                                goto retry;
1783                        }
1784                }
1785                /* take snap_rwsem after session mutex */
1786                if (!took_snap_rwsem) {
1787                        if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
1788                                dout("inverting snap/in locks on %p\n",
1789                                     inode);
1790                                spin_unlock(&ci->i_ceph_lock);
1791                                down_read(&mdsc->snap_rwsem);
1792                                took_snap_rwsem = 1;
1793                                goto retry;
1794                        }
1795                        took_snap_rwsem = 1;
1796                }
1797
1798                if (cap == ci->i_auth_cap && ci->i_dirty_caps) {
1799                        flushing = __mark_caps_flushing(inode, session,
1800                                                        &flush_tid,
1801                                                        &oldest_flush_tid);
1802                } else {
1803                        flushing = 0;
1804                        flush_tid = 0;
1805                        spin_lock(&mdsc->cap_dirty_lock);
1806                        oldest_flush_tid = __get_oldest_flush_tid(mdsc);
1807                        spin_unlock(&mdsc->cap_dirty_lock);
1808                }
1809
1810                mds = cap->mds;  /* remember mds, so we don't repeat */
1811                sent++;
1812
1813                /* __send_cap drops i_ceph_lock */
1814                delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, cap_used,
1815                                      want, retain, flushing,
1816                                      flush_tid, oldest_flush_tid);
1817                goto retry; /* retake i_ceph_lock and restart our cap scan. */
1818        }
1819
1820        /*
1821         * Reschedule delayed caps release if we delayed anything,
1822         * otherwise cancel.
1823         */
1824        if (delayed && is_delayed)
1825                force_requeue = 1;   /* __send_cap delayed release; requeue */
1826        if (!delayed && !is_delayed)
1827                __cap_delay_cancel(mdsc, ci);
1828        else if (!is_delayed || force_requeue)
1829                __cap_delay_requeue(mdsc, ci);
1830
1831        spin_unlock(&ci->i_ceph_lock);
1832
1833        if (queue_invalidate)
1834                ceph_queue_invalidate(inode);
1835
1836        if (session)
1837                mutex_unlock(&session->s_mutex);
1838        if (took_snap_rwsem)
1839                up_read(&mdsc->snap_rwsem);
1840}
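
/*
 * A hedged usage sketch (hypothetical helper, not a call site in this
 * file): a typical "flush now" invocation checks only the auth cap and
 * passes a NULL session, letting ceph_check_caps() locate cap->session
 * and take s_mutex itself.
 */
static void example_flush_dirty_caps(struct ceph_inode_info *ci)
{
        ceph_check_caps(ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_FLUSH, NULL);
}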
1841
1842/*
1843 * Try to flush dirty caps back to the auth mds.
1844 */
1845static int try_flush_caps(struct inode *inode, u64 *ptid)
1846{
1847        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1848        struct ceph_inode_info *ci = ceph_inode(inode);
1849        struct ceph_mds_session *session = NULL;
1850        int flushing = 0;
1851        u64 flush_tid = 0, oldest_flush_tid = 0;
1852
1853retry:
1854        spin_lock(&ci->i_ceph_lock);
1855        if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1856                dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
1857                goto out;
1858        }
1859        if (ci->i_dirty_caps && ci->i_auth_cap) {
1860                struct ceph_cap *cap = ci->i_auth_cap;
1861                int used = __ceph_caps_used(ci);
1862                int want = __ceph_caps_wanted(ci);
1863                int delayed;
1864
1865                if (!session || session != cap->session) {
1866                        spin_unlock(&ci->i_ceph_lock);
1867                        if (session)
1868                                mutex_unlock(&session->s_mutex);
1869                        session = cap->session;
1870                        mutex_lock(&session->s_mutex);
1871                        goto retry;
1872                }
1873                if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
1874                        goto out;
1875
1876                flushing = __mark_caps_flushing(inode, session, &flush_tid,
1877                                                &oldest_flush_tid);
1878
1879                /* __send_cap drops i_ceph_lock */
1880                delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
1881                                     (cap->issued | cap->implemented),
1882                                     flushing, flush_tid, oldest_flush_tid);
1883
1884                if (delayed) {
1885                        spin_lock(&ci->i_ceph_lock);
1886                        __cap_delay_requeue(mdsc, ci);
1887                        spin_unlock(&ci->i_ceph_lock);
1888                }
1889        } else {
1890                struct rb_node *n = rb_last(&ci->i_cap_flush_tree);
1891                if (n) {
1892                        struct ceph_cap_flush *cf =
1893                                rb_entry(n, struct ceph_cap_flush, i_node);
1894                        flush_tid = cf->tid;
1895                }
1896                flushing = ci->i_flushing_caps;
1897                spin_unlock(&ci->i_ceph_lock);
1898        }
1899out:
1900        if (session)
1901                mutex_unlock(&session->s_mutex);
1902
1903        *ptid = flush_tid;
1904        return flushing;
1905}
1906
1907/*
1908 * Return true if we've flushed caps through the given flush_tid.
1909 */
1910static int caps_are_flushed(struct inode *inode, u64 flush_tid)
1911{
1912        struct ceph_inode_info *ci = ceph_inode(inode);
1913        struct ceph_cap_flush *cf;
1914        struct rb_node *n;
1915        int ret = 1;
1916
1917        spin_lock(&ci->i_ceph_lock);
1918        n = rb_first(&ci->i_cap_flush_tree);
1919        if (n) {
1920                cf = rb_entry(n, struct ceph_cap_flush, i_node);
1921                if (cf->tid <= flush_tid)
1922                        ret = 0;
1923        }
1924        spin_unlock(&ci->i_ceph_lock);
1925        return ret;
1926}
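
/*
 * Sketch of the flush-and-wait pattern these helpers support
 * (ceph_fsync() and ceph_write_inode() below are the real users;
 * this helper is hypothetical):
 */
static int example_flush_and_wait(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 flush_tid;

        if (try_flush_caps(inode, &flush_tid))
                return wait_event_interruptible(ci->i_cap_wq,
                                caps_are_flushed(inode, flush_tid));
        return 0;
}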
1927
1928/*
1929 * Wait on any unsafe replies for the given inode.  First wait on the
1930 * newest request, and make that the upper bound.  Then, if there are
1931 * more requests, keep waiting on the oldest as long as it is still older
1932 * than the original request.
1933 */
1934static void sync_write_wait(struct inode *inode)
1935{
1936        struct ceph_inode_info *ci = ceph_inode(inode);
1937        struct list_head *head = &ci->i_unsafe_writes;
1938        struct ceph_osd_request *req;
1939        u64 last_tid;
1940
1941        if (!S_ISREG(inode->i_mode))
1942                return;
1943
1944        spin_lock(&ci->i_unsafe_lock);
1945        if (list_empty(head))
1946                goto out;
1947
1948        /* set upper bound as _last_ entry in chain */
1949        req = list_last_entry(head, struct ceph_osd_request,
1950                              r_unsafe_item);
1951        last_tid = req->r_tid;
1952
1953        do {
1954                ceph_osdc_get_request(req);
1955                spin_unlock(&ci->i_unsafe_lock);
1956                dout("sync_write_wait on tid %llu (until %llu)\n",
1957                     req->r_tid, last_tid);
1958                wait_for_completion(&req->r_safe_completion);
1959                spin_lock(&ci->i_unsafe_lock);
1960                ceph_osdc_put_request(req);
1961
1962                /*
1963                 * from here on look at first entry in chain, since we
1964                 * only want to wait for anything older than last_tid
1965                 */
1966                if (list_empty(head))
1967                        break;
1968                req = list_first_entry(head, struct ceph_osd_request,
1969                                       r_unsafe_item);
1970        } while (req->r_tid < last_tid);
1971out:
1972        spin_unlock(&ci->i_unsafe_lock);
1973}
1974
1975/*
1976 * wait for any unsafe requests to complete.
1977 */
1978static int unsafe_request_wait(struct inode *inode)
1979{
1980        struct ceph_inode_info *ci = ceph_inode(inode);
1981        struct ceph_mds_request *req1 = NULL, *req2 = NULL;
1982        int ret, err = 0;
1983
1984        spin_lock(&ci->i_unsafe_lock);
1985        if (S_ISDIR(inode->i_mode) && !list_empty(&ci->i_unsafe_dirops)) {
1986                req1 = list_last_entry(&ci->i_unsafe_dirops,
1987                                        struct ceph_mds_request,
1988                                        r_unsafe_dir_item);
1989                ceph_mdsc_get_request(req1);
1990        }
1991        if (!list_empty(&ci->i_unsafe_iops)) {
1992                req2 = list_last_entry(&ci->i_unsafe_iops,
1993                                        struct ceph_mds_request,
1994                                        r_unsafe_target_item);
1995                ceph_mdsc_get_request(req2);
1996        }
1997        spin_unlock(&ci->i_unsafe_lock);
1998
1999        dout("unsafe_request_wait %p wait on tid %llu %llu\n",
2000             inode, req1 ? req1->r_tid : 0ULL, req2 ? req2->r_tid : 0ULL);
2001        if (req1) {
2002                ret = !wait_for_completion_timeout(&req1->r_safe_completion,
2003                                        ceph_timeout_jiffies(req1->r_timeout));
2004                if (ret)
2005                        err = -EIO;
2006                ceph_mdsc_put_request(req1);
2007        }
2008        if (req2) {
2009                ret = !wait_for_completion_timeout(&req2->r_safe_completion,
2010                                        ceph_timeout_jiffies(req2->r_timeout));
2011                if (ret)
2012                        err = -EIO;
2013                ceph_mdsc_put_request(req2);
2014        }
2015        return err;
2016}
2017
2018int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2019{
2020        struct inode *inode = file->f_mapping->host;
2021        struct ceph_inode_info *ci = ceph_inode(inode);
2022        u64 flush_tid;
2023        int ret;
2024        int dirty;
2025
2026        dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
2027        sync_write_wait(inode);
2028
2029        ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
2030        if (ret < 0)
2031                goto out;
2032
2033        if (datasync)
2034                goto out;
2035
2036        inode_lock(inode);
2037
2038        dirty = try_flush_caps(inode, &flush_tid);
2039        dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
2040
2041        ret = unsafe_request_wait(inode);
2042
2043        /*
2044         * only wait on non-file metadata writeback (the mds
2045         * can recover size and mtime, so we don't need to
2046         * wait for that)
2047         */
2048        if (!ret && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
2049                ret = wait_event_interruptible(ci->i_cap_wq,
2050                                        caps_are_flushed(inode, flush_tid));
2051        }
2052        inode_unlock(inode);
2053out:
2054        dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
2055        return ret;
2056}
2057
2058/*
2059 * Flush any dirty caps back to the mds.  If we aren't asked to wait,
2060 * queue inode for flush but don't do so immediately, because we can
2061 * get by with fewer MDS messages if we wait for data writeback to
2062 * complete first.
2063 */
2064int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
2065{
2066        struct ceph_inode_info *ci = ceph_inode(inode);
2067        u64 flush_tid;
2068        int err = 0;
2069        int dirty;
2070        int wait = wbc->sync_mode == WB_SYNC_ALL;
2071
2072        dout("write_inode %p wait=%d\n", inode, wait);
2073        if (wait) {
2074                dirty = try_flush_caps(inode, &flush_tid);
2075                if (dirty)
2076                        err = wait_event_interruptible(ci->i_cap_wq,
2077                                       caps_are_flushed(inode, flush_tid));
2078        } else {
2079                struct ceph_mds_client *mdsc =
2080                        ceph_sb_to_client(inode->i_sb)->mdsc;
2081
2082                spin_lock(&ci->i_ceph_lock);
2083                if (__ceph_caps_dirty(ci))
2084                        __cap_delay_requeue_front(mdsc, ci);
2085                spin_unlock(&ci->i_ceph_lock);
2086        }
2087        return err;
2088}
2089
2090/*
2091 * After a recovering MDS goes active, we need to resend any caps
2092 * we were flushing.
2093 *
2094 * Caller holds session->s_mutex.
2095 */
2096static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
2097                                   struct ceph_mds_session *session)
2098{
2099        struct ceph_cap_snap *capsnap;
2100
2101        dout("kick_flushing_capsnaps mds%d\n", session->s_mds);
2102        list_for_each_entry(capsnap, &session->s_cap_snaps_flushing,
2103                            flushing_item) {
2104                struct ceph_inode_info *ci = capsnap->ci;
2105                struct inode *inode = &ci->vfs_inode;
2106                struct ceph_cap *cap;
2107
2108                spin_lock(&ci->i_ceph_lock);
2109                cap = ci->i_auth_cap;
2110                if (cap && cap->session == session) {
2111                        dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
2112                             cap, capsnap);
2113                        __ceph_flush_snaps(ci, &session, 1);
2114                } else {
2115                        pr_err("%p auth cap %p not mds%d ???\n", inode,
2116                               cap, session->s_mds);
2117                }
2118                spin_unlock(&ci->i_ceph_lock);
2119        }
2120}
2121
2122static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
2123                                struct ceph_mds_session *session,
2124                                struct ceph_inode_info *ci)
2125{
2126        struct inode *inode = &ci->vfs_inode;
2127        struct ceph_cap *cap;
2128        struct ceph_cap_flush *cf;
2129        struct rb_node *n;
2130        int delayed = 0;
2131        u64 first_tid = 0;
2132        u64 oldest_flush_tid;
2133
2134        spin_lock(&mdsc->cap_dirty_lock);
2135        oldest_flush_tid = __get_oldest_flush_tid(mdsc);
2136        spin_unlock(&mdsc->cap_dirty_lock);
2137
2138        while (true) {
2139                spin_lock(&ci->i_ceph_lock);
2140                cap = ci->i_auth_cap;
2141                if (!(cap && cap->session == session)) {
2142                        pr_err("%p auth cap %p not mds%d ???\n", inode,
2143                                        cap, session->s_mds);
2144                        spin_unlock(&ci->i_ceph_lock);
2145                        break;
2146                }
2147
2148                for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
2149                        cf = rb_entry(n, struct ceph_cap_flush, i_node);
2150                        if (cf->tid >= first_tid)
2151                                break;
2152                }
2153                if (!n) {
2154                        spin_unlock(&ci->i_ceph_lock);
2155                        break;
2156                }
2157
2158                cf = rb_entry(n, struct ceph_cap_flush, i_node);
2159
2160                first_tid = cf->tid + 1;
2161
2162                dout("kick_flushing_caps %p cap %p tid %llu %s\n", inode,
2163                     cap, cf->tid, ceph_cap_string(cf->caps));
2164                delayed |= __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
2165                                      __ceph_caps_used(ci),
2166                                      __ceph_caps_wanted(ci),
2167                                      cap->issued | cap->implemented,
2168                                      cf->caps, cf->tid, oldest_flush_tid);
2169        }
2170        return delayed;
2171}
2172
2173void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
2174                                   struct ceph_mds_session *session)
2175{
2176        struct ceph_inode_info *ci;
2177        struct ceph_cap *cap;
2178
2179        dout("early_kick_flushing_caps mds%d\n", session->s_mds);
2180        list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
2181                spin_lock(&ci->i_ceph_lock);
2182                cap = ci->i_auth_cap;
2183                if (!(cap && cap->session == session)) {
2184                        pr_err("%p auth cap %p not mds%d ???\n",
2185                                &ci->vfs_inode, cap, session->s_mds);
2186                        spin_unlock(&ci->i_ceph_lock);
2187                        continue;
2188                }
2189
2190
2191                /*
2192                 * if flushing caps were revoked, we re-send the cap flush
2193                 * during the client reconnect stage. This guarantees the MDS
2194                 * processes the cap flush message before issuing the flushing
2195                 * caps to another client.
2196                 */
2197                if ((cap->issued & ci->i_flushing_caps) !=
2198                    ci->i_flushing_caps) {
2199                        spin_unlock(&ci->i_ceph_lock);
2200                        if (!__kick_flushing_caps(mdsc, session, ci))
2201                                continue;
2202                        spin_lock(&ci->i_ceph_lock);
2203                }
2204
2205                spin_unlock(&ci->i_ceph_lock);
2206        }
2207}
2208
2209void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
2210                             struct ceph_mds_session *session)
2211{
2212        struct ceph_inode_info *ci;
2213
2214        kick_flushing_capsnaps(mdsc, session);
2215
2216        dout("kick_flushing_caps mds%d\n", session->s_mds);
2217        list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
2218                int delayed = __kick_flushing_caps(mdsc, session, ci);
2219                if (delayed) {
2220                        spin_lock(&ci->i_ceph_lock);
2221                        __cap_delay_requeue(mdsc, ci);
2222                        spin_unlock(&ci->i_ceph_lock);
2223                }
2224        }
2225}
2226
2227static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
2228                                     struct ceph_mds_session *session,
2229                                     struct inode *inode)
2230{
2231        struct ceph_inode_info *ci = ceph_inode(inode);
2232        struct ceph_cap *cap;
2233
2234        spin_lock(&ci->i_ceph_lock);
2235        cap = ci->i_auth_cap;
2236        dout("kick_flushing_inode_caps %p flushing %s\n", inode,
2237             ceph_cap_string(ci->i_flushing_caps));
2238
2239        __ceph_flush_snaps(ci, &session, 1);
2240
2241        if (ci->i_flushing_caps) {
2242                int delayed;
2243
2244                spin_lock(&mdsc->cap_dirty_lock);
2245                list_move_tail(&ci->i_flushing_item,
2246                               &cap->session->s_cap_flushing);
2247                spin_unlock(&mdsc->cap_dirty_lock);
2248
2249                spin_unlock(&ci->i_ceph_lock);
2250
2251                delayed = __kick_flushing_caps(mdsc, session, ci);
2252                if (delayed) {
2253                        spin_lock(&ci->i_ceph_lock);
2254                        __cap_delay_requeue(mdsc, ci);
2255                        spin_unlock(&ci->i_ceph_lock);
2256                }
2257        } else {
2258                spin_unlock(&ci->i_ceph_lock);
2259        }
2260}
2261
2262
2263/*
2264 * Take references to capabilities we hold, so that we don't release
2265 * them to the MDS prematurely.
2266 *
2267 * Protected by i_ceph_lock.
2268 */
2269static void __take_cap_refs(struct ceph_inode_info *ci, int got,
2270                            bool snap_rwsem_locked)
2271{
2272        if (got & CEPH_CAP_PIN)
2273                ci->i_pin_ref++;
2274        if (got & CEPH_CAP_FILE_RD)
2275                ci->i_rd_ref++;
2276        if (got & CEPH_CAP_FILE_CACHE)
2277                ci->i_rdcache_ref++;
2278        if (got & CEPH_CAP_FILE_WR) {
2279                if (ci->i_wr_ref == 0 && !ci->i_head_snapc) {
2280                        BUG_ON(!snap_rwsem_locked);
2281                        ci->i_head_snapc = ceph_get_snap_context(
2282                                        ci->i_snap_realm->cached_context);
2283                }
2284                ci->i_wr_ref++;
2285        }
2286        if (got & CEPH_CAP_FILE_BUFFER) {
2287                if (ci->i_wb_ref == 0)
2288                        ihold(&ci->vfs_inode);
2289                ci->i_wb_ref++;
2290                dout("__take_cap_refs %p wb %d -> %d (?)\n",
2291                     &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref);
2292        }
2293}
2294
2295/*
2296 * Try to grab cap references.  Specify those refs we @want, and the
2297 * minimal set we @need.  Also include the larger offset we are writing
2298 * to (when applicable), and check against max_size here as well.
2299 * Note that caller is responsible for ensuring max_size increases are
2300 * requested from the MDS.
2301 */
2302static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
2303                            loff_t endoff, bool nonblock, int *got, int *err)
2304{
2305        struct inode *inode = &ci->vfs_inode;
2306        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
2307        int ret = 0;
2308        int have, implemented;
2309        int file_wanted;
2310        bool snap_rwsem_locked = false;
2311
2312        dout("get_cap_refs %p need %s want %s\n", inode,
2313             ceph_cap_string(need), ceph_cap_string(want));
2314
2315again:
2316        spin_lock(&ci->i_ceph_lock);
2317
2318        /* make sure file is actually open */
2319        file_wanted = __ceph_caps_file_wanted(ci);
2320        if ((file_wanted & need) == 0) {
2321                dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
2322                     ceph_cap_string(need), ceph_cap_string(file_wanted));
2323                *err = -EBADF;
2324                ret = 1;
2325                goto out_unlock;
2326        }
2327
2328        /* finish pending truncate */
2329        while (ci->i_truncate_pending) {
2330                spin_unlock(&ci->i_ceph_lock);
2331                if (snap_rwsem_locked) {
2332                        up_read(&mdsc->snap_rwsem);
2333                        snap_rwsem_locked = false;
2334                }
2335                __ceph_do_pending_vmtruncate(inode);
2336                spin_lock(&ci->i_ceph_lock);
2337        }
2338
2339        have = __ceph_caps_issued(ci, &implemented);
2340
2341        if (have & need & CEPH_CAP_FILE_WR) {
2342                if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
2343                        dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
2344                             inode, endoff, ci->i_max_size);
2345                        if (endoff > ci->i_requested_max_size) {
2346                                *err = -EAGAIN;
2347                                ret = 1;
2348                        }
2349                        goto out_unlock;
2350                }
2351                /*
2352                 * If a sync write is in progress, we must wait, so that we
2353                 * can get a final snapshot value for size+mtime.
2354                 */
2355                if (__ceph_have_pending_cap_snap(ci)) {
2356                        dout("get_cap_refs %p cap_snap_pending\n", inode);
2357                        goto out_unlock;
2358                }
2359        }
2360
2361        if ((have & need) == need) {
2362                /*
2363                 * Look at (implemented & ~have & not) so that we keep waiting
2364                 * on transition from wanted -> needed caps.  This is needed
2365                 * for WRBUFFER|WR -> WR to avoid a new WR sync write from
2366                 * going before a prior buffered writeback happens.
2367                 */
2368                int not = want & ~(have & need);
2369                int revoking = implemented & ~have;
2370                dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
2371                     inode, ceph_cap_string(have), ceph_cap_string(not),
2372                     ceph_cap_string(revoking));
2373                if ((revoking & not) == 0) {
2374                        if (!snap_rwsem_locked &&
2375                            !ci->i_head_snapc &&
2376                            (need & CEPH_CAP_FILE_WR)) {
2377                                if (!down_read_trylock(&mdsc->snap_rwsem)) {
2378                                        /*
2379                                         * we cannot call down_read() when
2380                                         * the task isn't in TASK_RUNNING state
2381                                         */
2382                                        if (nonblock) {
2383                                                *err = -EAGAIN;
2384                                                ret = 1;
2385                                                goto out_unlock;
2386                                        }
2387
2388                                        spin_unlock(&ci->i_ceph_lock);
2389                                        down_read(&mdsc->snap_rwsem);
2390                                        snap_rwsem_locked = true;
2391                                        goto again;
2392                                }
2393                                snap_rwsem_locked = true;
2394                        }
2395                        *got = need | (have & want);
2396                        __take_cap_refs(ci, *got, true);
2397                        ret = 1;
2398                }
2399        } else {
2400                int session_readonly = false;
2401                if ((need & CEPH_CAP_FILE_WR) && ci->i_auth_cap) {
2402                        struct ceph_mds_session *s = ci->i_auth_cap->session;
2403                        spin_lock(&s->s_cap_lock);
2404                        session_readonly = s->s_readonly;
2405                        spin_unlock(&s->s_cap_lock);
2406                }
2407                if (session_readonly) {
2408                        dout("get_cap_refs %p needed %s but mds%d readonly\n",
2409                             inode, ceph_cap_string(need), ci->i_auth_cap->mds);
2410                        *err = -EROFS;
2411                        ret = 1;
2412                        goto out_unlock;
2413                }
2414
2415                if (!__ceph_is_any_caps(ci) &&
2416                    ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
2417                        dout("get_cap_refs %p forced umount\n", inode);
2418                        *err = -EIO;
2419                        ret = 1;
2420                        goto out_unlock;
2421                }
2422
2423                dout("get_cap_refs %p have %s needed %s\n", inode,
2424                     ceph_cap_string(have), ceph_cap_string(need));
2425        }
2426out_unlock:
2427        spin_unlock(&ci->i_ceph_lock);
2428        if (snap_rwsem_locked)
2429                up_read(&mdsc->snap_rwsem);
2430
2431        dout("get_cap_refs %p ret %d got %s\n", inode,
2432             ret, ceph_cap_string(*got));
2433        return ret;
2434}
2435
2436/*
2437 * Check the offset we are writing up to against our current
2438 * max_size.  If necessary, tell the MDS we want to write to
2439 * a larger offset.
2440 */
2441static void check_max_size(struct inode *inode, loff_t endoff)
2442{
2443        struct ceph_inode_info *ci = ceph_inode(inode);
2444        int check = 0;
2445
2446        /* do we need to explicitly request a larger max_size? */
2447        spin_lock(&ci->i_ceph_lock);
2448        if (endoff >= ci->i_max_size && endoff > ci->i_wanted_max_size) {
2449                dout("write %p at large endoff %llu, req max_size\n",
2450                     inode, endoff);
2451                ci->i_wanted_max_size = endoff;
2452        }
2453        /* duplicate ceph_check_caps()'s logic */
2454        if (ci->i_auth_cap &&
2455            (ci->i_auth_cap->issued & CEPH_CAP_FILE_WR) &&
2456            ci->i_wanted_max_size > ci->i_max_size &&
2457            ci->i_wanted_max_size > ci->i_requested_max_size)
2458                check = 1;
2459        spin_unlock(&ci->i_ceph_lock);
2460        if (check)
2461                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2462}
2463
2464/*
2465 * Wait for caps, and take cap references.  If we can't get a WR cap
2466 * due to a small max_size, make sure we check_max_size (and possibly
2467 * ask the mds) so we don't get hung up indefinitely.
2468 */
2469int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
2470                  loff_t endoff, int *got, struct page **pinned_page)
2471{
2472        int _got, ret, err = 0;
2473
2474        ret = ceph_pool_perm_check(ci, need);
2475        if (ret < 0)
2476                return ret;
2477
2478        while (true) {
2479                if (endoff > 0)
2480                        check_max_size(&ci->vfs_inode, endoff);
2481
2482                err = 0;
2483                _got = 0;
2484                ret = try_get_cap_refs(ci, need, want, endoff,
2485                                       false, &_got, &err);
2486                if (ret) {
2487                        if (err == -EAGAIN)
2488                                continue;
2489                        if (err < 0)
2490                                return err;
2491                } else {
2492                        ret = wait_event_interruptible(ci->i_cap_wq,
2493                                        try_get_cap_refs(ci, need, want, endoff,
2494                                                         true, &_got, &err));
2495                        if (err == -EAGAIN)
2496                                continue;
2497                        if (err < 0)
2498                                ret = err;
2499                        if (ret < 0)
2500                                return ret;
2501                }
2502
2503                if (ci->i_inline_version != CEPH_INLINE_NONE &&
2504                    (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
2505                    i_size_read(&ci->vfs_inode) > 0) {
2506                        struct page *page =
2507                                find_get_page(ci->vfs_inode.i_mapping, 0);
2508                        if (page) {
2509                                if (PageUptodate(page)) {
2510                                        *pinned_page = page;
2511                                        break;
2512                                }
2513                                put_page(page);
2514                        }
2515                        /*
2516                         * drop cap refs first because getattr while
2517                         * holding cap refs can cause deadlock.
2518                         */
2519                        ceph_put_cap_refs(ci, _got);
2520                        _got = 0;
2521
2522                        /*
2523                         * getattr request will bring inline data into
2524                         * page cache
2525                         */
2526                        ret = __ceph_do_getattr(&ci->vfs_inode, NULL,
2527                                                CEPH_STAT_CAP_INLINE_DATA,
2528                                                true);
2529                        if (ret < 0)
2530                                return ret;
2531                        continue;
2532                }
2533                break;
2534        }
2535
2536        *got = _got;
2537        return 0;
2538}
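
/*
 * A hedged write-path sketch (hypothetical helper, loosely modeled on
 * how file writes consume this API): need FILE_WR, additionally want
 * FILE_BUFFER so the write can be buffered if the MDS allows it, and
 * always balance with ceph_put_cap_refs().
 */
static int example_write_with_caps(struct ceph_inode_info *ci,
                                   loff_t pos, size_t count)
{
        struct page *pinned_page = NULL;
        int got = 0, err;

        err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
                            pos + count, &got, &pinned_page);
        if (err < 0)
                return err;

        /* ... do the buffered or sync write under these cap refs ... */

        if (pinned_page)
                put_page(pinned_page);
        ceph_put_cap_refs(ci, got);
        return 0;
}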
2539
2540/*
2541 * Take cap refs.  Caller must already know we hold at least one ref
2542 * on the caps in question or we don't know this is safe.
2543 */
2544void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
2545{
2546        spin_lock(&ci->i_ceph_lock);
2547        __take_cap_refs(ci, caps, false);
2548        spin_unlock(&ci->i_ceph_lock);
2549}
2550
2551
2552/*
2553 * drop cap_snap that is not associated with any snapshot.
2554 * we don't need to send FLUSHSNAP message for it.
2555 */
2556static int ceph_try_drop_cap_snap(struct ceph_cap_snap *capsnap)
2557{
2558        if (!capsnap->need_flush &&
2559            !capsnap->writing && !capsnap->dirty_pages) {
2560
2561                dout("dropping cap_snap %p follows %llu\n",
2562                     capsnap, capsnap->follows);
2563                ceph_put_snap_context(capsnap->context);
2564                list_del(&capsnap->ci_item);
2565                list_del(&capsnap->flushing_item);
2566                ceph_put_cap_snap(capsnap);
2567                return 1;
2568        }
2569        return 0;
2570}
2571
2572/*
2573 * Release cap refs.
2574 *
2575 * If we released the last ref on any given cap, call ceph_check_caps
2576 * to release (or schedule a release).
2577 *
2578 * If we are releasing a WR cap (from a sync write), finalize any affected
2579 * cap_snap, and wake up any waiters.
2580 */
2581void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
2582{
2583        struct inode *inode = &ci->vfs_inode;
2584        int last = 0, put = 0, flushsnaps = 0, wake = 0;
2585
2586        spin_lock(&ci->i_ceph_lock);
2587        if (had & CEPH_CAP_PIN)
2588                --ci->i_pin_ref;
2589        if (had & CEPH_CAP_FILE_RD)
2590                if (--ci->i_rd_ref == 0)
2591                        last++;
2592        if (had & CEPH_CAP_FILE_CACHE)
2593                if (--ci->i_rdcache_ref == 0)
2594                        last++;
2595        if (had & CEPH_CAP_FILE_BUFFER) {
2596                if (--ci->i_wb_ref == 0) {
2597                        last++;
2598                        put++;
2599                }
2600                dout("put_cap_refs %p wb %d -> %d (?)\n",
2601                     inode, ci->i_wb_ref+1, ci->i_wb_ref);
2602        }
2603        if (had & CEPH_CAP_FILE_WR)
2604                if (--ci->i_wr_ref == 0) {
2605                        last++;
2606                        if (__ceph_have_pending_cap_snap(ci)) {
2607                                struct ceph_cap_snap *capsnap =
2608                                        list_last_entry(&ci->i_cap_snaps,
2609                                                        struct ceph_cap_snap,
2610                                                        ci_item);
2611                                capsnap->writing = 0;
2612                                if (ceph_try_drop_cap_snap(capsnap))
2613                                        put++;
2614                                else if (__ceph_finish_cap_snap(ci, capsnap))
2615                                        flushsnaps = 1;
2616                                wake = 1;
2617                        }
2618                        if (ci->i_wrbuffer_ref_head == 0 &&
2619                            ci->i_dirty_caps == 0 &&
2620                            ci->i_flushing_caps == 0) {
2621                                BUG_ON(!ci->i_head_snapc);
2622                                ceph_put_snap_context(ci->i_head_snapc);
2623                                ci->i_head_snapc = NULL;
2624                        }
2625                        /* see comment in __ceph_remove_cap() */
2626                        if (!__ceph_is_any_caps(ci) && ci->i_snap_realm)
2627                                drop_inode_snap_realm(ci);
2628                }
2629        spin_unlock(&ci->i_ceph_lock);
2630
2631        dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
2632             last ? " last" : "", put ? " put" : "");
2633
2634        if (last && !flushsnaps)
2635                ceph_check_caps(ci, 0, NULL);
2636        else if (flushsnaps)
2637                ceph_flush_snaps(ci);
2638        if (wake)
2639                wake_up_all(&ci->i_cap_wq);
2640        while (put-- > 0)
2641                iput(inode);
2642}
2643
2644/*
2645 * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
2646 * context.  Adjust per-snap dirty page accounting as appropriate.
2647 * Once all dirty data for a cap_snap is flushed, flush snapped file
2648 * metadata back to the MDS.  If we dropped the last ref, call
2649 * ceph_check_caps.
2650 */
2651void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2652                                struct ceph_snap_context *snapc)
2653{
2654        struct inode *inode = &ci->vfs_inode;
2655        int last = 0;
2656        int complete_capsnap = 0;
2657        int drop_capsnap = 0;
2658        int found = 0;
2659        struct ceph_cap_snap *capsnap = NULL;
2660
2661        spin_lock(&ci->i_ceph_lock);
2662        ci->i_wrbuffer_ref -= nr;
2663        last = !ci->i_wrbuffer_ref;
2664
2665        if (ci->i_head_snapc == snapc) {
2666                ci->i_wrbuffer_ref_head -= nr;
2667                if (ci->i_wrbuffer_ref_head == 0 &&
2668                    ci->i_wr_ref == 0 &&
2669                    ci->i_dirty_caps == 0 &&
2670                    ci->i_flushing_caps == 0) {
2671                        BUG_ON(!ci->i_head_snapc);
2672                        ceph_put_snap_context(ci->i_head_snapc);
2673                        ci->i_head_snapc = NULL;
2674                }
2675                dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
2676                     inode,
2677                     ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
2678                     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
2679                     last ? " LAST" : "");
2680        } else {
2681                list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2682                        if (capsnap->context == snapc) {
2683                                found = 1;
2684                                break;
2685                        }
2686                }
2687                BUG_ON(!found);
2688                capsnap->dirty_pages -= nr;
2689                if (capsnap->dirty_pages == 0) {
2690                        complete_capsnap = 1;
2691                        drop_capsnap = ceph_try_drop_cap_snap(capsnap);
2692                }
2693                dout("put_wrbuffer_cap_refs on %p cap_snap %p "
2694                     "snap %lld %d/%d -> %d/%d %s%s\n",
2695                     inode, capsnap, capsnap->context->seq,
2696                     ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
2697                     ci->i_wrbuffer_ref, capsnap->dirty_pages,
2698                     last ? " (wrbuffer last)" : "",
2699                     complete_capsnap ? " (complete capsnap)" : "");
2700        }
2701
2702        spin_unlock(&ci->i_ceph_lock);
2703
2704        if (last) {
2705                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2706                iput(inode);
2707        } else if (complete_capsnap) {
2708                ceph_flush_snaps(ci);
2709                wake_up_all(&ci->i_cap_wq);
2710        }
2711        if (drop_capsnap)
2712                iput(inode);
2713}
2714
2715/*
2716 * Invalidate unlinked inode's aliases, so we can drop the inode ASAP.
2717 */
2718static void invalidate_aliases(struct inode *inode)
2719{
2720        struct dentry *dn, *prev = NULL;
2721
2722        dout("invalidate_aliases inode %p\n", inode);
2723        d_prune_aliases(inode);
2724        /*
2725         * For a non-directory inode, d_find_alias() only returns a
2726         * hashed dentry. After calling d_invalidate(), the
2727         * dentry becomes unhashed.
2728         *
2729         * For a directory inode, d_find_alias() can return an
2730         * unhashed dentry. But a directory inode should have
2731         * one alias at most.
2732         */
2733        while ((dn = d_find_alias(inode))) {
2734                if (dn == prev) {
2735                        dput(dn);
2736                        break;
2737                }
2738                d_invalidate(dn);
2739                if (prev)
2740                        dput(prev);
2741                prev = dn;
2742        }
2743        if (prev)
2744                dput(prev);
2745}
2746
2747/*
2748 * Handle a cap GRANT message from the MDS.  (Note that a GRANT may
2749 * actually be a revocation if it specifies a smaller cap set.)
2750 *
2751 * caller holds s_mutex and i_ceph_lock, we drop both.
2752 */
2753static void handle_cap_grant(struct ceph_mds_client *mdsc,
2754                             struct inode *inode, struct ceph_mds_caps *grant,
2755                             u64 inline_version,
2756                             void *inline_data, int inline_len,
2757                             struct ceph_buffer *xattr_buf,
2758                             struct ceph_mds_session *session,
2759                             struct ceph_cap *cap, int issued,
2760                             u32 pool_ns_len)
2761        __releases(ci->i_ceph_lock)
2762        __releases(mdsc->snap_rwsem)
2763{
2764        struct ceph_inode_info *ci = ceph_inode(inode);
2765        int mds = session->s_mds;
2766        int seq = le32_to_cpu(grant->seq);
2767        int newcaps = le32_to_cpu(grant->caps);
2768        int used, wanted, dirty;
2769        u64 size = le64_to_cpu(grant->size);
2770        u64 max_size = le64_to_cpu(grant->max_size);
2771        struct timespec mtime, atime, ctime;
2772        int check_caps = 0;
2773        bool wake = false;
2774        bool writeback = false;
2775        bool queue_trunc = false;
2776        bool queue_invalidate = false;
2777        bool queue_revalidate = false;
2778        bool deleted_inode = false;
2779        bool fill_inline = false;
2780
2781        dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
2782             inode, cap, mds, seq, ceph_cap_string(newcaps));
2783        dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
2784                inode->i_size);
2785
2786
2787        /*
2788         * The auth MDS of the inode changed.  We received the cap export
2789         * message, but still haven't received the cap import message.
2790         * handle_cap_export() updated the new auth MDS' cap.
2791         *
2792         * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing a message
2793         * that was sent before the cap import message, so don't remove caps.
2794         */
2795        if (ceph_seq_cmp(seq, cap->seq) <= 0) {
2796                WARN_ON(cap != ci->i_auth_cap);
2797                WARN_ON(cap->cap_id != le64_to_cpu(grant->cap_id));
2798                seq = cap->seq;
2799                newcaps |= cap->issued;
2800        }
2801
2802        /*
2803         * If CACHE is being revoked, and we have no dirty buffers,
2804         * try to invalidate (once).  (If there are dirty buffers, we
2805         * will invalidate _after_ writeback.)
2806         */
2807        if (!S_ISDIR(inode->i_mode) && /* don't invalidate readdir cache */
2808            ((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
2809            (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
2810            !ci->i_wrbuffer_ref) {
2811                if (try_nonblocking_invalidate(inode)) {
2812                        /* there were locked pages; invalidate later
2813                           in a separate thread. */
2814                        if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
2815                                queue_invalidate = true;
2816                                ci->i_rdcache_revoking = ci->i_rdcache_gen;
2817                        }
2818                }
2819
2820                ceph_fscache_invalidate(inode);
2821        }
2822
2823        /* side effects now are allowed */
2824        cap->cap_gen = session->s_cap_gen;
2825        cap->seq = seq;
2826
2827        __check_cap_issue(ci, cap, newcaps);
2828
2829        if ((newcaps & CEPH_CAP_AUTH_SHARED) &&
2830            (issued & CEPH_CAP_AUTH_EXCL) == 0) {
2831                inode->i_mode = le32_to_cpu(grant->mode);
2832                inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(grant->uid));
2833                inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(grant->gid));
2834                dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
2835                     from_kuid(&init_user_ns, inode->i_uid),
2836                     from_kgid(&init_user_ns, inode->i_gid));
2837        }
2838
2839        if ((newcaps & CEPH_CAP_LINK_SHARED) &&
2840            (issued & CEPH_CAP_LINK_EXCL) == 0) {
2841                set_nlink(inode, le32_to_cpu(grant->nlink));
2842                if (inode->i_nlink == 0 &&
2843                    (newcaps & (CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL)))
2844                        deleted_inode = true;
2845        }
2846
2847        if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
2848                int len = le32_to_cpu(grant->xattr_len);
2849                u64 version = le64_to_cpu(grant->xattr_version);
2850
2851                if (version > ci->i_xattrs.version) {
2852                        dout(" got new xattrs v%llu on %p len %d\n",
2853                             version, inode, len);
2854                        if (ci->i_xattrs.blob)
2855                                ceph_buffer_put(ci->i_xattrs.blob);
2856                        ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
2857                        ci->i_xattrs.version = version;
2858                        ceph_forget_all_cached_acls(inode);
2859                }
2860        }
2861
2862        /* Do we need to revalidate our fscache cookie?  Don't bother on the
2863         * first cache cap, as we already validate at cookie creation time. */
2864        if ((issued & CEPH_CAP_FILE_CACHE) && ci->i_rdcache_gen > 1)
2865                queue_revalidate = true;
2866
2867        if (newcaps & CEPH_CAP_ANY_RD) {
2868                /* ctime/mtime/atime? */
2869                ceph_decode_timespec(&mtime, &grant->mtime);
2870                ceph_decode_timespec(&atime, &grant->atime);
2871                ceph_decode_timespec(&ctime, &grant->ctime);
2872                ceph_fill_file_time(inode, issued,
2873                                    le32_to_cpu(grant->time_warp_seq),
2874                                    &ctime, &mtime, &atime);
2875        }
2876
2877        if (newcaps & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR)) {
2878                /* file layout may have changed */
2879                ci->i_layout = grant->layout;
2880                ci->i_pool_ns_len = pool_ns_len;
2881
2882                /* size/truncate_seq? */
2883                queue_trunc = ceph_fill_file_size(inode, issued,
2884                                        le32_to_cpu(grant->truncate_seq),
2885                                        le64_to_cpu(grant->truncate_size),
2886                                        size);
2887                /* max size increase? */
2888                if (ci->i_auth_cap == cap && max_size != ci->i_max_size) {
2889                        dout("max_size %lld -> %llu\n",
2890                             ci->i_max_size, max_size);
2891                        ci->i_max_size = max_size;
2892                        if (max_size >= ci->i_wanted_max_size) {
2893                                ci->i_wanted_max_size = 0;  /* reset */
2894                                ci->i_requested_max_size = 0;
2895                        }
2896                        wake = true;
2897                }
2898        }
2899
2900        /* check cap bits */
2901        wanted = __ceph_caps_wanted(ci);
2902        used = __ceph_caps_used(ci);
2903        dirty = __ceph_caps_dirty(ci);
2904        dout(" my wanted = %s, used = %s, dirty %s\n",
2905             ceph_cap_string(wanted),
2906             ceph_cap_string(used),
2907             ceph_cap_string(dirty));
2908        if (wanted != le32_to_cpu(grant->wanted)) {
2909                dout("mds wanted %s -> %s\n",
2910                     ceph_cap_string(le32_to_cpu(grant->wanted)),
2911                     ceph_cap_string(wanted));
2912                /* imported cap may not have correct mds_wanted */
2913                if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT)
2914                        check_caps = 1;
2915        }
2916
2917        /* revocation, grant, or no-op? */
2918        if (cap->issued & ~newcaps) {
2919                int revoking = cap->issued & ~newcaps;
2920
2921                dout("revocation: %s -> %s (revoking %s)\n",
2922                     ceph_cap_string(cap->issued),
2923                     ceph_cap_string(newcaps),
2924                     ceph_cap_string(revoking));
2925                if (revoking & used & CEPH_CAP_FILE_BUFFER)
2926                        writeback = true;  /* initiate writeback; will delay ack */
2927                else if (revoking == CEPH_CAP_FILE_CACHE &&
2928                         (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
2929                         queue_invalidate)
2930                        ; /* do nothing yet, invalidation will be queued */
2931                else if (cap == ci->i_auth_cap)
2932                        check_caps = 1; /* check auth cap only */
2933                else
2934                        check_caps = 2; /* check all caps */
2935                cap->issued = newcaps;
2936                cap->implemented |= newcaps;
2937        } else if (cap->issued == newcaps) {
2938                dout("caps unchanged: %s -> %s\n",
2939                     ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
2940        } else {
2941                dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
2942                     ceph_cap_string(newcaps));
2943                /* is a non-auth MDS revoking the newly granted caps? */
2944                if (cap == ci->i_auth_cap &&
2945                    __ceph_caps_revoking_other(ci, cap, newcaps))
2946                    check_caps = 2;
2947
2948                cap->issued = newcaps;
2949                cap->implemented |= newcaps; /* add bits only, to
2950                                              * avoid stepping on a
2951                                              * pending revocation */
2952                wake = true;
2953        }
2954        BUG_ON(cap->issued & ~cap->implemented);
2955
2956        if (inline_version > 0 && inline_version >= ci->i_inline_version) {
2957                ci->i_inline_version = inline_version;
2958                if (ci->i_inline_version != CEPH_INLINE_NONE &&
2959                    (newcaps & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)))
2960                        fill_inline = true;
2961        }
2962
2963        spin_unlock(&ci->i_ceph_lock);
2964
2965        if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
2966                kick_flushing_inode_caps(mdsc, session, inode);
2967                up_read(&mdsc->snap_rwsem);
2968                if (newcaps & ~issued)
2969                        wake = true;
2970        }
2971
2972        if (fill_inline)
2973                ceph_fill_inline_data(inode, NULL, inline_data, inline_len);
2974
2975        if (queue_trunc) {
2976                ceph_queue_vmtruncate(inode);
2977                ceph_queue_revalidate(inode);
2978        } else if (queue_revalidate)
2979                ceph_queue_revalidate(inode);
2980
2981        if (writeback)
2982                /*
2983                 * queue inode for writeback: we can't actually call
2984                 * filemap_write_and_wait, etc. from message handler
2985                 * context.
2986                 */
2987                ceph_queue_writeback(inode);
2988        if (queue_invalidate)
2989                ceph_queue_invalidate(inode);
2990        if (deleted_inode)
2991                invalidate_aliases(inode);
2992        if (wake)
2993                wake_up_all(&ci->i_cap_wq);
2994
2995        if (check_caps == 1)
2996                ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
2997                                session);
2998        else if (check_caps == 2)
2999                ceph_check_caps(ci, CHECK_CAPS_NODELAY, session);
3000        else
3001                mutex_unlock(&session->s_mutex);
3002}
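
/*
 * Illustrative sketch (not used above): the revoke/grant/no-op split in
 * handle_cap_grant() is pure bitmask arithmetic on the issued vs. newly
 * granted cap sets.  The helper name is hypothetical.
 */
static int __maybe_unused cap_delta_kind(int issued, int newcaps)
{
        if (issued & ~newcaps)
                return -1;      /* revocation: some issued bits were lost */
        if (issued == newcaps)
                return 0;       /* no-op: cap set unchanged */
        return 1;               /* grant: strictly more bits than before */
}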
3003
3004/*
3005 * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
3006 * MDS has been safely committed.
3007 */
3008static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
3009                                 struct ceph_mds_caps *m,
3010                                 struct ceph_mds_session *session,
3011                                 struct ceph_cap *cap)
3012        __releases(ci->i_ceph_lock)
3013{
3014        struct ceph_inode_info *ci = ceph_inode(inode);
3015        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
3016        struct ceph_cap_flush *cf;
3017        struct rb_node *n;
3018        LIST_HEAD(to_remove);
3019        unsigned seq = le32_to_cpu(m->seq);
3020        int dirty = le32_to_cpu(m->dirty);
3021        int cleaned = 0;
3022        int drop = 0;
3023
3024        n = rb_first(&ci->i_cap_flush_tree);
3025        while (n) {
3026                cf = rb_entry(n, struct ceph_cap_flush, i_node);
3027                n = rb_next(&cf->i_node);
3028                if (cf->tid == flush_tid)
3029                        cleaned = cf->caps;
3030                if (cf->tid <= flush_tid) {
3031                        rb_erase(&cf->i_node, &ci->i_cap_flush_tree);
3032                        list_add_tail(&cf->list, &to_remove);
3033                } else {
3034                        cleaned &= ~cf->caps;
3035                        if (!cleaned)
3036                                break;
3037                }
3038        }
3039
3040        dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
3041             " flushing %s -> %s\n",
3042             inode, session->s_mds, seq, ceph_cap_string(dirty),
3043             ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
3044             ceph_cap_string(ci->i_flushing_caps & ~cleaned));
3045
3046        if (list_empty(&to_remove) && !cleaned)
3047                goto out;
3048
3049        ci->i_flushing_caps &= ~cleaned;
3050
3051        spin_lock(&mdsc->cap_dirty_lock);
3052
3053        if (!list_empty(&to_remove)) {
3054                list_for_each_entry(cf, &to_remove, list)
3055                        rb_erase(&cf->g_node, &mdsc->cap_flush_tree);
3056
3057                n = rb_first(&mdsc->cap_flush_tree);
3058                cf = n ? rb_entry(n, struct ceph_cap_flush, g_node) : NULL;
3059                if (!cf || cf->tid > flush_tid)
3060                        wake_up_all(&mdsc->cap_flushing_wq);
3061        }
3062
3063        if (ci->i_flushing_caps == 0) {
3064                list_del_init(&ci->i_flushing_item);
3065                if (!list_empty(&session->s_cap_flushing))
3066                        dout(" mds%d still flushing cap on %p\n",
3067                             session->s_mds,
3068                             &list_entry(session->s_cap_flushing.next,
3069                                         struct ceph_inode_info,
3070                                         i_flushing_item)->vfs_inode);
3071                mdsc->num_cap_flushing--;
3072                dout(" inode %p now !flushing\n", inode);
3073
3074                if (ci->i_dirty_caps == 0) {
3075                        dout(" inode %p now clean\n", inode);
3076                        BUG_ON(!list_empty(&ci->i_dirty_item));
3077                        drop = 1;
3078                        if (ci->i_wr_ref == 0 &&
3079                            ci->i_wrbuffer_ref_head == 0) {
3080                                BUG_ON(!ci->i_head_snapc);
3081                                ceph_put_snap_context(ci->i_head_snapc);
3082                                ci->i_head_snapc = NULL;
3083                        }
3084                } else {
3085                        BUG_ON(list_empty(&ci->i_dirty_item));
3086                }
3087        }
3088        spin_unlock(&mdsc->cap_dirty_lock);
3089        wake_up_all(&ci->i_cap_wq);
3090
3091out:
3092        spin_unlock(&ci->i_ceph_lock);
3093
3094        while (!list_empty(&to_remove)) {
3095                cf = list_first_entry(&to_remove,
3096                                      struct ceph_cap_flush, list);
3097                list_del(&cf->list);
3098                ceph_free_cap_flush(cf);
3099        }
3100        if (drop)
3101                iput(inode);
3102}
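
/*
 * A minimal model (illustrative only) of the bookkeeping in
 * handle_cap_flush_ack(): an ack for @flush_tid completes every pending
 * flush with tid <= flush_tid, while cap bits still being flushed under
 * a later tid must not be reported clean yet.  Assumes @pending is
 * sorted by tid, mirroring the rbtree walk above; all names are
 * hypothetical.
 */
struct cap_flush_rec { u64 tid; int caps; };

static int __maybe_unused caps_cleaned_by_ack(const struct cap_flush_rec *pending,
                                              int n, u64 flush_tid)
{
        int cleaned = 0;
        int i;

        for (i = 0; i < n; i++) {
                if (pending[i].tid == flush_tid)
                        cleaned = pending[i].caps;
                else if (pending[i].tid > flush_tid)
                        cleaned &= ~pending[i].caps;    /* still in flight */
        }
        return cleaned;
}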
3103
3104/*
3105 * Handle FLUSHSNAP_ACK.  MDS has flushed snap data to disk and we can
3106 * throw away our cap_snap.
3107 *
3108 * Caller holds s_mutex.
3109 */
3110static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
3111                                     struct ceph_mds_caps *m,
3112                                     struct ceph_mds_session *session)
3113{
3114        struct ceph_inode_info *ci = ceph_inode(inode);
3115        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
3116        u64 follows = le64_to_cpu(m->snap_follows);
3117        struct ceph_cap_snap *capsnap;
3118        int drop = 0;
3119
3120        dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
3121             inode, ci, session->s_mds, follows);
3122
3123        spin_lock(&ci->i_ceph_lock);
3124        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
3125                if (capsnap->follows == follows) {
3126                        if (capsnap->flush_tid != flush_tid) {
3127                                dout(" cap_snap %p follows %lld tid %lld !="
3128                                     " %lld\n", capsnap, follows,
3129                                     flush_tid, capsnap->flush_tid);
3130                                break;
3131                        }
3132                        WARN_ON(capsnap->dirty_pages || capsnap->writing);
3133                        dout(" removing %p cap_snap %p follows %lld\n",
3134                             inode, capsnap, follows);
3135                        ceph_put_snap_context(capsnap->context);
3136                        list_del(&capsnap->ci_item);
3137                        list_del(&capsnap->flushing_item);
3138                        ceph_put_cap_snap(capsnap);
3139                        wake_up_all(&mdsc->cap_flushing_wq);
3140                        drop = 1;
3141                        break;
3142                } else {
3143                        dout(" skipping cap_snap %p follows %lld\n",
3144                             capsnap, capsnap->follows);
3145                }
3146        }
3147        spin_unlock(&ci->i_ceph_lock);
3148        if (drop)
3149                iput(inode);
3150}
3151
3152/*
3153 * Handle TRUNC from MDS, indicating file truncation.
3154 *
3155 * caller holds s_mutex.
3156 */
3157static void handle_cap_trunc(struct inode *inode,
3158                             struct ceph_mds_caps *trunc,
3159                             struct ceph_mds_session *session)
3160        __releases(ci->i_ceph_lock)
3161{
3162        struct ceph_inode_info *ci = ceph_inode(inode);
3163        int mds = session->s_mds;
3164        int seq = le32_to_cpu(trunc->seq);
3165        u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
3166        u64 truncate_size = le64_to_cpu(trunc->truncate_size);
3167        u64 size = le64_to_cpu(trunc->size);
3168        int implemented = 0;
3169        int dirty = __ceph_caps_dirty(ci);
3170        int issued = __ceph_caps_issued(ceph_inode(inode), &implemented);
3171        int queue_trunc = 0;
3172
3173        issued |= implemented | dirty;
3174
3175        dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
3176             inode, mds, seq, truncate_size, truncate_seq);
3177        queue_trunc = ceph_fill_file_size(inode, issued,
3178                                          truncate_seq, truncate_size, size);
3179        spin_unlock(&ci->i_ceph_lock);
3180
3181        if (queue_trunc) {
3182                ceph_queue_vmtruncate(inode);
3183                ceph_fscache_invalidate(inode);
3184        }
3185}
3186
3187/*
3188 * Handle EXPORT from MDS.  Cap is being migrated _from_ this mds to a
3189 * different one.  If we are the most recent migration we've seen (as
3190 * indicated by mseq), make note of the migrating cap bits for the
3191 * duration (until we see the corresponding IMPORT).
3192 *
3193 * caller holds s_mutex
3194 */
3195static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
3196                              struct ceph_mds_cap_peer *ph,
3197                              struct ceph_mds_session *session)
3198{
3199        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
3200        struct ceph_mds_session *tsession = NULL;
3201        struct ceph_cap *cap, *tcap, *new_cap = NULL;
3202        struct ceph_inode_info *ci = ceph_inode(inode);
3203        u64 t_cap_id;
3204        unsigned mseq = le32_to_cpu(ex->migrate_seq);
3205        unsigned t_seq, t_mseq;
3206        int target, issued;
3207        int mds = session->s_mds;
3208
3209        if (ph) {
3210                t_cap_id = le64_to_cpu(ph->cap_id);
3211                t_seq = le32_to_cpu(ph->seq);
3212                t_mseq = le32_to_cpu(ph->mseq);
3213                target = le32_to_cpu(ph->mds);
3214        } else {
3215                t_cap_id = t_seq = t_mseq = 0;
3216                target = -1;
3217        }
3218
3219        dout("handle_cap_export inode %p ci %p mds%d mseq %d target %d\n",
3220             inode, ci, mds, mseq, target);
3221retry:
3222        spin_lock(&ci->i_ceph_lock);
3223        cap = __get_cap_for_mds(ci, mds);
3224        if (!cap || cap->cap_id != le64_to_cpu(ex->cap_id))
3225                goto out_unlock;
3226
3227        if (target < 0) {
3228                __ceph_remove_cap(cap, false);
3229                goto out_unlock;
3230        }
3231
3232        /*
3233         * Now we know we haven't received the cap import message yet,
3234         * because the exported cap still exists.
3235         */
3236
3237        issued = cap->issued;
3238        WARN_ON(issued != cap->implemented);
3239
3240        tcap = __get_cap_for_mds(ci, target);
3241        if (tcap) {
3242                /* already have caps from the target */
3243                if (tcap->cap_id != t_cap_id ||
3244                    ceph_seq_cmp(tcap->seq, t_seq) < 0) {
3245                        dout(" updating import cap %p mds%d\n", tcap, target);
3246                        tcap->cap_id = t_cap_id;
3247                        tcap->seq = t_seq - 1;
3248                        tcap->issue_seq = t_seq - 1;
3249                        tcap->mseq = t_mseq;
3250                        tcap->issued |= issued;
3251                        tcap->implemented |= issued;
3252                        if (cap == ci->i_auth_cap)
3253                                ci->i_auth_cap = tcap;
3254                        if (ci->i_flushing_caps && ci->i_auth_cap == tcap) {
3255                                spin_lock(&mdsc->cap_dirty_lock);
3256                                list_move_tail(&ci->i_flushing_item,
3257                                               &tcap->session->s_cap_flushing);
3258                                spin_unlock(&mdsc->cap_dirty_lock);
3259                        }
3260                }
3261                __ceph_remove_cap(cap, false);
3262                goto out_unlock;
3263        } else if (tsession) {
3264                /* add a placeholder cap for the export target */
3265                int flag = (cap == ci->i_auth_cap) ? CEPH_CAP_FLAG_AUTH : 0;
3266                ceph_add_cap(inode, tsession, t_cap_id, -1, issued, 0,
3267                             t_seq - 1, t_mseq, (u64)-1, flag, &new_cap);
3268
3269                __ceph_remove_cap(cap, false);
3270                goto out_unlock;
3271        }
3272
3273        spin_unlock(&ci->i_ceph_lock);
3274        mutex_unlock(&session->s_mutex);
3275
3276        /* open target session */
3277        tsession = ceph_mdsc_open_export_target_session(mdsc, target);
3278        if (!IS_ERR(tsession)) {
3279                if (mds > target) {
3280                        mutex_lock(&session->s_mutex);
3281                        mutex_lock_nested(&tsession->s_mutex,
3282                                          SINGLE_DEPTH_NESTING);
3283                } else {
3284                        mutex_lock(&tsession->s_mutex);
3285                        mutex_lock_nested(&session->s_mutex,
3286                                          SINGLE_DEPTH_NESTING);
3287                }
3288                new_cap = ceph_get_cap(mdsc, NULL);
3289        } else {
3290                WARN_ON(1);
3291                tsession = NULL;
3292                target = -1;
3293        }
3294        goto retry;
3295
3296out_unlock:
3297        spin_unlock(&ci->i_ceph_lock);
3298        mutex_unlock(&session->s_mutex);
3299        if (tsession) {
3300                mutex_unlock(&tsession->s_mutex);
3301                ceph_put_mds_session(tsession);
3302        }
3303        if (new_cap)
3304                ceph_put_cap(mdsc, new_cap);
3305}
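
/*
 * A sketch (hypothetical helper, not called anywhere) of the
 * lock-ordering rule handle_cap_export() follows when it must hold two
 * session mutexes at once: the session of the higher-ranked MDS is
 * always taken first, so two racing migrations cannot deadlock.
 * Assumes the sessions belong to different MDS ranks, as is the case
 * for a cap migration.
 */
static void __maybe_unused lock_two_sessions(struct ceph_mds_session *a,
                                             struct ceph_mds_session *b)
{
        if (a->s_mds > b->s_mds) {
                mutex_lock(&a->s_mutex);
                mutex_lock_nested(&b->s_mutex, SINGLE_DEPTH_NESTING);
        } else {
                mutex_lock(&b->s_mutex);
                mutex_lock_nested(&a->s_mutex, SINGLE_DEPTH_NESTING);
        }
}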
3306
3307/*
3308 * Handle cap IMPORT.
3309 *
3310 * caller holds s_mutex. acquires i_ceph_lock
3311 */
3312static void handle_cap_import(struct ceph_mds_client *mdsc,
3313                              struct inode *inode, struct ceph_mds_caps *im,
3314                              struct ceph_mds_cap_peer *ph,
3315                              struct ceph_mds_session *session,
3316                              struct ceph_cap **target_cap, int *old_issued)
3317        __acquires(ci->i_ceph_lock)
3318{
3319        struct ceph_inode_info *ci = ceph_inode(inode);
3320        struct ceph_cap *cap, *ocap, *new_cap = NULL;
3321        int mds = session->s_mds;
3322        int issued;
3323        unsigned caps = le32_to_cpu(im->caps);
3324        unsigned wanted = le32_to_cpu(im->wanted);
3325        unsigned seq = le32_to_cpu(im->seq);
3326        unsigned mseq = le32_to_cpu(im->migrate_seq);
3327        u64 realmino = le64_to_cpu(im->realm);
3328        u64 cap_id = le64_to_cpu(im->cap_id);
3329        u64 p_cap_id;
3330        int peer;
3331
3332        if (ph) {
3333                p_cap_id = le64_to_cpu(ph->cap_id);
3334                peer = le32_to_cpu(ph->mds);
3335        } else {
3336                p_cap_id = 0;
3337                peer = -1;
3338        }
3339
3340        dout("handle_cap_import inode %p ci %p mds%d mseq %d peer %d\n",
3341             inode, ci, mds, mseq, peer);
3342
3343retry:
3344        spin_lock(&ci->i_ceph_lock);
3345        cap = __get_cap_for_mds(ci, mds);
3346        if (!cap) {
3347                if (!new_cap) {
3348                        spin_unlock(&ci->i_ceph_lock);
3349                        new_cap = ceph_get_cap(mdsc, NULL);
3350                        goto retry;
3351                }
3352                cap = new_cap;
3353        } else {
3354                if (new_cap) {
3355                        ceph_put_cap(mdsc, new_cap);
3356                        new_cap = NULL;
3357                }
3358        }
3359
3360        __ceph_caps_issued(ci, &issued);
3361        issued |= __ceph_caps_dirty(ci);
3362
3363        ceph_add_cap(inode, session, cap_id, -1, caps, wanted, seq, mseq,
3364                     realmino, CEPH_CAP_FLAG_AUTH, &new_cap);
3365
3366        ocap = peer >= 0 ? __get_cap_for_mds(ci, peer) : NULL;
3367        if (ocap && ocap->cap_id == p_cap_id) {
3368                dout(" remove export cap %p mds%d flags %d\n",
3369                     ocap, peer, ph->flags);
3370                if ((ph->flags & CEPH_CAP_FLAG_AUTH) &&
3371                    (ocap->seq != le32_to_cpu(ph->seq) ||
3372                     ocap->mseq != le32_to_cpu(ph->mseq))) {
3373                        pr_err("handle_cap_import: mismatched seq/mseq: "
3374                               "ino (%llx.%llx) mds%d seq %d mseq %d "
3375                               "importer mds%d has peer seq %d mseq %d\n",
3376                               ceph_vinop(inode), peer, ocap->seq,
3377                               ocap->mseq, mds, le32_to_cpu(ph->seq),
3378                               le32_to_cpu(ph->mseq));
3379                }
3380                __ceph_remove_cap(ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
3381        }
3382
3383        /* make sure we re-request max_size, if necessary */
3384        ci->i_wanted_max_size = 0;
3385        ci->i_requested_max_size = 0;
3386
3387        *old_issued = issued;
3388        *target_cap = cap;
3389}
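
/*
 * Note on the retry loop above: ceph_get_cap() may sleep, so
 * handle_cap_import() drops i_ceph_lock before allocating and then
 * redoes the cap lookup; if another thread installed a cap in the
 * meantime, the spare allocation is returned via ceph_put_cap().
 */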
3390
3391/*
3392 * Handle a caps message from the MDS.
3393 *
3394 * Identify the appropriate session, inode, and call the right handler
3395 * based on the cap op.
3396 */
3397void ceph_handle_caps(struct ceph_mds_session *session,
3398                      struct ceph_msg *msg)
3399{
3400        struct ceph_mds_client *mdsc = session->s_mdsc;
3401        struct super_block *sb = mdsc->fsc->sb;
3402        struct inode *inode;
3403        struct ceph_inode_info *ci;
3404        struct ceph_cap *cap;
3405        struct ceph_mds_caps *h;
3406        struct ceph_mds_cap_peer *peer = NULL;
3407        struct ceph_snap_realm *realm;
3408        int mds = session->s_mds;
3409        int op, issued;
3410        u32 seq, mseq;
3411        struct ceph_vino vino;
3412        u64 cap_id;
3413        u64 size, max_size;
3414        u64 tid;
3415        u64 inline_version = 0;
3416        void *inline_data = NULL;
3417        u32  inline_len = 0;
3418        void *snaptrace;
3419        size_t snaptrace_len;
3420        u32 pool_ns_len = 0;
3421        void *p, *end;
3422
3423        dout("handle_caps from mds%d\n", mds);
3424
3425        /* decode */
3426        end = msg->front.iov_base + msg->front.iov_len;
3427        tid = le64_to_cpu(msg->hdr.tid);
3428        if (msg->front.iov_len < sizeof(*h))
3429                goto bad;
3430        h = msg->front.iov_base;
3431        op = le32_to_cpu(h->op);
3432        vino.ino = le64_to_cpu(h->ino);
3433        vino.snap = CEPH_NOSNAP;
3434        cap_id = le64_to_cpu(h->cap_id);
3435        seq = le32_to_cpu(h->seq);
3436        mseq = le32_to_cpu(h->migrate_seq);
3437        size = le64_to_cpu(h->size);
3438        max_size = le64_to_cpu(h->max_size);
3439
3440        snaptrace = h + 1;
3441        snaptrace_len = le32_to_cpu(h->snap_trace_len);
3442        p = snaptrace + snaptrace_len;
3443
3444        if (le16_to_cpu(msg->hdr.version) >= 2) {
3445                u32 flock_len;
3446                ceph_decode_32_safe(&p, end, flock_len, bad);
3447                if (p + flock_len > end)
3448                        goto bad;
3449                p += flock_len;
3450        }
3451
3452        if (le16_to_cpu(msg->hdr.version) >= 3) {
3453                if (op == CEPH_CAP_OP_IMPORT) {
3454                        if (p + sizeof(*peer) > end)
3455                                goto bad;
3456                        peer = p;
3457                        p += sizeof(*peer);
3458                } else if (op == CEPH_CAP_OP_EXPORT) {
3459                        /* recorded in unused fields */
3460                        peer = (void *)&h->size;
3461                }
3462        }
3463
3464        if (le16_to_cpu(msg->hdr.version) >= 4) {
3465                ceph_decode_64_safe(&p, end, inline_version, bad);
3466                ceph_decode_32_safe(&p, end, inline_len, bad);
3467                if (p + inline_len > end)
3468                        goto bad;
3469                inline_data = p;
3470                p += inline_len;
3471        }
3472
3473        if (le16_to_cpu(msg->hdr.version) >= 8) {
3474                u64 flush_tid;
3475                u32 caller_uid, caller_gid;
3476                u32 osd_epoch_barrier;
3477                /* version >= 5 */
3478                ceph_decode_32_safe(&p, end, osd_epoch_barrier, bad);
3479                /* version >= 6 */
3480                ceph_decode_64_safe(&p, end, flush_tid, bad);
3481                /* version >= 7 */
3482                ceph_decode_32_safe(&p, end, caller_uid, bad);
3483                ceph_decode_32_safe(&p, end, caller_gid, bad);
3484                /* version >= 8 */
3485                ceph_decode_32_safe(&p, end, pool_ns_len, bad);
3486        }
3487
3488        /* lookup ino */
3489        inode = ceph_find_inode(sb, vino);
3490        ci = ceph_inode(inode);
3491        dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
3492             vino.snap, inode);
3493
3494        mutex_lock(&session->s_mutex);
3495        session->s_seq++;
3496        dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
3497             (unsigned)seq);
3498
3499        if (!inode) {
3500                dout(" i don't have ino %llx\n", vino.ino);
3501
3502                if (op == CEPH_CAP_OP_IMPORT) {
3503                        cap = ceph_get_cap(mdsc, NULL);
3504                        cap->cap_ino = vino.ino;
3505                        cap->queue_release = 1;
3506                        cap->cap_id = cap_id;
3507                        cap->mseq = mseq;
3508                        cap->seq = seq;
3509                        spin_lock(&session->s_cap_lock);
3510                        list_add_tail(&cap->session_caps,
3511                                        &session->s_cap_releases);
3512                        session->s_num_cap_releases++;
3513                        spin_unlock(&session->s_cap_lock);
3514                }
3515                goto flush_cap_releases;
3516        }
3517
3518        /* these will work even if we don't have a cap yet */
3519        switch (op) {
3520        case CEPH_CAP_OP_FLUSHSNAP_ACK:
3521                handle_cap_flushsnap_ack(inode, tid, h, session);
3522                goto done;
3523
3524        case CEPH_CAP_OP_EXPORT:
3525                handle_cap_export(inode, h, peer, session);
3526                goto done_unlocked;
3527
3528        case CEPH_CAP_OP_IMPORT:
3529                realm = NULL;
3530                if (snaptrace_len) {
3531                        down_write(&mdsc->snap_rwsem);
3532                        ceph_update_snap_trace(mdsc, snaptrace,
3533                                               snaptrace + snaptrace_len,
3534                                               false, &realm);
3535                        downgrade_write(&mdsc->snap_rwsem);
3536                } else {
3537                        down_read(&mdsc->snap_rwsem);
3538                }
3539                handle_cap_import(mdsc, inode, h, peer, session,
3540                                  &cap, &issued);
3541                handle_cap_grant(mdsc, inode, h,
3542                                 inline_version, inline_data, inline_len,
3543                                 msg->middle, session, cap, issued,
3544                                 pool_ns_len);
3545                if (realm)
3546                        ceph_put_snap_realm(mdsc, realm);
3547                goto done_unlocked;
3548        }
3549
3550        /* the rest require a cap */
3551        spin_lock(&ci->i_ceph_lock);
3552        cap = __get_cap_for_mds(ceph_inode(inode), mds);
3553        if (!cap) {
3554                dout(" no cap on %p ino %llx.%llx from mds%d\n",
3555                     inode, ceph_ino(inode), ceph_snap(inode), mds);
3556                spin_unlock(&ci->i_ceph_lock);
3557                goto flush_cap_releases;
3558        }
3559
3560        /* note that each of these drops i_ceph_lock for us */
3561        switch (op) {
3562        case CEPH_CAP_OP_REVOKE:
3563        case CEPH_CAP_OP_GRANT:
3564                __ceph_caps_issued(ci, &issued);
3565                issued |= __ceph_caps_dirty(ci);
3566                handle_cap_grant(mdsc, inode, h,
3567                                 inline_version, inline_data, inline_len,
3568                                 msg->middle, session, cap, issued,
3569                                 pool_ns_len);
3570                goto done_unlocked;
3571
3572        case CEPH_CAP_OP_FLUSH_ACK:
3573                handle_cap_flush_ack(inode, tid, h, session, cap);
3574                break;
3575
3576        case CEPH_CAP_OP_TRUNC:
3577                handle_cap_trunc(inode, h, session);
3578                break;
3579
3580        default:
3581                spin_unlock(&ci->i_ceph_lock);
3582                pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
3583                       ceph_cap_op_name(op));
3584        }
3585
3586        goto done;
3587
3588flush_cap_releases:
3589        /*
3590         * send any queued cap release messages to try to move things
3591         * along for the mds (who clearly thinks we still have this
3592         * cap).
3593         */
3594        ceph_send_cap_releases(mdsc, session);
3595
3596done:
3597        mutex_unlock(&session->s_mutex);
3598done_unlocked:
3599        iput(inode);
3600        return;
3601
3602bad:
3603        pr_err("ceph_handle_caps: corrupt message\n");
3604        ceph_msg_dump(msg);
3605        return;
3606}
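
/*
 * The decode path above leans on the ceph_decode_*_safe() helpers,
 * which bounds-check against @end and branch to the given label when
 * the buffer runs short.  A self-contained sketch of the pattern
 * (hypothetical helper and fields):
 */
static bool __maybe_unused decode_two_fields(void **p, void *end,
                                             u32 *a, u64 *b)
{
        ceph_decode_32_safe(p, end, *a, bad);   /* consumes 4 bytes */
        ceph_decode_64_safe(p, end, *b, bad);   /* consumes 8 bytes */
        return true;
bad:
        return false;                           /* short/corrupt buffer */
}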
3607
3608/*
3609 * Delayed work handler to process end of delayed cap release LRU list.
3610 */
3611void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
3612{
3613        struct ceph_inode_info *ci;
3614        int flags = CHECK_CAPS_NODELAY;
3615
3616        dout("check_delayed_caps\n");
3617        while (1) {
3618                spin_lock(&mdsc->cap_delay_lock);
3619                if (list_empty(&mdsc->cap_delay_list))
3620                        break;
3621                ci = list_first_entry(&mdsc->cap_delay_list,
3622                                      struct ceph_inode_info,
3623                                      i_cap_delay_list);
3624                if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
3625                    time_before(jiffies, ci->i_hold_caps_max))
3626                        break;
3627                list_del_init(&ci->i_cap_delay_list);
3628                spin_unlock(&mdsc->cap_delay_lock);
3629                dout("check_delayed_caps on %p\n", &ci->vfs_inode);
3630                ceph_check_caps(ci, flags, NULL);
3631        }
3632        spin_unlock(&mdsc->cap_delay_lock);
3633}
3634
3635/*
3636 * Flush all dirty caps to the mds
3637 */
3638void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
3639{
3640        struct ceph_inode_info *ci;
3641        struct inode *inode;
3642
3643        dout("flush_dirty_caps\n");
3644        spin_lock(&mdsc->cap_dirty_lock);
3645        while (!list_empty(&mdsc->cap_dirty)) {
3646                ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
3647                                      i_dirty_item);
3648                inode = &ci->vfs_inode;
3649                ihold(inode);
3650                dout("flush_dirty_caps %p\n", inode);
3651                spin_unlock(&mdsc->cap_dirty_lock);
3652                ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL);
3653                iput(inode);
3654                spin_lock(&mdsc->cap_dirty_lock);
3655        }
3656        spin_unlock(&mdsc->cap_dirty_lock);
3657        dout("flush_dirty_caps done\n");
3658}
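
/*
 * Both ceph_check_delayed_caps() and ceph_flush_dirty_caps() use the
 * same loop shape: pop one item under the spinlock, drop the lock to do
 * the real (possibly sleeping) work, then retake it.  A condensed,
 * illustrative sketch of the pattern with the processing elided:
 */
static void __maybe_unused drain_list_unlocked_work(spinlock_t *lock,
                                                    struct list_head *head)
{
        spin_lock(lock);
        while (!list_empty(head)) {
                struct list_head *item = head->next;

                list_del_init(item);
                spin_unlock(lock);
                /* ... process item here without holding the lock ... */
                spin_lock(lock);
        }
        spin_unlock(lock);
}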
3659
3660/*
3661 * Drop open file reference.  If we were the last open file,
3662 * we may need to release capabilities to the MDS (or schedule
3663 * their delayed release).
3664 */
3665void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
3666{
3667        struct inode *inode = &ci->vfs_inode;
3668        int last = 0;
3669
3670        spin_lock(&ci->i_ceph_lock);
3671        dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
3672             ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
3673        BUG_ON(ci->i_nr_by_mode[fmode] == 0);
3674        if (--ci->i_nr_by_mode[fmode] == 0)
3675                last++;
3676        spin_unlock(&ci->i_ceph_lock);
3677
3678        if (last && ci->i_vino.snap == CEPH_NOSNAP)
3679                ceph_check_caps(ci, 0, NULL);
3680}
3681
3682/*
3683 * Helpers for embedding cap and dentry lease releases into mds
3684 * requests.
3685 *
3686 * @force is used by dentry_release (below) to force inclusion of a
3687 * record for the directory inode, even when there aren't any caps to
3688 * drop.
3689 */
3690int ceph_encode_inode_release(void **p, struct inode *inode,
3691                              int mds, int drop, int unless, int force)
3692{
3693        struct ceph_inode_info *ci = ceph_inode(inode);
3694        struct ceph_cap *cap;
3695        struct ceph_mds_request_release *rel = *p;
3696        int used, dirty;
3697        int ret = 0;
3698
3699        spin_lock(&ci->i_ceph_lock);
3700        used = __ceph_caps_used(ci);
3701        dirty = __ceph_caps_dirty(ci);
3702
3703        dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n",
3704             inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop),
3705             ceph_cap_string(unless));
3706
3707        /* only drop unused, clean caps */
3708        drop &= ~(used | dirty);
3709
3710        cap = __get_cap_for_mds(ci, mds);
3711        if (cap && __cap_is_valid(cap)) {
3712                if (force ||
3713                    ((cap->issued & drop) &&
3714                     (cap->issued & unless) == 0)) {
3715                        if ((cap->issued & drop) &&
3716                            (cap->issued & unless) == 0) {
3717                                int wanted = __ceph_caps_wanted(ci);
3718                                if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0)
3719                                        wanted |= cap->mds_wanted;
3720                                dout("encode_inode_release %p cap %p "
3721                                     "%s -> %s, wanted %s -> %s\n", inode, cap,
3722                                     ceph_cap_string(cap->issued),
3723                                     ceph_cap_string(cap->issued & ~drop),
3724                                     ceph_cap_string(cap->mds_wanted),
3725                                     ceph_cap_string(wanted));
3726
3727                                cap->issued &= ~drop;
3728                                cap->implemented &= ~drop;
3729                                cap->mds_wanted = wanted;
3730                        } else {
3731                                dout("encode_inode_release %p cap %p %s"
3732                                     " (force)\n", inode, cap,
3733                                     ceph_cap_string(cap->issued));
3734                        }
3735
3736                        rel->ino = cpu_to_le64(ceph_ino(inode));
3737                        rel->cap_id = cpu_to_le64(cap->cap_id);
3738                        rel->seq = cpu_to_le32(cap->seq);
3739                        rel->issue_seq = cpu_to_le32(cap->issue_seq);
3740                        rel->mseq = cpu_to_le32(cap->mseq);
3741                        rel->caps = cpu_to_le32(cap->implemented);
3742                        rel->wanted = cpu_to_le32(cap->mds_wanted);
3743                        rel->dname_len = 0;
3744                        rel->dname_seq = 0;
3745                        *p += sizeof(*rel);
3746                        ret = 1;
3747                } else {
3748                        dout("encode_inode_release %p cap %p %s\n",
3749                             inode, cap, ceph_cap_string(cap->issued));
3750                }
3751        }
3752        spin_unlock(&ci->i_ceph_lock);
3753        return ret;
3754}
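
/*
 * Sketch of the calling convention (hypothetical buffer handling): the
 * caller reserves room for the release record ahead of time and passes
 * a cursor, which advances past any record written:
 *
 *      void *p = reserved_buf;
 *      int nr = ceph_encode_inode_release(&p, inode, mds,
 *                                         drop, unless, 0);
 *
 * On return, nr is 1 and p has advanced by sizeof(struct
 * ceph_mds_request_release) iff a record was emitted.
 */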
3755
3756int ceph_encode_dentry_release(void **p, struct dentry *dentry,
3757                               int mds, int drop, int unless)
3758{
3759        struct inode *dir = d_inode(dentry->d_parent);
3760        struct ceph_mds_request_release *rel = *p;
3761        struct ceph_dentry_info *di = ceph_dentry(dentry);
3762        int force = 0;
3763        int ret;
3764
3765        /*
3766         * Force a record for the directory caps if we have a dentry lease.
3767         * This is racy (we can't take i_ceph_lock and d_lock together), but it
3768         * doesn't have to be perfect; the mds will revoke anything we don't
3769         * release.
3770         */
3771        spin_lock(&dentry->d_lock);
3772        if (di->lease_session && di->lease_session->s_mds == mds)
3773                force = 1;
3774        spin_unlock(&dentry->d_lock);
3775
3776        ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
3777
3778        spin_lock(&dentry->d_lock);
3779        if (ret && di->lease_session && di->lease_session->s_mds == mds) {
3780                dout("encode_dentry_release %p mds%d seq %d\n",
3781                     dentry, mds, (int)di->lease_seq);
3782                rel->dname_len = cpu_to_le32(dentry->d_name.len);
3783                memcpy(*p, dentry->d_name.name, dentry->d_name.len);
3784                *p += dentry->d_name.len;
3785                rel->dname_seq = cpu_to_le32(di->lease_seq);
3786                __ceph_mdsc_drop_dentry_lease(dentry);
3787        }
3788        spin_unlock(&dentry->d_lock);
3789        return ret;
3790}
3791