linux/fs/ceph/mds_client.c
   1#include <linux/ceph/ceph_debug.h>
   2
   3#include <linux/fs.h>
   4#include <linux/wait.h>
   5#include <linux/slab.h>
   6#include <linux/gfp.h>
   7#include <linux/sched.h>
   8#include <linux/debugfs.h>
   9#include <linux/seq_file.h>
  10#include <linux/utsname.h>
  11#include <linux/ratelimit.h>
  12
  13#include "super.h"
  14#include "mds_client.h"
  15
  16#include <linux/ceph/ceph_features.h>
  17#include <linux/ceph/messenger.h>
  18#include <linux/ceph/decode.h>
  19#include <linux/ceph/pagelist.h>
  20#include <linux/ceph/auth.h>
  21#include <linux/ceph/debugfs.h>
  22
  23/*
  24 * A cluster of MDS (metadata server) daemons is responsible for
  25 * managing the file system namespace (the directory hierarchy and
  26 * inodes) and for coordinating shared access to storage.  Metadata is
  27 * partitioned hierarchically across a number of servers, and that
  28 * partition varies over time as the cluster adjusts the distribution
  29 * in order to balance load.
  30 *
  31 * The MDS client is primarily responsible for managing synchronous
  32 * metadata requests for operations like open, unlink, and so forth.
  33 * If there is an MDS failure, we find out about it when we (possibly
  34 * request and) receive a new MDS map, and can resubmit affected
  35 * requests.
  36 *
  37 * For the most part, though, we take advantage of a lossless
  38 * communications channel to the MDS, and do not need to worry about
  39 * timing out or resubmitting requests.
  40 *
  41 * We maintain a stateful "session" with each MDS we interact with.
  42 * Within each session, we send periodic heartbeat messages to ensure
  43 * any capabilities or leases we have been issued remain valid.  If
  44 * the session times out and goes stale, our leases and capabilities
  45 * are no longer valid.
  46 */
  47
  48struct ceph_reconnect_state {
  49        int nr_caps;
  50        struct ceph_pagelist *pagelist;
  51        unsigned msg_version;
  52};
  53
  54static void __wake_requests(struct ceph_mds_client *mdsc,
  55                            struct list_head *head);
  56
  57static const struct ceph_connection_operations mds_con_ops;
  58
  59
  60/*
  61 * mds reply parsing
  62 */
  63
  64/*
  65 * parse individual inode info
  66 */
  67static int parse_reply_info_in(void **p, void *end,
  68                               struct ceph_mds_reply_info_in *info,
  69                               u64 features)
  70{
  71        int err = -EIO;
  72
  73        info->in = *p;
  74        *p += sizeof(struct ceph_mds_reply_inode) +
  75                sizeof(*info->in->fragtree.splits) *
  76                le32_to_cpu(info->in->fragtree.nsplits);
  77
  78        ceph_decode_32_safe(p, end, info->symlink_len, bad);
  79        ceph_decode_need(p, end, info->symlink_len, bad);
  80        info->symlink = *p;
  81        *p += info->symlink_len;
  82
  83        if (features & CEPH_FEATURE_DIRLAYOUTHASH)
  84                ceph_decode_copy_safe(p, end, &info->dir_layout,
  85                                      sizeof(info->dir_layout), bad);
  86        else
  87                memset(&info->dir_layout, 0, sizeof(info->dir_layout));
  88
  89        ceph_decode_32_safe(p, end, info->xattr_len, bad);
  90        ceph_decode_need(p, end, info->xattr_len, bad);
  91        info->xattr_data = *p;
  92        *p += info->xattr_len;
  93
  94        if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
  95                ceph_decode_64_safe(p, end, info->inline_version, bad);
  96                ceph_decode_32_safe(p, end, info->inline_len, bad);
  97                ceph_decode_need(p, end, info->inline_len, bad);
  98                info->inline_data = *p;
  99                *p += info->inline_len;
 100        } else
 101                info->inline_version = CEPH_INLINE_NONE;
 102
 103        info->pool_ns_len = 0;
 104        info->pool_ns_data = NULL;
 105        if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
 106                ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
 107                if (info->pool_ns_len > 0) {
 108                        ceph_decode_need(p, end, info->pool_ns_len, bad);
 109                        info->pool_ns_data = *p;
 110                        *p += info->pool_ns_len;
 111                }
 112        }
 113
 114        return 0;
 115bad:
 116        return err;
 117}
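
/*
 * For reference, the inode record consumed above is laid out roughly as:
 *
 *   struct ceph_mds_reply_inode (+ fragtree.nsplits split entries)
 *   u32 symlink_len, symlink bytes
 *   dir_layout                      (CEPH_FEATURE_DIRLAYOUTHASH only)
 *   u32 xattr_len, xattr blob
 *   u64 inline_version, u32 inline_len, inline data
 *                                   (CEPH_FEATURE_MDS_INLINE_DATA only)
 *   u32 pool_ns_len, pool namespace (CEPH_FEATURE_FS_FILE_LAYOUT_V2 only)
 */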
 118
 119/*
 120 * parse a normal reply, which may contain a (dir+)dentry and/or a
 121 * target inode.
 122 */
 123static int parse_reply_info_trace(void **p, void *end,
 124                                  struct ceph_mds_reply_info_parsed *info,
 125                                  u64 features)
 126{
 127        int err;
 128
 129        if (info->head->is_dentry) {
 130                err = parse_reply_info_in(p, end, &info->diri, features);
 131                if (err < 0)
 132                        goto out_bad;
 133
 134                if (unlikely(*p + sizeof(*info->dirfrag) > end))
 135                        goto bad;
 136                info->dirfrag = *p;
 137                *p += sizeof(*info->dirfrag) +
 138                        sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
 139                if (unlikely(*p > end))
 140                        goto bad;
 141
 142                ceph_decode_32_safe(p, end, info->dname_len, bad);
 143                ceph_decode_need(p, end, info->dname_len, bad);
 144                info->dname = *p;
 145                *p += info->dname_len;
 146                info->dlease = *p;
 147                *p += sizeof(*info->dlease);
 148        }
 149
 150        if (info->head->is_target) {
 151                err = parse_reply_info_in(p, end, &info->targeti, features);
 152                if (err < 0)
 153                        goto out_bad;
 154        }
 155
 156        if (unlikely(*p != end))
 157                goto bad;
 158        return 0;
 159
 160bad:
 161        err = -EIO;
 162out_bad:
 163        pr_err("problem parsing mds trace %d\n", err);
 164        return err;
 165}
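
/*
 * In other words, the trace blob carries, depending on the head flags:
 * when is_dentry is set, an inode record for the containing dir, a
 * ceph_mds_reply_dirfrag plus its dist array, a length-prefixed dentry
 * name and a dentry lease; when is_target is set, an inode record for
 * the target.
 */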
 166
 167/*
 168 * parse readdir results
 169 */
 170static int parse_reply_info_dir(void **p, void *end,
 171                                struct ceph_mds_reply_info_parsed *info,
 172                                u64 features)
 173{
 174        u32 num, i = 0;
 175        int err;
 176
 177        info->dir_dir = *p;
 178        if (*p + sizeof(*info->dir_dir) > end)
 179                goto bad;
 180        *p += sizeof(*info->dir_dir) +
 181                sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
 182        if (*p > end)
 183                goto bad;
 184
 185        ceph_decode_need(p, end, sizeof(num) + 2, bad);
 186        num = ceph_decode_32(p);
 187        {
 188                u16 flags = ceph_decode_16(p);
 189                info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
 190                info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
 191                info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
 192        }
 193        if (num == 0)
 194                goto done;
 195
 196        BUG_ON(!info->dir_entries);
 197        if ((unsigned long)(info->dir_entries + num) >
 198            (unsigned long)info->dir_entries + info->dir_buf_size) {
 199                pr_err("dir contents are larger than expected\n");
 200                WARN_ON(1);
 201                goto bad;
 202        }
 203
 204        info->dir_nr = num;
 205        while (num) {
 206                struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
 207                /* dentry */
 208                ceph_decode_need(p, end, sizeof(u32)*2, bad);
 209                rde->name_len = ceph_decode_32(p);
 210                ceph_decode_need(p, end, rde->name_len, bad);
 211                rde->name = *p;
 212                *p += rde->name_len;
 213                dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);
 214                rde->lease = *p;
 215                *p += sizeof(struct ceph_mds_reply_lease);
 216
 217                /* inode */
 218                err = parse_reply_info_in(p, end, &rde->inode, features);
 219                if (err < 0)
 220                        goto out_bad;
 221                /* ceph_readdir_prepopulate() will update it */
 222                rde->offset = 0;
 223                i++;
 224                num--;
 225        }
 226
 227done:
 228        if (*p != end)
 229                goto bad;
 230        return 0;
 231
 232bad:
 233        err = -EIO;
 234out_bad:
 235        pr_err("problem parsing dir contents %d\n", err);
 236        return err;
 237}
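
/*
 * To summarize the readdir payload: a ceph_mds_reply_dirfrag (plus dist
 * array), a u32 entry count and u16 flags (end/complete/hash_order),
 * then for each entry a length-prefixed name, a ceph_mds_reply_lease,
 * and an inode record.
 */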
 238
 239/*
 240 * parse fcntl F_GETLK results
 241 */
 242static int parse_reply_info_filelock(void **p, void *end,
 243                                     struct ceph_mds_reply_info_parsed *info,
 244                                     u64 features)
 245{
 246        if (*p + sizeof(*info->filelock_reply) > end)
 247                goto bad;
 248
 249        info->filelock_reply = *p;
 250        *p += sizeof(*info->filelock_reply);
 251
 252        if (unlikely(*p != end))
 253                goto bad;
 254        return 0;
 255
 256bad:
 257        return -EIO;
 258}
 259
 260/*
 261 * parse create results
 262 */
 263static int parse_reply_info_create(void **p, void *end,
 264                                  struct ceph_mds_reply_info_parsed *info,
 265                                  u64 features)
 266{
 267        if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
 268                if (*p == end) {
 269                        info->has_create_ino = false;
 270                } else {
 271                        info->has_create_ino = true;
 272                        info->ino = ceph_decode_64(p);
 273                }
 274        }
 275
 276        if (unlikely(*p != end))
 277                goto bad;
 278        return 0;
 279
 280bad:
 281        return -EIO;
 282}
 283
 284/*
 285 * parse extra results
 286 */
 287static int parse_reply_info_extra(void **p, void *end,
 288                                  struct ceph_mds_reply_info_parsed *info,
 289                                  u64 features)
 290{
 291        u32 op = le32_to_cpu(info->head->op);
 292
 293        if (op == CEPH_MDS_OP_GETFILELOCK)
 294                return parse_reply_info_filelock(p, end, info, features);
 295        else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
 296                return parse_reply_info_dir(p, end, info, features);
 297        else if (op == CEPH_MDS_OP_CREATE)
 298                return parse_reply_info_create(p, end, info, features);
 299        else
 300                return -EIO;
 301}
 302
 303/*
 304 * parse entire mds reply
 305 */
 306static int parse_reply_info(struct ceph_msg *msg,
 307                            struct ceph_mds_reply_info_parsed *info,
 308                            u64 features)
 309{
 310        void *p, *end;
 311        u32 len;
 312        int err;
 313
 314        info->head = msg->front.iov_base;
 315        p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
 316        end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);
 317
 318        /* trace */
 319        ceph_decode_32_safe(&p, end, len, bad);
 320        if (len > 0) {
 321                ceph_decode_need(&p, end, len, bad);
 322                err = parse_reply_info_trace(&p, p+len, info, features);
 323                if (err < 0)
 324                        goto out_bad;
 325        }
 326
 327        /* extra */
 328        ceph_decode_32_safe(&p, end, len, bad);
 329        if (len > 0) {
 330                ceph_decode_need(&p, end, len, bad);
 331                err = parse_reply_info_extra(&p, p+len, info, features);
 332                if (err < 0)
 333                        goto out_bad;
 334        }
 335
 336        /* snap blob */
 337        ceph_decode_32_safe(&p, end, len, bad);
 338        info->snapblob_len = len;
 339        info->snapblob = p;
 340        p += len;
 341
 342        if (p != end)
 343                goto bad;
 344        return 0;
 345
 346bad:
 347        err = -EIO;
 348out_bad:
 349        pr_err("mds parse_reply err %d\n", err);
 350        return err;
 351}
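
/*
 * So a full reply body is the ceph_mds_reply_head followed by three
 * length-prefixed blobs: the trace, the op-specific "extra" section,
 * and the snap blob.
 */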
 352
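/*
 * Free the page-backed readdir entry buffer attached to a parsed reply,
 * if one was allocated (its size is tracked in dir_buf_size).
 */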
 353static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
 354{
 355        if (!info->dir_entries)
 356                return;
 357        free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
 358}
 359
 360
 361/*
 362 * sessions
 363 */
 364const char *ceph_session_state_name(int s)
 365{
 366        switch (s) {
 367        case CEPH_MDS_SESSION_NEW: return "new";
 368        case CEPH_MDS_SESSION_OPENING: return "opening";
 369        case CEPH_MDS_SESSION_OPEN: return "open";
 370        case CEPH_MDS_SESSION_HUNG: return "hung";
 371        case CEPH_MDS_SESSION_CLOSING: return "closing";
 372        case CEPH_MDS_SESSION_RESTARTING: return "restarting";
 373        case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
 374        case CEPH_MDS_SESSION_REJECTED: return "rejected";
 375        default: return "???";
 376        }
 377}
 378
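/*
 * Take a reference on a session, unless its refcount has already hit
 * zero (i.e. it is being torn down), in which case return NULL.
 */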
 379static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
 380{
 381        if (atomic_inc_not_zero(&s->s_ref)) {
 382                dout("mdsc get_session %p %d -> %d\n", s,
 383                     atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
 384                return s;
 385        } else {
 386                dout("mdsc get_session %p 0 -- FAIL", s);
 387                return NULL;
 388        }
 389}
 390
 391void ceph_put_mds_session(struct ceph_mds_session *s)
 392{
 393        dout("mdsc put_session %p %d -> %d\n", s,
 394             atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
 395        if (atomic_dec_and_test(&s->s_ref)) {
 396                if (s->s_auth.authorizer)
 397                        ceph_auth_destroy_authorizer(s->s_auth.authorizer);
 398                kfree(s);
 399        }
 400}
 401
 402/*
 403 * called under mdsc->mutex
 404 */
 405struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
 406                                                   int mds)
 407{
 408        struct ceph_mds_session *session;
 409
 410        if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
 411                return NULL;
 412        session = mdsc->sessions[mds];
 413        dout("lookup_mds_session %p %d\n", session,
 414             atomic_read(&session->s_ref));
 415        get_session(session);
 416        return session;
 417}
 418
 419static bool __have_session(struct ceph_mds_client *mdsc, int mds)
 420{
 421        if (mds >= mdsc->max_sessions)
 422                return false;
 423        return mdsc->sessions[mds];
 424}
 425
 426static int __verify_registered_session(struct ceph_mds_client *mdsc,
 427                                       struct ceph_mds_session *s)
 428{
 429        if (s->s_mds >= mdsc->max_sessions ||
 430            mdsc->sessions[s->s_mds] != s)
 431                return -ENOENT;
 432        return 0;
 433}
 434
 435/*
 436 * create+register a new session for given mds.
 437 * called under mdsc->mutex.
 438 */
 439static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
 440                                                 int mds)
 441{
 442        struct ceph_mds_session *s;
 443
 444        if (mds >= mdsc->mdsmap->m_max_mds)
 445                return ERR_PTR(-EINVAL);
 446
 447        s = kzalloc(sizeof(*s), GFP_NOFS);
 448        if (!s)
 449                return ERR_PTR(-ENOMEM);
 450        s->s_mdsc = mdsc;
 451        s->s_mds = mds;
 452        s->s_state = CEPH_MDS_SESSION_NEW;
 453        s->s_ttl = 0;
 454        s->s_seq = 0;
 455        mutex_init(&s->s_mutex);
 456
 457        ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
 458
 459        spin_lock_init(&s->s_gen_ttl_lock);
 460        s->s_cap_gen = 0;
 461        s->s_cap_ttl = jiffies - 1;
 462
 463        spin_lock_init(&s->s_cap_lock);
 464        s->s_renew_requested = 0;
 465        s->s_renew_seq = 0;
 466        INIT_LIST_HEAD(&s->s_caps);
 467        s->s_nr_caps = 0;
 468        s->s_trim_caps = 0;
 469        atomic_set(&s->s_ref, 1);
 470        INIT_LIST_HEAD(&s->s_waiting);
 471        INIT_LIST_HEAD(&s->s_unsafe);
 472        s->s_num_cap_releases = 0;
 473        s->s_cap_reconnect = 0;
 474        s->s_cap_iterator = NULL;
 475        INIT_LIST_HEAD(&s->s_cap_releases);
 476        INIT_LIST_HEAD(&s->s_cap_flushing);
 477
 478        dout("register_session mds%d\n", mds);
 479        if (mds >= mdsc->max_sessions) {
 480                int newmax = 1 << get_count_order(mds+1);
 481                struct ceph_mds_session **sa;
 482
 483                dout("register_session realloc to %d\n", newmax);
 484                sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
 485                if (sa == NULL)
 486                        goto fail_realloc;
 487                if (mdsc->sessions) {
 488                        memcpy(sa, mdsc->sessions,
 489                               mdsc->max_sessions * sizeof(void *));
 490                        kfree(mdsc->sessions);
 491                }
 492                mdsc->sessions = sa;
 493                mdsc->max_sessions = newmax;
 494        }
 495        mdsc->sessions[mds] = s;
 496        atomic_inc(&mdsc->num_sessions);
 497        atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */
 498
 499        ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
 500                      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
 501
 502        return s;
 503
 504fail_realloc:
 505        kfree(s);
 506        return ERR_PTR(-ENOMEM);
 507}
 508
 509/*
 510 * called under mdsc->mutex
 511 */
 512static void __unregister_session(struct ceph_mds_client *mdsc,
 513                               struct ceph_mds_session *s)
 514{
 515        dout("__unregister_session mds%d %p\n", s->s_mds, s);
 516        BUG_ON(mdsc->sessions[s->s_mds] != s);
 517        mdsc->sessions[s->s_mds] = NULL;
 518        ceph_con_close(&s->s_con);
 519        ceph_put_mds_session(s);
 520        atomic_dec(&mdsc->num_sessions);
 521}
 522
 523/*
 524 * drop session refs in request.
 525 *
 526 * should be last request ref, or hold mdsc->mutex
 527 */
 528static void put_request_session(struct ceph_mds_request *req)
 529{
 530        if (req->r_session) {
 531                ceph_put_mds_session(req->r_session);
 532                req->r_session = NULL;
 533        }
 534}
 535
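/*
 * Final teardown of a request once its last reference is dropped: free
 * the parsed reply info, put the request/reply messages, drop the
 * CEPH_CAP_PIN references and inode/dentry references taken for the
 * operation, free the paths and pagelist, put the session, and release
 * any remaining cap reservation.
 */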
 536void ceph_mdsc_release_request(struct kref *kref)
 537{
 538        struct ceph_mds_request *req = container_of(kref,
 539                                                    struct ceph_mds_request,
 540                                                    r_kref);
 541        destroy_reply_info(&req->r_reply_info);
 542        if (req->r_request)
 543                ceph_msg_put(req->r_request);
 544        if (req->r_reply)
 545                ceph_msg_put(req->r_reply);
 546        if (req->r_inode) {
 547                ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
 548                iput(req->r_inode);
 549        }
 550        if (req->r_locked_dir)
 551                ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
 552        iput(req->r_target_inode);
 553        if (req->r_dentry)
 554                dput(req->r_dentry);
 555        if (req->r_old_dentry)
 556                dput(req->r_old_dentry);
 557        if (req->r_old_dentry_dir) {
 558                /*
 559                 * track (and drop pins for) r_old_dentry_dir
 560                 * separately, since r_old_dentry's d_parent may have
 561                 * changed between the dir mutex being dropped and
 562                 * this request being freed.
 563                 */
 564                ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
 565                                  CEPH_CAP_PIN);
 566                iput(req->r_old_dentry_dir);
 567        }
 568        kfree(req->r_path1);
 569        kfree(req->r_path2);
 570        if (req->r_pagelist)
 571                ceph_pagelist_release(req->r_pagelist);
 572        put_request_session(req);
 573        ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
 574        kfree(req);
 575}
 576
 577DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)
 578
 579/*
 580 * lookup request, bump ref if found.
 581 *
 582 * called under mdsc->mutex.
 583 */
 584static struct ceph_mds_request *
 585lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
 586{
 587        struct ceph_mds_request *req;
 588
 589        req = lookup_request(&mdsc->request_tree, tid);
 590        if (req)
 591                ceph_mdsc_get_request(req);
 592
 593        return req;
 594}
 595
 596/*
 597 * Register an in-flight request, and assign a tid.  Link to the
 598 * directory we are modifying (if any).
 599 *
 600 * Called under mdsc->mutex.
 601 */
 602static void __register_request(struct ceph_mds_client *mdsc,
 603                               struct ceph_mds_request *req,
 604                               struct inode *dir)
 605{
 606        req->r_tid = ++mdsc->last_tid;
 607        if (req->r_num_caps)
 608                ceph_reserve_caps(mdsc, &req->r_caps_reservation,
 609                                  req->r_num_caps);
 610        dout("__register_request %p tid %lld\n", req, req->r_tid);
 611        ceph_mdsc_get_request(req);
 612        insert_request(&mdsc->request_tree, req);
 613
 614        req->r_uid = current_fsuid();
 615        req->r_gid = current_fsgid();
 616
 617        if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
 618                mdsc->oldest_tid = req->r_tid;
 619
 620        if (dir) {
 621                ihold(dir);
 622                req->r_unsafe_dir = dir;
 623        }
 624}
 625
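/*
 * Remove a request from the request tree, advancing oldest_tid past it
 * if necessary, unhook it from any per-inode unsafe lists, drop the
 * unsafe dir reference, and complete r_safe_completion.
 *
 * Called under mdsc->mutex.
 */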
 626static void __unregister_request(struct ceph_mds_client *mdsc,
 627                                 struct ceph_mds_request *req)
 628{
 629        dout("__unregister_request %p tid %lld\n", req, req->r_tid);
 630
 631        if (req->r_tid == mdsc->oldest_tid) {
 632                struct rb_node *p = rb_next(&req->r_node);
 633                mdsc->oldest_tid = 0;
 634                while (p) {
 635                        struct ceph_mds_request *next_req =
 636                                rb_entry(p, struct ceph_mds_request, r_node);
 637                        if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
 638                                mdsc->oldest_tid = next_req->r_tid;
 639                                break;
 640                        }
 641                        p = rb_next(p);
 642                }
 643        }
 644
 645        erase_request(&mdsc->request_tree, req);
 646
 647        if (req->r_unsafe_dir && req->r_got_unsafe) {
 648                struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
 649                spin_lock(&ci->i_unsafe_lock);
 650                list_del_init(&req->r_unsafe_dir_item);
 651                spin_unlock(&ci->i_unsafe_lock);
 652        }
 653        if (req->r_target_inode && req->r_got_unsafe) {
 654                struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
 655                spin_lock(&ci->i_unsafe_lock);
 656                list_del_init(&req->r_unsafe_target_item);
 657                spin_unlock(&ci->i_unsafe_lock);
 658        }
 659
 660        if (req->r_unsafe_dir) {
 661                iput(req->r_unsafe_dir);
 662                req->r_unsafe_dir = NULL;
 663        }
 664
 665        complete_all(&req->r_safe_completion);
 666
 667        ceph_mdsc_put_request(req);
 668}
 669
 670/*
 671 * Choose mds to send request to next.  If there is a hint set in the
 672 * request (e.g., due to a prior forward hint from the mds), use that.
 673 * Otherwise, consult frag tree and/or caps to identify the
 674 * appropriate mds.  If all else fails, choose randomly.
 675 *
 676 * Called under mdsc->mutex.
 677 */
 678static struct dentry *get_nonsnap_parent(struct dentry *dentry)
 679{
 680        /*
 681         * we don't need to worry about protecting the d_parent access
 682         * here because we never rename inside the snapped namespace
 683         * except to resplice to another snapdir, and either the old or new
 684         * result is a valid result.
 685         */
 686        while (!IS_ROOT(dentry) && ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
 687                dentry = dentry->d_parent;
 688        return dentry;
 689}
 690
 691static int __choose_mds(struct ceph_mds_client *mdsc,
 692                        struct ceph_mds_request *req)
 693{
 694        struct inode *inode;
 695        struct ceph_inode_info *ci;
 696        struct ceph_cap *cap;
 697        int mode = req->r_direct_mode;
 698        int mds = -1;
 699        u32 hash = req->r_direct_hash;
 700        bool is_hash = req->r_direct_is_hash;
 701
 702        /*
 703         * is there a specific mds we should try?  ignore hint if we have
 704         * no session and the mds is not up (active or recovering).
 705         */
 706        if (req->r_resend_mds >= 0 &&
 707            (__have_session(mdsc, req->r_resend_mds) ||
 708             ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
 709                dout("choose_mds using resend_mds mds%d\n",
 710                     req->r_resend_mds);
 711                return req->r_resend_mds;
 712        }
 713
 714        if (mode == USE_RANDOM_MDS)
 715                goto random;
 716
 717        inode = NULL;
 718        if (req->r_inode) {
 719                inode = req->r_inode;
 720        } else if (req->r_dentry) {
 721                /* ignore race with rename; old or new d_parent is okay */
 722                struct dentry *parent = req->r_dentry->d_parent;
 723                struct inode *dir = d_inode(parent);
 724
 725                if (dir->i_sb != mdsc->fsc->sb) {
 726                        /* not this fs! */
 727                        inode = d_inode(req->r_dentry);
 728                } else if (ceph_snap(dir) != CEPH_NOSNAP) {
 729                        /* direct snapped/virtual snapdir requests
 730                         * based on parent dir inode */
 731                        struct dentry *dn = get_nonsnap_parent(parent);
 732                        inode = d_inode(dn);
 733                        dout("__choose_mds using nonsnap parent %p\n", inode);
 734                } else {
 735                        /* dentry target */
 736                        inode = d_inode(req->r_dentry);
 737                        if (!inode || mode == USE_AUTH_MDS) {
 738                                /* dir + name */
 739                                inode = dir;
 740                                hash = ceph_dentry_hash(dir, req->r_dentry);
 741                                is_hash = true;
 742                        }
 743                }
 744        }
 745
 746        dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
 747             (int)hash, mode);
 748        if (!inode)
 749                goto random;
 750        ci = ceph_inode(inode);
 751
 752        if (is_hash && S_ISDIR(inode->i_mode)) {
 753                struct ceph_inode_frag frag;
 754                int found;
 755
 756                ceph_choose_frag(ci, hash, &frag, &found);
 757                if (found) {
 758                        if (mode == USE_ANY_MDS && frag.ndist > 0) {
 759                                u8 r;
 760
 761                                /* choose a random replica */
 762                                get_random_bytes(&r, 1);
 763                                r %= frag.ndist;
 764                                mds = frag.dist[r];
 765                                dout("choose_mds %p %llx.%llx "
 766                                     "frag %u mds%d (%d/%d)\n",
 767                                     inode, ceph_vinop(inode),
 768                                     frag.frag, mds,
 769                                     (int)r, frag.ndist);
 770                                if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
 771                                    CEPH_MDS_STATE_ACTIVE)
 772                                        return mds;
 773                        }
 774
 775                        /* since this file/dir wasn't known to be
 776                         * replicated, then we want to look for the
 777                         * authoritative mds. */
 778                        mode = USE_AUTH_MDS;
 779                        if (frag.mds >= 0) {
 780                                /* choose auth mds */
 781                                mds = frag.mds;
 782                                dout("choose_mds %p %llx.%llx "
 783                                     "frag %u mds%d (auth)\n",
 784                                     inode, ceph_vinop(inode), frag.frag, mds);
 785                                if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
 786                                    CEPH_MDS_STATE_ACTIVE)
 787                                        return mds;
 788                        }
 789                }
 790        }
 791
 792        spin_lock(&ci->i_ceph_lock);
 793        cap = NULL;
 794        if (mode == USE_AUTH_MDS)
 795                cap = ci->i_auth_cap;
 796        if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
 797                cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
 798        if (!cap) {
 799                spin_unlock(&ci->i_ceph_lock);
 800                goto random;
 801        }
 802        mds = cap->session->s_mds;
 803        dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
 804             inode, ceph_vinop(inode), mds,
 805             cap == ci->i_auth_cap ? "auth " : "", cap);
 806        spin_unlock(&ci->i_ceph_lock);
 807        return mds;
 808
 809random:
 810        mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
 811        dout("choose_mds chose random mds%d\n", mds);
 812        return mds;
 813}
 814
 815
 816/*
 817 * session messages
 818 */
 819static struct ceph_msg *create_session_msg(u32 op, u64 seq)
 820{
 821        struct ceph_msg *msg;
 822        struct ceph_mds_session_head *h;
 823
 824        msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
 825                           false);
 826        if (!msg) {
 827                pr_err("create_session_msg ENOMEM creating msg\n");
 828                return NULL;
 829        }
 830        h = msg->front.iov_base;
 831        h->op = cpu_to_le32(op);
 832        h->seq = cpu_to_le64(seq);
 833
 834        return msg;
 835}
 836
 837/*
 838 * session message, specialization for CEPH_SESSION_REQUEST_OPEN
 839 * to include additional client metadata fields.
 840 */
 841static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
 842{
 843        struct ceph_msg *msg;
 844        struct ceph_mds_session_head *h;
 845        int i = -1;
 846        int metadata_bytes = 0;
 847        int metadata_key_count = 0;
 848        struct ceph_options *opt = mdsc->fsc->client->options;
 849        struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
 850        void *p;
 851
 852        const char* metadata[][2] = {
 853                {"hostname", utsname()->nodename},
 854                {"kernel_version", utsname()->release},
 855                {"entity_id", opt->name ? : ""},
 856                {"root", fsopt->server_path ? : "/"},
 857                {NULL, NULL}
 858        };
 859
 860        /* Calculate serialized length of metadata */
 861        metadata_bytes = 4;  /* map length */
 862        for (i = 0; metadata[i][0] != NULL; ++i) {
 863                metadata_bytes += 8 + strlen(metadata[i][0]) +
 864                        strlen(metadata[i][1]);
 865                metadata_key_count++;
 866        }
 867
 868        /* Allocate the message */
 869        msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + metadata_bytes,
 870                           GFP_NOFS, false);
 871        if (!msg) {
 872                pr_err("create_session_msg ENOMEM creating msg\n");
 873                return NULL;
 874        }
 875        h = msg->front.iov_base;
 876        h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
 877        h->seq = cpu_to_le64(seq);
 878
 879        /*
 880         * Serialize client metadata into waiting buffer space, using
 881         * the format that userspace expects for map<string, string>
 882         *
 883         * ClientSession messages with metadata are v2
 884         */
 885        msg->hdr.version = cpu_to_le16(2);
 886        msg->hdr.compat_version = cpu_to_le16(1);
 887
 888        /* The write pointer, following the session_head structure */
 889        p = msg->front.iov_base + sizeof(*h);
 890
 891        /* Number of entries in the map */
 892        ceph_encode_32(&p, metadata_key_count);
 893
 894        /* Two length-prefixed strings for each entry in the map */
 895        for (i = 0; metadata[i][0] != NULL; ++i) {
 896                size_t const key_len = strlen(metadata[i][0]);
 897                size_t const val_len = strlen(metadata[i][1]);
 898
 899                ceph_encode_32(&p, key_len);
 900                memcpy(p, metadata[i][0], key_len);
 901                p += key_len;
 902                ceph_encode_32(&p, val_len);
 903                memcpy(p, metadata[i][1], val_len);
 904                p += val_len;
 905        }
 906
 907        return msg;
 908}
 909
 910/*
 911 * send session open request.
 912 *
 913 * called under mdsc->mutex
 914 */
 915static int __open_session(struct ceph_mds_client *mdsc,
 916                          struct ceph_mds_session *session)
 917{
 918        struct ceph_msg *msg;
 919        int mstate;
 920        int mds = session->s_mds;
 921
 922        /* wait for mds to go active? */
 923        mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
 924        dout("open_session to mds%d (%s)\n", mds,
 925             ceph_mds_state_name(mstate));
 926        session->s_state = CEPH_MDS_SESSION_OPENING;
 927        session->s_renew_requested = jiffies;
 928
 929        /* send connect message */
 930        msg = create_session_open_msg(mdsc, session->s_seq);
 931        if (!msg)
 932                return -ENOMEM;
 933        ceph_con_send(&session->s_con, msg);
 934        return 0;
 935}
 936
 937/*
 938 * open sessions for any export targets for the given mds
 939 *
 940 * called under mdsc->mutex
 941 */
 942static struct ceph_mds_session *
 943__open_export_target_session(struct ceph_mds_client *mdsc, int target)
 944{
 945        struct ceph_mds_session *session;
 946
 947        session = __ceph_lookup_mds_session(mdsc, target);
 948        if (!session) {
 949                session = register_session(mdsc, target);
 950                if (IS_ERR(session))
 951                        return session;
 952        }
 953        if (session->s_state == CEPH_MDS_SESSION_NEW ||
 954            session->s_state == CEPH_MDS_SESSION_CLOSING)
 955                __open_session(mdsc, session);
 956
 957        return session;
 958}
 959
 960struct ceph_mds_session *
 961ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
 962{
 963        struct ceph_mds_session *session;
 964
 965        dout("open_export_target_session to mds%d\n", target);
 966
 967        mutex_lock(&mdsc->mutex);
 968        session = __open_export_target_session(mdsc, target);
 969        mutex_unlock(&mdsc->mutex);
 970
 971        return session;
 972}
 973
 974static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
 975                                          struct ceph_mds_session *session)
 976{
 977        struct ceph_mds_info *mi;
 978        struct ceph_mds_session *ts;
 979        int i, mds = session->s_mds;
 980
 981        if (mds >= mdsc->mdsmap->m_max_mds)
 982                return;
 983
 984        mi = &mdsc->mdsmap->m_info[mds];
 985        dout("open_export_target_sessions for mds%d (%d targets)\n",
 986             session->s_mds, mi->num_export_targets);
 987
 988        for (i = 0; i < mi->num_export_targets; i++) {
 989                ts = __open_export_target_session(mdsc, mi->export_targets[i]);
 990                if (!IS_ERR(ts))
 991                        ceph_put_mds_session(ts);
 992        }
 993}
 994
 995void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
 996                                           struct ceph_mds_session *session)
 997{
 998        mutex_lock(&mdsc->mutex);
 999        __open_export_target_sessions(mdsc, session);
1000        mutex_unlock(&mdsc->mutex);
1001}
1002
1003/*
1004 * session caps
1005 */
1006
1007/* caller holds s_cap_lock, we drop it */
1008static void cleanup_cap_releases(struct ceph_mds_client *mdsc,
1009                                 struct ceph_mds_session *session)
1010        __releases(session->s_cap_lock)
1011{
1012        LIST_HEAD(tmp_list);
1013        list_splice_init(&session->s_cap_releases, &tmp_list);
1014        session->s_num_cap_releases = 0;
1015        spin_unlock(&session->s_cap_lock);
1016
1017        dout("cleanup_cap_releases mds%d\n", session->s_mds);
1018        while (!list_empty(&tmp_list)) {
1019                struct ceph_cap *cap;
1020                /* zero out the in-progress message */
1021                cap = list_first_entry(&tmp_list,
1022                                        struct ceph_cap, session_caps);
1023                list_del(&cap->session_caps);
1024                ceph_put_cap(mdsc, cap);
1025        }
1026}
1027
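/*
 * Drop any requests still on this session's unsafe list and reset
 * r_attempts on its requests so kick_requests() will resend them.
 */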
1028static void cleanup_session_requests(struct ceph_mds_client *mdsc,
1029                                     struct ceph_mds_session *session)
1030{
1031        struct ceph_mds_request *req;
1032        struct rb_node *p;
1033
1034        dout("cleanup_session_requests mds%d\n", session->s_mds);
1035        mutex_lock(&mdsc->mutex);
1036        while (!list_empty(&session->s_unsafe)) {
1037                req = list_first_entry(&session->s_unsafe,
1038                                       struct ceph_mds_request, r_unsafe_item);
1039                list_del_init(&req->r_unsafe_item);
1040                pr_warn_ratelimited(" dropping unsafe request %llu\n",
1041                                    req->r_tid);
1042                __unregister_request(mdsc, req);
1043        }
1044        /* zero r_attempts, so kick_requests() will re-send requests */
1045        p = rb_first(&mdsc->request_tree);
1046        while (p) {
1047                req = rb_entry(p, struct ceph_mds_request, r_node);
1048                p = rb_next(p);
1049                if (req->r_session &&
1050                    req->r_session->s_mds == session->s_mds)
1051                        req->r_attempts = 0;
1052        }
1053        mutex_unlock(&mdsc->mutex);
1054}
1055
1056/*
1057 * Helper to safely iterate over all caps associated with a session, with
1058 * special care taken to handle a racing __ceph_remove_cap().
1059 *
1060 * Caller must hold session s_mutex.
1061 */
1062static int iterate_session_caps(struct ceph_mds_session *session,
1063                                 int (*cb)(struct inode *, struct ceph_cap *,
1064                                            void *), void *arg)
1065{
1066        struct list_head *p;
1067        struct ceph_cap *cap;
1068        struct inode *inode, *last_inode = NULL;
1069        struct ceph_cap *old_cap = NULL;
1070        int ret;
1071
1072        dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
1073        spin_lock(&session->s_cap_lock);
1074        p = session->s_caps.next;
1075        while (p != &session->s_caps) {
1076                cap = list_entry(p, struct ceph_cap, session_caps);
1077                inode = igrab(&cap->ci->vfs_inode);
1078                if (!inode) {
1079                        p = p->next;
1080                        continue;
1081                }
1082                session->s_cap_iterator = cap;
1083                spin_unlock(&session->s_cap_lock);
1084
1085                if (last_inode) {
1086                        iput(last_inode);
1087                        last_inode = NULL;
1088                }
1089                if (old_cap) {
1090                        ceph_put_cap(session->s_mdsc, old_cap);
1091                        old_cap = NULL;
1092                }
1093
1094                ret = cb(inode, cap, arg);
1095                last_inode = inode;
1096
1097                spin_lock(&session->s_cap_lock);
1098                p = p->next;
1099                if (cap->ci == NULL) {
1100                        dout("iterate_session_caps  finishing cap %p removal\n",
1101                             cap);
1102                        BUG_ON(cap->session != session);
1103                        cap->session = NULL;
1104                        list_del_init(&cap->session_caps);
1105                        session->s_nr_caps--;
1106                        if (cap->queue_release) {
1107                                list_add_tail(&cap->session_caps,
1108                                              &session->s_cap_releases);
1109                                session->s_num_cap_releases++;
1110                        } else {
1111                                old_cap = cap;  /* put_cap it w/o locks held */
1112                        }
1113                }
1114                if (ret < 0)
1115                        goto out;
1116        }
1117        ret = 0;
1118out:
1119        session->s_cap_iterator = NULL;
1120        spin_unlock(&session->s_cap_lock);
1121
1122        iput(last_inode);
1123        if (old_cap)
1124                ceph_put_cap(session->s_mdsc, old_cap);
1125
1126        return ret;
1127}
1128
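/*
 * Forcibly remove a cap: throw away any dirty or flushing state it was
 * covering, queue an invalidate if dirty pages can no longer be written
 * back, and wake anyone waiting on caps for this inode.
 */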
1129static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
1130                                  void *arg)
1131{
1132        struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
1133        struct ceph_inode_info *ci = ceph_inode(inode);
1134        LIST_HEAD(to_remove);
1135        bool drop = false;
1136        bool invalidate = false;
1137
1138        dout("removing cap %p, ci is %p, inode is %p\n",
1139             cap, ci, &ci->vfs_inode);
1140        spin_lock(&ci->i_ceph_lock);
1141        __ceph_remove_cap(cap, false);
1142        if (!ci->i_auth_cap) {
1143                struct ceph_cap_flush *cf;
1144                struct ceph_mds_client *mdsc = fsc->mdsc;
1145
1146                ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;
1147
1148                if (ci->i_wrbuffer_ref > 0 &&
1149                    ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
1150                        invalidate = true;
1151
1152                while (!list_empty(&ci->i_cap_flush_list)) {
1153                        cf = list_first_entry(&ci->i_cap_flush_list,
1154                                              struct ceph_cap_flush, i_list);
1155                        list_move(&cf->i_list, &to_remove);
1156                }
1157
1158                spin_lock(&mdsc->cap_dirty_lock);
1159
1160                list_for_each_entry(cf, &to_remove, i_list)
1161                        list_del(&cf->g_list);
1162
1163                if (!list_empty(&ci->i_dirty_item)) {
1164                        pr_warn_ratelimited(
1165                                " dropping dirty %s state for %p %lld\n",
1166                                ceph_cap_string(ci->i_dirty_caps),
1167                                inode, ceph_ino(inode));
1168                        ci->i_dirty_caps = 0;
1169                        list_del_init(&ci->i_dirty_item);
1170                        drop = true;
1171                }
1172                if (!list_empty(&ci->i_flushing_item)) {
1173                        pr_warn_ratelimited(
1174                                " dropping dirty+flushing %s state for %p %lld\n",
1175                                ceph_cap_string(ci->i_flushing_caps),
1176                                inode, ceph_ino(inode));
1177                        ci->i_flushing_caps = 0;
1178                        list_del_init(&ci->i_flushing_item);
1179                        mdsc->num_cap_flushing--;
1180                        drop = true;
1181                }
1182                spin_unlock(&mdsc->cap_dirty_lock);
1183
1184                if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
1185                        list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
1186                        ci->i_prealloc_cap_flush = NULL;
1187                }
1188        }
1189        spin_unlock(&ci->i_ceph_lock);
1190        while (!list_empty(&to_remove)) {
1191                struct ceph_cap_flush *cf;
1192                cf = list_first_entry(&to_remove,
1193                                      struct ceph_cap_flush, i_list);
1194                list_del(&cf->i_list);
1195                ceph_free_cap_flush(cf);
1196        }
1197
1198        wake_up_all(&ci->i_cap_wq);
1199        if (invalidate)
1200                ceph_queue_invalidate(inode);
1201        if (drop)
1202                iput(inode);
1203        return 0;
1204}
1205
1206/*
1207 * caller must hold session s_mutex
1208 */
1209static void remove_session_caps(struct ceph_mds_session *session)
1210{
1211        struct ceph_fs_client *fsc = session->s_mdsc->fsc;
1212        struct super_block *sb = fsc->sb;
1213        dout("remove_session_caps on %p\n", session);
1214        iterate_session_caps(session, remove_session_caps_cb, fsc);
1215
1216        wake_up_all(&fsc->mdsc->cap_flushing_wq);
1217
1218        spin_lock(&session->s_cap_lock);
1219        if (session->s_nr_caps > 0) {
1220                struct inode *inode;
1221                struct ceph_cap *cap, *prev = NULL;
1222                struct ceph_vino vino;
1223                /*
1224                 * iterate_session_caps() skips inodes that are being
1225                 * deleted, so we need to wait until deletions are complete.
1226                 * __wait_on_freeing_inode() is designed for the job,
1227                 * but it is not exported, so use the inode lookup
1228                 * function to access it instead.
1229                 */
1230                while (!list_empty(&session->s_caps)) {
1231                        cap = list_entry(session->s_caps.next,
1232                                         struct ceph_cap, session_caps);
1233                        if (cap == prev)
1234                                break;
1235                        prev = cap;
1236                        vino = cap->ci->i_vino;
1237                        spin_unlock(&session->s_cap_lock);
1238
1239                        inode = ceph_find_inode(sb, vino);
1240                        iput(inode);
1241
1242                        spin_lock(&session->s_cap_lock);
1243                }
1244        }
1245
1246        /* drop cap releases and unlock s_cap_lock */
1247        cleanup_cap_releases(session->s_mdsc, session);
1248
1249        BUG_ON(session->s_nr_caps > 0);
1250        BUG_ON(!list_empty(&session->s_cap_flushing));
1251}
1252
1253/*
1254 * wake up any threads waiting on this session's caps.  if the cap is
1255 * old (didn't get renewed on the client reconnect), remove it now.
1256 *
1257 * caller must hold s_mutex.
1258 */
1259static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
1260                              void *arg)
1261{
1262        struct ceph_inode_info *ci = ceph_inode(inode);
1263
1264        if (arg) {
1265                spin_lock(&ci->i_ceph_lock);
1266                ci->i_wanted_max_size = 0;
1267                ci->i_requested_max_size = 0;
1268                spin_unlock(&ci->i_ceph_lock);
1269        }
1270        wake_up_all(&ci->i_cap_wq);
1271        return 0;
1272}
1273
1274static void wake_up_session_caps(struct ceph_mds_session *session,
1275                                 int reconnect)
1276{
1277        dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
1278        iterate_session_caps(session, wake_up_session_cb,
1279                             (void *)(unsigned long)reconnect);
1280}
1281
1282/*
1283 * Send periodic message to MDS renewing all currently held caps.  The
1284 * ack will reset the expiration for all caps from this session.
1285 *
1286 * caller holds s_mutex
1287 */
1288static int send_renew_caps(struct ceph_mds_client *mdsc,
1289                           struct ceph_mds_session *session)
1290{
1291        struct ceph_msg *msg;
1292        int state;
1293
1294        if (time_after_eq(jiffies, session->s_cap_ttl) &&
1295            time_after_eq(session->s_cap_ttl, session->s_renew_requested))
1296                pr_info("mds%d caps stale\n", session->s_mds);
1297        session->s_renew_requested = jiffies;
1298
1299        /* do not try to renew caps until a recovering mds has reconnected
1300         * with its clients. */
1301        state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
1302        if (state < CEPH_MDS_STATE_RECONNECT) {
1303                dout("send_renew_caps ignoring mds%d (%s)\n",
1304                     session->s_mds, ceph_mds_state_name(state));
1305                return 0;
1306        }
1307
1308        dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
1309                ceph_mds_state_name(state));
1310        msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
1311                                 ++session->s_renew_seq);
1312        if (!msg)
1313                return -ENOMEM;
1314        ceph_con_send(&session->s_con, msg);
1315        return 0;
1316}
1317
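/*
 * Acknowledge a CEPH_SESSION_FLUSHMSG from the MDS, echoing back the
 * sequence number it supplied.
 */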
1318static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
1319                             struct ceph_mds_session *session, u64 seq)
1320{
1321        struct ceph_msg *msg;
1322
1323        dout("send_flushmsg_ack to mds%d (%s)s seq %lld\n",
1324             session->s_mds, ceph_session_state_name(session->s_state), seq);
1325        msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
1326        if (!msg)
1327                return -ENOMEM;
1328        ceph_con_send(&session->s_con, msg);
1329        return 0;
1330}
1331
1332
1333/*
1334 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
1335 *
1336 * Called under session->s_mutex
1337 */
1338static void renewed_caps(struct ceph_mds_client *mdsc,
1339                         struct ceph_mds_session *session, int is_renew)
1340{
1341        int was_stale;
1342        int wake = 0;
1343
1344        spin_lock(&session->s_cap_lock);
1345        was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);
1346
1347        session->s_cap_ttl = session->s_renew_requested +
1348                mdsc->mdsmap->m_session_timeout*HZ;
1349
1350        if (was_stale) {
1351                if (time_before(jiffies, session->s_cap_ttl)) {
1352                        pr_info("mds%d caps renewed\n", session->s_mds);
1353                        wake = 1;
1354                } else {
1355                        pr_info("mds%d caps still stale\n", session->s_mds);
1356                }
1357        }
1358        dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
1359             session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
1360             time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
1361        spin_unlock(&session->s_cap_lock);
1362
1363        if (wake)
1364                wake_up_session_caps(session, 0);
1365}
1366
1367/*
1368 * send a session close request
1369 */
1370static int request_close_session(struct ceph_mds_client *mdsc,
1371                                 struct ceph_mds_session *session)
1372{
1373        struct ceph_msg *msg;
1374
1375        dout("request_close_session mds%d state %s seq %lld\n",
1376             session->s_mds, ceph_session_state_name(session->s_state),
1377             session->s_seq);
1378        msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
1379        if (!msg)
1380                return -ENOMEM;
1381        ceph_con_send(&session->s_con, msg);
1382        return 1;
1383}
1384
1385/*
1386 * Called with s_mutex held.
1387 */
1388static int __close_session(struct ceph_mds_client *mdsc,
1389                         struct ceph_mds_session *session)
1390{
1391        if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
1392                return 0;
1393        session->s_state = CEPH_MDS_SESSION_CLOSING;
1394        return request_close_session(mdsc, session);
1395}
1396
1397/*
1398 * Trim old(er) caps.
1399 *
1400 * Because we can't cache an inode without one or more caps, we do
1401 * this indirectly: if a cap is unused, we prune its aliases, at which
1402 * point the inode will hopefully get dropped too.
1403 *
1404 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
1405 * memory pressure from the MDS, though, so it needn't be perfect.
1406 */
1407static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1408{
1409        struct ceph_mds_session *session = arg;
1410        struct ceph_inode_info *ci = ceph_inode(inode);
1411        int used, wanted, oissued, mine;
1412
1413        if (session->s_trim_caps <= 0)
1414                return -1;
1415
1416        spin_lock(&ci->i_ceph_lock);
1417        mine = cap->issued | cap->implemented;
1418        used = __ceph_caps_used(ci);
1419        wanted = __ceph_caps_file_wanted(ci);
1420        oissued = __ceph_caps_issued_other(ci, cap);
1421
1422        dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
1423             inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
1424             ceph_cap_string(used), ceph_cap_string(wanted));
1425        if (cap == ci->i_auth_cap) {
1426                if (ci->i_dirty_caps || ci->i_flushing_caps ||
1427                    !list_empty(&ci->i_cap_snaps))
1428                        goto out;
1429                if ((used | wanted) & CEPH_CAP_ANY_WR)
1430                        goto out;
1431        }
1432        /* The inode has cached pages, but it's no longer used.
1433         * We can safely drop it. */
1434        if (wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
1435            !(oissued & CEPH_CAP_FILE_CACHE)) {
1436                used = 0;
1437                oissued = 0;
1438        }
1439        if ((used | wanted) & ~oissued & mine)
1440                goto out;   /* we need these caps */
1441
1442        session->s_trim_caps--;
1443        if (oissued) {
1444                /* we aren't the only cap.. just remove us */
1445                __ceph_remove_cap(cap, true);
1446        } else {
1447                /* try dropping referring dentries */
1448                spin_unlock(&ci->i_ceph_lock);
1449                d_prune_aliases(inode);
1450                dout("trim_caps_cb %p cap %p  pruned, count now %d\n",
1451                     inode, cap, atomic_read(&inode->i_count));
1452                return 0;
1453        }
1454
1455out:
1456        spin_unlock(&ci->i_ceph_lock);
1457        return 0;
1458}
1459
1460/*
1461 * Trim session cap count down to some max number.
1462 */
1463static int trim_caps(struct ceph_mds_client *mdsc,
1464                     struct ceph_mds_session *session,
1465                     int max_caps)
1466{
1467        int trim_caps = session->s_nr_caps - max_caps;
1468
1469        dout("trim_caps mds%d start: %d / %d, trim %d\n",
1470             session->s_mds, session->s_nr_caps, max_caps, trim_caps);
1471        if (trim_caps > 0) {
1472                session->s_trim_caps = trim_caps;
1473                iterate_session_caps(session, trim_caps_cb, session);
1474                dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
1475                     session->s_mds, session->s_nr_caps, max_caps,
1476                        trim_caps - session->s_trim_caps);
1477                session->s_trim_caps = 0;
1478        }
1479
1480        ceph_send_cap_releases(mdsc, session);
1481        return 0;
1482}
1483
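/*
 * Return nonzero once no cap flush with a tid <= want_flush_tid remains
 * on the global cap_flush_list.
 */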
1484static int check_caps_flush(struct ceph_mds_client *mdsc,
1485                            u64 want_flush_tid)
1486{
1487        int ret = 1;
1488
1489        spin_lock(&mdsc->cap_dirty_lock);
1490        if (!list_empty(&mdsc->cap_flush_list)) {
1491                struct ceph_cap_flush *cf =
1492                        list_first_entry(&mdsc->cap_flush_list,
1493                                         struct ceph_cap_flush, g_list);
1494                if (cf->tid <= want_flush_tid) {
1495                        dout("check_caps_flush still flushing tid "
1496                             "%llu <= %llu\n", cf->tid, want_flush_tid);
1497                        ret = 0;
1498                }
1499        }
1500        spin_unlock(&mdsc->cap_dirty_lock);
1501        return ret;
1502}
1503
1504/*
1505 * wait for all dirty cap flushes up to want_flush_tid to complete.
1506 *
1507 * blocks until check_caps_flush() reports we have flushed through want_flush_tid.
1508 */
1509static void wait_caps_flush(struct ceph_mds_client *mdsc,
1510                            u64 want_flush_tid)
1511{
1512        dout("check_caps_flush want %llu\n", want_flush_tid);
1513
1514        wait_event(mdsc->cap_flushing_wq,
1515                   check_caps_flush(mdsc, want_flush_tid));
1516
1517        dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
1518}
1519
1520/*
1521 * called under s_mutex
1522 */
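/*
 * Drain the session's s_cap_releases list into CEPH_MSG_CLIENT_CAPRELEASE
 * messages, flushing a message whenever it reaches CEPH_CAPS_PER_RELEASE
 * items, and re-checking the list after dropping s_cap_lock in case more
 * releases were queued while we were encoding.
 */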
1523void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
1524                            struct ceph_mds_session *session)
1525{
1526        struct ceph_msg *msg = NULL;
1527        struct ceph_mds_cap_release *head;
1528        struct ceph_mds_cap_item *item;
1529        struct ceph_cap *cap;
1530        LIST_HEAD(tmp_list);
1531        int num_cap_releases;
1532
1533        spin_lock(&session->s_cap_lock);
1534again:
1535        list_splice_init(&session->s_cap_releases, &tmp_list);
1536        num_cap_releases = session->s_num_cap_releases;
1537        session->s_num_cap_releases = 0;
1538        spin_unlock(&session->s_cap_lock);
1539
1540        while (!list_empty(&tmp_list)) {
1541                if (!msg) {
1542                        msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
1543                                        PAGE_SIZE, GFP_NOFS, false);
1544                        if (!msg)
1545                                goto out_err;
1546                        head = msg->front.iov_base;
1547                        head->num = cpu_to_le32(0);
1548                        msg->front.iov_len = sizeof(*head);
1549                }
1550                cap = list_first_entry(&tmp_list, struct ceph_cap,
1551                                        session_caps);
1552                list_del(&cap->session_caps);
1553                num_cap_releases--;
1554
1555                head = msg->front.iov_base;
1556                le32_add_cpu(&head->num, 1);
1557                item = msg->front.iov_base + msg->front.iov_len;
1558                item->ino = cpu_to_le64(cap->cap_ino);
1559                item->cap_id = cpu_to_le64(cap->cap_id);
1560                item->migrate_seq = cpu_to_le32(cap->mseq);
1561                item->seq = cpu_to_le32(cap->issue_seq);
1562                msg->front.iov_len += sizeof(*item);
1563
1564                ceph_put_cap(mdsc, cap);
1565
1566                if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
1567                        msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1568                        dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
1569                        ceph_con_send(&session->s_con, msg);
1570                        msg = NULL;
1571                }
1572        }
1573
1574        BUG_ON(num_cap_releases != 0);
1575
1576        spin_lock(&session->s_cap_lock);
1577        if (!list_empty(&session->s_cap_releases))
1578                goto again;
1579        spin_unlock(&session->s_cap_lock);
1580
1581        if (msg) {
1582                msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1583                dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
1584                ceph_con_send(&session->s_con, msg);
1585        }
1586        return;
1587out_err:
1588        pr_err("send_cap_releases mds%d, failed to allocate message\n",
1589                session->s_mds);
1590        spin_lock(&session->s_cap_lock);
1591        list_splice(&tmp_list, &session->s_cap_releases);
1592        session->s_num_cap_releases += num_cap_releases;
1593        spin_unlock(&session->s_cap_lock);
1594}
1595
1596/*
1597 * requests
1598 */
1599
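/*
 * Size the readdir reply buffer from the directory's current entry count
 * (i_files + i_subdirs), clamped to the max_readdir mount option, and tell
 * the MDS how many entries and bytes we are prepared to receive.
 */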
1600int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
1601                                    struct inode *dir)
1602{
1603        struct ceph_inode_info *ci = ceph_inode(dir);
1604        struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1605        struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
1606        size_t size = sizeof(struct ceph_mds_reply_dir_entry);
1607        int order, num_entries;
1608
1609        spin_lock(&ci->i_ceph_lock);
1610        num_entries = ci->i_files + ci->i_subdirs;
1611        spin_unlock(&ci->i_ceph_lock);
1612        num_entries = max(num_entries, 1);
1613        num_entries = min(num_entries, opt->max_readdir);
1614
1615        order = get_order(size * num_entries);
1616        while (order >= 0) {
1617                rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
1618                                                             __GFP_NOWARN,
1619                                                             order);
1620                if (rinfo->dir_entries)
1621                        break;
1622                order--;
1623        }
1624        if (!rinfo->dir_entries)
1625                return -ENOMEM;
1626
1627        num_entries = (PAGE_SIZE << order) / size;
1628        num_entries = min(num_entries, opt->max_readdir);
1629
1630        rinfo->dir_buf_size = PAGE_SIZE << order;
1631        req->r_num_caps = num_entries + 1;
1632        req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
1633        req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
1634        return 0;
1635}
1636
1637/*
1638 * Create an mds request.
1639 */
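/*
 * A typical caller (schematic example, not taken from this file) allocates
 * the request, fills in the dentry/inode and cap fields it needs, issues it
 * synchronously, and drops its reference:
 *
 *      req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP, USE_ANY_MDS);
 *      if (IS_ERR(req))
 *              return PTR_ERR(req);
 *      req->r_dentry = dget(dentry);
 *      req->r_num_caps = 2;
 *      err = ceph_mdsc_do_request(mdsc, dir, req);
 *      ceph_mdsc_put_request(req);
 */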
1640struct ceph_mds_request *
1641ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
1642{
1643        struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
1644
1645        if (!req)
1646                return ERR_PTR(-ENOMEM);
1647
1648        mutex_init(&req->r_fill_mutex);
1649        req->r_mdsc = mdsc;
1650        req->r_started = jiffies;
1651        req->r_resend_mds = -1;
1652        INIT_LIST_HEAD(&req->r_unsafe_dir_item);
1653        INIT_LIST_HEAD(&req->r_unsafe_target_item);
1654        req->r_fmode = -1;
1655        kref_init(&req->r_kref);
1656        RB_CLEAR_NODE(&req->r_node);
1657        INIT_LIST_HEAD(&req->r_wait);
1658        init_completion(&req->r_completion);
1659        init_completion(&req->r_safe_completion);
1660        INIT_LIST_HEAD(&req->r_unsafe_item);
1661
1662        req->r_stamp = current_fs_time(mdsc->fsc->sb);
1663
1664        req->r_op = op;
1665        req->r_direct_mode = mode;
1666        return req;
1667}
1668
1669/*
1670 * return the oldest (lowest tid) request in the request tree, or NULL if none.
1671 *
1672 * called under mdsc->mutex.
1673 */
1674static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
1675{
1676        if (RB_EMPTY_ROOT(&mdsc->request_tree))
1677                return NULL;
1678        return rb_entry(rb_first(&mdsc->request_tree),
1679                        struct ceph_mds_request, r_node);
1680}
1681
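/*
 * return the cached tid of the oldest outstanding request; it is reported
 * to the MDS as oldest_client_tid in __prepare_send_request().
 */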
1682static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
1683{
1684        return mdsc->oldest_tid;
1685}
1686
1687/*
1688 * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
1689 * on build_path_from_dentry in fs/cifs/dir.c.
1690 *
1691 * If @stop_on_nosnap, generate path relative to the first non-snapped
1692 * inode.
1693 *
1694 * Encode hidden .snap dirs as a double /, i.e.
1695 *   foo/.snap/bar -> foo//bar
1696 */
1697char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
1698                           int stop_on_nosnap)
1699{
1700        struct dentry *temp;
1701        char *path;
1702        int len, pos;
1703        unsigned seq;
1704
1705        if (dentry == NULL)
1706                return ERR_PTR(-EINVAL);
1707
1708retry:
1709        len = 0;
1710        seq = read_seqbegin(&rename_lock);
1711        rcu_read_lock();
1712        for (temp = dentry; !IS_ROOT(temp);) {
1713                struct inode *inode = d_inode(temp);
1714                if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
1715                        len++;  /* slash only */
1716                else if (stop_on_nosnap && inode &&
1717                         ceph_snap(inode) == CEPH_NOSNAP)
1718                        break;
1719                else
1720                        len += 1 + temp->d_name.len;
1721                temp = temp->d_parent;
1722        }
1723        rcu_read_unlock();
1724        if (len)
1725                len--;  /* no leading '/' */
1726
1727        path = kmalloc(len+1, GFP_NOFS);
1728        if (path == NULL)
1729                return ERR_PTR(-ENOMEM);
1730        pos = len;
1731        path[pos] = 0;  /* trailing null */
1732        rcu_read_lock();
1733        for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
1734                struct inode *inode;
1735
1736                spin_lock(&temp->d_lock);
1737                inode = d_inode(temp);
1738                if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
1739                        dout("build_path path+%d: %p SNAPDIR\n",
1740                             pos, temp);
1741                } else if (stop_on_nosnap && inode &&
1742                           ceph_snap(inode) == CEPH_NOSNAP) {
1743                        spin_unlock(&temp->d_lock);
1744                        break;
1745                } else {
1746                        pos -= temp->d_name.len;
1747                        if (pos < 0) {
1748                                spin_unlock(&temp->d_lock);
1749                                break;
1750                        }
1751                        strncpy(path + pos, temp->d_name.name,
1752                                temp->d_name.len);
1753                }
1754                spin_unlock(&temp->d_lock);
1755                if (pos)
1756                        path[--pos] = '/';
1757                temp = temp->d_parent;
1758        }
1759        rcu_read_unlock();
1760        if (pos != 0 || read_seqretry(&rename_lock, seq)) {
1761                pr_err("build_path did not end path lookup where "
1762                       "expected, namelen is %d, pos is %d\n", len, pos);
1763                /* presumably this is only possible if racing with a
1764                   rename of one of the parent directories (we can not
1765                   lock the dentries above us to prevent this, but
1766                   retrying should be harmless) */
1767                kfree(path);
1768                goto retry;
1769        }
1770
1771        *base = ceph_ino(d_inode(temp));
1772        *plen = len;
1773        dout("build_path on %p %d built %llx '%.*s'\n",
1774             dentry, d_count(dentry), *base, len, path);
1775        return path;
1776}
1777
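/*
 * Encode a dentry either as (parent ino, name) when the parent isn't
 * snapped, or as a full path built by ceph_mdsc_build_path().  *pfreepath
 * is set when the caller must kfree() *ppath.
 */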
1778static int build_dentry_path(struct dentry *dentry,
1779                             const char **ppath, int *ppathlen, u64 *pino,
1780                             int *pfreepath)
1781{
1782        char *path;
1783
1784        if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) {
1785                *pino = ceph_ino(d_inode(dentry->d_parent));
1786                *ppath = dentry->d_name.name;
1787                *ppathlen = dentry->d_name.len;
1788                return 0;
1789        }
1790        path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1791        if (IS_ERR(path))
1792                return PTR_ERR(path);
1793        *ppath = path;
1794        *pfreepath = 1;
1795        return 0;
1796}
1797
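/*
 * Encode an inode either as a bare ino with no path when it isn't snapped,
 * or as a path built from one of its aliases.
 */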
1798static int build_inode_path(struct inode *inode,
1799                            const char **ppath, int *ppathlen, u64 *pino,
1800                            int *pfreepath)
1801{
1802        struct dentry *dentry;
1803        char *path;
1804
1805        if (ceph_snap(inode) == CEPH_NOSNAP) {
1806                *pino = ceph_ino(inode);
1807                *ppathlen = 0;
1808                return 0;
1809        }
1810        dentry = d_find_alias(inode);
1811        path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1812        dput(dentry);
1813        if (IS_ERR(path))
1814                return PTR_ERR(path);
1815        *ppath = path;
1816        *pfreepath = 1;
1817        return 0;
1818}
1819
1820/*
1821 * request arguments may be specified via an inode *, a dentry *, or
1822 * an explicit ino+path.
1823 */
1824static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
1825                                  const char *rpath, u64 rino,
1826                                  const char **ppath, int *pathlen,
1827                                  u64 *ino, int *freepath)
1828{
1829        int r = 0;
1830
1831        if (rinode) {
1832                r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
1833                dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
1834                     ceph_snap(rinode));
1835        } else if (rdentry) {
1836                r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
1837                dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
1838                     *ppath);
1839        } else if (rpath || rino) {
1840                *ino = rino;
1841                *ppath = rpath;
1842                *pathlen = rpath ? strlen(rpath) : 0;
1843                dout(" path %.*s\n", *pathlen, rpath);
1844        }
1845
1846        return r;
1847}
1848
1849/*
1850 * called under mdsc->mutex
1851 */
1852static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
1853                                               struct ceph_mds_request *req,
1854                                               int mds, bool drop_cap_releases)
1855{
1856        struct ceph_msg *msg;
1857        struct ceph_mds_request_head *head;
1858        const char *path1 = NULL;
1859        const char *path2 = NULL;
1860        u64 ino1 = 0, ino2 = 0;
1861        int pathlen1 = 0, pathlen2 = 0;
1862        int freepath1 = 0, freepath2 = 0;
1863        int len;
1864        u16 releases;
1865        void *p, *end;
1866        int ret;
1867
1868        ret = set_request_path_attr(req->r_inode, req->r_dentry,
1869                              req->r_path1, req->r_ino1.ino,
1870                              &path1, &pathlen1, &ino1, &freepath1);
1871        if (ret < 0) {
1872                msg = ERR_PTR(ret);
1873                goto out;
1874        }
1875
1876        ret = set_request_path_attr(NULL, req->r_old_dentry,
1877                              req->r_path2, req->r_ino2.ino,
1878                              &path2, &pathlen2, &ino2, &freepath2);
1879        if (ret < 0) {
1880                msg = ERR_PTR(ret);
1881                goto out_free1;
1882        }
1883
1884        len = sizeof(*head) +
1885                pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
1886                sizeof(struct ceph_timespec);
1887
1888        /* calculate (max) length for cap releases */
1889        len += sizeof(struct ceph_mds_request_release) *
1890                (!!req->r_inode_drop + !!req->r_dentry_drop +
1891                 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
1892        if (req->r_dentry_drop)
1893                len += req->r_dentry->d_name.len;
1894        if (req->r_old_dentry_drop)
1895                len += req->r_old_dentry->d_name.len;
1896
1897        msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false);
1898        if (!msg) {
1899                msg = ERR_PTR(-ENOMEM);
1900                goto out_free2;
1901        }
1902
1903        msg->hdr.version = cpu_to_le16(2);
1904        msg->hdr.tid = cpu_to_le64(req->r_tid);
1905
1906        head = msg->front.iov_base;
1907        p = msg->front.iov_base + sizeof(*head);
1908        end = msg->front.iov_base + msg->front.iov_len;
1909
1910        head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
1911        head->op = cpu_to_le32(req->r_op);
1912        head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
1913        head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
1914        head->args = req->r_args;
1915
1916        ceph_encode_filepath(&p, end, ino1, path1);
1917        ceph_encode_filepath(&p, end, ino2, path2);
1918
1919        /* make note of release offset, in case we need to replay */
1920        req->r_request_release_offset = p - msg->front.iov_base;
1921
1922        /* cap releases */
1923        releases = 0;
1924        if (req->r_inode_drop)
1925                releases += ceph_encode_inode_release(&p,
1926                      req->r_inode ? req->r_inode : d_inode(req->r_dentry),
1927                      mds, req->r_inode_drop, req->r_inode_unless, 0);
1928        if (req->r_dentry_drop)
1929                releases += ceph_encode_dentry_release(&p, req->r_dentry,
1930                       mds, req->r_dentry_drop, req->r_dentry_unless);
1931        if (req->r_old_dentry_drop)
1932                releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
1933                       mds, req->r_old_dentry_drop, req->r_old_dentry_unless);
1934        if (req->r_old_inode_drop)
1935                releases += ceph_encode_inode_release(&p,
1936                      d_inode(req->r_old_dentry),
1937                      mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
1938
1939        if (drop_cap_releases) {
1940                releases = 0;
1941                p = msg->front.iov_base + req->r_request_release_offset;
1942        }
1943
1944        head->num_releases = cpu_to_le16(releases);
1945
1946        /* time stamp */
1947        {
1948                struct ceph_timespec ts;
1949                ceph_encode_timespec(&ts, &req->r_stamp);
1950                ceph_encode_copy(&p, &ts, sizeof(ts));
1951        }
1952
1953        BUG_ON(p > end);
1954        msg->front.iov_len = p - msg->front.iov_base;
1955        msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1956
1957        if (req->r_pagelist) {
1958                struct ceph_pagelist *pagelist = req->r_pagelist;
1959                atomic_inc(&pagelist->refcnt);
1960                ceph_msg_data_add_pagelist(msg, pagelist);
1961                msg->hdr.data_len = cpu_to_le32(pagelist->length);
1962        } else {
1963                msg->hdr.data_len = 0;
1964        }
1965
1966        msg->hdr.data_off = cpu_to_le16(0);
1967
1968out_free2:
1969        if (freepath2)
1970                kfree((char *)path2);
1971out_free1:
1972        if (freepath1)
1973                kfree((char *)path1);
1974out:
1975        return msg;
1976}
1977
1978/*
1979 * called under mdsc->mutex if error, under no mutex if
1980 * success.
1981 */
1982static void complete_request(struct ceph_mds_client *mdsc,
1983                             struct ceph_mds_request *req)
1984{
1985        if (req->r_callback)
1986                req->r_callback(mdsc, req);
1987        else
1988                complete_all(&req->r_completion);
1989}
1990
1991/*
1992 * called under mdsc->mutex
1993 */
1994static int __prepare_send_request(struct ceph_mds_client *mdsc,
1995                                  struct ceph_mds_request *req,
1996                                  int mds, bool drop_cap_releases)
1997{
1998        struct ceph_mds_request_head *rhead;
1999        struct ceph_msg *msg;
2000        int flags = 0;
2001
2002        req->r_attempts++;
2003        if (req->r_inode) {
2004                struct ceph_cap *cap =
2005                        ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
2006
2007                if (cap)
2008                        req->r_sent_on_mseq = cap->mseq;
2009                else
2010                        req->r_sent_on_mseq = -1;
2011        }
2012        dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
2013             req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
2014
2015        if (req->r_got_unsafe) {
2016                void *p;
2017                /*
2018                 * Replay.  Do not regenerate message (and rebuild
2019                 * paths, etc.); just use the original message.
2020                 * Rebuilding paths will break for renames because
2021                 * d_move mangles the src name.
2022                 */
2023                msg = req->r_request;
2024                rhead = msg->front.iov_base;
2025
2026                flags = le32_to_cpu(rhead->flags);
2027                flags |= CEPH_MDS_FLAG_REPLAY;
2028                rhead->flags = cpu_to_le32(flags);
2029
2030                if (req->r_target_inode)
2031                        rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
2032
2033                rhead->num_retry = req->r_attempts - 1;
2034
2035                /* remove cap/dentry releases from message */
2036                rhead->num_releases = 0;
2037
2038                /* time stamp */
2039                p = msg->front.iov_base + req->r_request_release_offset;
2040                {
2041                        struct ceph_timespec ts;
2042                        ceph_encode_timespec(&ts, &req->r_stamp);
2043                        ceph_encode_copy(&p, &ts, sizeof(ts));
2044                }
2045
2046                msg->front.iov_len = p - msg->front.iov_base;
2047                msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2048                return 0;
2049        }
2050
2051        if (req->r_request) {
2052                ceph_msg_put(req->r_request);
2053                req->r_request = NULL;
2054        }
2055        msg = create_request_message(mdsc, req, mds, drop_cap_releases);
2056        if (IS_ERR(msg)) {
2057                req->r_err = PTR_ERR(msg);
2058                return PTR_ERR(msg);
2059        }
2060        req->r_request = msg;
2061
2062        rhead = msg->front.iov_base;
2063        rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
2064        if (req->r_got_unsafe)
2065                flags |= CEPH_MDS_FLAG_REPLAY;
2066        if (req->r_locked_dir)
2067                flags |= CEPH_MDS_FLAG_WANT_DENTRY;
2068        rhead->flags = cpu_to_le32(flags);
2069        rhead->num_fwd = req->r_num_fwd;
2070        rhead->num_retry = req->r_attempts - 1;
2071        rhead->ino = 0;
2072
2073        dout(" r_locked_dir = %p\n", req->r_locked_dir);
2074        return 0;
2075}
2076
2077/*
2078 * send request, or put it on the appropriate wait list.
2079 */
2080static int __do_request(struct ceph_mds_client *mdsc,
2081                        struct ceph_mds_request *req)
2082{
2083        struct ceph_mds_session *session = NULL;
2084        int mds = -1;
2085        int err = 0;
2086
2087        if (req->r_err || req->r_got_result) {
2088                if (req->r_aborted)
2089                        __unregister_request(mdsc, req);
2090                goto out;
2091        }
2092
2093        if (req->r_timeout &&
2094            time_after_eq(jiffies, req->r_started + req->r_timeout)) {
2095                dout("do_request timed out\n");
2096                err = -EIO;
2097                goto finish;
2098        }
2099        if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
2100                dout("do_request forced umount\n");
2101                err = -EIO;
2102                goto finish;
2103        }
2104        if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
2105                if (mdsc->mdsmap_err) {
2106                        err = mdsc->mdsmap_err;
2107                        dout("do_request mdsmap err %d\n", err);
2108                        goto finish;
2109                }
2110                if (mdsc->mdsmap->m_epoch == 0) {
2111                        dout("do_request no mdsmap, waiting for map\n");
2112                        list_add(&req->r_wait, &mdsc->waiting_for_map);
2113                        goto finish;
2114                }
2115                if (!(mdsc->fsc->mount_options->flags &
2116                      CEPH_MOUNT_OPT_MOUNTWAIT) &&
2117                    !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
2118                        err = -ENOENT;
2119                        pr_info("probably no mds server is up\n");
2120                        goto finish;
2121                }
2122        }
2123
2124        put_request_session(req);
2125
2126        mds = __choose_mds(mdsc, req);
2127        if (mds < 0 ||
2128            ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
2129                dout("do_request no mds or not active, waiting for map\n");
2130                list_add(&req->r_wait, &mdsc->waiting_for_map);
2131                goto out;
2132        }
2133
2134        /* get, open session */
2135        session = __ceph_lookup_mds_session(mdsc, mds);
2136        if (!session) {
2137                session = register_session(mdsc, mds);
2138                if (IS_ERR(session)) {
2139                        err = PTR_ERR(session);
2140                        goto finish;
2141                }
2142        }
2143        req->r_session = get_session(session);
2144
2145        dout("do_request mds%d session %p state %s\n", mds, session,
2146             ceph_session_state_name(session->s_state));
2147        if (session->s_state != CEPH_MDS_SESSION_OPEN &&
2148            session->s_state != CEPH_MDS_SESSION_HUNG) {
2149                if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
2150                        err = -EACCES;
2151                        goto out_session;
2152                }
2153                if (session->s_state == CEPH_MDS_SESSION_NEW ||
2154                    session->s_state == CEPH_MDS_SESSION_CLOSING)
2155                        __open_session(mdsc, session);
2156                list_add(&req->r_wait, &session->s_waiting);
2157                goto out_session;
2158        }
2159
2160        /* send request */
2161        req->r_resend_mds = -1;   /* forget any previous mds hint */
2162
2163        if (req->r_request_started == 0)   /* note request start time */
2164                req->r_request_started = jiffies;
2165
2166        err = __prepare_send_request(mdsc, req, mds, false);
2167        if (!err) {
2168                ceph_msg_get(req->r_request);
2169                ceph_con_send(&session->s_con, req->r_request);
2170        }
2171
2172out_session:
2173        ceph_put_mds_session(session);
2174finish:
2175        if (err) {
2176                dout("__do_request early error %d\n", err);
2177                req->r_err = err;
2178                complete_request(mdsc, req);
2179                __unregister_request(mdsc, req);
2180        }
2181out:
2182        return err;
2183}
2184
2185/*
2186 * called under mdsc->mutex
2187 */
2188static void __wake_requests(struct ceph_mds_client *mdsc,
2189                            struct list_head *head)
2190{
2191        struct ceph_mds_request *req;
2192        LIST_HEAD(tmp_list);
2193
2194        list_splice_init(head, &tmp_list);
2195
2196        while (!list_empty(&tmp_list)) {
2197                req = list_entry(tmp_list.next,
2198                                 struct ceph_mds_request, r_wait);
2199                list_del_init(&req->r_wait);
2200                dout(" wake request %p tid %llu\n", req, req->r_tid);
2201                __do_request(mdsc, req);
2202        }
2203}
2204
2205/*
2206 * Wake up threads with requests pending for @mds, so that they can
2207 * resubmit their requests to a possibly different mds.
2208 */
2209static void kick_requests(struct ceph_mds_client *mdsc, int mds)
2210{
2211        struct ceph_mds_request *req;
2212        struct rb_node *p = rb_first(&mdsc->request_tree);
2213
2214        dout("kick_requests mds%d\n", mds);
2215        while (p) {
2216                req = rb_entry(p, struct ceph_mds_request, r_node);
2217                p = rb_next(p);
2218                if (req->r_got_unsafe)
2219                        continue;
2220                if (req->r_attempts > 0)
2221                        continue; /* only new requests */
2222                if (req->r_session &&
2223                    req->r_session->s_mds == mds) {
2224                        dout(" kicking tid %llu\n", req->r_tid);
2225                        list_del_init(&req->r_wait);
2226                        __do_request(mdsc, req);
2227                }
2228        }
2229}
2230
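/*
 * Register a request and try to send it, without waiting for completion.
 */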
2231void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
2232                              struct ceph_mds_request *req)
2233{
2234        dout("submit_request on %p\n", req);
2235        mutex_lock(&mdsc->mutex);
2236        __register_request(mdsc, req, NULL);
2237        __do_request(mdsc, req);
2238        mutex_unlock(&mdsc->mutex);
2239}
2240
2241/*
2242 * Synchronously perform an mds request.  Takes care of all of the
2243 * session setup, forwarding, and retry details.
2244 */
2245int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
2246                         struct inode *dir,
2247                         struct ceph_mds_request *req)
2248{
2249        int err;
2250
2251        dout("do_request on %p\n", req);
2252
2253        /* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
2254        if (req->r_inode)
2255                ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
2256        if (req->r_locked_dir)
2257                ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
2258        if (req->r_old_dentry_dir)
2259                ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
2260                                  CEPH_CAP_PIN);
2261
2262        /* issue */
2263        mutex_lock(&mdsc->mutex);
2264        __register_request(mdsc, req, dir);
2265        __do_request(mdsc, req);
2266
2267        if (req->r_err) {
2268                err = req->r_err;
2269                goto out;
2270        }
2271
2272        /* wait */
2273        mutex_unlock(&mdsc->mutex);
2274        dout("do_request waiting\n");
2275        if (!req->r_timeout && req->r_wait_for_completion) {
2276                err = req->r_wait_for_completion(mdsc, req);
2277        } else {
2278                long timeleft = wait_for_completion_killable_timeout(
2279                                        &req->r_completion,
2280                                        ceph_timeout_jiffies(req->r_timeout));
2281                if (timeleft > 0)
2282                        err = 0;
2283                else if (!timeleft)
2284                        err = -EIO;  /* timed out */
2285                else
2286                        err = timeleft;  /* killed */
2287        }
2288        dout("do_request waited, got %d\n", err);
2289        mutex_lock(&mdsc->mutex);
2290
2291        /* only abort if we didn't race with a real reply */
2292        if (req->r_got_result) {
2293                err = le32_to_cpu(req->r_reply_info.head->result);
2294        } else if (err < 0) {
2295                dout("aborted request %lld with %d\n", req->r_tid, err);
2296
2297                /*
2298                 * ensure we aren't running concurrently with
2299                 * ceph_fill_trace or ceph_readdir_prepopulate, which
2300                 * rely on locks (dir mutex) held by our caller.
2301                 */
2302                mutex_lock(&req->r_fill_mutex);
2303                req->r_err = err;
2304                req->r_aborted = true;
2305                mutex_unlock(&req->r_fill_mutex);
2306
2307                if (req->r_locked_dir &&
2308                    (req->r_op & CEPH_MDS_OP_WRITE))
2309                        ceph_invalidate_dir_request(req);
2310        } else {
2311                err = req->r_err;
2312        }
2313
2314out:
2315        mutex_unlock(&mdsc->mutex);
2316        dout("do_request %p done, result %d\n", req, err);
2317        return err;
2318}
2319
2320/*
2321 * Invalidate dir's completeness, dentry lease state on an aborted MDS
2322 * namespace request.
2323 */
2324void ceph_invalidate_dir_request(struct ceph_mds_request *req)
2325{
2326        struct inode *inode = req->r_locked_dir;
2327
2328        dout("invalidate_dir_request %p (complete, lease(s))\n", inode);
2329
2330        ceph_dir_clear_complete(inode);
2331        if (req->r_dentry)
2332                ceph_invalidate_dentry_lease(req->r_dentry);
2333        if (req->r_old_dentry)
2334                ceph_invalidate_dentry_lease(req->r_old_dentry);
2335}
2336
2337/*
2338 * Handle mds reply.
2339 *
2340 * We take the session mutex and parse and process the reply immediately.
2341 * This preserves the logical ordering of replies, capabilities, etc., sent
2342 * by the MDS as they are applied to our local cache.
2343 */
2344static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
2345{
2346        struct ceph_mds_client *mdsc = session->s_mdsc;
2347        struct ceph_mds_request *req;
2348        struct ceph_mds_reply_head *head = msg->front.iov_base;
2349        struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
2350        struct ceph_snap_realm *realm;
2351        u64 tid;
2352        int err, result;
2353        int mds = session->s_mds;
2354
2355        if (msg->front.iov_len < sizeof(*head)) {
2356                pr_err("mdsc_handle_reply got corrupt (short) reply\n");
2357                ceph_msg_dump(msg);
2358                return;
2359        }
2360
2361        /* get request, session */
2362        tid = le64_to_cpu(msg->hdr.tid);
2363        mutex_lock(&mdsc->mutex);
2364        req = lookup_get_request(mdsc, tid);
2365        if (!req) {
2366                dout("handle_reply on unknown tid %llu\n", tid);
2367                mutex_unlock(&mdsc->mutex);
2368                return;
2369        }
2370        dout("handle_reply %p\n", req);
2371
2372        /* correct session? */
2373        if (req->r_session != session) {
2374                pr_err("mdsc_handle_reply got %llu on session mds%d"
2375                       " not mds%d\n", tid, session->s_mds,
2376                       req->r_session ? req->r_session->s_mds : -1);
2377                mutex_unlock(&mdsc->mutex);
2378                goto out;
2379        }
2380
2381        /* dup? */
2382        if ((req->r_got_unsafe && !head->safe) ||
2383            (req->r_got_safe && head->safe)) {
2384                pr_warn("got a dup %s reply on %llu from mds%d\n",
2385                           head->safe ? "safe" : "unsafe", tid, mds);
2386                mutex_unlock(&mdsc->mutex);
2387                goto out;
2388        }
2389        if (req->r_got_safe) {
2390                pr_warn("got unsafe after safe on %llu from mds%d\n",
2391                           tid, mds);
2392                mutex_unlock(&mdsc->mutex);
2393                goto out;
2394        }
2395
2396        result = le32_to_cpu(head->result);
2397
2398        /*
2399         * Handle an ESTALE:
2400         * if we're not talking to the authority, send to them;
2401         * if the authority has changed while we weren't looking,
2402         * send to the new authority.
2403         * Otherwise we just have to return an ESTALE.
2404         */
2405        if (result == -ESTALE) {
2406                dout("got ESTALE on request %llu\n", req->r_tid);
2407                req->r_resend_mds = -1;
2408                if (req->r_direct_mode != USE_AUTH_MDS) {
2409                        dout("not using auth, setting for that now\n");
2410                        req->r_direct_mode = USE_AUTH_MDS;
2411                        __do_request(mdsc, req);
2412                        mutex_unlock(&mdsc->mutex);
2413                        goto out;
2414                } else {
2415                        int mds = __choose_mds(mdsc, req);
2416                        if (mds >= 0 && mds != req->r_session->s_mds) {
2417                                dout("but auth changed, so resending\n");
2418                                __do_request(mdsc, req);
2419                                mutex_unlock(&mdsc->mutex);
2420                                goto out;
2421                        }
2422                }
2423                dout("have to return ESTALE on request %llu\n", req->r_tid);
2424        }
2425
2426
2427        if (head->safe) {
2428                req->r_got_safe = true;
2429                __unregister_request(mdsc, req);
2430
2431                if (req->r_got_unsafe) {
2432                        /*
2433                         * We already handled the unsafe response, now do the
2434                         * cleanup.  No need to examine the response; the MDS
2435                         * doesn't include any result info in the safe
2436                         * response.  And even if it did, there is nothing
2437                         * useful we could do with a revised return value.
2438                         */
2439                        dout("got safe reply %llu, mds%d\n", tid, mds);
2440                        list_del_init(&req->r_unsafe_item);
2441
2442                        /* last unsafe request during umount? */
2443                        if (mdsc->stopping && !__get_oldest_req(mdsc))
2444                                complete_all(&mdsc->safe_umount_waiters);
2445                        mutex_unlock(&mdsc->mutex);
2446                        goto out;
2447                }
2448        } else {
2449                req->r_got_unsafe = true;
2450                list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
2451                if (req->r_unsafe_dir) {
2452                        struct ceph_inode_info *ci =
2453                                        ceph_inode(req->r_unsafe_dir);
2454                        spin_lock(&ci->i_unsafe_lock);
2455                        list_add_tail(&req->r_unsafe_dir_item,
2456                                      &ci->i_unsafe_dirops);
2457                        spin_unlock(&ci->i_unsafe_lock);
2458                }
2459        }
2460
2461        dout("handle_reply tid %lld result %d\n", tid, result);
2462        rinfo = &req->r_reply_info;
2463        err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
2464        mutex_unlock(&mdsc->mutex);
2465
2466        mutex_lock(&session->s_mutex);
2467        if (err < 0) {
2468                pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
2469                ceph_msg_dump(msg);
2470                goto out_err;
2471        }
2472
2473        /* snap trace */
2474        realm = NULL;
2475        if (rinfo->snapblob_len) {
2476                down_write(&mdsc->snap_rwsem);
2477                ceph_update_snap_trace(mdsc, rinfo->snapblob,
2478                                rinfo->snapblob + rinfo->snapblob_len,
2479                                le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
2480                                &realm);
2481                downgrade_write(&mdsc->snap_rwsem);
2482        } else {
2483                down_read(&mdsc->snap_rwsem);
2484        }
2485
2486        /* insert trace into our cache */
2487        mutex_lock(&req->r_fill_mutex);
2488        current->journal_info = req;
2489        err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
2490        if (err == 0) {
2491                if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
2492                                    req->r_op == CEPH_MDS_OP_LSSNAP))
2493                        ceph_readdir_prepopulate(req, req->r_session);
2494                ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
2495        }
2496        current->journal_info = NULL;
2497        mutex_unlock(&req->r_fill_mutex);
2498
2499        up_read(&mdsc->snap_rwsem);
2500        if (realm)
2501                ceph_put_snap_realm(mdsc, realm);
2502
2503        if (err == 0 && req->r_got_unsafe && req->r_target_inode) {
2504                struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
2505                spin_lock(&ci->i_unsafe_lock);
2506                list_add_tail(&req->r_unsafe_target_item, &ci->i_unsafe_iops);
2507                spin_unlock(&ci->i_unsafe_lock);
2508        }
2509out_err:
2510        mutex_lock(&mdsc->mutex);
2511        if (!req->r_aborted) {
2512                if (err) {
2513                        req->r_err = err;
2514                } else {
2515                        req->r_reply = ceph_msg_get(msg);
2516                        req->r_got_result = true;
2517                }
2518        } else {
2519                dout("reply arrived after request %lld was aborted\n", tid);
2520        }
2521        mutex_unlock(&mdsc->mutex);
2522
2523        mutex_unlock(&session->s_mutex);
2524
2525        /* kick calling process */
2526        complete_request(mdsc, req);
2527out:
2528        ceph_mdsc_put_request(req);
2529        return;
2530}
2531
2532
2533
2534/*
2535 * handle mds notification that our request has been forwarded.
2536 */
2537static void handle_forward(struct ceph_mds_client *mdsc,
2538                           struct ceph_mds_session *session,
2539                           struct ceph_msg *msg)
2540{
2541        struct ceph_mds_request *req;
2542        u64 tid = le64_to_cpu(msg->hdr.tid);
2543        u32 next_mds;
2544        u32 fwd_seq;
2545        int err = -EINVAL;
2546        void *p = msg->front.iov_base;
2547        void *end = p + msg->front.iov_len;
2548
2549        ceph_decode_need(&p, end, 2*sizeof(u32), bad);
2550        next_mds = ceph_decode_32(&p);
2551        fwd_seq = ceph_decode_32(&p);
2552
2553        mutex_lock(&mdsc->mutex);
2554        req = lookup_get_request(mdsc, tid);
2555        if (!req) {
2556                dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
2557                goto out;  /* dup reply? */
2558        }
2559
2560        if (req->r_aborted) {
2561                dout("forward tid %llu aborted, unregistering\n", tid);
2562                __unregister_request(mdsc, req);
2563        } else if (fwd_seq <= req->r_num_fwd) {
2564                dout("forward tid %llu to mds%d - old seq %d <= %d\n",
2565                     tid, next_mds, req->r_num_fwd, fwd_seq);
2566        } else {
2567                /* resend. forward race not possible; mds would drop */
2568                dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
2569                BUG_ON(req->r_err);
2570                BUG_ON(req->r_got_result);
2571                req->r_attempts = 0;
2572                req->r_num_fwd = fwd_seq;
2573                req->r_resend_mds = next_mds;
2574                put_request_session(req);
2575                __do_request(mdsc, req);
2576        }
2577        ceph_mdsc_put_request(req);
2578out:
2579        mutex_unlock(&mdsc->mutex);
2580        return;
2581
2582bad:
2583        pr_err("mdsc_handle_forward decode error err=%d\n", err);
2584}
2585
2586/*
2587 * handle a mds session control message
2588 */
2589static void handle_session(struct ceph_mds_session *session,
2590                           struct ceph_msg *msg)
2591{
2592        struct ceph_mds_client *mdsc = session->s_mdsc;
2593        u32 op;
2594        u64 seq;
2595        int mds = session->s_mds;
2596        struct ceph_mds_session_head *h = msg->front.iov_base;
2597        int wake = 0;
2598
2599        /* decode */
2600        if (msg->front.iov_len != sizeof(*h))
2601                goto bad;
2602        op = le32_to_cpu(h->op);
2603        seq = le64_to_cpu(h->seq);
2604
2605        mutex_lock(&mdsc->mutex);
2606        if (op == CEPH_SESSION_CLOSE)
2607                __unregister_session(mdsc, session);
2608        /* FIXME: this ttl calculation is generous */
2609        session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
2610        mutex_unlock(&mdsc->mutex);
2611
2612        mutex_lock(&session->s_mutex);
2613
2614        dout("handle_session mds%d %s %p state %s seq %llu\n",
2615             mds, ceph_session_op_name(op), session,
2616             ceph_session_state_name(session->s_state), seq);
2617
2618        if (session->s_state == CEPH_MDS_SESSION_HUNG) {
2619                session->s_state = CEPH_MDS_SESSION_OPEN;
2620                pr_info("mds%d came back\n", session->s_mds);
2621        }
2622
2623        switch (op) {
2624        case CEPH_SESSION_OPEN:
2625                if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
2626                        pr_info("mds%d reconnect success\n", session->s_mds);
2627                session->s_state = CEPH_MDS_SESSION_OPEN;
2628                renewed_caps(mdsc, session, 0);
2629                wake = 1;
2630                if (mdsc->stopping)
2631                        __close_session(mdsc, session);
2632                break;
2633
2634        case CEPH_SESSION_RENEWCAPS:
2635                if (session->s_renew_seq == seq)
2636                        renewed_caps(mdsc, session, 1);
2637                break;
2638
2639        case CEPH_SESSION_CLOSE:
2640                if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
2641                        pr_info("mds%d reconnect denied\n", session->s_mds);
2642                cleanup_session_requests(mdsc, session);
2643                remove_session_caps(session);
2644                wake = 2; /* for good measure */
2645                wake_up_all(&mdsc->session_close_wq);
2646                break;
2647
2648        case CEPH_SESSION_STALE:
2649                pr_info("mds%d caps went stale, renewing\n",
2650                        session->s_mds);
2651                spin_lock(&session->s_gen_ttl_lock);
2652                session->s_cap_gen++;
2653                session->s_cap_ttl = jiffies - 1;
2654                spin_unlock(&session->s_gen_ttl_lock);
2655                send_renew_caps(mdsc, session);
2656                break;
2657
2658        case CEPH_SESSION_RECALL_STATE:
2659                trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
2660                break;
2661
2662        case CEPH_SESSION_FLUSHMSG:
2663                send_flushmsg_ack(mdsc, session, seq);
2664                break;
2665
2666        case CEPH_SESSION_FORCE_RO:
2667                dout("force_session_readonly %p\n", session);
2668                spin_lock(&session->s_cap_lock);
2669                session->s_readonly = true;
2670                spin_unlock(&session->s_cap_lock);
2671                wake_up_session_caps(session, 0);
2672                break;
2673
2674        case CEPH_SESSION_REJECT:
2675                WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
2676                pr_info("mds%d rejected session\n", session->s_mds);
2677                session->s_state = CEPH_MDS_SESSION_REJECTED;
2678                cleanup_session_requests(mdsc, session);
2679                remove_session_caps(session);
2680                wake = 2; /* for good measure */
2681                break;
2682
2683        default:
2684                pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
2685                WARN_ON(1);
2686        }
2687
2688        mutex_unlock(&session->s_mutex);
2689        if (wake) {
2690                mutex_lock(&mdsc->mutex);
2691                __wake_requests(mdsc, &session->s_waiting);
2692                if (wake == 2)
2693                        kick_requests(mdsc, mds);
2694                mutex_unlock(&mdsc->mutex);
2695        }
2696        return;
2697
2698bad:
2699        pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
2700               (int)msg->front.iov_len);
2701        ceph_msg_dump(msg);
2702        return;
2703}
2704
2705
2706/*
2707 * called under session->mutex.
2708 */
2709static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
2710                                   struct ceph_mds_session *session)
2711{
2712        struct ceph_mds_request *req, *nreq;
2713        struct rb_node *p;
2714        int err;
2715
2716        dout("replay_unsafe_requests mds%d\n", session->s_mds);
2717
2718        mutex_lock(&mdsc->mutex);
2719        list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
2720                err = __prepare_send_request(mdsc, req, session->s_mds, true);
2721                if (!err) {
2722                        ceph_msg_get(req->r_request);
2723                        ceph_con_send(&session->s_con, req->r_request);
2724                }
2725        }
2726
2727        /*
2728         * also re-send old requests when the MDS enters the reconnect stage,
2729         * so that the MDS can process completed requests in its clientreplay stage.
2730         */
2731        p = rb_first(&mdsc->request_tree);
2732        while (p) {
2733                req = rb_entry(p, struct ceph_mds_request, r_node);
2734                p = rb_next(p);
2735                if (req->r_got_unsafe)
2736                        continue;
2737                if (req->r_attempts == 0)
2738                        continue; /* only old requests */
2739                if (req->r_session &&
2740                    req->r_session->s_mds == session->s_mds) {
2741                        err = __prepare_send_request(mdsc, req,
2742                                                     session->s_mds, true);
2743                        if (!err) {
2744                                ceph_msg_get(req->r_request);
2745                                ceph_con_send(&session->s_con, req->r_request);
2746                        }
2747                }
2748        }
2749        mutex_unlock(&mdsc->mutex);
2750}
2751
2752/*
2753 * Encode information about a cap for a reconnect with the MDS.
2754 */
2755static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
2756                          void *arg)
2757{
2758        union {
2759                struct ceph_mds_cap_reconnect v2;
2760                struct ceph_mds_cap_reconnect_v1 v1;
2761        } rec;
2762        struct ceph_inode_info *ci;
2763        struct ceph_reconnect_state *recon_state = arg;
2764        struct ceph_pagelist *pagelist = recon_state->pagelist;
2765        char *path;
2766        int pathlen, err;
2767        u64 pathbase;
2768        u64 snap_follows;
2769        struct dentry *dentry;
2770
2771        ci = cap->ci;
2772
2773        dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
2774             inode, ceph_vinop(inode), cap, cap->cap_id,
2775             ceph_cap_string(cap->issued));
2776        err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
2777        if (err)
2778                return err;
2779
2780        dentry = d_find_alias(inode);
2781        if (dentry) {
2782                path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
2783                if (IS_ERR(path)) {
2784                        err = PTR_ERR(path);
2785                        goto out_dput;
2786                }
2787        } else {
2788                path = NULL;
2789                pathlen = 0;
2790                pathbase = 0;
2791        }
2792
2793        spin_lock(&ci->i_ceph_lock);
2794        cap->seq = 0;        /* reset cap seq */
2795        cap->issue_seq = 0;  /* and issue_seq */
2796        cap->mseq = 0;       /* and migrate_seq */
2797        cap->cap_gen = cap->session->s_cap_gen;
2798
2799        if (recon_state->msg_version >= 2) {
2800                rec.v2.cap_id = cpu_to_le64(cap->cap_id);
2801                rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
2802                rec.v2.issued = cpu_to_le32(cap->issued);
2803                rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
2804                rec.v2.pathbase = cpu_to_le64(pathbase);
2805                rec.v2.flock_len = 0;
2806        } else {
2807                rec.v1.cap_id = cpu_to_le64(cap->cap_id);
2808                rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
2809                rec.v1.issued = cpu_to_le32(cap->issued);
2810                rec.v1.size = cpu_to_le64(inode->i_size);
2811                ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime);
2812                ceph_encode_timespec(&rec.v1.atime, &inode->i_atime);
2813                rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
2814                rec.v1.pathbase = cpu_to_le64(pathbase);
2815        }
2816
2817        if (list_empty(&ci->i_cap_snaps)) {
2818                snap_follows = 0;
2819        } else {
2820                struct ceph_cap_snap *capsnap =
2821                        list_first_entry(&ci->i_cap_snaps,
2822                                         struct ceph_cap_snap, ci_item);
2823                snap_follows = capsnap->follows;
2824        }
2825        spin_unlock(&ci->i_ceph_lock);
2826
2827        if (recon_state->msg_version >= 2) {
2828                int num_fcntl_locks, num_flock_locks;
2829                struct ceph_filelock *flocks;
2830                size_t struct_len, total_len = 0;
2831                u8 struct_v = 0;
2832
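                /*
                 * The lock count can change between ceph_count_locks() and
                 * ceph_encode_locks_to_buffer(); if the buffer turns out to
                 * be too small (-ENOSPC), recount and retry.
                 */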
2833encode_again:
2834                ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
2835                flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
2836                                 sizeof(struct ceph_filelock), GFP_NOFS);
2837                if (!flocks) {
2838                        err = -ENOMEM;
2839                        goto out_free;
2840                }
2841                err = ceph_encode_locks_to_buffer(inode, flocks,
2842                                                  num_fcntl_locks,
2843                                                  num_flock_locks);
2844                if (err) {
2845                        kfree(flocks);
2846                        if (err == -ENOSPC)
2847                                goto encode_again;
2848                        goto out_free;
2849                }
2850
2851                if (recon_state->msg_version >= 3) {
2852                        /* version, compat_version and struct_len */
2853                        total_len = 2 * sizeof(u8) + sizeof(u32);
2854                        struct_v = 2;
2855                }
2856                /*
2857                 * number of encoded locks is stable, so copy to pagelist
2858                 */
2859                struct_len = 2 * sizeof(u32) +
2860                            (num_fcntl_locks + num_flock_locks) *
2861                            sizeof(struct ceph_filelock);
2862                rec.v2.flock_len = cpu_to_le32(struct_len);
2863
2864                struct_len += sizeof(rec.v2);
2865                struct_len += sizeof(u32) + pathlen;
2866
2867                if (struct_v >= 2)
2868                        struct_len += sizeof(u64); /* snap_follows */
2869
2870                total_len += struct_len;
2871                err = ceph_pagelist_reserve(pagelist, total_len);
2872
2873                if (!err) {
2874                        if (recon_state->msg_version >= 3) {
2875                                ceph_pagelist_encode_8(pagelist, struct_v);
2876                                ceph_pagelist_encode_8(pagelist, 1);
2877                                ceph_pagelist_encode_32(pagelist, struct_len);
2878                        }
2879                        ceph_pagelist_encode_string(pagelist, path, pathlen);
2880                        ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
2881                        ceph_locks_to_pagelist(flocks, pagelist,
2882                                               num_fcntl_locks,
2883                                               num_flock_locks);
2884                        if (struct_v >= 2)
2885                                ceph_pagelist_encode_64(pagelist, snap_follows);
2886                }
2887                kfree(flocks);
2888        } else {
2889                size_t size = sizeof(u32) + pathlen + sizeof(rec.v1);
2890                err = ceph_pagelist_reserve(pagelist, size);
2891                if (!err) {
2892                        ceph_pagelist_encode_string(pagelist, path, pathlen);
2893                        ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
2894                }
2895        }
2896
2897        recon_state->nr_caps++;
2898out_free:
2899        kfree(path);
2900out_dput:
2901        dput(dentry);
2902        return err;
2903}
2904
2905
2906/*
2907 * If an MDS fails and recovers, clients need to reconnect in order to
2908 * reestablish shared state.  This includes all caps issued through
2909 * this session _and_ the snap_realm hierarchy.  Because it's not
2910 * clear which snap realms the mds cares about, we send everything we
2911 * know about; that ensures we'll then get any new info the
2912 * recovering MDS might have.
2913 *
2914 * This is a relatively heavyweight operation, but it's rare.
2915 *
2916 * called with mdsc->mutex held.
2917 */
2918static void send_mds_reconnect(struct ceph_mds_client *mdsc,
2919                               struct ceph_mds_session *session)
2920{
2921        struct ceph_msg *reply;
2922        struct rb_node *p;
2923        int mds = session->s_mds;
2924        int err = -ENOMEM;
2925        int s_nr_caps;
2926        struct ceph_pagelist *pagelist;
2927        struct ceph_reconnect_state recon_state;
2928
2929        pr_info("mds%d reconnect start\n", mds);
2930
2931        pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
2932        if (!pagelist)
2933                goto fail_nopagelist;
2934        ceph_pagelist_init(pagelist);
2935
2936        reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false);
2937        if (!reply)
2938                goto fail_nomsg;
2939
2940        mutex_lock(&session->s_mutex);
2941        session->s_state = CEPH_MDS_SESSION_RECONNECTING;
2942        session->s_seq = 0;
2943
2944        dout("session %p state %s\n", session,
2945             ceph_session_state_name(session->s_state));
2946
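            /*
             * Bump the cap generation: caps and dentry leases issued
             * under the old generation are treated as stale and get
             * re-established through the reconnect below.
             */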
2947        spin_lock(&session->s_gen_ttl_lock);
2948        session->s_cap_gen++;
2949        spin_unlock(&session->s_gen_ttl_lock);
2950
2951        spin_lock(&session->s_cap_lock);
2952        /* don't know if session is readonly */
2953        session->s_readonly = 0;
2954        /*
2955         * notify __ceph_remove_cap() that we are composing cap reconnect.
2956         * If a cap gets released before being added to the cap reconnect,
2957         * __ceph_remove_cap() should skip queuing the cap release.
2958         */
2959        session->s_cap_reconnect = 1;
2960        /* drop old cap expires; we're about to reestablish that state */
2961        cleanup_cap_releases(mdsc, session);
2962
2963        /* trim unused caps to reduce MDS's cache rejoin time */
2964        if (mdsc->fsc->sb->s_root)
2965                shrink_dcache_parent(mdsc->fsc->sb->s_root);
2966
2967        ceph_con_close(&session->s_con);
2968        ceph_con_open(&session->s_con,
2969                      CEPH_ENTITY_TYPE_MDS, mds,
2970                      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
2971
2972        /* replay unsafe requests */
2973        replay_unsafe_requests(mdsc, session);
2974
2975        down_read(&mdsc->snap_rwsem);
2976
2977        /* traverse this session's caps */
2978        s_nr_caps = session->s_nr_caps;
2979        err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
2980        if (err)
2981                goto fail;
2982
2983        recon_state.nr_caps = 0;
2984        recon_state.pagelist = pagelist;
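            /*
             * Pick the reconnect encoding: v1 is the bare cap record,
             * v2 (FLOCK) adds the file lock blob, and v3 (MDSENC) wraps
             * each record in a versioned, length-prefixed encoding.
             */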
2985        if (session->s_con.peer_features & CEPH_FEATURE_MDSENC)
2986                recon_state.msg_version = 3;
2987        else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK)
2988                recon_state.msg_version = 2;
2989        else
2990                recon_state.msg_version = 1;
2991        err = iterate_session_caps(session, encode_caps_cb, &recon_state);
2992        if (err < 0)
2993                goto fail;
2994
2995        spin_lock(&session->s_cap_lock);
2996        session->s_cap_reconnect = 0;
2997        spin_unlock(&session->s_cap_lock);
2998
2999        /*
3000         * snaprealms.  we provide the mds with the ino, seq (version), and
3001         * parent for all of our realms.  If the mds has any newer info,
3002         * it will tell us.
3003         */
3004        for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
3005                struct ceph_snap_realm *realm =
3006                        rb_entry(p, struct ceph_snap_realm, node);
3007                struct ceph_mds_snaprealm_reconnect sr_rec;
3008
3009                dout(" adding snap realm %llx seq %lld parent %llx\n",
3010                     realm->ino, realm->seq, realm->parent_ino);
3011                sr_rec.ino = cpu_to_le64(realm->ino);
3012                sr_rec.seq = cpu_to_le64(realm->seq);
3013                sr_rec.parent = cpu_to_le64(realm->parent_ino);
3014                err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
3015                if (err)
3016                        goto fail;
3017        }
3018
3019        reply->hdr.version = cpu_to_le16(recon_state.msg_version);
3020
3021        /* raced with cap release? */
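            /*
             * The cap count was the first thing encoded into the
             * pagelist, so it sits at the start of the first page;
             * patch it in place if the count changed while encoding.
             */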
3022        if (s_nr_caps != recon_state.nr_caps) {
3023                struct page *page = list_first_entry(&pagelist->head,
3024                                                     struct page, lru);
3025                __le32 *addr = kmap_atomic(page);
3026                *addr = cpu_to_le32(recon_state.nr_caps);
3027                kunmap_atomic(addr);
3028        }
3029
3030        reply->hdr.data_len = cpu_to_le32(pagelist->length);
3031        ceph_msg_data_add_pagelist(reply, pagelist);
3032
3033        ceph_early_kick_flushing_caps(mdsc, session);
3034
3035        ceph_con_send(&session->s_con, reply);
3036
3037        mutex_unlock(&session->s_mutex);
3038
3039        mutex_lock(&mdsc->mutex);
3040        __wake_requests(mdsc, &session->s_waiting);
3041        mutex_unlock(&mdsc->mutex);
3042
3043        up_read(&mdsc->snap_rwsem);
3044        return;
3045
3046fail:
3047        ceph_msg_put(reply);
3048        up_read(&mdsc->snap_rwsem);
3049        mutex_unlock(&session->s_mutex);
3050fail_nomsg:
3051        ceph_pagelist_release(pagelist);
3052fail_nopagelist:
3053        pr_err("error %d preparing reconnect for mds%d\n", err, mds);
3054        return;
3055}
3056
3057
3058/*
3059 * compare old and new mdsmaps, kicking requests
3060 * and closing out old connections as necessary
3061 *
3062 * called under mdsc->mutex.
3063 */
3064static void check_new_map(struct ceph_mds_client *mdsc,
3065                          struct ceph_mdsmap *newmap,
3066                          struct ceph_mdsmap *oldmap)
3067{
3068        int i;
3069        int oldstate, newstate;
3070        struct ceph_mds_session *s;
3071
3072        dout("check_new_map new %u old %u\n",
3073             newmap->m_epoch, oldmap->m_epoch);
3074
3075        for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
3076                if (mdsc->sessions[i] == NULL)
3077                        continue;
3078                s = mdsc->sessions[i];
3079                oldstate = ceph_mdsmap_get_state(oldmap, i);
3080                newstate = ceph_mdsmap_get_state(newmap, i);
3081
3082                dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
3083                     i, ceph_mds_state_name(oldstate),
3084                     ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
3085                     ceph_mds_state_name(newstate),
3086                     ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
3087                     ceph_session_state_name(s->s_state));
3088
3089                if (i >= newmap->m_max_mds ||
3090                    memcmp(ceph_mdsmap_get_addr(oldmap, i),
3091                           ceph_mdsmap_get_addr(newmap, i),
3092                           sizeof(struct ceph_entity_addr))) {
3093                        if (s->s_state == CEPH_MDS_SESSION_OPENING) {
3094                                /* the session never opened; just close it
3095                                 * out now */
3096                                __wake_requests(mdsc, &s->s_waiting);
3097                                __unregister_session(mdsc, s);
3098                        } else {
3099                                /* just close it */
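                                    /*
                                     * Take s_mutex without holding
                                     * mdsc->mutex (the ordering used
                                     * elsewhere in this file), then
                                     * retake mdsc->mutex for the rest
                                     * of the loop.
                                     */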
3100                                mutex_unlock(&mdsc->mutex);
3101                                mutex_lock(&s->s_mutex);
3102                                mutex_lock(&mdsc->mutex);
3103                                ceph_con_close(&s->s_con);
3104                                mutex_unlock(&s->s_mutex);
3105                                s->s_state = CEPH_MDS_SESSION_RESTARTING;
3106                        }
3107                } else if (oldstate == newstate) {
3108                        continue;  /* nothing new with this mds */
3109                }
3110
3111                /*
3112                 * send reconnect?
3113                 */
3114                if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
3115                    newstate >= CEPH_MDS_STATE_RECONNECT) {
3116                        mutex_unlock(&mdsc->mutex);
3117                        send_mds_reconnect(mdsc, s);
3118                        mutex_lock(&mdsc->mutex);
3119                }
3120
3121                /*
3122         * kick requests on any mds that has gone active.
3123                 */
3124                if (oldstate < CEPH_MDS_STATE_ACTIVE &&
3125                    newstate >= CEPH_MDS_STATE_ACTIVE) {
3126                        if (oldstate != CEPH_MDS_STATE_CREATING &&
3127                            oldstate != CEPH_MDS_STATE_STARTING)
3128                                pr_info("mds%d recovery completed\n", s->s_mds);
3129                        kick_requests(mdsc, i);
3130                        ceph_kick_flushing_caps(mdsc, s);
3131                        wake_up_session_caps(s, 1);
3132                }
3133        }
3134
3135        for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) {
3136                s = mdsc->sessions[i];
3137                if (!s)
3138                        continue;
3139                if (!ceph_mdsmap_is_laggy(newmap, i))
3140                        continue;
3141                if (s->s_state == CEPH_MDS_SESSION_OPEN ||
3142                    s->s_state == CEPH_MDS_SESSION_HUNG ||
3143                    s->s_state == CEPH_MDS_SESSION_CLOSING) {
3144                        dout(" connecting to export targets of laggy mds%d\n",
3145                             i);
3146                        __open_export_target_sessions(mdsc, s);
3147                }
3148        }
3149}
3150
3151
3152
3153/*
3154 * leases
3155 */
3156
3157/*
3158 * caller must hold session s_mutex, dentry->d_lock
3159 */
3160void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
3161{
3162        struct ceph_dentry_info *di = ceph_dentry(dentry);
3163
3164        ceph_put_mds_session(di->lease_session);
3165        di->lease_session = NULL;
3166}
3167
3168static void handle_lease(struct ceph_mds_client *mdsc,
3169                         struct ceph_mds_session *session,
3170                         struct ceph_msg *msg)
3171{
3172        struct super_block *sb = mdsc->fsc->sb;
3173        struct inode *inode;
3174        struct dentry *parent, *dentry;
3175        struct ceph_dentry_info *di;
3176        int mds = session->s_mds;
3177        struct ceph_mds_lease *h = msg->front.iov_base;
3178        u32 seq;
3179        struct ceph_vino vino;
3180        struct qstr dname;
3181        int release = 0;
3182
3183        dout("handle_lease from mds%d\n", mds);
3184
3185        /* decode */
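            /*
             * Front payload: struct ceph_mds_lease, then a u32 dentry
             * name length, then the name bytes themselves.
             */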
3186        if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
3187                goto bad;
3188        vino.ino = le64_to_cpu(h->ino);
3189        vino.snap = CEPH_NOSNAP;
3190        seq = le32_to_cpu(h->seq);
3191        dname.name = (void *)h + sizeof(*h) + sizeof(u32);
3192        dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
3193        if (dname.len != get_unaligned_le32(h+1))
3194                goto bad;
3195
3196        /* lookup inode */
3197        inode = ceph_find_inode(sb, vino);
3198        dout("handle_lease %s, ino %llx %p %.*s\n",
3199             ceph_lease_op_name(h->action), vino.ino, inode,
3200             dname.len, dname.name);
3201
3202        mutex_lock(&session->s_mutex);
3203        session->s_seq++;
3204
3205        if (inode == NULL) {
3206                dout("handle_lease no inode %llx\n", vino.ino);
3207                goto release;
3208        }
3209
3210        /* dentry */
3211        parent = d_find_alias(inode);
3212        if (!parent) {
3213                dout("no parent dentry on inode %p\n", inode);
3214                WARN_ON(1);
3215                goto release;  /* hrm... */
3216        }
3217        dname.hash = full_name_hash(parent, dname.name, dname.len);
3218        dentry = d_lookup(parent, &dname);
3219        dput(parent);
3220        if (!dentry)
3221                goto release;
3222
3223        spin_lock(&dentry->d_lock);
3224        di = ceph_dentry(dentry);
3225        switch (h->action) {
3226        case CEPH_MDS_LEASE_REVOKE:
3227                if (di->lease_session == session) {
3228                        if (ceph_seq_cmp(di->lease_seq, seq) > 0)
3229                                h->seq = cpu_to_le32(di->lease_seq);
3230                        __ceph_mdsc_drop_dentry_lease(dentry);
3231                }
3232                release = 1;
3233                break;
3234
3235        case CEPH_MDS_LEASE_RENEW:
3236                if (di->lease_session == session &&
3237                    di->lease_gen == session->s_cap_gen &&
3238                    di->lease_renew_from &&
3239                    di->lease_renew_after == 0) {
3240                        unsigned long duration =
3241                                msecs_to_jiffies(le32_to_cpu(h->duration_ms));
3242
3243                        di->lease_seq = seq;
3244                        di->time = di->lease_renew_from + duration;
3245                        di->lease_renew_after = di->lease_renew_from +
3246                                (duration >> 1);
3247                        di->lease_renew_from = 0;
3248                }
3249                break;
3250        }
3251        spin_unlock(&dentry->d_lock);
3252        dput(dentry);
3253
3254        if (!release)
3255                goto out;
3256
3257release:
3258        /* let's just reuse the same message */
3259        h->action = CEPH_MDS_LEASE_REVOKE_ACK;
3260        ceph_msg_get(msg);
3261        ceph_con_send(&session->s_con, msg);
3262
3263out:
3264        iput(inode);
3265        mutex_unlock(&session->s_mutex);
3266        return;
3267
3268bad:
3269        pr_err("corrupt lease message\n");
3270        ceph_msg_dump(msg);
3271}
3272
3273void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
3274                              struct inode *inode,
3275                              struct dentry *dentry, char action,
3276                              u32 seq)
3277{
3278        struct ceph_msg *msg;
3279        struct ceph_mds_lease *lease;
3280        int len = sizeof(*lease) + sizeof(u32);
3281        int dnamelen = 0;
3282
3283        dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
3284             inode, dentry, ceph_lease_op_name(action), session->s_mds);
3285        dnamelen = dentry->d_name.len;
3286        len += dnamelen;
3287
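            /*
             * Build the same wire layout handle_lease() parses: the
             * lease struct, a u32 name length, then the dentry name.
             */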
3288        msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
3289        if (!msg)
3290                return;
3291        lease = msg->front.iov_base;
3292        lease->action = action;
3293        lease->ino = cpu_to_le64(ceph_vino(inode).ino);
3294        lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
3295        lease->seq = cpu_to_le32(seq);
3296        put_unaligned_le32(dnamelen, lease + 1);
3297        memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);
3298
3299        /*
3300         * if this is a preemptive lease RELEASE, no need to
3301         * flush the request stream, since the actual request will
3302         * soon follow.
3303         */
3304        msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
3305
3306        ceph_con_send(&session->s_con, msg);
3307}
3308
3309/*
3310 * drop all leases (and dentry refs) in preparation for umount
3311 */
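    /*
     * Note that the loop below only takes and releases each session's
     * s_mutex; it does not itself drop leases or dentry refs.  It
     * appears to act as a barrier so that lease handling already in
     * flight under s_mutex finishes before unmount proceeds.
     */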
3312static void drop_leases(struct ceph_mds_client *mdsc)
3313{
3314        int i;
3315
3316        dout("drop_leases\n");
3317        mutex_lock(&mdsc->mutex);
3318        for (i = 0; i < mdsc->max_sessions; i++) {
3319                struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
3320                if (!s)
3321                        continue;
3322                mutex_unlock(&mdsc->mutex);
3323                mutex_lock(&s->s_mutex);
3324                mutex_unlock(&s->s_mutex);
3325                ceph_put_mds_session(s);
3326                mutex_lock(&mdsc->mutex);
3327        }
3328        mutex_unlock(&mdsc->mutex);
3329}
3330
3331
3332
3333/*
3334 * delayed work -- periodically trim expired leases, renew caps with mds
3335 */
3336static void schedule_delayed(struct ceph_mds_client *mdsc)
3337{
3338        int delay = 5;
3339        unsigned hz = round_jiffies_relative(HZ * delay);
3340        schedule_delayed_work(&mdsc->delayed_work, hz);
3341}
3342
3343static void delayed_work(struct work_struct *work)
3344{
3345        int i;
3346        struct ceph_mds_client *mdsc =
3347                container_of(work, struct ceph_mds_client, delayed_work.work);
3348        int renew_interval;
3349        int renew_caps;
3350
3351        dout("mdsc delayed_work\n");
3352        ceph_check_delayed_caps(mdsc);
3353
3354        mutex_lock(&mdsc->mutex);
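            /* renew caps once a quarter of the session timeout has elapsed */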
3355        renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
3356        renew_caps = time_after_eq(jiffies, HZ*renew_interval +
3357                                   mdsc->last_renew_caps);
3358        if (renew_caps)
3359                mdsc->last_renew_caps = jiffies;
3360
3361        for (i = 0; i < mdsc->max_sessions; i++) {
3362                struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
3363                if (s == NULL)
3364                        continue;
3365                if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
3366                        dout("resending session close request for mds%d\n",
3367                             s->s_mds);
3368                        request_close_session(mdsc, s);
3369                        ceph_put_mds_session(s);
3370                        continue;
3371                }
3372                if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
3373                        if (s->s_state == CEPH_MDS_SESSION_OPEN) {
3374                                s->s_state = CEPH_MDS_SESSION_HUNG;
3375                                pr_info("mds%d hung\n", s->s_mds);
3376                        }
3377                }
3378                if (s->s_state < CEPH_MDS_SESSION_OPEN) {
3379                        /* this mds has failed or is recovering; just wait */
3380                        ceph_put_mds_session(s);
3381                        continue;
3382                }
3383                mutex_unlock(&mdsc->mutex);
3384
3385                mutex_lock(&s->s_mutex);
3386                if (renew_caps)
3387                        send_renew_caps(mdsc, s);
3388                else
3389                        ceph_con_keepalive(&s->s_con);
3390                if (s->s_state == CEPH_MDS_SESSION_OPEN ||
3391                    s->s_state == CEPH_MDS_SESSION_HUNG)
3392                        ceph_send_cap_releases(mdsc, s);
3393                mutex_unlock(&s->s_mutex);
3394                ceph_put_mds_session(s);
3395
3396                mutex_lock(&mdsc->mutex);
3397        }
3398        mutex_unlock(&mdsc->mutex);
3399
3400        schedule_delayed(mdsc);
3401}
3402
3403int ceph_mdsc_init(struct ceph_fs_client *fsc)
3405{
3406        struct ceph_mds_client *mdsc;
3407
3408        mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
3409        if (!mdsc)
3410                return -ENOMEM;
3411        mdsc->fsc = fsc;
3412        fsc->mdsc = mdsc;
3413        mutex_init(&mdsc->mutex);
3414        mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
3415        if (mdsc->mdsmap == NULL) {
3416                kfree(mdsc);
3417                return -ENOMEM;
3418        }
3419
3420        init_completion(&mdsc->safe_umount_waiters);
3421        init_waitqueue_head(&mdsc->session_close_wq);
3422        INIT_LIST_HEAD(&mdsc->waiting_for_map);
3423        mdsc->sessions = NULL;
3424        atomic_set(&mdsc->num_sessions, 0);
3425        mdsc->max_sessions = 0;
3426        mdsc->stopping = 0;
3427        mdsc->last_snap_seq = 0;
3428        init_rwsem(&mdsc->snap_rwsem);
3429        mdsc->snap_realms = RB_ROOT;
3430        INIT_LIST_HEAD(&mdsc->snap_empty);
3431        spin_lock_init(&mdsc->snap_empty_lock);
3432        mdsc->last_tid = 0;
3433        mdsc->oldest_tid = 0;
3434        mdsc->request_tree = RB_ROOT;
3435        INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
3436        mdsc->last_renew_caps = jiffies;
3437        INIT_LIST_HEAD(&mdsc->cap_delay_list);
3438        spin_lock_init(&mdsc->cap_delay_lock);
3439        INIT_LIST_HEAD(&mdsc->snap_flush_list);
3440        spin_lock_init(&mdsc->snap_flush_lock);
3441        mdsc->last_cap_flush_tid = 1;
3442        INIT_LIST_HEAD(&mdsc->cap_flush_list);
3443        INIT_LIST_HEAD(&mdsc->cap_dirty);
3444        INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
3445        mdsc->num_cap_flushing = 0;
3446        spin_lock_init(&mdsc->cap_dirty_lock);
3447        init_waitqueue_head(&mdsc->cap_flushing_wq);
3448        spin_lock_init(&mdsc->dentry_lru_lock);
3449        INIT_LIST_HEAD(&mdsc->dentry_lru);
3450
3451        ceph_caps_init(mdsc);
3452        ceph_adjust_min_caps(mdsc, fsc->min_caps);
3453
3454        init_rwsem(&mdsc->pool_perm_rwsem);
3455        mdsc->pool_perm_tree = RB_ROOT;
3456
3457        return 0;
3458}
3459
3460/*
3461 * Wait for safe replies on open mds requests.  If we time out, drop
3462 * all requests from the tree to avoid dangling dentry refs.
3463 */
3464static void wait_requests(struct ceph_mds_client *mdsc)
3465{
3466        struct ceph_options *opts = mdsc->fsc->client->options;
3467        struct ceph_mds_request *req;
3468
3469        mutex_lock(&mdsc->mutex);
3470        if (__get_oldest_req(mdsc)) {
3471                mutex_unlock(&mdsc->mutex);
3472
3473                dout("wait_requests waiting for requests\n");
3474                wait_for_completion_timeout(&mdsc->safe_umount_waiters,
3475                                    ceph_timeout_jiffies(opts->mount_timeout));
3476
3477                /* tear down remaining requests */
3478                mutex_lock(&mdsc->mutex);
3479                while ((req = __get_oldest_req(mdsc))) {
3480                        dout("wait_requests timed out on tid %llu\n",
3481                             req->r_tid);
3482                        __unregister_request(mdsc, req);
3483                }
3484        }
3485        mutex_unlock(&mdsc->mutex);
3486        dout("wait_requests done\n");
3487}
3488
3489/*
3490 * called before mount is ro, and before dentries are torn down.
3491 * (hmm, does this still race with new lookups?)
3492 */
3493void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
3494{
3495        dout("pre_umount\n");
3496        mdsc->stopping = 1;
3497
3498        drop_leases(mdsc);
3499        ceph_flush_dirty_caps(mdsc);
3500        wait_requests(mdsc);
3501
3502        /*
3503         * wait for reply handlers to drop their request refs and
3504         * their inode/dcache refs
3505         */
3506        ceph_msgr_flush();
3507}
3508
3509/*
3510 * wait for all write mds requests to flush.
3511 */
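    /*
     * Walk the request tree in tid order.  For each write request at or
     * below want_tid, take a ref, drop mdsc->mutex and wait for its safe
     * reply.  A ref is also held on the next request so that, after the
     * wait, we can tell whether it was unregistered in the meantime (its
     * rb node emptied) and restart the walk if it was.
     */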
3512static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
3513{
3514        struct ceph_mds_request *req = NULL, *nextreq;
3515        struct rb_node *n;
3516
3517        mutex_lock(&mdsc->mutex);
3518        dout("wait_unsafe_requests want %lld\n", want_tid);
3519restart:
3520        req = __get_oldest_req(mdsc);
3521        while (req && req->r_tid <= want_tid) {
3522                /* find next request */
3523                n = rb_next(&req->r_node);
3524                if (n)
3525                        nextreq = rb_entry(n, struct ceph_mds_request, r_node);
3526                else
3527                        nextreq = NULL;
3528                if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
3529                    (req->r_op & CEPH_MDS_OP_WRITE)) {
3530                        /* write op */
3531                        ceph_mdsc_get_request(req);
3532                        if (nextreq)
3533                                ceph_mdsc_get_request(nextreq);
3534                        mutex_unlock(&mdsc->mutex);
3535                        dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
3536                             req->r_tid, want_tid);
3537                        wait_for_completion(&req->r_safe_completion);
3538                        mutex_lock(&mdsc->mutex);
3539                        ceph_mdsc_put_request(req);
3540                        if (!nextreq)
3541                                break;  /* no next request existed, so we're done */
3542                        if (RB_EMPTY_NODE(&nextreq->r_node)) {
3543                                /* next request was removed from tree */
3544                                ceph_mdsc_put_request(nextreq);
3545                                goto restart;
3546                        }
3547                        ceph_mdsc_put_request(nextreq);  /* won't go away */
3548                }
3549                req = nextreq;
3550        }
3551        mutex_unlock(&mdsc->mutex);
3552        dout("wait_unsafe_requests done\n");
3553}
3554
3555void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
3556{
3557        u64 want_tid, want_flush;
3558
3559        if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
3560                return;
3561
3562        dout("sync\n");
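            /*
             * want_tid is the highest request tid handed out so far: all
             * write requests up to it must get safe replies.  want_flush
             * is the latest cap flush tid: all cap flushes up to it must
             * complete.
             */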
3563        mutex_lock(&mdsc->mutex);
3564        want_tid = mdsc->last_tid;
3565        mutex_unlock(&mdsc->mutex);
3566
3567        ceph_flush_dirty_caps(mdsc);
3568        spin_lock(&mdsc->cap_dirty_lock);
3569        want_flush = mdsc->last_cap_flush_tid;
3570        if (!list_empty(&mdsc->cap_flush_list)) {
3571                struct ceph_cap_flush *cf =
3572                        list_last_entry(&mdsc->cap_flush_list,
3573                                        struct ceph_cap_flush, g_list);
3574                cf->wake = true;
3575        }
3576        spin_unlock(&mdsc->cap_dirty_lock);
3577
3578        dout("sync want tid %lld flush_seq %lld\n",
3579             want_tid, want_flush);
3580
3581        wait_unsafe_requests(mdsc, want_tid);
3582        wait_caps_flush(mdsc, want_flush);
3583}
3584
3585/*
3586 * true if all sessions are closed, or we force unmount
3587 */
3588static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
3589{
3590        if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
3591                return true;
3592        return atomic_read(&mdsc->num_sessions) <= skipped;
3593}
3594
3595/*
3596 * called after sb is ro.
3597 */
3598void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
3599{
3600        struct ceph_options *opts = mdsc->fsc->client->options;
3601        struct ceph_mds_session *session;
3602        int i;
3603        int skipped = 0;
3604
3605        dout("close_sessions\n");
3606
3607        /* close sessions */
3608        mutex_lock(&mdsc->mutex);
3609        for (i = 0; i < mdsc->max_sessions; i++) {
3610                session = __ceph_lookup_mds_session(mdsc, i);
3611                if (!session)
3612                        continue;
3613                mutex_unlock(&mdsc->mutex);
3614                mutex_lock(&session->s_mutex);
3615                if (__close_session(mdsc, session) <= 0)
3616                        skipped++;
3617                mutex_unlock(&session->s_mutex);
3618                ceph_put_mds_session(session);
3619                mutex_lock(&mdsc->mutex);
3620        }
3621        mutex_unlock(&mdsc->mutex);
3622
3623        dout("waiting for sessions to close\n");
3624        wait_event_timeout(mdsc->session_close_wq,
3625                           done_closing_sessions(mdsc, skipped),
3626                           ceph_timeout_jiffies(opts->mount_timeout));
3627
3628        /* tear down remaining sessions */
3629        mutex_lock(&mdsc->mutex);
3630        for (i = 0; i < mdsc->max_sessions; i++) {
3631                if (mdsc->sessions[i]) {
3632                        session = get_session(mdsc->sessions[i]);
3633                        __unregister_session(mdsc, session);
3634                        mutex_unlock(&mdsc->mutex);
3635                        mutex_lock(&session->s_mutex);
3636                        remove_session_caps(session);
3637                        mutex_unlock(&session->s_mutex);
3638                        ceph_put_mds_session(session);
3639                        mutex_lock(&mdsc->mutex);
3640                }
3641        }
3642        WARN_ON(!list_empty(&mdsc->cap_delay_list));
3643        mutex_unlock(&mdsc->mutex);
3644
3645        ceph_cleanup_empty_realms(mdsc);
3646
3647        cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
3648
3649        dout("stopped\n");
3650}
3651
3652void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
3653{
3654        struct ceph_mds_session *session;
3655        int mds;
3656
3657        dout("force umount\n");
3658
3659        mutex_lock(&mdsc->mutex);
3660        for (mds = 0; mds < mdsc->max_sessions; mds++) {
3661                session = __ceph_lookup_mds_session(mdsc, mds);
3662                if (!session)
3663                        continue;
3664                mutex_unlock(&mdsc->mutex);
3665                mutex_lock(&session->s_mutex);
3666                __close_session(mdsc, session);
3667                if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
3668                        cleanup_session_requests(mdsc, session);
3669                        remove_session_caps(session);
3670                }
3671                mutex_unlock(&session->s_mutex);
3672                ceph_put_mds_session(session);
3673                mutex_lock(&mdsc->mutex);
3674                kick_requests(mdsc, mds);
3675        }
3676        __wake_requests(mdsc, &mdsc->waiting_for_map);
3677        mutex_unlock(&mdsc->mutex);
3678}
3679
3680static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
3681{
3682        dout("stop\n");
3683        cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
3684        if (mdsc->mdsmap)
3685                ceph_mdsmap_destroy(mdsc->mdsmap);
3686        kfree(mdsc->sessions);
3687        ceph_caps_finalize(mdsc);
3688        ceph_pool_perm_destroy(mdsc);
3689}
3690
3691void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
3692{
3693        struct ceph_mds_client *mdsc = fsc->mdsc;
3694
3695        dout("mdsc_destroy %p\n", mdsc);
3696        ceph_mdsc_stop(mdsc);
3697
3698        /* flush out any connection work with references to us */
3699        ceph_msgr_flush();
3700
3701        fsc->mdsc = NULL;
3702        kfree(mdsc);
3703        dout("mdsc_destroy %p done\n", mdsc);
3704}
3705
3706void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
3707{
3708        struct ceph_fs_client *fsc = mdsc->fsc;
3709        const char *mds_namespace = fsc->mount_options->mds_namespace;
3710        void *p = msg->front.iov_base;
3711        void *end = p + msg->front.iov_len;
3712        u32 epoch;
3713        u32 map_len;
3714        u32 num_fs;
3715        u32 mount_fscid = (u32)-1;
3716        u8 struct_v, struct_cv;
3717        int err = -EINVAL;
3718
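            /*
             * FSMap (user variant) as decoded here: u32 epoch; a
             * versioned header (struct_v, struct_cv, u32 map_len); u32
             * epoch and u32 legacy_client_fscid (both skipped); u32
             * num_fs; then one versioned entry per filesystem, starting
             * with u32 fscid and a length-prefixed name.
             */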
3719        ceph_decode_need(&p, end, sizeof(u32), bad);
3720        epoch = ceph_decode_32(&p);
3721
3722        dout("handle_fsmap epoch %u\n", epoch);
3723
3724        ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
3725        struct_v = ceph_decode_8(&p);
3726        struct_cv = ceph_decode_8(&p);
3727        map_len = ceph_decode_32(&p);
3728
3729        ceph_decode_need(&p, end, sizeof(u32) * 3, bad);
3730        p += sizeof(u32) * 2; /* skip epoch and legacy_client_fscid */
3731
3732        num_fs = ceph_decode_32(&p);
3733        while (num_fs-- > 0) {
3734                void *info_p, *info_end;
3735                u32 info_len;
3736                u8 info_v, info_cv;
3737                u32 fscid, namelen;
3738
3739                ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
3740                info_v = ceph_decode_8(&p);
3741                info_cv = ceph_decode_8(&p);
3742                info_len = ceph_decode_32(&p);
3743                ceph_decode_need(&p, end, info_len, bad);
3744                info_p = p;
3745                info_end = p + info_len;
3746                p = info_end;
3747
3748                ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
3749                fscid = ceph_decode_32(&info_p);
3750                namelen = ceph_decode_32(&info_p);
3751                ceph_decode_need(&info_p, info_end, namelen, bad);
3752
3753                if (mds_namespace &&
3754                    strlen(mds_namespace) == namelen &&
3755                    !strncmp(mds_namespace, (char *)info_p, namelen)) {
3756                        mount_fscid = fscid;
3757                        break;
3758                }
3759        }
3760
3761        ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
3762        if (mount_fscid != (u32)-1) {
3763                fsc->client->monc.fs_cluster_id = mount_fscid;
3764                ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
3765                                   0, true);
3766                ceph_monc_renew_subs(&fsc->client->monc);
3767        } else {
3768                err = -ENOENT;
3769                goto err_out;
3770        }
3771        return;
3772bad:
3773        pr_err("error decoding fsmap\n");
3774err_out:
3775        mutex_lock(&mdsc->mutex);
3776        mdsc->mdsmap_err = -ENOENT;
3777        __wake_requests(mdsc, &mdsc->waiting_for_map);
3778        mutex_unlock(&mdsc->mutex);
3779        return;
3780}
3781
3782/*
3783 * handle mds map update.
3784 */
3785void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
3786{
3787        u32 epoch;
3788        u32 maplen;
3789        void *p = msg->front.iov_base;
3790        void *end = p + msg->front.iov_len;
3791        struct ceph_mdsmap *newmap, *oldmap;
3792        struct ceph_fsid fsid;
3793        int err = -EINVAL;
3794
3795        ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
3796        ceph_decode_copy(&p, &fsid, sizeof(fsid));
3797        if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
3798                return;
3799        epoch = ceph_decode_32(&p);
3800        maplen = ceph_decode_32(&p);
3801        dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
3802
3803        /* do we need it? */
3804        mutex_lock(&mdsc->mutex);
3805        if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
3806                dout("handle_map epoch %u <= our %u\n",
3807                     epoch, mdsc->mdsmap->m_epoch);
3808                mutex_unlock(&mdsc->mutex);
3809                return;
3810        }
3811
3812        newmap = ceph_mdsmap_decode(&p, end);
3813        if (IS_ERR(newmap)) {
3814                err = PTR_ERR(newmap);
3815                goto bad_unlock;
3816        }
3817
3818        /* swap into place */
3819        if (mdsc->mdsmap) {
3820                oldmap = mdsc->mdsmap;
3821                mdsc->mdsmap = newmap;
3822                check_new_map(mdsc, newmap, oldmap);
3823                ceph_mdsmap_destroy(oldmap);
3824        } else {
3825                mdsc->mdsmap = newmap;  /* first mds map */
3826        }
3827        mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;
3828
3829        __wake_requests(mdsc, &mdsc->waiting_for_map);
3830        ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
3831                          mdsc->mdsmap->m_epoch);
3832
3833        mutex_unlock(&mdsc->mutex);
3834        schedule_delayed(mdsc);
3835        return;
3836
3837bad_unlock:
3838        mutex_unlock(&mdsc->mutex);
3839bad:
3840        pr_err("error decoding mdsmap %d\n", err);
3841        return;
3842}
3843
3844static struct ceph_connection *con_get(struct ceph_connection *con)
3845{
3846        struct ceph_mds_session *s = con->private;
3847
3848        if (get_session(s)) {
3849                dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
3850                return con;
3851        }
3852        dout("mdsc con_get %p FAIL\n", s);
3853        return NULL;
3854}
3855
3856static void con_put(struct ceph_connection *con)
3857{
3858        struct ceph_mds_session *s = con->private;
3859
3860        dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1);
3861        ceph_put_mds_session(s);
3862}
3863
3864/*
3865 * if the client is unresponsive for long enough, the mds will kill
3866 * the session entirely.
3867 */
3868static void peer_reset(struct ceph_connection *con)
3869{
3870        struct ceph_mds_session *s = con->private;
3871        struct ceph_mds_client *mdsc = s->s_mdsc;
3872
3873        pr_warn("mds%d closed our session\n", s->s_mds);
3874        send_mds_reconnect(mdsc, s);
3875}
3876
3877static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
3878{
3879        struct ceph_mds_session *s = con->private;
3880        struct ceph_mds_client *mdsc = s->s_mdsc;
3881        int type = le16_to_cpu(msg->hdr.type);
3882
3883        mutex_lock(&mdsc->mutex);
3884        if (__verify_registered_session(mdsc, s) < 0) {
3885                mutex_unlock(&mdsc->mutex);
3886                goto out;
3887        }
3888        mutex_unlock(&mdsc->mutex);
3889
3890        switch (type) {
3891        case CEPH_MSG_MDS_MAP:
3892                ceph_mdsc_handle_mdsmap(mdsc, msg);
3893                break;
3894        case CEPH_MSG_FS_MAP_USER:
3895                ceph_mdsc_handle_fsmap(mdsc, msg);
3896                break;
3897        case CEPH_MSG_CLIENT_SESSION:
3898                handle_session(s, msg);
3899                break;
3900        case CEPH_MSG_CLIENT_REPLY:
3901                handle_reply(s, msg);
3902                break;
3903        case CEPH_MSG_CLIENT_REQUEST_FORWARD:
3904                handle_forward(mdsc, s, msg);
3905                break;
3906        case CEPH_MSG_CLIENT_CAPS:
3907                ceph_handle_caps(s, msg);
3908                break;
3909        case CEPH_MSG_CLIENT_SNAP:
3910                ceph_handle_snap(mdsc, s, msg);
3911                break;
3912        case CEPH_MSG_CLIENT_LEASE:
3913                handle_lease(mdsc, s, msg);
3914                break;
3915
3916        default:
3917                pr_err("received unknown message type %d %s\n", type,
3918                       ceph_msg_type_name(type));
3919        }
3920out:
3921        ceph_msg_put(msg);
3922}
3923
3924/*
3925 * authentication
3926 */
3927
3928/*
3929 * Note: returned pointer is the address of a structure that's
3930 * managed separately.  Caller must *not* attempt to free it.
3931 */
3932static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
3933                                        int *proto, int force_new)
3934{
3935        struct ceph_mds_session *s = con->private;
3936        struct ceph_mds_client *mdsc = s->s_mdsc;
3937        struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3938        struct ceph_auth_handshake *auth = &s->s_auth;
3939
3940        if (force_new && auth->authorizer) {
3941                ceph_auth_destroy_authorizer(auth->authorizer);
3942                auth->authorizer = NULL;
3943        }
3944        if (!auth->authorizer) {
3945                int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
3946                                                      auth);
3947                if (ret)
3948                        return ERR_PTR(ret);
3949        } else {
3950                int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
3951                                                      auth);
3952                if (ret)
3953                        return ERR_PTR(ret);
3954        }
3955        *proto = ac->protocol;
3956
3957        return auth;
3958}
3959
3960
3961static int verify_authorizer_reply(struct ceph_connection *con)
3962{
3963        struct ceph_mds_session *s = con->private;
3964        struct ceph_mds_client *mdsc = s->s_mdsc;
3965        struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3966
3967        return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer);
3968}
3969
3970static int invalidate_authorizer(struct ceph_connection *con)
3971{
3972        struct ceph_mds_session *s = con->private;
3973        struct ceph_mds_client *mdsc = s->s_mdsc;
3974        struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3975
3976        ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
3977
3978        return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
3979}
3980
3981static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
3982                                struct ceph_msg_header *hdr, int *skip)
3983{
3984        struct ceph_msg *msg;
3985        int type = (int) le16_to_cpu(hdr->type);
3986        int front_len = (int) le32_to_cpu(hdr->front_len);
3987
3988        if (con->in_msg)
3989                return con->in_msg;
3990
3991        *skip = 0;
3992        msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
3993        if (!msg) {
3994                pr_err("unable to allocate msg type %d len %d\n",
3995                       type, front_len);
3996                return NULL;
3997        }
3998
3999        return msg;
4000}
4001
4002static int mds_sign_message(struct ceph_msg *msg)
4003{
4004        struct ceph_mds_session *s = msg->con->private;
4005        struct ceph_auth_handshake *auth = &s->s_auth;
4006
4007        return ceph_auth_sign_message(auth, msg);
4008}
4009
4010static int mds_check_message_signature(struct ceph_msg *msg)
4011{
4012        struct ceph_mds_session *s = msg->con->private;
4013        struct ceph_auth_handshake *auth = &s->s_auth;
4014
4015        return ceph_auth_check_message_signature(auth, msg);
4016}
4017
4018static const struct ceph_connection_operations mds_con_ops = {
4019        .get = con_get,
4020        .put = con_put,
4021        .dispatch = dispatch,
4022        .get_authorizer = get_authorizer,
4023        .verify_authorizer_reply = verify_authorizer_reply,
4024        .invalidate_authorizer = invalidate_authorizer,
4025        .peer_reset = peer_reset,
4026        .alloc_msg = mds_alloc_msg,
4027        .sign_message = mds_sign_message,
4028        .check_message_signature = mds_check_message_signature,
4029};
4030
4031/* eof */
4032