linux/fs/ceph/mds_client.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/bits.h>
#include <linux/ktime.h>
#include <linux/bitmap.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

#define RECONNECT_MAX_SIZE (INT_MAX - PAGE_SIZE)

/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */

struct ceph_reconnect_state {
	struct ceph_mds_session *session;
	int nr_caps, nr_realms;
	struct ceph_pagelist *pagelist;
	unsigned msg_version;
	bool allow_multi;
};

static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head);
static void ceph_cap_release_work(struct work_struct *work);
static void ceph_cap_reclaim_work(struct work_struct *work);

static const struct ceph_connection_operations mds_con_ops;


/*
 * mds reply parsing
 */
static int parse_reply_info_quota(void **p, void *end,
				  struct ceph_mds_reply_info_in *info)
{
	u8 struct_v, struct_compat;
	u32 struct_len;

	ceph_decode_8_safe(p, end, struct_v, bad);
	ceph_decode_8_safe(p, end, struct_compat, bad);
	/* struct_v is expected to be >= 1. we only
	 * understand encoding with struct_compat == 1. */
	if (!struct_v || struct_compat != 1)
		goto bad;
	ceph_decode_32_safe(p, end, struct_len, bad);
	ceph_decode_need(p, end, struct_len, bad);
	end = *p + struct_len;
	ceph_decode_64_safe(p, end, info->max_bytes, bad);
	ceph_decode_64_safe(p, end, info->max_files, bad);
	*p = end;
	return 0;
bad:
	return -EIO;
}

/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
			       struct ceph_mds_reply_info_in *info,
			       u64 features)
{
	int err = 0;
	u8 struct_v = 0;

	if (features == (u64)-1) {
		u32 struct_len;
		u8 struct_compat;
		ceph_decode_8_safe(p, end, struct_v, bad);
		ceph_decode_8_safe(p, end, struct_compat, bad);
		/* struct_v is expected to be >= 1. we only understand
		 * encoding with struct_compat == 1. */
		if (!struct_v || struct_compat != 1)
			goto bad;
		ceph_decode_32_safe(p, end, struct_len, bad);
		ceph_decode_need(p, end, struct_len, bad);
		end = *p + struct_len;
	}

	ceph_decode_need(p, end, sizeof(struct ceph_mds_reply_inode), bad);
	info->in = *p;
	*p += sizeof(struct ceph_mds_reply_inode) +
		sizeof(*info->in->fragtree.splits) *
		le32_to_cpu(info->in->fragtree.nsplits);

	ceph_decode_32_safe(p, end, info->symlink_len, bad);
	ceph_decode_need(p, end, info->symlink_len, bad);
	info->symlink = *p;
	*p += info->symlink_len;

	ceph_decode_copy_safe(p, end, &info->dir_layout,
			      sizeof(info->dir_layout), bad);
	ceph_decode_32_safe(p, end, info->xattr_len, bad);
	ceph_decode_need(p, end, info->xattr_len, bad);
	info->xattr_data = *p;
	*p += info->xattr_len;

	if (features == (u64)-1) {
		/* inline data */
		ceph_decode_64_safe(p, end, info->inline_version, bad);
		ceph_decode_32_safe(p, end, info->inline_len, bad);
		ceph_decode_need(p, end, info->inline_len, bad);
		info->inline_data = *p;
		*p += info->inline_len;
		/* quota */
		err = parse_reply_info_quota(p, end, info);
		if (err < 0)
			goto out_bad;
		/* pool namespace */
		ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
		if (info->pool_ns_len > 0) {
			ceph_decode_need(p, end, info->pool_ns_len, bad);
			info->pool_ns_data = *p;
			*p += info->pool_ns_len;
		}

		/* btime */
		ceph_decode_need(p, end, sizeof(info->btime), bad);
		ceph_decode_copy(p, &info->btime, sizeof(info->btime));

		/* change attribute */
		ceph_decode_64_safe(p, end, info->change_attr, bad);

		/* dir pin */
		if (struct_v >= 2) {
			ceph_decode_32_safe(p, end, info->dir_pin, bad);
		} else {
			info->dir_pin = -ENODATA;
		}

		/* snapshot birth time, remains zero for v<=2 */
		if (struct_v >= 3) {
			ceph_decode_need(p, end, sizeof(info->snap_btime), bad);
			ceph_decode_copy(p, &info->snap_btime,
					 sizeof(info->snap_btime));
		} else {
			memset(&info->snap_btime, 0, sizeof(info->snap_btime));
		}

		/* snapshot count, remains zero for v<=3 */
		if (struct_v >= 4) {
			ceph_decode_64_safe(p, end, info->rsnaps, bad);
		} else {
			info->rsnaps = 0;
		}

		*p = end;
	} else {
		if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
			ceph_decode_64_safe(p, end, info->inline_version, bad);
			ceph_decode_32_safe(p, end, info->inline_len, bad);
			ceph_decode_need(p, end, info->inline_len, bad);
			info->inline_data = *p;
			*p += info->inline_len;
		} else
			info->inline_version = CEPH_INLINE_NONE;

		if (features & CEPH_FEATURE_MDS_QUOTA) {
			err = parse_reply_info_quota(p, end, info);
			if (err < 0)
				goto out_bad;
		} else {
			info->max_bytes = 0;
			info->max_files = 0;
		}

		info->pool_ns_len = 0;
		info->pool_ns_data = NULL;
		if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
			ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
			if (info->pool_ns_len > 0) {
				ceph_decode_need(p, end, info->pool_ns_len, bad);
				info->pool_ns_data = *p;
				*p += info->pool_ns_len;
			}
		}

		if (features & CEPH_FEATURE_FS_BTIME) {
			ceph_decode_need(p, end, sizeof(info->btime), bad);
			ceph_decode_copy(p, &info->btime, sizeof(info->btime));
			ceph_decode_64_safe(p, end, info->change_attr, bad);
		}

		info->dir_pin = -ENODATA;
		/* info->snap_btime and info->rsnaps remain zero */
	}
	return 0;
bad:
	err = -EIO;
out_bad:
	return err;
}

static int parse_reply_info_dir(void **p, void *end,
				struct ceph_mds_reply_dirfrag **dirfrag,
				u64 features)
{
	if (features == (u64)-1) {
		u8 struct_v, struct_compat;
		u32 struct_len;
		ceph_decode_8_safe(p, end, struct_v, bad);
		ceph_decode_8_safe(p, end, struct_compat, bad);
		/* struct_v is expected to be >= 1. we only understand
		 * encoding whose struct_compat == 1. */
		if (!struct_v || struct_compat != 1)
			goto bad;
		ceph_decode_32_safe(p, end, struct_len, bad);
		ceph_decode_need(p, end, struct_len, bad);
		end = *p + struct_len;
	}

	ceph_decode_need(p, end, sizeof(**dirfrag), bad);
	*dirfrag = *p;
	*p += sizeof(**dirfrag) + sizeof(u32) * le32_to_cpu((*dirfrag)->ndist);
	if (unlikely(*p > end))
		goto bad;
	if (features == (u64)-1)
		*p = end;
	return 0;
bad:
	return -EIO;
}

static int parse_reply_info_lease(void **p, void *end,
				  struct ceph_mds_reply_lease **lease,
				  u64 features)
{
	if (features == (u64)-1) {
		u8 struct_v, struct_compat;
		u32 struct_len;
		ceph_decode_8_safe(p, end, struct_v, bad);
		ceph_decode_8_safe(p, end, struct_compat, bad);
		/* struct_v is expected to be >= 1. we only understand
		 * encoding whose struct_compat == 1. */
		if (!struct_v || struct_compat != 1)
			goto bad;
		ceph_decode_32_safe(p, end, struct_len, bad);
		ceph_decode_need(p, end, struct_len, bad);
		end = *p + struct_len;
	}

	ceph_decode_need(p, end, sizeof(**lease), bad);
	*lease = *p;
	*p += sizeof(**lease);
	if (features == (u64)-1)
		*p = end;
	return 0;
bad:
	return -EIO;
}

/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	int err;

	if (info->head->is_dentry) {
		err = parse_reply_info_in(p, end, &info->diri, features);
		if (err < 0)
			goto out_bad;

		err = parse_reply_info_dir(p, end, &info->dirfrag, features);
		if (err < 0)
			goto out_bad;

		ceph_decode_32_safe(p, end, info->dname_len, bad);
		ceph_decode_need(p, end, info->dname_len, bad);
		info->dname = *p;
		*p += info->dname_len;

		err = parse_reply_info_lease(p, end, &info->dlease, features);
		if (err < 0)
			goto out_bad;
	}

	if (info->head->is_target) {
		err = parse_reply_info_in(p, end, &info->targeti, features);
		if (err < 0)
			goto out_bad;
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing mds trace %d\n", err);
	return err;
}

/*
 * parse readdir results
 */
static int parse_reply_info_readdir(void **p, void *end,
				struct ceph_mds_reply_info_parsed *info,
				u64 features)
{
	u32 num, i = 0;
	int err;

	err = parse_reply_info_dir(p, end, &info->dir_dir, features);
	if (err < 0)
		goto out_bad;

	ceph_decode_need(p, end, sizeof(num) + 2, bad);
	num = ceph_decode_32(p);
	{
		u16 flags = ceph_decode_16(p);
		info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
		info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
		info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
		info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH);
	}
	if (num == 0)
		goto done;

	BUG_ON(!info->dir_entries);
	if ((unsigned long)(info->dir_entries + num) >
	    (unsigned long)info->dir_entries + info->dir_buf_size) {
		pr_err("dir contents are larger than expected\n");
		WARN_ON(1);
		goto bad;
	}

	info->dir_nr = num;
	while (num) {
		struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
		/* dentry */
		ceph_decode_32_safe(p, end, rde->name_len, bad);
		ceph_decode_need(p, end, rde->name_len, bad);
		rde->name = *p;
		*p += rde->name_len;
		dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);

		/* dentry lease */
		err = parse_reply_info_lease(p, end, &rde->lease, features);
		if (err)
			goto out_bad;
		/* inode */
		err = parse_reply_info_in(p, end, &rde->inode, features);
		if (err < 0)
			goto out_bad;
		/* ceph_readdir_prepopulate() will update it */
		rde->offset = 0;
		i++;
		num--;
	}

done:
	/* Skip over any unrecognized fields */
	*p = end;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing dir contents %d\n", err);
	return err;
}

/*
 * parse fcntl F_GETLK results
 */
static int parse_reply_info_filelock(void **p, void *end,
				     struct ceph_mds_reply_info_parsed *info,
				     u64 features)
{
	if (*p + sizeof(*info->filelock_reply) > end)
		goto bad;

	info->filelock_reply = *p;

	/* Skip over any unrecognized fields */
	*p = end;
	return 0;
bad:
	return -EIO;
}


#if BITS_PER_LONG == 64

#define DELEGATED_INO_AVAILABLE		xa_mk_value(1)

static int ceph_parse_deleg_inos(void **p, void *end,
				 struct ceph_mds_session *s)
{
	u32 sets;

	ceph_decode_32_safe(p, end, sets, bad);
	dout("got %u sets of delegated inodes\n", sets);
	while (sets--) {
		u64 start, len, ino;

		ceph_decode_64_safe(p, end, start, bad);
		ceph_decode_64_safe(p, end, len, bad);

		/* Don't accept a delegation of system inodes */
		if (start < CEPH_INO_SYSTEM_BASE) {
			pr_warn_ratelimited("ceph: ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n",
					start, len);
			continue;
		}
		while (len--) {
			int err = xa_insert(&s->s_delegated_inos, ino = start++,
					    DELEGATED_INO_AVAILABLE,
					    GFP_KERNEL);
			if (!err) {
				dout("added delegated inode 0x%llx\n",
				     start - 1);
			} else if (err == -EBUSY) {
				pr_warn("ceph: MDS delegated inode 0x%llx more than once.\n",
					start - 1);
			} else {
				return err;
			}
		}
	}
	return 0;
bad:
	return -EIO;
}

u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
{
	unsigned long ino;
	void *val;

	xa_for_each(&s->s_delegated_inos, ino, val) {
		val = xa_erase(&s->s_delegated_inos, ino);
		if (val == DELEGATED_INO_AVAILABLE)
			return ino;
	}
	return 0;
}

int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
{
	return xa_insert(&s->s_delegated_inos, ino, DELEGATED_INO_AVAILABLE,
			 GFP_KERNEL);
}
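
/*
 * Usage sketch (the consumers live elsewhere, e.g. the async create
 * path): pull an ino with ceph_get_deleg_ino() before submitting an
 * async create, and hand it back with ceph_restore_deleg_ino() if the
 * request ends up not being submitted or fails.
 */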
#else /* BITS_PER_LONG == 64 */
/*
 * FIXME: xarrays can't handle 64-bit indexes on a 32-bit arch. For now, just
 * ignore delegated_inos on 32 bit arch. Maybe eventually add xarrays for top
 * and bottom words?
 */
static int ceph_parse_deleg_inos(void **p, void *end,
				 struct ceph_mds_session *s)
{
	u32 sets;

	ceph_decode_32_safe(p, end, sets, bad);
	if (sets)
		ceph_decode_skip_n(p, end, sets * 2 * sizeof(__le64), bad);
	return 0;
bad:
	return -EIO;
}

u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
{
	return 0;
}

int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
{
	return 0;
}
#endif /* BITS_PER_LONG == 64 */

/*
 * parse create results
 */
static int parse_reply_info_create(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features, struct ceph_mds_session *s)
{
	int ret;

	if (features == (u64)-1 ||
	    (features & CEPH_FEATURE_REPLY_CREATE_INODE)) {
		if (*p == end) {
			/* Malformed reply? */
			info->has_create_ino = false;
		} else if (test_bit(CEPHFS_FEATURE_DELEG_INO, &s->s_features)) {
			info->has_create_ino = true;
			/* struct_v, struct_compat, and len */
			ceph_decode_skip_n(p, end, 2 + sizeof(u32), bad);
			ceph_decode_64_safe(p, end, info->ino, bad);
			ret = ceph_parse_deleg_inos(p, end, s);
			if (ret)
				return ret;
		} else {
			/* legacy */
			ceph_decode_64_safe(p, end, info->ino, bad);
			info->has_create_ino = true;
		}
	} else {
		if (*p != end)
			goto bad;
	}

	/* Skip over any unrecognized fields */
	*p = end;
	return 0;
bad:
	return -EIO;
}

/*
 * parse extra results
 */
static int parse_reply_info_extra(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features, struct ceph_mds_session *s)
{
	u32 op = le32_to_cpu(info->head->op);

	if (op == CEPH_MDS_OP_GETFILELOCK)
		return parse_reply_info_filelock(p, end, info, features);
	else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
		return parse_reply_info_readdir(p, end, info, features);
	else if (op == CEPH_MDS_OP_CREATE)
		return parse_reply_info_create(p, end, info, features, s);
	else
		return -EIO;
}

/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg,
			    struct ceph_mds_reply_info_parsed *info,
			    u64 features)
{
	void *p, *end;
	u32 len;
	int err;

	info->head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

	/* trace */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_trace(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* extra */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_extra(&p, p+len, info, features, s);
		if (err < 0)
			goto out_bad;
	}

	/* snap blob */
	ceph_decode_32_safe(&p, end, len, bad);
	info->snapblob_len = len;
	info->snapblob = p;
	p += len;

	if (p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("mds parse_reply err %d\n", err);
	return err;
}
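
/*
 * To summarize, the reply body decoded above is three length-prefixed
 * blobs following the ceph_mds_reply_head:
 *
 *	u32 trace_len    + trace bytes    (dentry/inode metadata)
 *	u32 extra_len    + extra bytes    (op-specific, see
 *					   parse_reply_info_extra)
 *	u32 snapblob_len + snapblob bytes (snap context)
 */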

static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
	if (!info->dir_entries)
		return;
	free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
}


/*
 * sessions
 */
const char *ceph_session_state_name(int s)
{
	switch (s) {
	case CEPH_MDS_SESSION_NEW: return "new";
	case CEPH_MDS_SESSION_OPENING: return "opening";
	case CEPH_MDS_SESSION_OPEN: return "open";
	case CEPH_MDS_SESSION_HUNG: return "hung";
	case CEPH_MDS_SESSION_CLOSING: return "closing";
	case CEPH_MDS_SESSION_CLOSED: return "closed";
	case CEPH_MDS_SESSION_RESTARTING: return "restarting";
	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
	case CEPH_MDS_SESSION_REJECTED: return "rejected";
	default: return "???";
	}
}

struct ceph_mds_session *ceph_get_mds_session(struct ceph_mds_session *s)
{
	if (refcount_inc_not_zero(&s->s_ref))
		return s;
	return NULL;
}

void ceph_put_mds_session(struct ceph_mds_session *s)
{
	if (IS_ERR_OR_NULL(s))
		return;

	if (refcount_dec_and_test(&s->s_ref)) {
		if (s->s_auth.authorizer)
			ceph_auth_destroy_authorizer(s->s_auth.authorizer);
		WARN_ON(mutex_is_locked(&s->s_mutex));
		xa_destroy(&s->s_delegated_inos);
		kfree(s);
	}
}

/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
						   int mds)
{
	if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
		return NULL;
	return ceph_get_mds_session(mdsc->sessions[mds]);
}

static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
	if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
		return false;
	else
		return true;
}

static int __verify_registered_session(struct ceph_mds_client *mdsc,
				       struct ceph_mds_session *s)
{
	if (s->s_mds >= mdsc->max_sessions ||
	    mdsc->sessions[s->s_mds] != s)
		return -ENOENT;
	return 0;
}

/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
						 int mds)
{
	struct ceph_mds_session *s;

	if (mds >= mdsc->mdsmap->possible_max_rank)
		return ERR_PTR(-EINVAL);

	s = kzalloc(sizeof(*s), GFP_NOFS);
	if (!s)
		return ERR_PTR(-ENOMEM);

	if (mds >= mdsc->max_sessions) {
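		/*
		 * Grow the array to the next power of two that can
		 * hold this rank, e.g. mds 5 -> 8 slots, mds 8 -> 16.
		 */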
		int newmax = 1 << get_count_order(mds + 1);
		struct ceph_mds_session **sa;

		dout("%s: realloc to %d\n", __func__, newmax);
		sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
		if (!sa)
			goto fail_realloc;
		if (mdsc->sessions) {
			memcpy(sa, mdsc->sessions,
			       mdsc->max_sessions * sizeof(void *));
			kfree(mdsc->sessions);
		}
		mdsc->sessions = sa;
		mdsc->max_sessions = newmax;
	}

	dout("%s: mds%d\n", __func__, mds);
	s->s_mdsc = mdsc;
	s->s_mds = mds;
	s->s_state = CEPH_MDS_SESSION_NEW;
	mutex_init(&s->s_mutex);

	ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);

	atomic_set(&s->s_cap_gen, 1);
	s->s_cap_ttl = jiffies - 1;

	spin_lock_init(&s->s_cap_lock);
	INIT_LIST_HEAD(&s->s_caps);
	refcount_set(&s->s_ref, 1);
	INIT_LIST_HEAD(&s->s_waiting);
	INIT_LIST_HEAD(&s->s_unsafe);
	xa_init(&s->s_delegated_inos);
	INIT_LIST_HEAD(&s->s_cap_releases);
	INIT_WORK(&s->s_cap_release_work, ceph_cap_release_work);

	INIT_LIST_HEAD(&s->s_cap_dirty);
	INIT_LIST_HEAD(&s->s_cap_flushing);

	mdsc->sessions[mds] = s;
	atomic_inc(&mdsc->num_sessions);
	refcount_inc(&s->s_ref);  /* one ref to sessions[], one to caller */

	ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	return s;

fail_realloc:
	kfree(s);
	return ERR_PTR(-ENOMEM);
}

/*
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
			       struct ceph_mds_session *s)
{
	dout("__unregister_session mds%d %p\n", s->s_mds, s);
	BUG_ON(mdsc->sessions[s->s_mds] != s);
	mdsc->sessions[s->s_mds] = NULL;
	ceph_con_close(&s->s_con);
	ceph_put_mds_session(s);
	atomic_dec(&mdsc->num_sessions);
}

/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
	if (req->r_session) {
		ceph_put_mds_session(req->r_session);
		req->r_session = NULL;
	}
}

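/*
 * Run a callback on every registered session.  mdsc->mutex is dropped
 * around the callback itself, so cb may sleep or take other locks; the
 * session is pinned by a reference across the unlocked window.
 */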
void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc,
				void (*cb)(struct ceph_mds_session *),
				bool check_state)
{
	int mds;

	mutex_lock(&mdsc->mutex);
	for (mds = 0; mds < mdsc->max_sessions; ++mds) {
		struct ceph_mds_session *s;

		s = __ceph_lookup_mds_session(mdsc, mds);
		if (!s)
			continue;

		if (check_state && !check_session_state(s)) {
			ceph_put_mds_session(s);
			continue;
		}

		mutex_unlock(&mdsc->mutex);
		cb(s);
		ceph_put_mds_session(s);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);
}

void ceph_mdsc_release_request(struct kref *kref)
{
	struct ceph_mds_request *req = container_of(kref,
						    struct ceph_mds_request,
						    r_kref);
	ceph_mdsc_release_dir_caps_no_check(req);
	destroy_reply_info(&req->r_reply_info);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_inode) {
		ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
		iput(req->r_inode);
	}
	if (req->r_parent) {
		ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
		iput(req->r_parent);
	}
	iput(req->r_target_inode);
	if (req->r_dentry)
		dput(req->r_dentry);
	if (req->r_old_dentry)
		dput(req->r_old_dentry);
	if (req->r_old_dentry_dir) {
		/*
		 * track (and drop pins for) r_old_dentry_dir
		 * separately, since r_old_dentry's d_parent may have
		 * changed between the dir mutex being dropped and
		 * this request being freed.
		 */
		ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);
		iput(req->r_old_dentry_dir);
	}
	kfree(req->r_path1);
	kfree(req->r_path2);
	put_cred(req->r_cred);
	if (req->r_pagelist)
		ceph_pagelist_release(req->r_pagelist);
	put_request_session(req);
	ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
	WARN_ON_ONCE(!list_empty(&req->r_wait));
	kmem_cache_free(ceph_mds_request_cachep, req);
}

DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)

/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *
lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
{
	struct ceph_mds_request *req;

	req = lookup_request(&mdsc->request_tree, tid);
	if (req)
		ceph_mdsc_get_request(req);

	return req;
}

/*
 * Register an in-flight request, and assign a tid.  Link to the
 * directory we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{
	int ret = 0;

	req->r_tid = ++mdsc->last_tid;
	if (req->r_num_caps) {
		ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
					req->r_num_caps);
		if (ret < 0) {
			pr_err("__register_request %p "
			       "failed to reserve caps: %d\n", req, ret);
			/* set req->r_err to fail early from __do_request */
			req->r_err = ret;
			return;
		}
	}
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);
	insert_request(&mdsc->request_tree, req);

	req->r_cred = get_current_cred();

	if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
		mdsc->oldest_tid = req->r_tid;

	if (dir) {
		struct ceph_inode_info *ci = ceph_inode(dir);

		ihold(dir);
		req->r_unsafe_dir = dir;
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
		spin_unlock(&ci->i_unsafe_lock);
	}
}

static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);

	/* Never leave an unregistered request on an unsafe list! */
	list_del_init(&req->r_unsafe_item);

	if (req->r_tid == mdsc->oldest_tid) {
		struct rb_node *p = rb_next(&req->r_node);
		mdsc->oldest_tid = 0;
		while (p) {
			struct ceph_mds_request *next_req =
				rb_entry(p, struct ceph_mds_request, r_node);
			if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
				mdsc->oldest_tid = next_req->r_tid;
				break;
			}
			p = rb_next(p);
		}
	}

	erase_request(&mdsc->request_tree, req);

	if (req->r_unsafe_dir) {
		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_dir_item);
		spin_unlock(&ci->i_unsafe_lock);
	}
	if (req->r_target_inode &&
	    test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_target_item);
		spin_unlock(&ci->i_unsafe_lock);
	}

	if (req->r_unsafe_dir) {
		iput(req->r_unsafe_dir);
		req->r_unsafe_dir = NULL;
	}

	complete_all(&req->r_safe_completion);

	ceph_mdsc_put_request(req);
}

/*
 * Walk back up the dentry tree until we hit a dentry representing a
 * non-snapshot inode. We do this using the rcu_read_lock (which must be held
 * when calling this) to ensure that the objects won't disappear while we're
 * working with them. Once we hit a candidate dentry, we attempt to take a
 * reference to it, and return that as the result.
 */
static struct inode *get_nonsnap_parent(struct dentry *dentry)
{
	struct inode *inode = NULL;

	while (dentry && !IS_ROOT(dentry)) {
		inode = d_inode_rcu(dentry);
		if (!inode || ceph_snap(inode) == CEPH_NOSNAP)
			break;
		dentry = dentry->d_parent;
	}
	if (inode)
		inode = igrab(inode);
	return inode;
}
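
/*
 * Note that igrab() returns NULL if the inode is being freed, so a
 * NULL result here does not necessarily mean no candidate dentry was
 * found.
 */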

/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static int __choose_mds(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req,
			bool *random)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	int mode = req->r_direct_mode;
	int mds = -1;
	u32 hash = req->r_direct_hash;
	bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);

	if (random)
		*random = false;

	/*
	 * is there a specific mds we should try?  ignore hint if we have
	 * no session and the mds is not up (active or recovering).
	 */
	if (req->r_resend_mds >= 0 &&
	    (__have_session(mdsc, req->r_resend_mds) ||
	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
		dout("%s using resend_mds mds%d\n", __func__,
		     req->r_resend_mds);
		return req->r_resend_mds;
	}

	if (mode == USE_RANDOM_MDS)
		goto random;

	inode = NULL;
	if (req->r_inode) {
		if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) {
			inode = req->r_inode;
			ihold(inode);
		} else {
			/* req->r_dentry is non-null for LSSNAP request */
			rcu_read_lock();
			inode = get_nonsnap_parent(req->r_dentry);
			rcu_read_unlock();
			dout("%s using snapdir's parent %p\n", __func__, inode);
		}
	} else if (req->r_dentry) {
		/* ignore race with rename; old or new d_parent is okay */
		struct dentry *parent;
		struct inode *dir;

		rcu_read_lock();
		parent = READ_ONCE(req->r_dentry->d_parent);
		dir = req->r_parent ? : d_inode_rcu(parent);

		if (!dir || dir->i_sb != mdsc->fsc->sb) {
			/* not this fs or parent went negative */
			inode = d_inode(req->r_dentry);
			if (inode)
				ihold(inode);
		} else if (ceph_snap(dir) != CEPH_NOSNAP) {
			/* direct snapped/virtual snapdir requests
			 * based on parent dir inode */
			inode = get_nonsnap_parent(parent);
			dout("%s using nonsnap parent %p\n", __func__, inode);
		} else {
			/* dentry target */
			inode = d_inode(req->r_dentry);
			if (!inode || mode == USE_AUTH_MDS) {
				/* dir + name */
				inode = igrab(dir);
				hash = ceph_dentry_hash(dir, req->r_dentry);
				is_hash = true;
			} else {
				ihold(inode);
			}
		}
		rcu_read_unlock();
	}

	dout("%s %p is_hash=%d (0x%x) mode %d\n", __func__, inode, (int)is_hash,
	     hash, mode);
	if (!inode)
		goto random;
	ci = ceph_inode(inode);

	if (is_hash && S_ISDIR(inode->i_mode)) {
		struct ceph_inode_frag frag;
		int found;

		ceph_choose_frag(ci, hash, &frag, &found);
		if (found) {
			if (mode == USE_ANY_MDS && frag.ndist > 0) {
				u8 r;

				/* choose a random replica */
				get_random_bytes(&r, 1);
				r %= frag.ndist;
				mds = frag.dist[r];
				dout("%s %p %llx.%llx frag %u mds%d (%d/%d)\n",
				     __func__, inode, ceph_vinop(inode),
				     frag.frag, mds, (int)r, frag.ndist);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE &&
				    !ceph_mdsmap_is_laggy(mdsc->mdsmap, mds))
					goto out;
			}

			/* since this file/dir wasn't known to be
			 * replicated, look for the authoritative mds. */
			if (frag.mds >= 0) {
				/* choose auth mds */
				mds = frag.mds;
				dout("%s %p %llx.%llx frag %u mds%d (auth)\n",
				     __func__, inode, ceph_vinop(inode),
				     frag.frag, mds);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE) {
					if (!ceph_mdsmap_is_laggy(mdsc->mdsmap,
								  mds))
						goto out;
				}
			}
			mode = USE_AUTH_MDS;
		}
	}

	spin_lock(&ci->i_ceph_lock);
	cap = NULL;
	if (mode == USE_AUTH_MDS)
		cap = ci->i_auth_cap;
	if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
		cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
	if (!cap) {
		spin_unlock(&ci->i_ceph_lock);
		iput(inode);
		goto random;
	}
	mds = cap->session->s_mds;
	dout("%s %p %llx.%llx mds%d (%scap %p)\n", __func__,
	     inode, ceph_vinop(inode), mds,
	     cap == ci->i_auth_cap ? "auth " : "", cap);
	spin_unlock(&ci->i_ceph_lock);
out:
	iput(inode);
	return mds;

random:
	if (random)
		*random = true;

	mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
	dout("%s chose random mds%d\n", __func__, mds);
	return mds;
}


/*
 * session messages
 */
struct ceph_msg *ceph_create_session_msg(u32 op, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
			   false);
	if (!msg) {
		pr_err("ENOMEM creating session %s msg\n",
		       ceph_session_op_name(op));
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(op);
	h->seq = cpu_to_le64(seq);

	return msg;
}

static const unsigned char feature_bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
#define FEATURE_BYTES(c) (DIV_ROUND_UP((size_t)feature_bits[c - 1] + 1, 64) * 8)
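/*
 * FEATURE_BYTES() rounds the feature bitmap up to whole 64-bit words:
 * e.g. if the highest supported bit is 17, this yields
 * DIV_ROUND_UP(17 + 1, 64) * 8 = 8 bytes.
 */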
static int encode_supported_features(void **p, void *end)
{
	static const size_t count = ARRAY_SIZE(feature_bits);

	if (count > 0) {
		size_t i;
		size_t size = FEATURE_BYTES(count);

		if (WARN_ON_ONCE(*p + 4 + size > end))
			return -ERANGE;

		ceph_encode_32(p, size);
		memset(*p, 0, size);
		for (i = 0; i < count; i++)
			((unsigned char*)(*p))[i / 8] |= BIT(feature_bits[i] % 8);
		*p += size;
	} else {
		if (WARN_ON_ONCE(*p + 4 > end))
			return -ERANGE;

		ceph_encode_32(p, 0);
	}

	return 0;
}

static const unsigned char metric_bits[] = CEPHFS_METRIC_SPEC_CLIENT_SUPPORTED;
#define METRIC_BYTES(cnt) (DIV_ROUND_UP((size_t)metric_bits[cnt - 1] + 1, 64) * 8)
static int encode_metric_spec(void **p, void *end)
{
	static const size_t count = ARRAY_SIZE(metric_bits);

	/* header */
	if (WARN_ON_ONCE(*p + 2 > end))
		return -ERANGE;

	ceph_encode_8(p, 1); /* version */
	ceph_encode_8(p, 1); /* compat */

	if (count > 0) {
		size_t i;
		size_t size = METRIC_BYTES(count);

		if (WARN_ON_ONCE(*p + 4 + 4 + size > end))
			return -ERANGE;

		/* metric spec info length */
		ceph_encode_32(p, 4 + size);

		/* metric spec */
		ceph_encode_32(p, size);
		memset(*p, 0, size);
		for (i = 0; i < count; i++)
			((unsigned char *)(*p))[i / 8] |= BIT(metric_bits[i] % 8);
		*p += size;
	} else {
		if (WARN_ON_ONCE(*p + 4 + 4 > end))
			return -ERANGE;

		/* metric spec info length */
		ceph_encode_32(p, 4);
		/* metric spec */
		ceph_encode_32(p, 0);
	}

	return 0;
}
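
/*
 * The resulting wire format for the metric spec encoded above:
 *
 *	u8  version (1)
 *	u8  compat (1)
 *	u32 metric spec info length (4 + bitmap size)
 *	u32 bitmap size
 *	    bitmap bytes
 */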

/*
 * session message, specialization for CEPH_SESSION_REQUEST_OPEN
 * to include additional client metadata fields.
 */
static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;
	int i;
	int extra_bytes = 0;
	int metadata_key_count = 0;
	struct ceph_options *opt = mdsc->fsc->client->options;
	struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
	size_t size, count;
	void *p, *end;
	int ret;

	const char* metadata[][2] = {
		{"hostname", mdsc->nodename},
		{"kernel_version", init_utsname()->release},
		{"entity_id", opt->name ? : ""},
		{"root", fsopt->server_path ? : "/"},
		{NULL, NULL}
	};
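
	/*
	 * Each map entry is encoded as a u32 key length + key bytes,
	 * then a u32 value length + value bytes; hence the 8 bytes of
	 * length prefixes per entry accounted for below.
	 */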

	/* Calculate serialized length of metadata */
	extra_bytes = 4;  /* map length */
	for (i = 0; metadata[i][0]; ++i) {
		extra_bytes += 8 + strlen(metadata[i][0]) +
			strlen(metadata[i][1]);
		metadata_key_count++;
	}

	/* supported feature */
	size = 0;
	count = ARRAY_SIZE(feature_bits);
	if (count > 0)
		size = FEATURE_BYTES(count);
	extra_bytes += 4 + size;

	/* metric spec */
	size = 0;
	count = ARRAY_SIZE(metric_bits);
	if (count > 0)
		size = METRIC_BYTES(count);
	extra_bytes += 2 + 4 + 4 + size;

	/* Allocate the message */
	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
			   GFP_NOFS, false);
	if (!msg) {
		pr_err("ENOMEM creating session open msg\n");
		return ERR_PTR(-ENOMEM);
	}
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	h = p;
	h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
	h->seq = cpu_to_le64(seq);

	/*
	 * Serialize client metadata into waiting buffer space, using
	 * the format that userspace expects for map<string, string>
	 *
	 * ClientSession messages with metadata are v4
	 */
	msg->hdr.version = cpu_to_le16(4);
	msg->hdr.compat_version = cpu_to_le16(1);

	/* The write pointer, following the session_head structure */
	p += sizeof(*h);

	/* Number of entries in the map */
	ceph_encode_32(&p, metadata_key_count);

	/* Two length-prefixed strings for each entry in the map */
	for (i = 0; metadata[i][0]; ++i) {
		size_t const key_len = strlen(metadata[i][0]);
		size_t const val_len = strlen(metadata[i][1]);

		ceph_encode_32(&p, key_len);
		memcpy(p, metadata[i][0], key_len);
		p += key_len;
		ceph_encode_32(&p, val_len);
		memcpy(p, metadata[i][1], val_len);
		p += val_len;
	}

	ret = encode_supported_features(&p, end);
	if (ret) {
		pr_err("encode_supported_features failed!\n");
		ceph_msg_put(msg);
		return ERR_PTR(ret);
	}

	ret = encode_metric_spec(&p, end);
	if (ret) {
		pr_err("encode_metric_spec failed!\n");
		ceph_msg_put(msg);
		return ERR_PTR(ret);
	}

	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	return msg;
}

/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int mstate;
	int mds = session->s_mds;

	/* wait for mds to go active? */
	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
	dout("open_session to mds%d (%s)\n", mds,
	     ceph_mds_state_name(mstate));
	session->s_state = CEPH_MDS_SESSION_OPENING;
	session->s_renew_requested = jiffies;

	/* send connect message */
	msg = create_session_open_msg(mdsc, session->s_seq);
	if (IS_ERR(msg))
		return PTR_ERR(msg);
	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * open sessions for any export targets for the given mds
 *
 * called under mdsc->mutex
 */
static struct ceph_mds_session *
__open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;
	int ret;

	session = __ceph_lookup_mds_session(mdsc, target);
	if (!session) {
		session = register_session(mdsc, target);
		if (IS_ERR(session))
			return session;
	}
	if (session->s_state == CEPH_MDS_SESSION_NEW ||
	    session->s_state == CEPH_MDS_SESSION_CLOSING) {
		ret = __open_session(mdsc, session);
		if (ret)
			return ERR_PTR(ret);
	}

	return session;
}

struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	dout("open_export_target_session to mds%d\n", target);

	mutex_lock(&mdsc->mutex);
	session = __open_export_target_session(mdsc, target);
	mutex_unlock(&mdsc->mutex);

	return session;
}

static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
					  struct ceph_mds_session *session)
{
	struct ceph_mds_info *mi;
	struct ceph_mds_session *ts;
	int i, mds = session->s_mds;

	if (mds >= mdsc->mdsmap->possible_max_rank)
		return;

	mi = &mdsc->mdsmap->m_info[mds];
	dout("open_export_target_sessions for mds%d (%d targets)\n",
	     session->s_mds, mi->num_export_targets);

	for (i = 0; i < mi->num_export_targets; i++) {
		ts = __open_export_target_session(mdsc, mi->export_targets[i]);
		ceph_put_mds_session(ts);
	}
}

void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
					   struct ceph_mds_session *session)
{
	mutex_lock(&mdsc->mutex);
	__open_export_target_sessions(mdsc, session);
	mutex_unlock(&mdsc->mutex);
}

/*
 * session caps
 */

static void detach_cap_releases(struct ceph_mds_session *session,
				struct list_head *target)
{
	lockdep_assert_held(&session->s_cap_lock);

	list_splice_init(&session->s_cap_releases, target);
	session->s_num_cap_releases = 0;
	dout("dispose_cap_releases mds%d\n", session->s_mds);
}

static void dispose_cap_releases(struct ceph_mds_client *mdsc,
				 struct list_head *dispose)
{
	while (!list_empty(dispose)) {
		struct ceph_cap *cap;
		/* zero out the in-progress message */
		cap = list_first_entry(dispose, struct ceph_cap, session_caps);
		list_del(&cap->session_caps);
		ceph_put_cap(mdsc, cap);
	}
}

static void cleanup_session_requests(struct ceph_mds_client *mdsc,
				     struct ceph_mds_session *session)
{
	struct ceph_mds_request *req;
	struct rb_node *p;

	dout("cleanup_session_requests mds%d\n", session->s_mds);
	mutex_lock(&mdsc->mutex);
	while (!list_empty(&session->s_unsafe)) {
		req = list_first_entry(&session->s_unsafe,
				       struct ceph_mds_request, r_unsafe_item);
		pr_warn_ratelimited(" dropping unsafe request %llu\n",
				    req->r_tid);
		if (req->r_target_inode)
			mapping_set_error(req->r_target_inode->i_mapping, -EIO);
		if (req->r_unsafe_dir)
			mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO);
		__unregister_request(mdsc, req);
	}
	/* zero r_attempts, so kick_requests() will re-send requests */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds)
			req->r_attempts = 0;
	}
	mutex_unlock(&mdsc->mutex);
}

/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
1528int ceph_iterate_session_caps(struct ceph_mds_session *session,
1529                              int (*cb)(struct inode *, struct ceph_cap *,
1530                                        void *), void *arg)
1531{
1532        struct list_head *p;
1533        struct ceph_cap *cap;
1534        struct inode *inode, *last_inode = NULL;
1535        struct ceph_cap *old_cap = NULL;
1536        int ret;
1537
1538        dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
1539        spin_lock(&session->s_cap_lock);
1540        p = session->s_caps.next;
1541        while (p != &session->s_caps) {
1542                cap = list_entry(p, struct ceph_cap, session_caps);
1543                inode = igrab(&cap->ci->vfs_inode);
1544                if (!inode) {
1545                        p = p->next;
1546                        continue;
1547                }
1548                session->s_cap_iterator = cap;
1549                spin_unlock(&session->s_cap_lock);
1550
1551                if (last_inode) {
1552                        iput(last_inode);
1553                        last_inode = NULL;
1554                }
1555                if (old_cap) {
1556                        ceph_put_cap(session->s_mdsc, old_cap);
1557                        old_cap = NULL;
1558                }
1559
1560                ret = cb(inode, cap, arg);
1561                last_inode = inode;
1562
1563                spin_lock(&session->s_cap_lock);
1564                p = p->next;
1565                if (!cap->ci) {
1566                        dout("iterate_session_caps  finishing cap %p removal\n",
1567                             cap);
1568                        BUG_ON(cap->session != session);
1569                        cap->session = NULL;
1570                        list_del_init(&cap->session_caps);
1571                        session->s_nr_caps--;
1572                        atomic64_dec(&session->s_mdsc->metric.total_caps);
1573                        if (cap->queue_release)
1574                                __ceph_queue_cap_release(session, cap);
1575                        else
1576                                old_cap = cap;  /* put_cap it w/o locks held */
1577                }
1578                if (ret < 0)
1579                        goto out;
1580        }
1581        ret = 0;
1582out:
1583        session->s_cap_iterator = NULL;
1584        spin_unlock(&session->s_cap_lock);
1585
1586        iput(last_inode);
1587        if (old_cap)
1588                ceph_put_cap(session->s_mdsc, old_cap);
1589
1590        return ret;
1591}
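
/*
 * Illustrative sketch (hypothetical helper, assuming only the callback
 * contract above): a minimal ceph_iterate_session_caps() callback.  The
 * iterator pins the inode and drops s_cap_lock around the call, so the
 * callback may sleep; returning a negative value stops the walk early.
 */
static int count_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
				 void *arg)
{
	int *count = arg;	/* caller-supplied accumulator */

	(*count)++;		/* a real callback would inspect @cap here */
	return 0;		/* >= 0 means keep iterating */
}
/* usage: int n = 0; ceph_iterate_session_caps(session, count_session_caps_cb, &n); */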
1592
1593static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode)
1594{
1595        struct ceph_inode_info *ci = ceph_inode(inode);
1596        struct ceph_cap_snap *capsnap;
1597        int capsnap_release = 0;
1598
1599        lockdep_assert_held(&ci->i_ceph_lock);
1600
1601        dout("removing capsnaps, ci is %p, inode is %p\n", ci, inode);
1602
1603        while (!list_empty(&ci->i_cap_snaps)) {
1604                capsnap = list_first_entry(&ci->i_cap_snaps,
1605                                           struct ceph_cap_snap, ci_item);
1606                __ceph_remove_capsnap(inode, capsnap, NULL, NULL);
1607                ceph_put_snap_context(capsnap->context);
1608                ceph_put_cap_snap(capsnap);
1609                capsnap_release++;
1610        }
1611        wake_up_all(&ci->i_cap_wq);
1612        wake_up_all(&mdsc->cap_flushing_wq);
1613        return capsnap_release;
1614}
1615
1616static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
1617                                  void *arg)
1618{
1619        struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
1620        struct ceph_mds_client *mdsc = fsc->mdsc;
1621        struct ceph_inode_info *ci = ceph_inode(inode);
1622        LIST_HEAD(to_remove);
1623        bool dirty_dropped = false;
1624        bool invalidate = false;
1625        int capsnap_release = 0;
1626
1627        dout("removing cap %p, ci is %p, inode is %p\n",
1628             cap, ci, &ci->vfs_inode);
1629        spin_lock(&ci->i_ceph_lock);
1630        __ceph_remove_cap(cap, false);
1631        if (!ci->i_auth_cap) {
1632                struct ceph_cap_flush *cf;
1633
1634                if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
1635                        if (inode->i_data.nrpages > 0)
1636                                invalidate = true;
1637                        if (ci->i_wrbuffer_ref > 0)
1638                                mapping_set_error(&inode->i_data, -EIO);
1639                }
1640
1641                while (!list_empty(&ci->i_cap_flush_list)) {
1642                        cf = list_first_entry(&ci->i_cap_flush_list,
1643                                              struct ceph_cap_flush, i_list);
1644                        list_move(&cf->i_list, &to_remove);
1645                }
1646
1647                spin_lock(&mdsc->cap_dirty_lock);
1648
1649                list_for_each_entry(cf, &to_remove, i_list)
1650                        list_del_init(&cf->g_list);
1651
1652                if (!list_empty(&ci->i_dirty_item)) {
1653                        pr_warn_ratelimited(
1654                                " dropping dirty %s state for %p %lld\n",
1655                                ceph_cap_string(ci->i_dirty_caps),
1656                                inode, ceph_ino(inode));
1657                        ci->i_dirty_caps = 0;
1658                        list_del_init(&ci->i_dirty_item);
1659                        dirty_dropped = true;
1660                }
1661                if (!list_empty(&ci->i_flushing_item)) {
1662                        pr_warn_ratelimited(
1663                                " dropping dirty+flushing %s state for %p %lld\n",
1664                                ceph_cap_string(ci->i_flushing_caps),
1665                                inode, ceph_ino(inode));
1666                        ci->i_flushing_caps = 0;
1667                        list_del_init(&ci->i_flushing_item);
1668                        mdsc->num_cap_flushing--;
1669                        dirty_dropped = true;
1670                }
1671                spin_unlock(&mdsc->cap_dirty_lock);
1672
1673                if (dirty_dropped) {
1674                        mapping_set_error(inode->i_mapping, -EIO);
1675
1676                        if (ci->i_wrbuffer_ref_head == 0 &&
1677                            ci->i_wr_ref == 0 &&
1678                            ci->i_dirty_caps == 0 &&
1679                            ci->i_flushing_caps == 0) {
1680                                ceph_put_snap_context(ci->i_head_snapc);
1681                                ci->i_head_snapc = NULL;
1682                        }
1683                }
1684
1685                if (atomic_read(&ci->i_filelock_ref) > 0) {
1686                        /* make further file lock syscalls return -EIO */
1687                        ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
1688                        pr_warn_ratelimited(" dropping file locks for %p %lld\n",
1689                                            inode, ceph_ino(inode));
1690                }
1691
1692                if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
1693                        list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
1694                        ci->i_prealloc_cap_flush = NULL;
1695                }
1696
1697                if (!list_empty(&ci->i_cap_snaps))
1698                        capsnap_release = remove_capsnaps(mdsc, inode);
1699        }
1700        spin_unlock(&ci->i_ceph_lock);
1701        while (!list_empty(&to_remove)) {
1702                struct ceph_cap_flush *cf;
1703                cf = list_first_entry(&to_remove,
1704                                      struct ceph_cap_flush, i_list);
1705                list_del_init(&cf->i_list);
1706                if (!cf->is_capsnap)
1707                        ceph_free_cap_flush(cf);
1708        }
1709
1710        wake_up_all(&ci->i_cap_wq);
1711        if (invalidate)
1712                ceph_queue_invalidate(inode);
1713        if (dirty_dropped)
1714                iput(inode);
1715        while (capsnap_release--)
1716                iput(inode);
1717        return 0;
1718}
1719
1720/*
1721 * caller must hold session s_mutex
1722 */
1723static void remove_session_caps(struct ceph_mds_session *session)
1724{
1725        struct ceph_fs_client *fsc = session->s_mdsc->fsc;
1726        struct super_block *sb = fsc->sb;
1727        LIST_HEAD(dispose);
1728
1729        dout("remove_session_caps on %p\n", session);
1730        ceph_iterate_session_caps(session, remove_session_caps_cb, fsc);
1731
1732        wake_up_all(&fsc->mdsc->cap_flushing_wq);
1733
1734        spin_lock(&session->s_cap_lock);
1735        if (session->s_nr_caps > 0) {
1736                struct inode *inode;
1737                struct ceph_cap *cap, *prev = NULL;
1738                struct ceph_vino vino;
1739                /*
1740                 * iterate_session_caps() skips inodes that are being
1741                 * deleted, we need to wait until deletions are complete.
1742                 * __wait_on_freeing_inode() is designed for the job,
1743                 * but it is not exported, so use the inode lookup function
1744                 * to access it.
1745                 */
1746                while (!list_empty(&session->s_caps)) {
1747                        cap = list_entry(session->s_caps.next,
1748                                         struct ceph_cap, session_caps);
1749                        if (cap == prev)
1750                                break;
1751                        prev = cap;
1752                        vino = cap->ci->i_vino;
1753                        spin_unlock(&session->s_cap_lock);
1754
1755                        inode = ceph_find_inode(sb, vino);
1756                        iput(inode);
1757
1758                        spin_lock(&session->s_cap_lock);
1759                }
1760        }
1761
1762        // detach any queued cap releases; s_cap_lock is dropped below
1763        detach_cap_releases(session, &dispose);
1764
1765        BUG_ON(session->s_nr_caps > 0);
1766        BUG_ON(!list_empty(&session->s_cap_flushing));
1767        spin_unlock(&session->s_cap_lock);
1768        dispose_cap_releases(session->s_mdsc, &dispose);
1769}
1770
1771enum {
1772        RECONNECT,
1773        RENEWCAPS,
1774        FORCE_RO,
1775};
1776
1777/*
1778 * wake up any threads waiting on this session's caps.  if a cap went
1779 * stale (wasn't renewed on the client reconnect), downgrade it to a bare PIN.
1780 *
1781 * caller must hold s_mutex.
1782 */
1783static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
1784                              void *arg)
1785{
1786        struct ceph_inode_info *ci = ceph_inode(inode);
1787        unsigned long ev = (unsigned long)arg;
1788
1789        if (ev == RECONNECT) {
1790                spin_lock(&ci->i_ceph_lock);
1791                ci->i_wanted_max_size = 0;
1792                ci->i_requested_max_size = 0;
1793                spin_unlock(&ci->i_ceph_lock);
1794        } else if (ev == RENEWCAPS) {
1795                if (cap->cap_gen < atomic_read(&cap->session->s_cap_gen)) {
1796                        /* mds did not re-issue stale cap */
1797                        spin_lock(&ci->i_ceph_lock);
1798                        cap->issued = cap->implemented = CEPH_CAP_PIN;
1799                        spin_unlock(&ci->i_ceph_lock);
1800                }
1801        } else if (ev == FORCE_RO) {
1802        }
1803        wake_up_all(&ci->i_cap_wq);
1804        return 0;
1805}
1806
1807static void wake_up_session_caps(struct ceph_mds_session *session, int ev)
1808{
1809        dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
1810        ceph_iterate_session_caps(session, wake_up_session_cb,
1811                                  (void *)(unsigned long)ev);
1812}
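
/*
 * Note: the event id rides through the iterator's void *arg by casting
 * via unsigned long -- e.g. RENEWCAPS goes in as
 * (void *)(unsigned long)RENEWCAPS and is recovered the same way in
 * wake_up_session_cb() -- so no allocation is needed for the argument.
 */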
1813
1814/*
1815 * Send periodic message to MDS renewing all currently held caps.  The
1816 * ack will reset the expiration for all caps from this session.
1817 *
1818 * caller holds s_mutex
1819 */
1820static int send_renew_caps(struct ceph_mds_client *mdsc,
1821                           struct ceph_mds_session *session)
1822{
1823        struct ceph_msg *msg;
1824        int state;
1825
1826        if (time_after_eq(jiffies, session->s_cap_ttl) &&
1827            time_after_eq(session->s_cap_ttl, session->s_renew_requested))
1828                pr_info("mds%d caps stale\n", session->s_mds);
1829        session->s_renew_requested = jiffies;
1830
1831        /* do not try to renew caps until a recovering mds has reconnected
1832         * with its clients. */
1833        state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
1834        if (state < CEPH_MDS_STATE_RECONNECT) {
1835                dout("send_renew_caps ignoring mds%d (%s)\n",
1836                     session->s_mds, ceph_mds_state_name(state));
1837                return 0;
1838        }
1839
1840        dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
1841                ceph_mds_state_name(state));
1842        msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
1843                                      ++session->s_renew_seq);
1844        if (!msg)
1845                return -ENOMEM;
1846        ceph_con_send(&session->s_con, msg);
1847        return 0;
1848}
1849
1850static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
1851                             struct ceph_mds_session *session, u64 seq)
1852{
1853        struct ceph_msg *msg;
1854
1855        dout("send_flushmsg_ack to mds%d (%s) seq %lld\n",
1856             session->s_mds, ceph_session_state_name(session->s_state), seq);
1857        msg = ceph_create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
1858        if (!msg)
1859                return -ENOMEM;
1860        ceph_con_send(&session->s_con, msg);
1861        return 0;
1862}
1863
1864
1865/*
1866 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
1867 *
1868 * Called under session->s_mutex
1869 */
1870static void renewed_caps(struct ceph_mds_client *mdsc,
1871                         struct ceph_mds_session *session, int is_renew)
1872{
1873        int was_stale;
1874        int wake = 0;
1875
1876        spin_lock(&session->s_cap_lock);
1877        was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);
1878
1879        session->s_cap_ttl = session->s_renew_requested +
1880                mdsc->mdsmap->m_session_timeout*HZ;
1881
1882        if (was_stale) {
1883                if (time_before(jiffies, session->s_cap_ttl)) {
1884                        pr_info("mds%d caps renewed\n", session->s_mds);
1885                        wake = 1;
1886                } else {
1887                        pr_info("mds%d caps still stale\n", session->s_mds);
1888                }
1889        }
1890        dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
1891             session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
1892             time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
1893        spin_unlock(&session->s_cap_lock);
1894
1895        if (wake)
1896                wake_up_session_caps(session, RENEWCAPS);
1897}
1898
1899/*
1900 * send a session close request
1901 */
1902static int request_close_session(struct ceph_mds_session *session)
1903{
1904        struct ceph_msg *msg;
1905
1906        dout("request_close_session mds%d state %s seq %lld\n",
1907             session->s_mds, ceph_session_state_name(session->s_state),
1908             session->s_seq);
1909        msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_CLOSE,
1910                                      session->s_seq);
1911        if (!msg)
1912                return -ENOMEM;
1913        ceph_con_send(&session->s_con, msg);
1914        return 1;
1915}
1916
1917/*
1918 * Called with s_mutex held.
1919 */
1920static int __close_session(struct ceph_mds_client *mdsc,
1921                         struct ceph_mds_session *session)
1922{
1923        if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
1924                return 0;
1925        session->s_state = CEPH_MDS_SESSION_CLOSING;
1926        return request_close_session(session);
1927}
1928
1929static bool drop_negative_children(struct dentry *dentry)
1930{
1931        struct dentry *child;
1932        bool all_negative = true;
1933
1934        if (!d_is_dir(dentry))
1935                goto out;
1936
1937        spin_lock(&dentry->d_lock);
1938        list_for_each_entry(child, &dentry->d_subdirs, d_child) {
1939                if (d_really_is_positive(child)) {
1940                        all_negative = false;
1941                        break;
1942                }
1943        }
1944        spin_unlock(&dentry->d_lock);
1945
1946        if (all_negative)
1947                shrink_dcache_parent(dentry);
1948out:
1949        return all_negative;
1950}
1951
1952/*
1953 * Trim old(er) caps.
1954 *
1955 * Because we can't cache an inode without one or more caps, we do
1956 * this indirectly: if a cap is unused, we prune its aliases, at which
1957 * point the inode will hopefully get dropped too.
1958 *
1959 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
1960 * memory pressure from the MDS, though, so it needn't be perfect.
1961 */
1962static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1963{
1964        int *remaining = arg;
1965        struct ceph_inode_info *ci = ceph_inode(inode);
1966        int used, wanted, oissued, mine;
1967
1968        if (*remaining <= 0)
1969                return -1;
1970
1971        spin_lock(&ci->i_ceph_lock);
1972        mine = cap->issued | cap->implemented;
1973        used = __ceph_caps_used(ci);
1974        wanted = __ceph_caps_file_wanted(ci);
1975        oissued = __ceph_caps_issued_other(ci, cap);
1976
1977        dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
1978             inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
1979             ceph_cap_string(used), ceph_cap_string(wanted));
1980        if (cap == ci->i_auth_cap) {
1981                if (ci->i_dirty_caps || ci->i_flushing_caps ||
1982                    !list_empty(&ci->i_cap_snaps))
1983                        goto out;
1984                if ((used | wanted) & CEPH_CAP_ANY_WR)
1985                        goto out;
1986                /* Note: it's possible that i_filelock_ref becomes non-zero
1987                 * after dropping auth caps. It doesn't hurt because the
1988                 * reply to the lock mds request will re-add auth caps. */
1989                if (atomic_read(&ci->i_filelock_ref) > 0)
1990                        goto out;
1991        }
1992        /* The inode has cached pages, but it's no longer used.
1993         * We can safely drop it. */
1994        if (S_ISREG(inode->i_mode) &&
1995            wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
1996            !(oissued & CEPH_CAP_FILE_CACHE)) {
1997                used = 0;
1998                oissued = 0;
1999        }
2000        if ((used | wanted) & ~oissued & mine)
2001                goto out;   /* we need these caps */
2002
2003        if (oissued) {
2004                /* we aren't the only cap.. just remove us */
2005                ceph_remove_cap(cap, true);
2006                (*remaining)--;
2007        } else {
2008                struct dentry *dentry;
2009                /* try dropping referring dentries */
2010                spin_unlock(&ci->i_ceph_lock);
2011                dentry = d_find_any_alias(inode);
2012                if (dentry && drop_negative_children(dentry)) {
2013                        int count;
2014                        dput(dentry);
2015                        d_prune_aliases(inode);
2016                        count = atomic_read(&inode->i_count);
2017                        if (count == 1)
2018                                (*remaining)--;
2019                        dout("trim_caps_cb %p cap %p pruned, count now %d\n",
2020                             inode, cap, count);
2021                } else {
2022                        dput(dentry);
2023                }
2024                return 0;
2025        }
2026
2027out:
2028        spin_unlock(&ci->i_ceph_lock);
2029        return 0;
2030}
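
/*
 * Worked example of the keep/trim test above (hypothetical cap values):
 * if mine and used both contain CEPH_CAP_FILE_CACHE, wanted == 0 and no
 * other cap has Fc issued (oissued lacks it), then
 * (used | wanted) & ~oissued & mine != 0 and the cap would be kept --
 * except that the S_ISREG special case has already zeroed used and
 * oissued for exactly this pattern, so we instead fall through and try
 * to prune the inode's dentries.
 */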
2031
2032/*
2033 * Trim session cap count down to some max number.
2034 */
2035int ceph_trim_caps(struct ceph_mds_client *mdsc,
2036                   struct ceph_mds_session *session,
2037                   int max_caps)
2038{
2039        int trim_caps = session->s_nr_caps - max_caps;
2040
2041        dout("trim_caps mds%d start: %d / %d, trim %d\n",
2042             session->s_mds, session->s_nr_caps, max_caps, trim_caps);
2043        if (trim_caps > 0) {
2044                int remaining = trim_caps;
2045
2046                ceph_iterate_session_caps(session, trim_caps_cb, &remaining);
2047                dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
2048                     session->s_mds, session->s_nr_caps, max_caps,
2049                        trim_caps - remaining);
2050        }
2051
2052        ceph_flush_cap_releases(mdsc, session);
2053        return 0;
2054}
2055
2056static int check_caps_flush(struct ceph_mds_client *mdsc,
2057                            u64 want_flush_tid)
2058{
2059        int ret = 1;
2060
2061        spin_lock(&mdsc->cap_dirty_lock);
2062        if (!list_empty(&mdsc->cap_flush_list)) {
2063                struct ceph_cap_flush *cf =
2064                        list_first_entry(&mdsc->cap_flush_list,
2065                                         struct ceph_cap_flush, g_list);
2066                if (cf->tid <= want_flush_tid) {
2067                        dout("check_caps_flush still flushing tid "
2068                             "%llu <= %llu\n", cf->tid, want_flush_tid);
2069                        ret = 0;
2070                }
2071        }
2072        spin_unlock(&mdsc->cap_dirty_lock);
2073        return ret;
2074}
2075
2076/*
2077 * Wait until all dirty inode data has been flushed to disk.
2078 *
2079 * Returns once we've flushed through want_flush_tid.
2080 */
2081static void wait_caps_flush(struct ceph_mds_client *mdsc,
2082                            u64 want_flush_tid)
2083{
2084        dout("wait_caps_flush want %llu\n", want_flush_tid);
2085
2086        wait_event(mdsc->cap_flushing_wq,
2087                   check_caps_flush(mdsc, want_flush_tid));
2088
2089        dout("wait_caps_flush ok, flushed thru %llu\n", want_flush_tid);
2090}
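
/*
 * Note: wait_event() re-evaluates check_caps_flush() every time
 * cap_flushing_wq is woken (e.g. from remove_capsnaps() above), so this
 * sleeps until no cap flush with tid <= want_flush_tid remains queued.
 */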
2091
2092/*
2093 * called under s_mutex
2094 */
2095static void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
2096                                   struct ceph_mds_session *session)
2097{
2098        struct ceph_msg *msg = NULL;
2099        struct ceph_mds_cap_release *head;
2100        struct ceph_mds_cap_item *item;
2101        struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
2102        struct ceph_cap *cap;
2103        LIST_HEAD(tmp_list);
2104        int num_cap_releases;
2105        __le32  barrier, *cap_barrier;
2106
2107        down_read(&osdc->lock);
2108        barrier = cpu_to_le32(osdc->epoch_barrier);
2109        up_read(&osdc->lock);
2110
2111        spin_lock(&session->s_cap_lock);
2112again:
2113        list_splice_init(&session->s_cap_releases, &tmp_list);
2114        num_cap_releases = session->s_num_cap_releases;
2115        session->s_num_cap_releases = 0;
2116        spin_unlock(&session->s_cap_lock);
2117
2118        while (!list_empty(&tmp_list)) {
2119                if (!msg) {
2120                        msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
2121                                        PAGE_SIZE, GFP_NOFS, false);
2122                        if (!msg)
2123                                goto out_err;
2124                        head = msg->front.iov_base;
2125                        head->num = cpu_to_le32(0);
2126                        msg->front.iov_len = sizeof(*head);
2127
2128                        msg->hdr.version = cpu_to_le16(2);
2129                        msg->hdr.compat_version = cpu_to_le16(1);
2130                }
2131
2132                cap = list_first_entry(&tmp_list, struct ceph_cap,
2133                                        session_caps);
2134                list_del(&cap->session_caps);
2135                num_cap_releases--;
2136
2137                head = msg->front.iov_base;
2138                put_unaligned_le32(get_unaligned_le32(&head->num) + 1,
2139                                   &head->num);
2140                item = msg->front.iov_base + msg->front.iov_len;
2141                item->ino = cpu_to_le64(cap->cap_ino);
2142                item->cap_id = cpu_to_le64(cap->cap_id);
2143                item->migrate_seq = cpu_to_le32(cap->mseq);
2144                item->seq = cpu_to_le32(cap->issue_seq);
2145                msg->front.iov_len += sizeof(*item);
2146
2147                ceph_put_cap(mdsc, cap);
2148
2149                if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
2150                        // Append cap_barrier field
2151                        cap_barrier = msg->front.iov_base + msg->front.iov_len;
2152                        *cap_barrier = barrier;
2153                        msg->front.iov_len += sizeof(*cap_barrier);
2154
2155                        msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2156                        dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
2157                        ceph_con_send(&session->s_con, msg);
2158                        msg = NULL;
2159                }
2160        }
2161
2162        BUG_ON(num_cap_releases != 0);
2163
2164        spin_lock(&session->s_cap_lock);
2165        if (!list_empty(&session->s_cap_releases))
2166                goto again;
2167        spin_unlock(&session->s_cap_lock);
2168
2169        if (msg) {
2170                // Append cap_barrier field
2171                cap_barrier = msg->front.iov_base + msg->front.iov_len;
2172                *cap_barrier = barrier;
2173                msg->front.iov_len += sizeof(*cap_barrier);
2174
2175                msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2176                dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
2177                ceph_con_send(&session->s_con, msg);
2178        }
2179        return;
2180out_err:
2181        pr_err("send_cap_releases mds%d, failed to allocate message\n",
2182                session->s_mds);
2183        spin_lock(&session->s_cap_lock);
2184        list_splice(&tmp_list, &session->s_cap_releases);
2185        session->s_num_cap_releases += num_cap_releases;
2186        spin_unlock(&session->s_cap_lock);
2187}
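
/*
 * Sketch of the CAPRELEASE message assembled above (front section only):
 *
 *	struct ceph_mds_cap_release head;   // num = item count
 *	struct ceph_mds_cap_item items[];   // ino, cap_id, migrate_seq, seq
 *	__le32 cap_barrier;                 // osdc epoch barrier, appended last
 *
 * A message is sent either when it reaches CEPH_CAPS_PER_RELEASE items
 * or when the release list has been fully drained.
 */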
2188
2189static void ceph_cap_release_work(struct work_struct *work)
2190{
2191        struct ceph_mds_session *session =
2192                container_of(work, struct ceph_mds_session, s_cap_release_work);
2193
2194        mutex_lock(&session->s_mutex);
2195        if (session->s_state == CEPH_MDS_SESSION_OPEN ||
2196            session->s_state == CEPH_MDS_SESSION_HUNG)
2197                ceph_send_cap_releases(session->s_mdsc, session);
2198        mutex_unlock(&session->s_mutex);
2199        ceph_put_mds_session(session);
2200}
2201
2202void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
2203                             struct ceph_mds_session *session)
2204{
2205        if (mdsc->stopping)
2206                return;
2207
2208        ceph_get_mds_session(session);
2209        if (queue_work(mdsc->fsc->cap_wq,
2210                       &session->s_cap_release_work)) {
2211                dout("cap release work queued\n");
2212        } else {
2213                ceph_put_mds_session(session);
2214                dout("failed to queue cap release work\n");
2215        }
2216}
2217
2218/*
2219 * caller holds session->s_cap_lock
2220 */
2221void __ceph_queue_cap_release(struct ceph_mds_session *session,
2222                              struct ceph_cap *cap)
2223{
2224        list_add_tail(&cap->session_caps, &session->s_cap_releases);
2225        session->s_num_cap_releases++;
2226
2227        if (!(session->s_num_cap_releases % CEPH_CAPS_PER_RELEASE))
2228                ceph_flush_cap_releases(session->s_mdsc, session);
2229}
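
/*
 * Note: releases are only batched here; the flush work is kicked once
 * per CEPH_CAPS_PER_RELEASE entries, so a burst of N queued releases
 * schedules roughly N / CEPH_CAPS_PER_RELEASE work items instead of N.
 */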
2230
2231static void ceph_cap_reclaim_work(struct work_struct *work)
2232{
2233        struct ceph_mds_client *mdsc =
2234                container_of(work, struct ceph_mds_client, cap_reclaim_work);
2235        int ret = ceph_trim_dentries(mdsc);
2236        if (ret == -EAGAIN)
2237                ceph_queue_cap_reclaim_work(mdsc);
2238}
2239
2240void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc)
2241{
2242        if (mdsc->stopping)
2243                return;
2244
2245        if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_reclaim_work)) {
2246                dout("caps reclaim work queued\n");
2247        } else {
2248                dout("failed to queue caps reclaim work\n");
2249        }
2250}
2251
2252void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
2253{
2254        int val;
2255        if (!nr)
2256                return;
2257        val = atomic_add_return(nr, &mdsc->cap_reclaim_pending);
2258        if ((val % CEPH_CAPS_PER_RELEASE) < nr) {
2259                atomic_set(&mdsc->cap_reclaim_pending, 0);
2260                ceph_queue_cap_reclaim_work(mdsc);
2261        }
2262}
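
/*
 * Worked example of the threshold test above (hypothetical numbers,
 * taking CEPH_CAPS_PER_RELEASE as 128): if cap_reclaim_pending was 120
 * and nr == 16, val == 136 and val % 128 == 8 < 16, so a multiple of
 * 128 was just crossed and the reclaim work is queued; at val == 100,
 * 100 % 128 == 100 >= 16 and nothing is scheduled yet.
 */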
2263
2264/*
2265 * requests
2266 */
2267
2268int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
2269                                    struct inode *dir)
2270{
2271        struct ceph_inode_info *ci = ceph_inode(dir);
2272        struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
2273        struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
2274        size_t size = sizeof(struct ceph_mds_reply_dir_entry);
2275        unsigned int num_entries;
2276        int order;
2277
2278        spin_lock(&ci->i_ceph_lock);
2279        num_entries = ci->i_files + ci->i_subdirs;
2280        spin_unlock(&ci->i_ceph_lock);
2281        num_entries = max(num_entries, 1U);
2282        num_entries = min(num_entries, opt->max_readdir);
2283
2284        order = get_order(size * num_entries);
2285        while (order >= 0) {
2286                rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
2287                                                             __GFP_NOWARN,
2288                                                             order);
2289                if (rinfo->dir_entries)
2290                        break;
2291                order--;
2292        }
2293        if (!rinfo->dir_entries)
2294                return -ENOMEM;
2295
2296        num_entries = (PAGE_SIZE << order) / size;
2297        num_entries = min(num_entries, opt->max_readdir);
2298
2299        rinfo->dir_buf_size = PAGE_SIZE << order;
2300        req->r_num_caps = num_entries + 1;
2301        req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
2302        req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
2303        return 0;
2304}
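
/*
 * Worked sizing example (hypothetical numbers, assuming 4K pages and a
 * 32-byte ceph_mds_reply_dir_entry): ~1000 entries need ~32000 bytes,
 * so get_order() requests an order-3 (32K) block; on failure the loop
 * retries at order 2, 1, 0, and num_entries is then re-capped to
 * (PAGE_SIZE << order) / size for whatever order succeeded.
 */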
2305
2306/*
2307 * Create an mds request.
2308 */
2309struct ceph_mds_request *
2310ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
2311{
2312        struct ceph_mds_request *req;
2313
2314        req = kmem_cache_zalloc(ceph_mds_request_cachep, GFP_NOFS);
2315        if (!req)
2316                return ERR_PTR(-ENOMEM);
2317
2318        mutex_init(&req->r_fill_mutex);
2319        req->r_mdsc = mdsc;
2320        req->r_started = jiffies;
2321        req->r_start_latency = ktime_get();
2322        req->r_resend_mds = -1;
2323        INIT_LIST_HEAD(&req->r_unsafe_dir_item);
2324        INIT_LIST_HEAD(&req->r_unsafe_target_item);
2325        req->r_fmode = -1;
2326        kref_init(&req->r_kref);
2327        RB_CLEAR_NODE(&req->r_node);
2328        INIT_LIST_HEAD(&req->r_wait);
2329        init_completion(&req->r_completion);
2330        init_completion(&req->r_safe_completion);
2331        INIT_LIST_HEAD(&req->r_unsafe_item);
2332
2333        ktime_get_coarse_real_ts64(&req->r_stamp);
2334
2335        req->r_op = op;
2336        req->r_direct_mode = mode;
2337        return req;
2338}
2339
2340/*
2341 * return the oldest (lowest tid) request in the tree, or NULL if none.
2342 *
2343 * called under mdsc->mutex.
2344 */
2345static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
2346{
2347        if (RB_EMPTY_ROOT(&mdsc->request_tree))
2348                return NULL;
2349        return rb_entry(rb_first(&mdsc->request_tree),
2350                        struct ceph_mds_request, r_node);
2351}
2352
2353static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
2354{
2355        return mdsc->oldest_tid;
2356}
2357
2358/*
2359 * Build a dentry's path.  The caller must free the result with
2360 * ceph_mdsc_free_path().  Based on build_path_from_dentry in fs/cifs/dir.c.
2361 *
2362 * If @stop_on_nosnap, generate path relative to the first non-snapped
2363 * inode.
2364 *
2365 * Encode hidden .snap dirs as a double /, i.e.
2366 *   foo/.snap/bar -> foo//bar
2367 */
2368char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase,
2369                           int stop_on_nosnap)
2370{
2371        struct dentry *temp;
2372        char *path;
2373        int pos;
2374        unsigned seq;
2375        u64 base;
2376
2377        if (!dentry)
2378                return ERR_PTR(-EINVAL);
2379
2380        path = __getname();
2381        if (!path)
2382                return ERR_PTR(-ENOMEM);
2383retry:
2384        pos = PATH_MAX - 1;
2385        path[pos] = '\0';
2386
2387        seq = read_seqbegin(&rename_lock);
2388        rcu_read_lock();
2389        temp = dentry;
2390        for (;;) {
2391                struct inode *inode;
2392
2393                spin_lock(&temp->d_lock);
2394                inode = d_inode(temp);
2395                if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
2396                        dout("build_path path+%d: %p SNAPDIR\n",
2397                             pos, temp);
2398                } else if (stop_on_nosnap && inode && dentry != temp &&
2399                           ceph_snap(inode) == CEPH_NOSNAP) {
2400                        spin_unlock(&temp->d_lock);
2401                        pos++; /* get rid of any prepended '/' */
2402                        break;
2403                } else {
2404                        pos -= temp->d_name.len;
2405                        if (pos < 0) {
2406                                spin_unlock(&temp->d_lock);
2407                                break;
2408                        }
2409                        memcpy(path + pos, temp->d_name.name, temp->d_name.len);
2410                }
2411                spin_unlock(&temp->d_lock);
2412                temp = READ_ONCE(temp->d_parent);
2413
2414                /* Are we at the root? */
2415                if (IS_ROOT(temp))
2416                        break;
2417
2418                /* Are we out of buffer? */
2419                if (--pos < 0)
2420                        break;
2421
2422                path[pos] = '/';
2423        }
2424        base = ceph_ino(d_inode(temp));
2425        rcu_read_unlock();
2426
2427        if (read_seqretry(&rename_lock, seq))
2428                goto retry;
2429
2430        if (pos < 0) {
2431                /*
2432                 * A rename didn't occur, but somehow we didn't end up where
2433                 * we thought we would. Throw a warning and try again.
2434                 */
2435                pr_warn("build_path did not end path lookup where expected, pos is %d\n",
2436                        pos);
2437                goto retry;
2438        }
2439
2440        *pbase = base;
2441        *plen = PATH_MAX - 1 - pos;
2442        dout("build_path on %p %d built %llx '%.*s'\n",
2443             dentry, d_count(dentry), base, *plen, path + pos);
2444        return path + pos;
2445}
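
/*
 * The retry loop above is the usual rename_lock seqlock pattern: build
 * the path optimistically under RCU, then discard and retry if a rename
 * raced with the walk.  In sketch form:
 *
 *	unsigned seq;
 *	do {
 *		seq = read_seqbegin(&rename_lock);
 *		// walk ->d_parent chain, copying names right to left
 *	} while (read_seqretry(&rename_lock, seq));
 */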
2446
2447static int build_dentry_path(struct dentry *dentry, struct inode *dir,
2448                             const char **ppath, int *ppathlen, u64 *pino,
2449                             bool *pfreepath, bool parent_locked)
2450{
2451        char *path;
2452
2453        rcu_read_lock();
2454        if (!dir)
2455                dir = d_inode_rcu(dentry->d_parent);
2456        if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP) {
2457                *pino = ceph_ino(dir);
2458                rcu_read_unlock();
2459                *ppath = dentry->d_name.name;
2460                *ppathlen = dentry->d_name.len;
2461                return 0;
2462        }
2463        rcu_read_unlock();
2464        path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
2465        if (IS_ERR(path))
2466                return PTR_ERR(path);
2467        *ppath = path;
2468        *pfreepath = true;
2469        return 0;
2470}
2471
2472static int build_inode_path(struct inode *inode,
2473                            const char **ppath, int *ppathlen, u64 *pino,
2474                            bool *pfreepath)
2475{
2476        struct dentry *dentry;
2477        char *path;
2478
2479        if (ceph_snap(inode) == CEPH_NOSNAP) {
2480                *pino = ceph_ino(inode);
2481                *ppathlen = 0;
2482                return 0;
2483        }
2484        dentry = d_find_alias(inode);
2485        path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
2486        dput(dentry);
2487        if (IS_ERR(path))
2488                return PTR_ERR(path);
2489        *ppath = path;
2490        *pfreepath = true;
2491        return 0;
2492}
2493
2494/*
2495 * request arguments may be specified via an inode *, a dentry *, or
2496 * an explicit ino+path.
2497 */
2498static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
2499                                  struct inode *rdiri, const char *rpath,
2500                                  u64 rino, const char **ppath, int *pathlen,
2501                                  u64 *ino, bool *freepath, bool parent_locked)
2502{
2503        int r = 0;
2504
2505        if (rinode) {
2506                r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
2507                dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
2508                     ceph_snap(rinode));
2509        } else if (rdentry) {
2510                r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
2511                                        freepath, parent_locked);
2512                dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
2513                     *ppath);
2514        } else if (rpath || rino) {
2515                *ino = rino;
2516                *ppath = rpath;
2517                *pathlen = rpath ? strlen(rpath) : 0;
2518                dout(" path %.*s\n", *pathlen, rpath);
2519        }
2520
2521        return r;
2522}
2523
2524static void encode_timestamp_and_gids(void **p,
2525                                      const struct ceph_mds_request *req)
2526{
2527        struct ceph_timespec ts;
2528        int i;
2529
2530        ceph_encode_timespec64(&ts, &req->r_stamp);
2531        ceph_encode_copy(p, &ts, sizeof(ts));
2532
2533        /* gid_list */
2534        ceph_encode_32(p, req->r_cred->group_info->ngroups);
2535        for (i = 0; i < req->r_cred->group_info->ngroups; i++)
2536                ceph_encode_64(p, from_kgid(&init_user_ns,
2537                                            req->r_cred->group_info->gid[i]));
2538}
2539
2540/*
2541 * called under mdsc->mutex
2542 */
2543static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
2544                                               struct ceph_mds_request *req,
2545                                               bool drop_cap_releases)
2546{
2547        int mds = session->s_mds;
2548        struct ceph_mds_client *mdsc = session->s_mdsc;
2549        struct ceph_msg *msg;
2550        struct ceph_mds_request_head_old *head;
2551        const char *path1 = NULL;
2552        const char *path2 = NULL;
2553        u64 ino1 = 0, ino2 = 0;
2554        int pathlen1 = 0, pathlen2 = 0;
2555        bool freepath1 = false, freepath2 = false;
2556        int len;
2557        u16 releases;
2558        void *p, *end;
2559        int ret;
2560        bool legacy = !(session->s_con.peer_features & CEPH_FEATURE_FS_BTIME);
2561
2562        ret = set_request_path_attr(req->r_inode, req->r_dentry,
2563                              req->r_parent, req->r_path1, req->r_ino1.ino,
2564                              &path1, &pathlen1, &ino1, &freepath1,
2565                              test_bit(CEPH_MDS_R_PARENT_LOCKED,
2566                                        &req->r_req_flags));
2567        if (ret < 0) {
2568                msg = ERR_PTR(ret);
2569                goto out;
2570        }
2571
2572        /* If r_old_dentry is set, then assume that its parent is locked */
2573        ret = set_request_path_attr(NULL, req->r_old_dentry,
2574                              req->r_old_dentry_dir,
2575                              req->r_path2, req->r_ino2.ino,
2576                              &path2, &pathlen2, &ino2, &freepath2, true);
2577        if (ret < 0) {
2578                msg = ERR_PTR(ret);
2579                goto out_free1;
2580        }
2581
2582        len = legacy ? sizeof(*head) : sizeof(struct ceph_mds_request_head);
2583        len += pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
2584                sizeof(struct ceph_timespec);
2585        len += sizeof(u32) + (sizeof(u64) * req->r_cred->group_info->ngroups);
2586
2587        /* calculate (max) length for cap releases */
2588        len += sizeof(struct ceph_mds_request_release) *
2589                (!!req->r_inode_drop + !!req->r_dentry_drop +
2590                 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
2591
2592        if (req->r_dentry_drop)
2593                len += pathlen1;
2594        if (req->r_old_dentry_drop)
2595                len += pathlen2;
2596
2597        msg = ceph_msg_new2(CEPH_MSG_CLIENT_REQUEST, len, 1, GFP_NOFS, false);
2598        if (!msg) {
2599                msg = ERR_PTR(-ENOMEM);
2600                goto out_free2;
2601        }
2602
2603        msg->hdr.tid = cpu_to_le64(req->r_tid);
2604
2605        /*
2606         * The old ceph_mds_request_head didn't contain a version field, and
2607         * one was added when we moved the message version from 3->4.
2608         */
2609        if (legacy) {
2610                msg->hdr.version = cpu_to_le16(3);
2611                head = msg->front.iov_base;
2612                p = msg->front.iov_base + sizeof(*head);
2613        } else {
2614                struct ceph_mds_request_head *new_head = msg->front.iov_base;
2615
2616                msg->hdr.version = cpu_to_le16(4);
2617                new_head->version = cpu_to_le16(CEPH_MDS_REQUEST_HEAD_VERSION);
2618                head = (struct ceph_mds_request_head_old *)&new_head->oldest_client_tid;
2619                p = msg->front.iov_base + sizeof(*new_head);
2620        }
2621
2622        end = msg->front.iov_base + msg->front.iov_len;
2623
2624        head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
2625        head->op = cpu_to_le32(req->r_op);
2626        head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns,
2627                                                 req->r_cred->fsuid));
2628        head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns,
2629                                                 req->r_cred->fsgid));
2630        head->ino = cpu_to_le64(req->r_deleg_ino);
2631        head->args = req->r_args;
2632
2633        ceph_encode_filepath(&p, end, ino1, path1);
2634        ceph_encode_filepath(&p, end, ino2, path2);
2635
2636        /* make note of release offset, in case we need to replay */
2637        req->r_request_release_offset = p - msg->front.iov_base;
2638
2639        /* cap releases */
2640        releases = 0;
2641        if (req->r_inode_drop)
2642                releases += ceph_encode_inode_release(&p,
2643                      req->r_inode ? req->r_inode : d_inode(req->r_dentry),
2644                      mds, req->r_inode_drop, req->r_inode_unless,
2645                      req->r_op == CEPH_MDS_OP_READDIR);
2646        if (req->r_dentry_drop)
2647                releases += ceph_encode_dentry_release(&p, req->r_dentry,
2648                                req->r_parent, mds, req->r_dentry_drop,
2649                                req->r_dentry_unless);
2650        if (req->r_old_dentry_drop)
2651                releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
2652                                req->r_old_dentry_dir, mds,
2653                                req->r_old_dentry_drop,
2654                                req->r_old_dentry_unless);
2655        if (req->r_old_inode_drop)
2656                releases += ceph_encode_inode_release(&p,
2657                      d_inode(req->r_old_dentry),
2658                      mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
2659
2660        if (drop_cap_releases) {
2661                releases = 0;
2662                p = msg->front.iov_base + req->r_request_release_offset;
2663        }
2664
2665        head->num_releases = cpu_to_le16(releases);
2666
2667        encode_timestamp_and_gids(&p, req);
2668
2669        if (WARN_ON_ONCE(p > end)) {
2670                ceph_msg_put(msg);
2671                msg = ERR_PTR(-ERANGE);
2672                goto out_free2;
2673        }
2674
2675        msg->front.iov_len = p - msg->front.iov_base;
2676        msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2677
2678        if (req->r_pagelist) {
2679                struct ceph_pagelist *pagelist = req->r_pagelist;
2680                ceph_msg_data_add_pagelist(msg, pagelist);
2681                msg->hdr.data_len = cpu_to_le32(pagelist->length);
2682        } else {
2683                msg->hdr.data_len = 0;
2684        }
2685
2686        msg->hdr.data_off = cpu_to_le16(0);
2687
2688out_free2:
2689        if (freepath2)
2690                ceph_mdsc_free_path((char *)path2, pathlen2);
2691out_free1:
2692        if (freepath1)
2693                ceph_mdsc_free_path((char *)path1, pathlen1);
2694out:
2695        return msg;
2696}
2697
2698/*
2699 * called under mdsc->mutex if error, under no mutex if
2700 * success.
2701 */
2702static void complete_request(struct ceph_mds_client *mdsc,
2703                             struct ceph_mds_request *req)
2704{
2705        req->r_end_latency = ktime_get();
2706
2707        if (req->r_callback)
2708                req->r_callback(mdsc, req);
2709        complete_all(&req->r_completion);
2710}
2711
2712static struct ceph_mds_request_head_old *
2713find_old_request_head(void *p, u64 features)
2714{
2715        bool legacy = !(features & CEPH_FEATURE_FS_BTIME);
2716        struct ceph_mds_request_head *new_head;
2717
2718        if (legacy)
2719                return (struct ceph_mds_request_head_old *)p;
2720        new_head = (struct ceph_mds_request_head *)p;
2721        return (struct ceph_mds_request_head_old *)&new_head->oldest_client_tid;
2722}
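
/*
 * Note: struct ceph_mds_request_head is the old header with a version
 * field prepended, so the two layouts coincide from oldest_client_tid
 * onward.  Casting &new_head->oldest_client_tid to the old type lets
 * the callers below fill in the shared fields without caring which
 * header format the peer understands.
 */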
2723
2724/*
2725 * called under mdsc->mutex
2726 */
2727static int __prepare_send_request(struct ceph_mds_session *session,
2728                                  struct ceph_mds_request *req,
2729                                  bool drop_cap_releases)
2730{
2731        int mds = session->s_mds;
2732        struct ceph_mds_client *mdsc = session->s_mdsc;
2733        struct ceph_mds_request_head_old *rhead;
2734        struct ceph_msg *msg;
2735        int flags = 0;
2736
2737        req->r_attempts++;
2738        if (req->r_inode) {
2739                struct ceph_cap *cap =
2740                        ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
2741
2742                if (cap)
2743                        req->r_sent_on_mseq = cap->mseq;
2744                else
2745                        req->r_sent_on_mseq = -1;
2746        }
2747        dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
2748             req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
2749
2750        if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
2751                void *p;
2752
2753                /*
2754                 * Replay.  Do not regenerate message (and rebuild
2755                 * paths, etc.); just use the original message.
2756                 * Rebuilding paths will break for renames because
2757                 * d_move mangles the src name.
2758                 */
2759                msg = req->r_request;
2760                rhead = find_old_request_head(msg->front.iov_base,
2761                                              session->s_con.peer_features);
2762
2763                flags = le32_to_cpu(rhead->flags);
2764                flags |= CEPH_MDS_FLAG_REPLAY;
2765                rhead->flags = cpu_to_le32(flags);
2766
2767                if (req->r_target_inode)
2768                        rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
2769
2770                rhead->num_retry = req->r_attempts - 1;
2771
2772                /* remove cap/dentry releases from message */
2773                rhead->num_releases = 0;
2774
2775                p = msg->front.iov_base + req->r_request_release_offset;
2776                encode_timestamp_and_gids(&p, req);
2777
2778                msg->front.iov_len = p - msg->front.iov_base;
2779                msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2780                return 0;
2781        }
2782
2783        if (req->r_request) {
2784                ceph_msg_put(req->r_request);
2785                req->r_request = NULL;
2786        }
2787        msg = create_request_message(session, req, drop_cap_releases);
2788        if (IS_ERR(msg)) {
2789                req->r_err = PTR_ERR(msg);
2790                return PTR_ERR(msg);
2791        }
2792        req->r_request = msg;
2793
2794        rhead = find_old_request_head(msg->front.iov_base,
2795                                      session->s_con.peer_features);
2796        rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
2797        if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2798                flags |= CEPH_MDS_FLAG_REPLAY;
2799        if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags))
2800                flags |= CEPH_MDS_FLAG_ASYNC;
2801        if (req->r_parent)
2802                flags |= CEPH_MDS_FLAG_WANT_DENTRY;
2803        rhead->flags = cpu_to_le32(flags);
2804        rhead->num_fwd = req->r_num_fwd;
2805        rhead->num_retry = req->r_attempts - 1;
2806
2807        dout(" r_parent = %p\n", req->r_parent);
2808        return 0;
2809}
2810
2811/*
2812 * called under mdsc->mutex
2813 */
2814static int __send_request(struct ceph_mds_session *session,
2815                          struct ceph_mds_request *req,
2816                          bool drop_cap_releases)
2817{
2818        int err;
2819
2820        err = __prepare_send_request(session, req, drop_cap_releases);
2821        if (!err) {
2822                ceph_msg_get(req->r_request);
2823                ceph_con_send(&session->s_con, req->r_request);
2824        }
2825
2826        return err;
2827}
2828
2829/*
2830 * send request, or put it on the appropriate wait list.
2831 */
2832static void __do_request(struct ceph_mds_client *mdsc,
2833                        struct ceph_mds_request *req)
2834{
2835        struct ceph_mds_session *session = NULL;
2836        int mds = -1;
2837        int err = 0;
2838        bool random;
2839
2840        if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
2841                if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
2842                        __unregister_request(mdsc, req);
2843                return;
2844        }
2845
2846        if (req->r_timeout &&
2847            time_after_eq(jiffies, req->r_started + req->r_timeout)) {
2848                dout("do_request timed out\n");
2849                err = -ETIMEDOUT;
2850                goto finish;
2851        }
2852        if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
2853                dout("do_request forced umount\n");
2854                err = -EIO;
2855                goto finish;
2856        }
2857        if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
2858                if (mdsc->mdsmap_err) {
2859                        err = mdsc->mdsmap_err;
2860                        dout("do_request mdsmap err %d\n", err);
2861                        goto finish;
2862                }
2863                if (mdsc->mdsmap->m_epoch == 0) {
2864                        dout("do_request no mdsmap, waiting for map\n");
2865                        list_add(&req->r_wait, &mdsc->waiting_for_map);
2866                        return;
2867                }
2868                if (!(mdsc->fsc->mount_options->flags &
2869                      CEPH_MOUNT_OPT_MOUNTWAIT) &&
2870                    !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
2871                        err = -EHOSTUNREACH;
2872                        goto finish;
2873                }
2874        }
2875
2876        put_request_session(req);
2877
2878        mds = __choose_mds(mdsc, req, &random);
2879        if (mds < 0 ||
2880            ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
2881                if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
2882                        err = -EJUKEBOX;
2883                        goto finish;
2884                }
2885                dout("do_request no mds or not active, waiting for map\n");
2886                list_add(&req->r_wait, &mdsc->waiting_for_map);
2887                return;
2888        }
2889
2890        /* get, open session */
2891        session = __ceph_lookup_mds_session(mdsc, mds);
2892        if (!session) {
2893                session = register_session(mdsc, mds);
2894                if (IS_ERR(session)) {
2895                        err = PTR_ERR(session);
2896                        goto finish;
2897                }
2898        }
2899        req->r_session = ceph_get_mds_session(session);
2900
2901        dout("do_request mds%d session %p state %s\n", mds, session,
2902             ceph_session_state_name(session->s_state));
2903        if (session->s_state != CEPH_MDS_SESSION_OPEN &&
2904            session->s_state != CEPH_MDS_SESSION_HUNG) {
2905                /*
2906                 * We cannot queue async requests since the caps and delegated
2907                 * inodes are bound to the session. Just return -EJUKEBOX and
2908                 * let the caller retry a sync request in that case.
2909                 */
2910                if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
2911                        err = -EJUKEBOX;
2912                        goto out_session;
2913                }
2914
2915                /*
2916                 * If the session has been REJECTED, then return a hard error,
2917                 * unless it's a CLEANRECOVER mount, in which case we'll queue
2918                 * it to the mdsc queue.
2919                 */
2920                if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
2921                        if (ceph_test_mount_opt(mdsc->fsc, CLEANRECOVER))
2922                                list_add(&req->r_wait, &mdsc->waiting_for_map);
2923                        else
2924                                err = -EACCES;
2925                        goto out_session;
2926                }
2927
2928                if (session->s_state == CEPH_MDS_SESSION_NEW ||
2929                    session->s_state == CEPH_MDS_SESSION_CLOSING) {
2930                        err = __open_session(mdsc, session);
2931                        if (err)
2932                                goto out_session;
2933                        /* retry the same mds later */
2934                        if (random)
2935                                req->r_resend_mds = mds;
2936                }
2937                list_add(&req->r_wait, &session->s_waiting);
2938                goto out_session;
2939        }
2940
2941        /* send request */
2942        req->r_resend_mds = -1;   /* forget any previous mds hint */
2943
2944        if (req->r_request_started == 0)   /* note request start time */
2945                req->r_request_started = jiffies;
2946
2947        err = __send_request(session, req, false);
2948
2949out_session:
2950        ceph_put_mds_session(session);
2951finish:
2952        if (err) {
2953                dout("__do_request early error %d\n", err);
2954                req->r_err = err;
2955                complete_request(mdsc, req);
2956                __unregister_request(mdsc, req);
2957        }
2958        return;
2959}
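/*
 * To summarize the dispatch above: a request either (a) parks on
 * mdsc->waiting_for_map until a usable mdsmap/MDS appears, (b) parks on
 * session->s_waiting until the session is open, (c) is handed to
 * __send_request(), or (d) fails early and is completed and unregistered.
 * Parked requests re-enter __do_request() via __wake_requests() or
 * kick_requests() below.
 */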
2960
2961/*
2962 * called under mdsc->mutex
2963 */
2964static void __wake_requests(struct ceph_mds_client *mdsc,
2965                            struct list_head *head)
2966{
2967        struct ceph_mds_request *req;
2968        LIST_HEAD(tmp_list);
2969
            /* splice onto a private list first: __do_request() may re-add
             * a request to the list we were handed (e.g. waiting_for_map) */
2970        list_splice_init(head, &tmp_list);
2971
2972        while (!list_empty(&tmp_list)) {
2973                req = list_entry(tmp_list.next,
2974                                 struct ceph_mds_request, r_wait);
2975                list_del_init(&req->r_wait);
2976                dout(" wake request %p tid %llu\n", req, req->r_tid);
2977                __do_request(mdsc, req);
2978        }
2979}
2980
2981/*
2982 * Wake up threads with requests pending for @mds, so that they can
2983 * resubmit their requests to a possibly different mds.
2984 */
2985static void kick_requests(struct ceph_mds_client *mdsc, int mds)
2986{
2987        struct ceph_mds_request *req;
2988        struct rb_node *p = rb_first(&mdsc->request_tree);
2989
2990        dout("kick_requests mds%d\n", mds);
2991        while (p) {
2992                req = rb_entry(p, struct ceph_mds_request, r_node);
2993                p = rb_next(p);
2994                if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2995                        continue;
2996                if (req->r_attempts > 0)
2997                        continue; /* only new requests */
2998                if (req->r_session &&
2999                    req->r_session->s_mds == mds) {
3000                        dout(" kicking tid %llu\n", req->r_tid);
3001                        list_del_init(&req->r_wait);
3002                        __do_request(mdsc, req);
3003                }
3004        }
3005}
3006
3007int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
3008                              struct ceph_mds_request *req)
3009{
3010        int err = 0;
3011
3012        /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
3013        if (req->r_inode)
3014                ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
3015        if (req->r_parent) {
3016                struct ceph_inode_info *ci = ceph_inode(req->r_parent);
3017                int fmode = (req->r_op & CEPH_MDS_OP_WRITE) ?
3018                            CEPH_FILE_MODE_WR : CEPH_FILE_MODE_RD;
3019                spin_lock(&ci->i_ceph_lock);
3020                ceph_take_cap_refs(ci, CEPH_CAP_PIN, false);
3021                __ceph_touch_fmode(ci, mdsc, fmode);
3022                spin_unlock(&ci->i_ceph_lock);
3023        }
3024        if (req->r_old_dentry_dir)
3025                ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
3026                                  CEPH_CAP_PIN);
3027
3028        if (req->r_inode) {
3029                err = ceph_wait_on_async_create(req->r_inode);
3030                if (err) {
3031                        dout("%s: wait for async create returned: %d\n",
3032                             __func__, err);
3033                        return err;
3034                }
3035        }
3036
3037        if (!err && req->r_old_inode) {
3038                err = ceph_wait_on_async_create(req->r_old_inode);
3039                if (err) {
3040                        dout("%s: wait for async create returned: %d\n",
3041                             __func__, err);
3042                        return err;
3043                }
3044        }
3045
3046        dout("submit_request on %p for inode %p\n", req, dir);
3047        mutex_lock(&mdsc->mutex);
3048        __register_request(mdsc, req, dir);
3049        __do_request(mdsc, req);
3050        err = req->r_err;
3051        mutex_unlock(&mdsc->mutex);
3052        return err;
3053}
3054
3055static int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
3056                                  struct ceph_mds_request *req)
3057{
3058        int err;
3059
3060        /* wait */
3061        dout("do_request waiting\n");
3062        if (!req->r_timeout && req->r_wait_for_completion) {
3063                err = req->r_wait_for_completion(mdsc, req);
3064        } else {
3065                long timeleft = wait_for_completion_killable_timeout(
3066                                        &req->r_completion,
3067                                        ceph_timeout_jiffies(req->r_timeout));
3068                if (timeleft > 0)
3069                        err = 0;
3070                else if (!timeleft)
3071                        err = -ETIMEDOUT;  /* timed out */
3072                else
3073                        err = timeleft;  /* killed */
3074        }
3075        dout("do_request waited, got %d\n", err);
3076        mutex_lock(&mdsc->mutex);
3077
3078        /* only abort if we didn't race with a real reply */
3079        if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
3080                err = le32_to_cpu(req->r_reply_info.head->result);
3081        } else if (err < 0) {
3082                dout("aborted request %lld with %d\n", req->r_tid, err);
3083
3084                /*
3085                 * ensure we aren't running concurrently with
3086                 * ceph_fill_trace or ceph_readdir_prepopulate, which
3087                 * rely on locks (dir mutex) held by our caller.
3088                 */
3089                mutex_lock(&req->r_fill_mutex);
3090                req->r_err = err;
3091                set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
3092                mutex_unlock(&req->r_fill_mutex);
3093
3094                if (req->r_parent &&
3095                    (req->r_op & CEPH_MDS_OP_WRITE))
3096                        ceph_invalidate_dir_request(req);
3097        } else {
3098                err = req->r_err;
3099        }
3100
3101        mutex_unlock(&mdsc->mutex);
3102        return err;
3103}
3104
3105/*
3106 * Synchronously perform an mds request.  Takes care of all of the
3107 * session setup, forwarding, and retry details.
3108 */
3109int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
3110                         struct inode *dir,
3111                         struct ceph_mds_request *req)
3112{
3113        int err;
3114
3115        dout("do_request on %p\n", req);
3116
3117        /* issue */
3118        err = ceph_mdsc_submit_request(mdsc, dir, req);
3119        if (!err)
3120                err = ceph_mdsc_wait_request(mdsc, req);
3121        dout("do_request %p done, result %d\n", req, err);
3122        return err;
3123}
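/*
 * Illustrative sketch (not part of this file): callers normally follow
 * the create/submit/wait/put pattern that ceph_mdsc_do_request() wraps.
 * A lookup-style caller (cf. fs/ceph/dir.c) looks roughly like:
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP, USE_ANY_MDS);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_dentry = dget(dentry);	// what we are resolving
 *	req->r_num_caps = 2;		// caps reserved for the reply trace
 *	err = ceph_mdsc_do_request(mdsc, dir, req);
 *	...
 *	ceph_mdsc_put_request(req);	// drop our ref on success or failure
 */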
3124
3125/*
3126 * Invalidate dir's completeness, dentry lease state on an aborted MDS
3127 * namespace request.
3128 */
3129void ceph_invalidate_dir_request(struct ceph_mds_request *req)
3130{
3131        struct inode *dir = req->r_parent;
3132        struct inode *old_dir = req->r_old_dentry_dir;
3133
3134        dout("invalidate_dir_request %p %p (complete, lease(s))\n", dir, old_dir);
3135
3136        ceph_dir_clear_complete(dir);
3137        if (old_dir)
3138                ceph_dir_clear_complete(old_dir);
3139        if (req->r_dentry)
3140                ceph_invalidate_dentry_lease(req->r_dentry);
3141        if (req->r_old_dentry)
3142                ceph_invalidate_dentry_lease(req->r_old_dentry);
3143}
3144
3145/*
3146 * Handle mds reply.
3147 *
3148 * We take the session mutex and parse and process the reply immediately.
3149 * This preserves the logical ordering of replies, capabilities, etc., sent
3150 * by the MDS as they are applied to our local cache.
3151 */
3152static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
3153{
3154        struct ceph_mds_client *mdsc = session->s_mdsc;
3155        struct ceph_mds_request *req;
3156        struct ceph_mds_reply_head *head = msg->front.iov_base;
3157        struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
3158        struct ceph_snap_realm *realm;
3159        u64 tid;
3160        int err, result;
3161        int mds = session->s_mds;
3162
3163        if (msg->front.iov_len < sizeof(*head)) {
3164                pr_err("mdsc_handle_reply got corrupt (short) reply\n");
3165                ceph_msg_dump(msg);
3166                return;
3167        }
3168
3169        /* get request, session */
3170        tid = le64_to_cpu(msg->hdr.tid);
3171        mutex_lock(&mdsc->mutex);
3172        req = lookup_get_request(mdsc, tid);
3173        if (!req) {
3174                dout("handle_reply on unknown tid %llu\n", tid);
3175                mutex_unlock(&mdsc->mutex);
3176                return;
3177        }
3178        dout("handle_reply %p\n", req);
3179
3180        /* correct session? */
3181        if (req->r_session != session) {
3182                pr_err("mdsc_handle_reply got %llu on session mds%d"
3183                       " not mds%d\n", tid, session->s_mds,
3184                       req->r_session ? req->r_session->s_mds : -1);
3185                mutex_unlock(&mdsc->mutex);
3186                goto out;
3187        }
3188
3189        /* dup? */
3190        if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
3191            (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
3192                pr_warn("got a dup %s reply on %llu from mds%d\n",
3193                           head->safe ? "safe" : "unsafe", tid, mds);
3194                mutex_unlock(&mdsc->mutex);
3195                goto out;
3196        }
3197        if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
3198                pr_warn("got unsafe after safe on %llu from mds%d\n",
3199                           tid, mds);
3200                mutex_unlock(&mdsc->mutex);
3201                goto out;
3202        }
3203
3204        result = le32_to_cpu(head->result);
3205
3206        /*
3207         * Handle an ESTALE:
3208         * - if we're not talking to the authority, send the request there
3209         * - if the authority has changed while we weren't looking,
3210         *   resend to the new authority
3211         * - otherwise we just have to return ESTALE
3212         */
3213        if (result == -ESTALE) {
3214                dout("got ESTALE on request %llu\n", req->r_tid);
3215                req->r_resend_mds = -1;
3216                if (req->r_direct_mode != USE_AUTH_MDS) {
3217                        dout("not using auth, setting for that now\n");
3218                        req->r_direct_mode = USE_AUTH_MDS;
3219                        __do_request(mdsc, req);
3220                        mutex_unlock(&mdsc->mutex);
3221                        goto out;
3222                } else {
3223                        int mds = __choose_mds(mdsc, req, NULL);
3224                        if (mds >= 0 && mds != req->r_session->s_mds) {
3225                                dout("but auth changed, so resending\n");
3226                                __do_request(mdsc, req);
3227                                mutex_unlock(&mdsc->mutex);
3228                                goto out;
3229                        }
3230                }
3231                dout("have to return ESTALE on request %llu\n", req->r_tid);
3232        }
3233
3234
3235        if (head->safe) {
3236                set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
3237                __unregister_request(mdsc, req);
3238
3239                /* last request during umount? */
3240                if (mdsc->stopping && !__get_oldest_req(mdsc))
3241                        complete_all(&mdsc->safe_umount_waiters);
3242
3243                if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3244                        /*
3245                         * We already handled the unsafe response, now do the
3246                         * cleanup.  No need to examine the response; the MDS
3247                         * doesn't include any result info in the safe
3248                         * response.  And even if it did, there is nothing
3249                         * useful we could do with a revised return value.
3250                         */
3251                        dout("got safe reply %llu, mds%d\n", tid, mds);
3252
3253                        mutex_unlock(&mdsc->mutex);
3254                        goto out;
3255                }
3256        } else {
3257                set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags);
3258                list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
3259        }
3260
3261        dout("handle_reply tid %lld result %d\n", tid, result);
3262        rinfo = &req->r_reply_info;
3263        if (test_bit(CEPHFS_FEATURE_REPLY_ENCODING, &session->s_features))
3264                err = parse_reply_info(session, msg, rinfo, (u64)-1);
3265        else
3266                err = parse_reply_info(session, msg, rinfo, session->s_con.peer_features);
3267        mutex_unlock(&mdsc->mutex);
3268
3269        /* Must find target inode outside of mutexes to avoid deadlocks */
3270        if ((err >= 0) && rinfo->head->is_target) {
3271                struct inode *in;
3272                struct ceph_vino tvino = {
3273                        .ino  = le64_to_cpu(rinfo->targeti.in->ino),
3274                        .snap = le64_to_cpu(rinfo->targeti.in->snapid)
3275                };
3276
3277                in = ceph_get_inode(mdsc->fsc->sb, tvino);
3278                if (IS_ERR(in)) {
3279                        err = PTR_ERR(in);
3280                        mutex_lock(&session->s_mutex);
3281                        goto out_err;
3282                }
3283                req->r_target_inode = in;
3284        }
3285
3286        mutex_lock(&session->s_mutex);
3287        if (err < 0) {
3288                pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
3289                ceph_msg_dump(msg);
3290                goto out_err;
3291        }
3292
3293        /* snap trace */
3294        realm = NULL;
3295        if (rinfo->snapblob_len) {
3296                down_write(&mdsc->snap_rwsem);
3297                ceph_update_snap_trace(mdsc, rinfo->snapblob,
3298                                rinfo->snapblob + rinfo->snapblob_len,
3299                                le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
3300                                &realm);
3301                downgrade_write(&mdsc->snap_rwsem);
3302        } else {
3303                down_read(&mdsc->snap_rwsem);
3304        }
3305
3306        /* insert trace into our cache */
3307        mutex_lock(&req->r_fill_mutex);
3308        current->journal_info = req;
3309        err = ceph_fill_trace(mdsc->fsc->sb, req);
3310        if (err == 0) {
3311                if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
3312                                    req->r_op == CEPH_MDS_OP_LSSNAP))
3313                        ceph_readdir_prepopulate(req, req->r_session);
3314        }
3315        current->journal_info = NULL;
3316        mutex_unlock(&req->r_fill_mutex);
3317
3318        up_read(&mdsc->snap_rwsem);
3319        if (realm)
3320                ceph_put_snap_realm(mdsc, realm);
3321
3322        if (err == 0) {
3323                if (req->r_target_inode &&
3324                    test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3325                        struct ceph_inode_info *ci =
3326                                ceph_inode(req->r_target_inode);
3327                        spin_lock(&ci->i_unsafe_lock);
3328                        list_add_tail(&req->r_unsafe_target_item,
3329                                      &ci->i_unsafe_iops);
3330                        spin_unlock(&ci->i_unsafe_lock);
3331                }
3332
3333                ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
3334        }
3335out_err:
3336        mutex_lock(&mdsc->mutex);
3337        if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
3338                if (err) {
3339                        req->r_err = err;
3340                } else {
3341                        req->r_reply = ceph_msg_get(msg);
3342                        set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
3343                }
3344        } else {
3345                dout("reply arrived after request %lld was aborted\n", tid);
3346        }
3347        mutex_unlock(&mdsc->mutex);
3348
3349        mutex_unlock(&session->s_mutex);
3350
3351        /* kick calling process */
3352        complete_request(mdsc, req);
3353
3354        ceph_update_metadata_metrics(&mdsc->metric, req->r_start_latency,
3355                                     req->r_end_latency, err);
3356out:
3357        ceph_mdsc_put_request(req);
3358        return;
3359}
3360
3361
3362
3363/*
3364 * handle mds notification that our request has been forwarded.
3365 */
3366static void handle_forward(struct ceph_mds_client *mdsc,
3367                           struct ceph_mds_session *session,
3368                           struct ceph_msg *msg)
3369{
3370        struct ceph_mds_request *req;
3371        u64 tid = le64_to_cpu(msg->hdr.tid);
3372        u32 next_mds;
3373        u32 fwd_seq;
3374        int err = -EINVAL;
3375        void *p = msg->front.iov_base;
3376        void *end = p + msg->front.iov_len;
3377
3378        ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3379        next_mds = ceph_decode_32(&p);
3380        fwd_seq = ceph_decode_32(&p);
3381
3382        mutex_lock(&mdsc->mutex);
3383        req = lookup_get_request(mdsc, tid);
3384        if (!req) {
3385                dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
3386                goto out;  /* dup reply? */
3387        }
3388
3389        if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
3390                dout("forward tid %llu aborted, unregistering\n", tid);
3391                __unregister_request(mdsc, req);
3392        } else if (fwd_seq <= req->r_num_fwd) {
3393                dout("forward tid %llu to mds%d - old seq %d <= %d\n",
3394                     tid, next_mds, req->r_num_fwd, fwd_seq);
3395        } else {
3396                /* resend. forward race not possible; mds would drop */
3397                dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
3398                BUG_ON(req->r_err);
3399                BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
3400                req->r_attempts = 0;
3401                req->r_num_fwd = fwd_seq;
3402                req->r_resend_mds = next_mds;
3403                put_request_session(req);
3404                __do_request(mdsc, req);
3405        }
3406        ceph_mdsc_put_request(req);
3407out:
3408        mutex_unlock(&mdsc->mutex);
3409        return;
3410
3411bad:
3412        pr_err("mdsc_handle_forward decode error err=%d\n", err);
3413}
3414
3415static int __decode_session_metadata(void **p, void *end,
3416                                     bool *blocklisted)
3417{
3418        /* map<string,string> */
3419        u32 n;
3420        bool err_str;
3421        ceph_decode_32_safe(p, end, n, bad);
3422        while (n-- > 0) {
3423                u32 len;
3424                ceph_decode_32_safe(p, end, len, bad);
3425                ceph_decode_need(p, end, len, bad);
3426                err_str = !strncmp(*p, "error_string", len);
3427                *p += len;
3428                ceph_decode_32_safe(p, end, len, bad);
3429                ceph_decode_need(p, end, len, bad);
3430                /*
3431                 * Match "blocklisted (blacklisted)" from newer MDSes,
3432                 * or "blacklisted" from older MDSes.
3433                 */
3434                if (err_str && strnstr(*p, "blacklisted", len))
3435                        *blocklisted = true;
3436                *p += len;
3437        }
3438        return 0;
3439bad:
3440        return -1;
3441}
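/*
 * For reference, the metadata blob decoded above is a standard ceph
 * map<string,string>; the layout below is reconstructed from the decoder
 * itself:
 *
 *	u32 n				// number of entries
 *	n * {
 *		u32 key_len; char key[key_len];
 *		u32 val_len; char val[val_len];
 *	}
 *
 * Only the "error_string" value is inspected, and only to see whether
 * the MDS reports this client as blocklisted.
 */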
3442
3443/*
3444 * handle a mds session control message
3445 */
3446static void handle_session(struct ceph_mds_session *session,
3447                           struct ceph_msg *msg)
3448{
3449        struct ceph_mds_client *mdsc = session->s_mdsc;
3450        int mds = session->s_mds;
3451        int msg_version = le16_to_cpu(msg->hdr.version);
3452        void *p = msg->front.iov_base;
3453        void *end = p + msg->front.iov_len;
3454        struct ceph_mds_session_head *h;
3455        u32 op;
3456        u64 seq, features = 0;
3457        int wake = 0;
3458        bool blocklisted = false;
3459
3460        /* decode */
3461        ceph_decode_need(&p, end, sizeof(*h), bad);
3462        h = p;
3463        p += sizeof(*h);
3464
3465        op = le32_to_cpu(h->op);
3466        seq = le64_to_cpu(h->seq);
3467
3468        if (msg_version >= 3) {
3469                u32 len;
3470                /* version >= 2, metadata */
3471                if (__decode_session_metadata(&p, end, &blocklisted) < 0)
3472                        goto bad;
3473                /* version >= 3, feature bits */
3474                ceph_decode_32_safe(&p, end, len, bad);
3475                if (len) {
3476                        ceph_decode_64_safe(&p, end, features, bad);
3477                        p += len - sizeof(features);
3478                }
3479        }
3480
3481        mutex_lock(&mdsc->mutex);
3482        if (op == CEPH_SESSION_CLOSE) {
3483                ceph_get_mds_session(session);
3484                __unregister_session(mdsc, session);
3485        }
3486        /* FIXME: this ttl calculation is generous */
3487        session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
3488        mutex_unlock(&mdsc->mutex);
3489
3490        mutex_lock(&session->s_mutex);
3491
3492        dout("handle_session mds%d %s %p state %s seq %llu\n",
3493             mds, ceph_session_op_name(op), session,
3494             ceph_session_state_name(session->s_state), seq);
3495
3496        if (session->s_state == CEPH_MDS_SESSION_HUNG) {
3497                session->s_state = CEPH_MDS_SESSION_OPEN;
3498                pr_info("mds%d came back\n", session->s_mds);
3499        }
3500
3501        switch (op) {
3502        case CEPH_SESSION_OPEN:
3503                if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
3504                        pr_info("mds%d reconnect success\n", session->s_mds);
3505                session->s_state = CEPH_MDS_SESSION_OPEN;
3506                session->s_features = features;
3507                renewed_caps(mdsc, session, 0);
3508                if (test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &session->s_features))
3509                        metric_schedule_delayed(&mdsc->metric);
3510                wake = 1;
3511                if (mdsc->stopping)
3512                        __close_session(mdsc, session);
3513                break;
3514
3515        case CEPH_SESSION_RENEWCAPS:
3516                if (session->s_renew_seq == seq)
3517                        renewed_caps(mdsc, session, 1);
3518                break;
3519
3520        case CEPH_SESSION_CLOSE:
3521                if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
3522                        pr_info("mds%d reconnect denied\n", session->s_mds);
3523                session->s_state = CEPH_MDS_SESSION_CLOSED;
3524                cleanup_session_requests(mdsc, session);
3525                remove_session_caps(session);
3526                wake = 2; /* for good measure */
3527                wake_up_all(&mdsc->session_close_wq);
3528                break;
3529
3530        case CEPH_SESSION_STALE:
3531                pr_info("mds%d caps went stale, renewing\n",
3532                        session->s_mds);
3533                atomic_inc(&session->s_cap_gen);
3534                session->s_cap_ttl = jiffies - 1;
3535                send_renew_caps(mdsc, session);
3536                break;
3537
3538        case CEPH_SESSION_RECALL_STATE:
3539                ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
3540                break;
3541
3542        case CEPH_SESSION_FLUSHMSG:
3543                send_flushmsg_ack(mdsc, session, seq);
3544                break;
3545
3546        case CEPH_SESSION_FORCE_RO:
3547                dout("force_session_readonly %p\n", session);
3548                spin_lock(&session->s_cap_lock);
3549                session->s_readonly = true;
3550                spin_unlock(&session->s_cap_lock);
3551                wake_up_session_caps(session, FORCE_RO);
3552                break;
3553
3554        case CEPH_SESSION_REJECT:
3555                WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
3556                pr_info("mds%d rejected session\n", session->s_mds);
3557                session->s_state = CEPH_MDS_SESSION_REJECTED;
3558                cleanup_session_requests(mdsc, session);
3559                remove_session_caps(session);
3560                if (blocklisted)
3561                        mdsc->fsc->blocklisted = true;
3562                wake = 2; /* for good measure */
3563                break;
3564
3565        default:
3566                pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
3567                WARN_ON(1);
3568        }
3569
3570        mutex_unlock(&session->s_mutex);
3571        if (wake) {
3572                mutex_lock(&mdsc->mutex);
3573                __wake_requests(mdsc, &session->s_waiting);
3574                if (wake == 2)
3575                        kick_requests(mdsc, mds);
3576                mutex_unlock(&mdsc->mutex);
3577        }
3578        if (op == CEPH_SESSION_CLOSE)
3579                ceph_put_mds_session(session);
3580        return;
3581
3582bad:
3583        pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
3584               (int)msg->front.iov_len);
3585        ceph_msg_dump(msg);
3586        return;
3587}
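/*
 * For reference, the session message body decoded above (v3 and later):
 *
 *	struct ceph_mds_session_head	// op, seq, ...
 *	map<string,string> metadata	// v2+, see __decode_session_metadata()
 *	u32 len + feature bits		// v3+, only the first 64 bits are kept
 */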
3588
3589void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req)
3590{
3591        int dcaps;
3592
3593        dcaps = xchg(&req->r_dir_caps, 0);
3594        if (dcaps) {
3595                dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
3596                ceph_put_cap_refs(ceph_inode(req->r_parent), dcaps);
3597        }
3598}
3599
3600void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req)
3601{
3602        int dcaps;
3603
3604        dcaps = xchg(&req->r_dir_caps, 0);
3605        if (dcaps) {
3606                dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
3607                ceph_put_cap_refs_no_check_caps(ceph_inode(req->r_parent),
3608                                                dcaps);
3609        }
3610}
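/*
 * Both helpers above consume r_dir_caps with xchg(), so the cap
 * references are put at most once even when a release can be attempted
 * from more than one path (normal completion vs. session replay, see
 * replay_unsafe_requests() below).
 */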
3611
3612/*
3613 * called under session->s_mutex.
3614 */
3615static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
3616                                   struct ceph_mds_session *session)
3617{
3618        struct ceph_mds_request *req, *nreq;
3619        struct rb_node *p;
3620
3621        dout("replay_unsafe_requests mds%d\n", session->s_mds);
3622
3623        mutex_lock(&mdsc->mutex);
3624        list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)
3625                __send_request(session, req, true);
3626
3627        /*
3628 * Also re-send old requests when the MDS enters the reconnect stage,
3629 * so that it can process completed requests in the clientreplay stage.
3630         */
3631        p = rb_first(&mdsc->request_tree);
3632        while (p) {
3633                req = rb_entry(p, struct ceph_mds_request, r_node);
3634                p = rb_next(p);
3635                if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
3636                        continue;
3637                if (req->r_attempts == 0)
3638                        continue; /* only old requests */
3639                if (!req->r_session)
3640                        continue;
3641                if (req->r_session->s_mds != session->s_mds)
3642                        continue;
3643
3644                ceph_mdsc_release_dir_caps_no_check(req);
3645
3646                __send_request(session, req, true);
3647        }
3648        mutex_unlock(&mdsc->mutex);
3649}
3650
3651static int send_reconnect_partial(struct ceph_reconnect_state *recon_state)
3652{
3653        struct ceph_msg *reply;
3654        struct ceph_pagelist *_pagelist;
3655        struct page *page;
3656        __le32 *addr;
3657        int err = -ENOMEM;
3658
3659        if (!recon_state->allow_multi)
3660                return -ENOSPC;
3661
3662        /* a partial message must carry caps or realms, never both (or neither) */
3663        BUG_ON(!recon_state->nr_caps == !recon_state->nr_realms);
3664
3665        /* pre-allocate new pagelist */
3666        _pagelist = ceph_pagelist_alloc(GFP_NOFS);
3667        if (!_pagelist)
3668                return -ENOMEM;
3669
3670        reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
3671        if (!reply)
3672                goto fail_msg;
3673
3674        /* placeholder for nr_caps */
3675        err = ceph_pagelist_encode_32(_pagelist, 0);
3676        if (err < 0)
3677                goto fail;
3678
3679        if (recon_state->nr_caps) {
3680                /* currently encoding caps: end this fragment with nr_realms = 0 */
3681                err = ceph_pagelist_encode_32(recon_state->pagelist, 0);
3682                if (err)
3683                        goto fail;
3684        } else {
3685                /* placeholder for nr_realms (currently encoding realms) */
3686                err = ceph_pagelist_encode_32(_pagelist, 0);
3687                if (err < 0)
3688                        goto fail;
3689        }
3690
            /* flag that another fragment follows (the v5 'more' byte) */
3691        err = ceph_pagelist_encode_8(recon_state->pagelist, 1);
3692        if (err)
3693                goto fail;
3694
3695        page = list_first_entry(&recon_state->pagelist->head, struct page, lru);
3696        addr = kmap_atomic(page);
3697        if (recon_state->nr_caps) {
3698                /* currently encoding caps */
3699                *addr = cpu_to_le32(recon_state->nr_caps);
3700        } else {
3701                /* currently encoding realms */
3702                *(addr + 1) = cpu_to_le32(recon_state->nr_realms);
3703        }
3704        kunmap_atomic(addr);
3705
3706        reply->hdr.version = cpu_to_le16(5);
3707        reply->hdr.compat_version = cpu_to_le16(4);
3708
3709        reply->hdr.data_len = cpu_to_le32(recon_state->pagelist->length);
3710        ceph_msg_data_add_pagelist(reply, recon_state->pagelist);
3711
3712        ceph_con_send(&recon_state->session->s_con, reply);
3713        ceph_pagelist_release(recon_state->pagelist);
3714
3715        recon_state->pagelist = _pagelist;
3716        recon_state->nr_caps = 0;
3717        recon_state->nr_realms = 0;
3718        recon_state->msg_version = 5;
3719        return 0;
3720fail:
3721        ceph_msg_put(reply);
3722fail_msg:
3723        ceph_pagelist_release(_pagelist);
3724        return err;
3725}
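/*
 * Putting the pieces together, each reconnect fragment built here and in
 * send_mds_reconnect() has the following rough shape (v5 framing,
 * reconstructed from the encoders in this file):
 *
 *	u32 nr_caps			// placeholder, patched before sending
 *	nr_caps * <cap record>		// see reconnect_caps_cb()
 *	u32 nr_realms			// 0 in pure-cap fragments
 *	nr_realms * <realm record>	// see encode_snap_realms()
 *	u8 more				// 1 here; 0 closes the final fragment
 */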
3726
3727static struct dentry* d_find_primary(struct inode *inode)
3728{
3729        struct dentry *alias, *dn = NULL;
3730
3731        if (hlist_empty(&inode->i_dentry))
3732                return NULL;
3733
3734        spin_lock(&inode->i_lock);
3735        if (hlist_empty(&inode->i_dentry))
3736                goto out_unlock;
3737
3738        if (S_ISDIR(inode->i_mode)) {
3739                alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
3740                if (!IS_ROOT(alias))
3741                        dn = dget(alias);
3742                goto out_unlock;
3743        }
3744
3745        hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
3746                spin_lock(&alias->d_lock);
3747                if (!d_unhashed(alias) &&
3748                    (ceph_dentry(alias)->flags & CEPH_DENTRY_PRIMARY_LINK)) {
3749                        dn = dget_dlock(alias);
3750                }
3751                spin_unlock(&alias->d_lock);
3752                if (dn)
3753                        break;
3754        }
3755out_unlock:
3756        spin_unlock(&inode->i_lock);
3757        return dn;
3758}
3759
3760/*
3761 * Encode information about a cap for a reconnect with the MDS.
3762 */
3763static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
3764                          void *arg)
3765{
3766        union {
3767                struct ceph_mds_cap_reconnect v2;
3768                struct ceph_mds_cap_reconnect_v1 v1;
3769        } rec;
3770        struct ceph_inode_info *ci = cap->ci;
3771        struct ceph_reconnect_state *recon_state = arg;
3772        struct ceph_pagelist *pagelist = recon_state->pagelist;
3773        struct dentry *dentry;
3774        char *path;
3775        int pathlen, err;
3776        u64 pathbase;
3777        u64 snap_follows;
3778
3779        dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
3780             inode, ceph_vinop(inode), cap, cap->cap_id,
3781             ceph_cap_string(cap->issued));
3782
3783        dentry = d_find_primary(inode);
3784        if (dentry) {
3785                /* set pathbase to parent dir when msg_version >= 2 */
3786                path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase,
3787                                            recon_state->msg_version >= 2);
3788                dput(dentry);
3789                if (IS_ERR(path)) {
3790                        err = PTR_ERR(path);
3791                        goto out_err;
3792                }
3793        } else {
3794                path = NULL;
3795                pathlen = 0;
3796                pathbase = 0;
3797        }
3798
3799        spin_lock(&ci->i_ceph_lock);
3800        cap->seq = 0;        /* reset cap seq */
3801        cap->issue_seq = 0;  /* and issue_seq */
3802        cap->mseq = 0;       /* and migrate_seq */
3803        cap->cap_gen = atomic_read(&cap->session->s_cap_gen);
3804
3805        /* These are lost when the session goes away */
3806        if (S_ISDIR(inode->i_mode)) {
3807                if (cap->issued & CEPH_CAP_DIR_CREATE) {
3808                        ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
3809                        memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
3810                }
3811                cap->issued &= ~CEPH_CAP_ANY_DIR_OPS;
3812        }
3813
3814        if (recon_state->msg_version >= 2) {
3815                rec.v2.cap_id = cpu_to_le64(cap->cap_id);
3816                rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
3817                rec.v2.issued = cpu_to_le32(cap->issued);
3818                rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
3819                rec.v2.pathbase = cpu_to_le64(pathbase);
3820                rec.v2.flock_len = (__force __le32)
3821                        ((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
3822        } else {
3823                rec.v1.cap_id = cpu_to_le64(cap->cap_id);
3824                rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
3825                rec.v1.issued = cpu_to_le32(cap->issued);
3826                rec.v1.size = cpu_to_le64(i_size_read(inode));
3827                ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime);
3828                ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime);
3829                rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
3830                rec.v1.pathbase = cpu_to_le64(pathbase);
3831        }
3832
3833        if (list_empty(&ci->i_cap_snaps)) {
3834                snap_follows = ci->i_head_snapc ? ci->i_head_snapc->seq : 0;
3835        } else {
3836                struct ceph_cap_snap *capsnap =
3837                        list_first_entry(&ci->i_cap_snaps,
3838                                         struct ceph_cap_snap, ci_item);
3839                snap_follows = capsnap->follows;
3840        }
3841        spin_unlock(&ci->i_ceph_lock);
3842
3843        if (recon_state->msg_version >= 2) {
3844                int num_fcntl_locks, num_flock_locks;
3845                struct ceph_filelock *flocks = NULL;
3846                size_t struct_len, total_len = sizeof(u64);
3847                u8 struct_v = 0;
3848
3849encode_again:
3850                if (rec.v2.flock_len) {
3851                        ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
3852                } else {
3853                        num_fcntl_locks = 0;
3854                        num_flock_locks = 0;
3855                }
3856                if (num_fcntl_locks + num_flock_locks > 0) {
3857                        flocks = kmalloc_array(num_fcntl_locks + num_flock_locks,
3858                                               sizeof(struct ceph_filelock),
3859                                               GFP_NOFS);
3860                        if (!flocks) {
3861                                err = -ENOMEM;
3862                                goto out_err;
3863                        }
3864                        err = ceph_encode_locks_to_buffer(inode, flocks,
3865                                                          num_fcntl_locks,
3866                                                          num_flock_locks);
3867                        if (err) {
3868                                kfree(flocks);
3869                                flocks = NULL;
3870                                if (err == -ENOSPC)
3871                                        goto encode_again;
3872                                goto out_err;
3873                        }
3874                } else {
3875                        kfree(flocks);
3876                        flocks = NULL;
3877                }
3878
3879                if (recon_state->msg_version >= 3) {
3880                        /* version, compat_version and struct_len */
3881                        total_len += 2 * sizeof(u8) + sizeof(u32);
3882                        struct_v = 2;
3883                }
3884                /*
3885                 * number of encoded locks is stable, so copy to pagelist
3886                 */
3887                struct_len = 2 * sizeof(u32) +
3888                            (num_fcntl_locks + num_flock_locks) *
3889                            sizeof(struct ceph_filelock);
3890                rec.v2.flock_len = cpu_to_le32(struct_len);
3891
3892                struct_len += sizeof(u32) + pathlen + sizeof(rec.v2);
3893
3894                if (struct_v >= 2)
3895                        struct_len += sizeof(u64); /* snap_follows */
3896
3897                total_len += struct_len;
3898
3899                if (pagelist->length + total_len > RECONNECT_MAX_SIZE) {
3900                        err = send_reconnect_partial(recon_state);
3901                        if (err)
3902                                goto out_freeflocks;
3903                        pagelist = recon_state->pagelist;
3904                }
3905
3906                err = ceph_pagelist_reserve(pagelist, total_len);
3907                if (err)
3908                        goto out_freeflocks;
3909
3910                ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
3911                if (recon_state->msg_version >= 3) {
3912                        ceph_pagelist_encode_8(pagelist, struct_v);
3913                        ceph_pagelist_encode_8(pagelist, 1);
3914                        ceph_pagelist_encode_32(pagelist, struct_len);
3915                }
3916                ceph_pagelist_encode_string(pagelist, path, pathlen);
3917                ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
3918                ceph_locks_to_pagelist(flocks, pagelist,
3919                                       num_fcntl_locks, num_flock_locks);
3920                if (struct_v >= 2)
3921                        ceph_pagelist_encode_64(pagelist, snap_follows);
3922out_freeflocks:
3923                kfree(flocks);
3924        } else {
3925                err = ceph_pagelist_reserve(pagelist,
3926                                            sizeof(u64) + sizeof(u32) +
3927                                            pathlen + sizeof(rec.v1));
3928                if (err)
3929                        goto out_err;
3930
3931                ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
3932                ceph_pagelist_encode_string(pagelist, path, pathlen);
3933                ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
3934        }
3935
3936out_err:
3937        ceph_mdsc_free_path(path, pathlen);
3938        if (!err)
3939                recon_state->nr_caps++;
3940        return err;
3941}
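/*
 * One cap record as emitted above, in its v3 (struct-versioned) form;
 * v2 omits the three header fields and v1 uses rec.v1 with no lock blob.
 * Layout reconstructed from the encoder:
 *
 *	u64 ino
 *	u8 struct_v, u8 compat = 1, u32 struct_len
 *	string path				// u32 length + bytes
 *	struct ceph_mds_cap_reconnect		// rec.v2, flock_len filled in
 *	u32 num_fcntl_locks + those locks	// the flock blob
 *	u32 num_flock_locks + those locks
 *	u64 snap_follows			// only when struct_v >= 2
 */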
3942
3943static int encode_snap_realms(struct ceph_mds_client *mdsc,
3944                              struct ceph_reconnect_state *recon_state)
3945{
3946        struct rb_node *p;
3947        struct ceph_pagelist *pagelist = recon_state->pagelist;
3948        int err = 0;
3949
3950        if (recon_state->msg_version >= 4) {
3951                err = ceph_pagelist_encode_32(pagelist, mdsc->num_snap_realms);
3952                if (err < 0)
3953                        goto fail;
3954        }
3955
3956        /*
3957         * Snap realms: we provide the MDS with the ino, seq (version),
3958         * and parent for all of our realms.  If the MDS has any newer
3959         * info, it will tell us.
3960         */
3961        for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
3962                struct ceph_snap_realm *realm =
3963                       rb_entry(p, struct ceph_snap_realm, node);
3964                struct ceph_mds_snaprealm_reconnect sr_rec;
3965
3966                if (recon_state->msg_version >= 4) {
3967                        size_t need = sizeof(u8) * 2 + sizeof(u32) +
3968                                      sizeof(sr_rec);
3969
3970                        if (pagelist->length + need > RECONNECT_MAX_SIZE) {
3971                                err = send_reconnect_partial(recon_state);
3972                                if (err)
3973                                        goto fail;
3974                                pagelist = recon_state->pagelist;
3975                        }
3976
3977                        err = ceph_pagelist_reserve(pagelist, need);
3978                        if (err)
3979                                goto fail;
3980
3981                        ceph_pagelist_encode_8(pagelist, 1);
3982                        ceph_pagelist_encode_8(pagelist, 1);
3983                        ceph_pagelist_encode_32(pagelist, sizeof(sr_rec));
3984                }
3985
3986                dout(" adding snap realm %llx seq %lld parent %llx\n",
3987                     realm->ino, realm->seq, realm->parent_ino);
3988                sr_rec.ino = cpu_to_le64(realm->ino);
3989                sr_rec.seq = cpu_to_le64(realm->seq);
3990                sr_rec.parent = cpu_to_le64(realm->parent_ino);
3991
3992                err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
3993                if (err)
3994                        goto fail;
3995
3996                recon_state->nr_realms++;
3997        }
3998fail:
3999        return err;
4000}
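/*
 * One realm record as emitted above; v4+ prefixes the versioning header,
 * older encodings send the bare struct:
 *
 *	u8 struct_v = 1, u8 compat = 1, u32 len = sizeof(sr_rec)
 *	struct ceph_mds_snaprealm_reconnect	// ino, seq, parent
 */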
4001
4002
4003/*
4004 * If an MDS fails and recovers, clients need to reconnect in order to
4005 * reestablish shared state.  This includes all caps issued through
4006 * this session _and_ the snap_realm hierarchy.  Because it's not
4007 * clear which snap realms the mds cares about, we send everything we
4008 * know about; that ensures we'll then get any new info the
4009 * recovering MDS might have.
4010 *
4011 * This is a relatively heavyweight operation, but it's rare.
4012 */
4013static void send_mds_reconnect(struct ceph_mds_client *mdsc,
4014                               struct ceph_mds_session *session)
4015{
4016        struct ceph_msg *reply;
4017        int mds = session->s_mds;
4018        int err = -ENOMEM;
4019        struct ceph_reconnect_state recon_state = {
4020                .session = session,
4021        };
4022        LIST_HEAD(dispose);
4023
4024        pr_info("mds%d reconnect start\n", mds);
4025
4026        recon_state.pagelist = ceph_pagelist_alloc(GFP_NOFS);
4027        if (!recon_state.pagelist)
4028                goto fail_nopagelist;
4029
4030        reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
4031        if (!reply)
4032                goto fail_nomsg;
4033
4034        xa_destroy(&session->s_delegated_inos);
4035
4036        mutex_lock(&session->s_mutex);
4037        session->s_state = CEPH_MDS_SESSION_RECONNECTING;
4038        session->s_seq = 0;
4039
4040        dout("session %p state %s\n", session,
4041             ceph_session_state_name(session->s_state));
4042
4043        atomic_inc(&session->s_cap_gen);
4044
4045        spin_lock(&session->s_cap_lock);
4046        /* don't know if session is readonly */
4047        session->s_readonly = 0;
4048        /*
4049         * notify __ceph_remove_cap() that we are composing cap reconnect.
4050         * If a cap get released before being added to the cap reconnect,
4051         * __ceph_remove_cap() should skip queuing cap release.
4052         */
4053        session->s_cap_reconnect = 1;
4054        /* drop old cap expires; we're about to reestablish that state */
4055        detach_cap_releases(session, &dispose);
4056        spin_unlock(&session->s_cap_lock);
4057        dispose_cap_releases(mdsc, &dispose);
4058
4059        /* trim unused caps to reduce MDS's cache rejoin time */
4060        if (mdsc->fsc->sb->s_root)
4061                shrink_dcache_parent(mdsc->fsc->sb->s_root);
4062
4063        ceph_con_close(&session->s_con);
4064        ceph_con_open(&session->s_con,
4065                      CEPH_ENTITY_TYPE_MDS, mds,
4066                      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
4067
4068        /* replay unsafe requests */
4069        replay_unsafe_requests(mdsc, session);
4070
4071        ceph_early_kick_flushing_caps(mdsc, session);
4072
4073        down_read(&mdsc->snap_rwsem);
4074
4075        /* placeholder for nr_caps */
4076        err = ceph_pagelist_encode_32(recon_state.pagelist, 0);
4077        if (err)
4078                goto fail;
4079
4080        if (test_bit(CEPHFS_FEATURE_MULTI_RECONNECT, &session->s_features)) {
4081                recon_state.msg_version = 3;
4082                recon_state.allow_multi = true;
4083        } else if (session->s_con.peer_features & CEPH_FEATURE_MDSENC) {
4084                recon_state.msg_version = 3;
4085        } else {
4086                recon_state.msg_version = 2;
4087        }
4088        /* traverse this session's caps */
4089        err = ceph_iterate_session_caps(session, reconnect_caps_cb, &recon_state);
4090
4091        spin_lock(&session->s_cap_lock);
4092        session->s_cap_reconnect = 0;
4093        spin_unlock(&session->s_cap_lock);
4094
4095        if (err < 0)
4096                goto fail;
4097
4098        /* check if all realms can be encoded into current message */
4099        if (mdsc->num_snap_realms) {
4100                size_t total_len =
4101                        recon_state.pagelist->length +
4102                        mdsc->num_snap_realms *
4103                        sizeof(struct ceph_mds_snaprealm_reconnect);
4104                if (recon_state.msg_version >= 4) {
4105                        /* number of realms */
4106                        total_len += sizeof(u32);
4107                        /* version, compat_version and struct_len */
4108                        total_len += mdsc->num_snap_realms *
4109                                     (2 * sizeof(u8) + sizeof(u32));
4110                }
4111                if (total_len > RECONNECT_MAX_SIZE) {
4112                        if (!recon_state.allow_multi) {
4113                                err = -ENOSPC;
4114                                goto fail;
4115                        }
4116                        if (recon_state.nr_caps) {
4117                                err = send_reconnect_partial(&recon_state);
4118                                if (err)
4119                                        goto fail;
4120                        }
4121                        recon_state.msg_version = 5;
4122                }
4123        }
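        /*
         * Worked example of the estimate above, assuming v4+ framing and
         * sizeof(sr_rec) == 24 (three __le64s): 1000 realms cost
         * 4 + 1000 * (24 + 2 + 4) = 30004 bytes on top of whatever the
         * cap records already consumed.
         */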
4124
4125        err = encode_snap_realms(mdsc, &recon_state);
4126        if (err < 0)
4127                goto fail;
4128
4129        if (recon_state.msg_version >= 5) {
4130                err = ceph_pagelist_encode_8(recon_state.pagelist, 0);
4131                if (err < 0)
4132                        goto fail;
4133        }
4134
4135        if (recon_state.nr_caps || recon_state.nr_realms) {
4136                struct page *page =
4137                        list_first_entry(&recon_state.pagelist->head,
4138                                        struct page, lru);
4139                __le32 *addr = kmap_atomic(page);
4140                if (recon_state.nr_caps) {
4141                        WARN_ON(recon_state.nr_realms != mdsc->num_snap_realms);
4142                        *addr = cpu_to_le32(recon_state.nr_caps);
4143                } else if (recon_state.msg_version >= 4) {
4144                        *(addr + 1) = cpu_to_le32(recon_state.nr_realms);
4145                }
4146                kunmap_atomic(addr);
4147        }
4148
4149        reply->hdr.version = cpu_to_le16(recon_state.msg_version);
4150        if (recon_state.msg_version >= 4)
4151                reply->hdr.compat_version = cpu_to_le16(4);
4152
4153        reply->hdr.data_len = cpu_to_le32(recon_state.pagelist->length);
4154        ceph_msg_data_add_pagelist(reply, recon_state.pagelist);
4155
4156        ceph_con_send(&session->s_con, reply);
4157
4158        mutex_unlock(&session->s_mutex);
4159
4160        mutex_lock(&mdsc->mutex);
4161        __wake_requests(mdsc, &session->s_waiting);
4162        mutex_unlock(&mdsc->mutex);
4163
4164        up_read(&mdsc->snap_rwsem);
4165        ceph_pagelist_release(recon_state.pagelist);
4166        return;
4167
4168fail:
4169        ceph_msg_put(reply);
4170        up_read(&mdsc->snap_rwsem);
4171        mutex_unlock(&session->s_mutex);
4172fail_nomsg:
4173        ceph_pagelist_release(recon_state.pagelist);
4174fail_nopagelist:
4175        pr_err("error %d preparing reconnect for mds%d\n", err, mds);
4176        return;
4177}
4178
4179
4180/*
4181 * compare old and new mdsmaps, kicking requests
4182 * and closing out old connections as necessary
4183 *
4184 * called under mdsc->mutex.
4185 */
4186static void check_new_map(struct ceph_mds_client *mdsc,
4187                          struct ceph_mdsmap *newmap,
4188                          struct ceph_mdsmap *oldmap)
4189{
4190        int i, j, err;
4191        int oldstate, newstate;
4192        struct ceph_mds_session *s;
4193        DECLARE_BITMAP(targets, CEPH_MAX_MDS) = {0};
4194
4195        dout("check_new_map new %u old %u\n",
4196             newmap->m_epoch, oldmap->m_epoch);
4197
4198        if (newmap->m_info) {
4199                for (i = 0; i < newmap->possible_max_rank; i++) {
4200                        for (j = 0; j < newmap->m_info[i].num_export_targets; j++)
4201                                set_bit(newmap->m_info[i].export_targets[j], targets);
4202                }
4203        }
4204
4205        for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
4206                if (!mdsc->sessions[i])
4207                        continue;
4208                s = mdsc->sessions[i];
4209                oldstate = ceph_mdsmap_get_state(oldmap, i);
4210                newstate = ceph_mdsmap_get_state(newmap, i);
4211
4212                dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
4213                     i, ceph_mds_state_name(oldstate),
4214                     ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
4215                     ceph_mds_state_name(newstate),
4216                     ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
4217                     ceph_session_state_name(s->s_state));
4218
4219                if (i >= newmap->possible_max_rank) {
4220                        /* force close session for stopped mds */
4221                        ceph_get_mds_session(s);
4222                        __unregister_session(mdsc, s);
4223                        __wake_requests(mdsc, &s->s_waiting);
4224                        mutex_unlock(&mdsc->mutex);
4225
4226                        mutex_lock(&s->s_mutex);
4227                        cleanup_session_requests(mdsc, s);
4228                        remove_session_caps(s);
4229                        mutex_unlock(&s->s_mutex);
4230
4231                        ceph_put_mds_session(s);
4232
4233                        mutex_lock(&mdsc->mutex);
4234                        kick_requests(mdsc, i);
4235                        continue;
4236                }
4237
4238                if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
4239                           ceph_mdsmap_get_addr(newmap, i),
4240                           sizeof(struct ceph_entity_addr))) {
4241                        /* just close it */
4242                        mutex_unlock(&mdsc->mutex);
4243                        mutex_lock(&s->s_mutex);
4244                        mutex_lock(&mdsc->mutex);
4245                        ceph_con_close(&s->s_con);
4246                        mutex_unlock(&s->s_mutex);
4247                        s->s_state = CEPH_MDS_SESSION_RESTARTING;
4248                } else if (oldstate == newstate) {
4249                        continue;  /* nothing new with this mds */
4250                }
4251
4252                /*
4253                 * send reconnect?
4254                 */
4255                if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
4256                    newstate >= CEPH_MDS_STATE_RECONNECT) {
4257                        mutex_unlock(&mdsc->mutex);
4258                        clear_bit(i, targets);
4259                        send_mds_reconnect(mdsc, s);
4260                        mutex_lock(&mdsc->mutex);
4261                }
4262
4263                /*
4264                 * kick request on any mds that has gone active.
4265                 */
4266                if (oldstate < CEPH_MDS_STATE_ACTIVE &&
4267                    newstate >= CEPH_MDS_STATE_ACTIVE) {
4268                        if (oldstate != CEPH_MDS_STATE_CREATING &&
4269                            oldstate != CEPH_MDS_STATE_STARTING)
4270                                pr_info("mds%d recovery completed\n", s->s_mds);
4271                        kick_requests(mdsc, i);
4272                        mutex_unlock(&mdsc->mutex);
4273                        mutex_lock(&s->s_mutex);
4274                        mutex_lock(&mdsc->mutex);
4275                        ceph_kick_flushing_caps(mdsc, s);
4276                        mutex_unlock(&s->s_mutex);
4277                        wake_up_session_caps(s, RECONNECT);
4278                }
4279        }
4280
4281        /*
4282         * Only open and reconnect sessions that don't exist yet.
4283         */
4284        for (i = 0; i < newmap->possible_max_rank; i++) {
4285                /*
4286                 * The import MDS may crash just after flushing the
4287                 * EImportStart journal entry.  When a standby MDS
4288                 * takes over and replays that entry, the new MDS
4289                 * daemon will wait for the client to reconnect, but
4290                 * the client may never have registered/opened the
4291                 * session at all.
4292                 *
4293                 * So try to reconnect to an MDS whose rank appears
4294                 * in the export targets array and which is in the
4295                 * up:reconnect state.
4296                 */
4297                newstate = ceph_mdsmap_get_state(newmap, i);
4298                if (!test_bit(i, targets) || newstate != CEPH_MDS_STATE_RECONNECT)
4299                        continue;
4300
4301                /*
4302                 * In rare cases the session may already have been
4303                 * registered and opened by requests that picked a
4304                 * random MDS during the mdsc->mutex unlock/lock gap
4305                 * below.  But the MDS daemon will just queue those
4306                 * requests and keep waiting for the client's
4307                 * reconnection request in the up:reconnect state.
4308                 */
4309                s = __ceph_lookup_mds_session(mdsc, i);
4310                if (likely(!s)) {
4311                        s = __open_export_target_session(mdsc, i);
4312                        if (IS_ERR(s)) {
4313                                err = PTR_ERR(s);
4314                                pr_err("failed to open export target session, err %d\n",
4315                                       err);
4316                                continue;
4317                        }
4318                }
4319                dout("send reconnect to export target mds.%d\n", i);
4320                mutex_unlock(&mdsc->mutex);
4321                send_mds_reconnect(mdsc, s);
4322                ceph_put_mds_session(s);
4323                mutex_lock(&mdsc->mutex);
4324        }
4325
4326        for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
4327                s = mdsc->sessions[i];
4328                if (!s)
4329                        continue;
4330                if (!ceph_mdsmap_is_laggy(newmap, i))
4331                        continue;
4332                if (s->s_state == CEPH_MDS_SESSION_OPEN ||
4333                    s->s_state == CEPH_MDS_SESSION_HUNG ||
4334                    s->s_state == CEPH_MDS_SESSION_CLOSING) {
4335                        dout(" connecting to export targets of laggy mds%d\n",
4336                             i);
4337                        __open_export_target_sessions(mdsc, s);
4338                }
4339        }
4340}
4341
4342
4343
4344/*
4345 * leases
4346 */
4347
4348/*
4349 * caller must hold session s_mutex, dentry->d_lock
4350 */
4351void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
4352{
4353        struct ceph_dentry_info *di = ceph_dentry(dentry);
4354
4355        ceph_put_mds_session(di->lease_session);
4356        di->lease_session = NULL;
4357}
4358
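/*
 * Handle a CEPH_MSG_CLIENT_LEASE message: the payload is a struct
 * ceph_mds_lease followed by a __le32 name length and the dentry name
 * bytes.  Look up the inode and dentry it names, then either drop the
 * dentry lease (REVOKE) or extend its expiry (RENEW).  Revocations
 * (and lookups that fail) are acknowledged by reusing the incoming
 * message as a REVOKE_ACK.
 */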
4359static void handle_lease(struct ceph_mds_client *mdsc,
4360                         struct ceph_mds_session *session,
4361                         struct ceph_msg *msg)
4362{
4363        struct super_block *sb = mdsc->fsc->sb;
4364        struct inode *inode;
4365        struct dentry *parent, *dentry;
4366        struct ceph_dentry_info *di;
4367        int mds = session->s_mds;
4368        struct ceph_mds_lease *h = msg->front.iov_base;
4369        u32 seq;
4370        struct ceph_vino vino;
4371        struct qstr dname;
4372        int release = 0;
4373
4374        dout("handle_lease from mds%d\n", mds);
4375
4376        /* decode */
4377        if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
4378                goto bad;
4379        vino.ino = le64_to_cpu(h->ino);
4380        vino.snap = CEPH_NOSNAP;
4381        seq = le32_to_cpu(h->seq);
4382        dname.len = get_unaligned_le32(h + 1);
4383        if (msg->front.iov_len < sizeof(*h) + sizeof(u32) + dname.len)
4384                goto bad;
4385        dname.name = (void *)(h + 1) + sizeof(u32);
4386
4387        /* lookup inode */
4388        inode = ceph_find_inode(sb, vino);
4389        dout("handle_lease %s, ino %llx %p %.*s\n",
4390             ceph_lease_op_name(h->action), vino.ino, inode,
4391             dname.len, dname.name);
4392
4393        mutex_lock(&session->s_mutex);
4394        inc_session_sequence(session);
4395
4396        if (!inode) {
4397                dout("handle_lease no inode %llx\n", vino.ino);
4398                goto release;
4399        }
4400
4401        /* dentry */
4402        parent = d_find_alias(inode);
4403        if (!parent) {
4404                dout("no parent dentry on inode %p\n", inode);
4405                WARN_ON(1);
4406                goto release;  /* hrm... */
4407        }
4408        dname.hash = full_name_hash(parent, dname.name, dname.len);
4409        dentry = d_lookup(parent, &dname);
4410        dput(parent);
4411        if (!dentry)
4412                goto release;
4413
4414        spin_lock(&dentry->d_lock);
4415        di = ceph_dentry(dentry);
4416        switch (h->action) {
4417        case CEPH_MDS_LEASE_REVOKE:
4418                if (di->lease_session == session) {
4419                        if (ceph_seq_cmp(di->lease_seq, seq) > 0)
4420                                h->seq = cpu_to_le32(di->lease_seq);
4421                        __ceph_mdsc_drop_dentry_lease(dentry);
4422                }
4423                release = 1;
4424                break;
4425
4426        case CEPH_MDS_LEASE_RENEW:
4427                if (di->lease_session == session &&
4428                    di->lease_gen == atomic_read(&session->s_cap_gen) &&
4429                    di->lease_renew_from &&
4430                    di->lease_renew_after == 0) {
4431                        unsigned long duration =
4432                                msecs_to_jiffies(le32_to_cpu(h->duration_ms));
4433
4434                        di->lease_seq = seq;
4435                        di->time = di->lease_renew_from + duration;
4436                        di->lease_renew_after = di->lease_renew_from +
4437                                (duration >> 1);
4438                        di->lease_renew_from = 0;
4439                }
4440                break;
4441        }
4442        spin_unlock(&dentry->d_lock);
4443        dput(dentry);
4444
4445        if (!release)
4446                goto out;
4447
4448release:
4449        /* let's just reuse the same message */
4450        h->action = CEPH_MDS_LEASE_REVOKE_ACK;
4451        ceph_msg_get(msg);
4452        ceph_con_send(&session->s_con, msg);
4453
4454out:
4455        mutex_unlock(&session->s_mutex);
4456        iput(inode);
4457        return;
4458
4459bad:
4460        pr_err("corrupt lease message\n");
4461        ceph_msg_dump(msg);
4462}
4463
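/*
 * Build and send a CEPH_MSG_CLIENT_LEASE message for a dentry: the
 * parent directory's ino/snap, the requested action and seq, plus the
 * dentry name, encoded in the same layout that handle_lease() decodes.
 */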
4464void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
4465                              struct dentry *dentry, char action,
4466                              u32 seq)
4467{
4468        struct ceph_msg *msg;
4469        struct ceph_mds_lease *lease;
4470        struct inode *dir;
4471        int len = sizeof(*lease) + sizeof(u32) + NAME_MAX;
4472
4473        dout("lease_send_msg identry %p %s to mds%d\n",
4474             dentry, ceph_lease_op_name(action), session->s_mds);
4475
4476        msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
4477        if (!msg)
4478                return;
4479        lease = msg->front.iov_base;
4480        lease->action = action;
4481        lease->seq = cpu_to_le32(seq);
4482
4483        spin_lock(&dentry->d_lock);
4484        dir = d_inode(dentry->d_parent);
4485        lease->ino = cpu_to_le64(ceph_ino(dir));
4486        lease->first = lease->last = cpu_to_le64(ceph_snap(dir));
4487
4488        put_unaligned_le32(dentry->d_name.len, lease + 1);
4489        memcpy((void *)(lease + 1) + 4,
4490               dentry->d_name.name, dentry->d_name.len);
4491        spin_unlock(&dentry->d_lock);
4492        /*
4493         * if this is a preemptive lease RELEASE, no need to
4494         * flush request stream, since the actual request will
4495         * soon follow.
4496         */
4497        msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
4498
4499        ceph_con_send(&session->s_con, msg);
4500}
4501
4502/*
4503 * Lock and then unlock the session, to wait for ongoing session activity.
4504 */
4505static void lock_unlock_session(struct ceph_mds_session *s)
4506{
4507        mutex_lock(&s->s_mutex);
4508        mutex_unlock(&s->s_mutex);
4509}
4510
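/*
 * If the "clean recover" mount option is enabled and a mounted client
 * finds itself blocklisted, force a reconnect rather than leaving the
 * mount wedged.
 */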
4511static void maybe_recover_session(struct ceph_mds_client *mdsc)
4512{
4513        struct ceph_fs_client *fsc = mdsc->fsc;
4514
4515        if (!ceph_test_mount_opt(fsc, CLEANRECOVER))
4516                return;
4517
4518        if (READ_ONCE(fsc->mount_state) != CEPH_MOUNT_MOUNTED)
4519                return;
4520
4521        if (!READ_ONCE(fsc->blocklisted))
4522                return;
4523
4524        pr_info("auto reconnect after blocklisted\n");
4525        ceph_force_reconnect(fsc->sb);
4526}
4527
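/*
 * Decide whether a session is still worth ticking over: an OPEN
 * session whose TTL has lapsed is marked HUNG, while NEW, CLOSING,
 * CLOSED, RESTARTING and REJECTED sessions report false so that
 * callers skip them.
 */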
4528bool check_session_state(struct ceph_mds_session *s)
4529{
4530        struct ceph_fs_client *fsc = s->s_mdsc->fsc;
4531
4532        switch (s->s_state) {
4533        case CEPH_MDS_SESSION_OPEN:
4534                if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
4535                        s->s_state = CEPH_MDS_SESSION_HUNG;
4536                        pr_info("mds%d hung\n", s->s_mds);
4537                }
4538                break;
4539        case CEPH_MDS_SESSION_CLOSING:
4540                /* Should never reach this when not force unmounting */
4541                WARN_ON_ONCE(s->s_ttl &&
4542                             READ_ONCE(fsc->mount_state) != CEPH_MOUNT_SHUTDOWN);
4543                fallthrough;
4544        case CEPH_MDS_SESSION_NEW:
4545        case CEPH_MDS_SESSION_RESTARTING:
4546        case CEPH_MDS_SESSION_CLOSED:
4547        case CEPH_MDS_SESSION_REJECTED:
4548                return false;
4549        }
4550
4551        return true;
4552}
4553
4554/*
4555 * If the sequence is incremented while we're waiting on a REQUEST_CLOSE reply,
4556 * then we need to retransmit that request.
4557 */
4558void inc_session_sequence(struct ceph_mds_session *s)
4559{
4560        lockdep_assert_held(&s->s_mutex);
4561
4562        s->s_seq++;
4563
4564        if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
4565                int ret;
4566
4567                dout("resending session close request for mds%d\n", s->s_mds);
4568                ret = request_close_session(s);
4569                if (ret < 0)
4570                        pr_err("unable to close session to mds%d: %d\n",
4571                               s->s_mds, ret);
4572        }
4573}
4574
4575/*
4576 * delayed work -- periodically trim expired leases, renew caps with mds.  If
4577 * the @delay parameter is set to 0 or if it's more than 5 secs, the default
4578 * workqueue delay value of 5 secs will be used.
4579 */
4580static void schedule_delayed(struct ceph_mds_client *mdsc, unsigned long delay)
4581{
4582        unsigned long max_delay = HZ * 5;
4583
4584        /* 5 secs default delay */
4585        if (!delay || (delay > max_delay))
4586                delay = max_delay;
4587        schedule_delayed_work(&mdsc->delayed_work,
4588                              round_jiffies_relative(delay));
4589}
4590
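/*
 * The delayed work body: periodically renew caps with each live MDS
 * (or just send a keepalive), ship any queued cap releases, and kick
 * the delayed-cap, cap-reclaim and snapid-trim machinery before
 * re-arming itself via schedule_delayed().
 */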
4591static void delayed_work(struct work_struct *work)
4592{
4593        struct ceph_mds_client *mdsc =
4594                container_of(work, struct ceph_mds_client, delayed_work.work);
4595        unsigned long delay;
4596        int renew_interval;
4597        int renew_caps;
4598        int i;
4599
4600        dout("mdsc delayed_work\n");
4601
4602        if (mdsc->stopping)
4603                return;
4604
4605        mutex_lock(&mdsc->mutex);
4606        renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
4607        renew_caps = time_after_eq(jiffies, HZ*renew_interval +
4608                                   mdsc->last_renew_caps);
4609        if (renew_caps)
4610                mdsc->last_renew_caps = jiffies;
4611
4612        for (i = 0; i < mdsc->max_sessions; i++) {
4613                struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
4614                if (!s)
4615                        continue;
4616
4617                if (!check_session_state(s)) {
4618                        ceph_put_mds_session(s);
4619                        continue;
4620                }
4621                mutex_unlock(&mdsc->mutex);
4622
4623                mutex_lock(&s->s_mutex);
4624                if (renew_caps)
4625                        send_renew_caps(mdsc, s);
4626                else
4627                        ceph_con_keepalive(&s->s_con);
4628                if (s->s_state == CEPH_MDS_SESSION_OPEN ||
4629                    s->s_state == CEPH_MDS_SESSION_HUNG)
4630                        ceph_send_cap_releases(mdsc, s);
4631                mutex_unlock(&s->s_mutex);
4632                ceph_put_mds_session(s);
4633
4634                mutex_lock(&mdsc->mutex);
4635        }
4636        mutex_unlock(&mdsc->mutex);
4637
4638        delay = ceph_check_delayed_caps(mdsc);
4639
4640        ceph_queue_cap_reclaim_work(mdsc);
4641
4642        ceph_trim_snapid_map(mdsc);
4643
4644        maybe_recover_session(mdsc);
4645
4646        schedule_delayed(mdsc, delay);
4647}
4648
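/*
 * Allocate and initialize the per-mount MDS client: its locks, trees
 * and lists, metrics, capability pools and the delayed work timer.
 */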
4649int ceph_mdsc_init(struct ceph_fs_client *fsc)
4650{
4652        struct ceph_mds_client *mdsc;
4653        int err;
4654
4655        mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
4656        if (!mdsc)
4657                return -ENOMEM;
4658        mdsc->fsc = fsc;
4659        mutex_init(&mdsc->mutex);
4660        mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
4661        if (!mdsc->mdsmap) {
4662                err = -ENOMEM;
4663                goto err_mdsc;
4664        }
4665
4666        init_completion(&mdsc->safe_umount_waiters);
4667        init_waitqueue_head(&mdsc->session_close_wq);
4668        INIT_LIST_HEAD(&mdsc->waiting_for_map);
4669        mdsc->quotarealms_inodes = RB_ROOT;
4670        mutex_init(&mdsc->quotarealms_inodes_mutex);
4671        init_rwsem(&mdsc->snap_rwsem);
4672        mdsc->snap_realms = RB_ROOT;
4673        INIT_LIST_HEAD(&mdsc->snap_empty);
4674        spin_lock_init(&mdsc->snap_empty_lock);
4675        mdsc->request_tree = RB_ROOT;
4676        INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
4677        mdsc->last_renew_caps = jiffies;
4678        INIT_LIST_HEAD(&mdsc->cap_delay_list);
4679        INIT_LIST_HEAD(&mdsc->cap_wait_list);
4680        spin_lock_init(&mdsc->cap_delay_lock);
4681        INIT_LIST_HEAD(&mdsc->snap_flush_list);
4682        spin_lock_init(&mdsc->snap_flush_lock);
4683        mdsc->last_cap_flush_tid = 1;
4684        INIT_LIST_HEAD(&mdsc->cap_flush_list);
4685        INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
4686        spin_lock_init(&mdsc->cap_dirty_lock);
4687        init_waitqueue_head(&mdsc->cap_flushing_wq);
4688        INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
4689        err = ceph_metric_init(&mdsc->metric);
4690        if (err)
4691                goto err_mdsmap;
4692
4693        spin_lock_init(&mdsc->dentry_list_lock);
4694        INIT_LIST_HEAD(&mdsc->dentry_leases);
4695        INIT_LIST_HEAD(&mdsc->dentry_dir_leases);
4696
4697        ceph_caps_init(mdsc);
4698        ceph_adjust_caps_max_min(mdsc, fsc->mount_options);
4699
4700        spin_lock_init(&mdsc->snapid_map_lock);
4701        mdsc->snapid_map_tree = RB_ROOT;
4702        INIT_LIST_HEAD(&mdsc->snapid_map_lru);
4703
4704        init_rwsem(&mdsc->pool_perm_rwsem);
4705        mdsc->pool_perm_tree = RB_ROOT;
4706
4707        strscpy(mdsc->nodename, utsname()->nodename,
4708                sizeof(mdsc->nodename));
4709
4710        fsc->mdsc = mdsc;
4711        return 0;
4712
4713err_mdsmap:
4714        kfree(mdsc->mdsmap);
4715err_mdsc:
4716        kfree(mdsc);
4717        return err;
4718}
4719
4720/*
4721 * Wait for safe replies on open mds requests.  If we time out, drop
4722 * all requests from the tree to avoid dangling dentry refs.
4723 */
4724static void wait_requests(struct ceph_mds_client *mdsc)
4725{
4726        struct ceph_options *opts = mdsc->fsc->client->options;
4727        struct ceph_mds_request *req;
4728
4729        mutex_lock(&mdsc->mutex);
4730        if (__get_oldest_req(mdsc)) {
4731                mutex_unlock(&mdsc->mutex);
4732
4733                dout("wait_requests waiting for requests\n");
4734                wait_for_completion_timeout(&mdsc->safe_umount_waiters,
4735                                    ceph_timeout_jiffies(opts->mount_timeout));
4736
4737                /* tear down remaining requests */
4738                mutex_lock(&mdsc->mutex);
4739                while ((req = __get_oldest_req(mdsc))) {
4740                        dout("wait_requests timed out on tid %llu\n",
4741                             req->r_tid);
4742                        list_del_init(&req->r_wait);
4743                        __unregister_request(mdsc, req);
4744                }
4745        }
4746        mutex_unlock(&mdsc->mutex);
4747        dout("wait_requests done\n");
4748}
4749
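/*
 * Ask an MDS to flush its journal (mdlog).  Used at pre-umount time so
 * that completions waiting on journal writeback happen sooner.
 */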
4750void send_flush_mdlog(struct ceph_mds_session *s)
4751{
4752        struct ceph_msg *msg;
4753
4754        /*
4755         * Pre-luminous MDS crashes when it sees an unknown session request
4756         */
4757        if (!CEPH_HAVE_FEATURE(s->s_con.peer_features, SERVER_LUMINOUS))
4758                return;
4759
4760        mutex_lock(&s->s_mutex);
4761        dout("request mdlog flush to mds%d (%s)s seq %lld\n", s->s_mds,
4762             ceph_session_state_name(s->s_state), s->s_seq);
4763        msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_FLUSH_MDLOG,
4764                                      s->s_seq);
4765        if (!msg) {
4766                pr_err("failed to request mdlog flush to mds%d (%s) seq %lld\n",
4767                       s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
4768        } else {
4769                ceph_con_send(&s->s_con, msg);
4770        }
4771        mutex_unlock(&s->s_mutex);
4772}
4773
4774/*
4775 * called before mount is ro, and before dentries are torn down.
4776 * (hmm, does this still race with new lookups?)
4777 */
4778void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
4779{
4780        dout("pre_umount\n");
4781        mdsc->stopping = 1;
4782
4783        ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true);
4784        ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false);
4785        ceph_flush_dirty_caps(mdsc);
4786        wait_requests(mdsc);
4787
4788        /*
4789         * wait for reply handlers to drop their request refs and
4790         * their inode/dcache refs
4791         */
4792        ceph_msgr_flush();
4793
4794        ceph_cleanup_quotarealms_inodes(mdsc);
4795}
4796
4797/*
4798 * wait for all write mds requests to flush.
4799 */
4800static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
4801{
4802        struct ceph_mds_request *req = NULL, *nextreq;
4803        struct rb_node *n;
4804
4805        mutex_lock(&mdsc->mutex);
4806        dout("wait_unsafe_requests want %lld\n", want_tid);
4807restart:
4808        req = __get_oldest_req(mdsc);
4809        while (req && req->r_tid <= want_tid) {
4810                /* find next request */
4811                n = rb_next(&req->r_node);
4812                if (n)
4813                        nextreq = rb_entry(n, struct ceph_mds_request, r_node);
4814                else
4815                        nextreq = NULL;
4816                if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
4817                    (req->r_op & CEPH_MDS_OP_WRITE)) {
4818                        /* write op */
4819                        ceph_mdsc_get_request(req);
4820                        if (nextreq)
4821                                ceph_mdsc_get_request(nextreq);
4822                        mutex_unlock(&mdsc->mutex);
4823                        dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
4824                             req->r_tid, want_tid);
4825                        wait_for_completion(&req->r_safe_completion);
4826                        mutex_lock(&mdsc->mutex);
4827                        ceph_mdsc_put_request(req);
4828                        if (!nextreq)
4829                                break;  /* next req did not exist, so we're done! */
4830                        if (RB_EMPTY_NODE(&nextreq->r_node)) {
4831                                /* next request was removed from tree */
4832                                ceph_mdsc_put_request(nextreq);
4833                                goto restart;
4834                        }
4835                        ceph_mdsc_put_request(nextreq);  /* won't go away */
4836                }
4837                req = nextreq;
4838        }
4839        mutex_unlock(&mdsc->mutex);
4840        dout("wait_unsafe_requests done\n");
4841}
4842
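/*
 * Flush dirty caps and wait for the write MDS requests and cap
 * flushes issued so far to be acknowledged; this backs the metadata
 * side of a filesystem sync.
 */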
4843void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
4844{
4845        u64 want_tid, want_flush;
4846
4847        if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN)
4848                return;
4849
4850        dout("sync\n");
4851        mutex_lock(&mdsc->mutex);
4852        want_tid = mdsc->last_tid;
4853        mutex_unlock(&mdsc->mutex);
4854
4855        ceph_flush_dirty_caps(mdsc);
4856        spin_lock(&mdsc->cap_dirty_lock);
4857        want_flush = mdsc->last_cap_flush_tid;
4858        if (!list_empty(&mdsc->cap_flush_list)) {
4859                struct ceph_cap_flush *cf =
4860                        list_last_entry(&mdsc->cap_flush_list,
4861                                        struct ceph_cap_flush, g_list);
4862                cf->wake = true;
4863        }
4864        spin_unlock(&mdsc->cap_dirty_lock);
4865
4866        dout("sync want tid %lld flush_seq %lld\n",
4867             want_tid, want_flush);
4868
4869        wait_unsafe_requests(mdsc, want_tid);
4870        wait_caps_flush(mdsc, want_flush);
4871}
4872
4873/*
4874 * true if all sessions are closed, or we force unmount
4875 */
4876static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
4877{
4878        if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
4879                return true;
4880        return atomic_read(&mdsc->num_sessions) <= skipped;
4881}
4882
4883/*
4884 * called after sb is ro.
4885 */
4886void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
4887{
4888        struct ceph_options *opts = mdsc->fsc->client->options;
4889        struct ceph_mds_session *session;
4890        int i;
4891        int skipped = 0;
4892
4893        dout("close_sessions\n");
4894
4895        /* close sessions */
4896        mutex_lock(&mdsc->mutex);
4897        for (i = 0; i < mdsc->max_sessions; i++) {
4898                session = __ceph_lookup_mds_session(mdsc, i);
4899                if (!session)
4900                        continue;
4901                mutex_unlock(&mdsc->mutex);
4902                mutex_lock(&session->s_mutex);
4903                if (__close_session(mdsc, session) <= 0)
4904                        skipped++;
4905                mutex_unlock(&session->s_mutex);
4906                ceph_put_mds_session(session);
4907                mutex_lock(&mdsc->mutex);
4908        }
4909        mutex_unlock(&mdsc->mutex);
4910
4911        dout("waiting for sessions to close\n");
4912        wait_event_timeout(mdsc->session_close_wq,
4913                           done_closing_sessions(mdsc, skipped),
4914                           ceph_timeout_jiffies(opts->mount_timeout));
4915
4916        /* tear down remaining sessions */
4917        mutex_lock(&mdsc->mutex);
4918        for (i = 0; i < mdsc->max_sessions; i++) {
4919                if (mdsc->sessions[i]) {
4920                        session = ceph_get_mds_session(mdsc->sessions[i]);
4921                        __unregister_session(mdsc, session);
4922                        mutex_unlock(&mdsc->mutex);
4923                        mutex_lock(&session->s_mutex);
4924                        remove_session_caps(session);
4925                        mutex_unlock(&session->s_mutex);
4926                        ceph_put_mds_session(session);
4927                        mutex_lock(&mdsc->mutex);
4928                }
4929        }
4930        WARN_ON(!list_empty(&mdsc->cap_delay_list));
4931        mutex_unlock(&mdsc->mutex);
4932
4933        ceph_cleanup_snapid_map(mdsc);
4934        ceph_cleanup_empty_realms(mdsc);
4935
4936        cancel_work_sync(&mdsc->cap_reclaim_work);
4937        cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
4938
4939        dout("stopped\n");
4940}
4941
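/*
 * Forcibly close every session and wake all waiting requests so that
 * a forced unmount can proceed without the MDSes' cooperation.
 */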
4942void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
4943{
4944        struct ceph_mds_session *session;
4945        int mds;
4946
4947        dout("force umount\n");
4948
4949        mutex_lock(&mdsc->mutex);
4950        for (mds = 0; mds < mdsc->max_sessions; mds++) {
4951                session = __ceph_lookup_mds_session(mdsc, mds);
4952                if (!session)
4953                        continue;
4954
4955                if (session->s_state == CEPH_MDS_SESSION_REJECTED)
4956                        __unregister_session(mdsc, session);
4957                __wake_requests(mdsc, &session->s_waiting);
4958                mutex_unlock(&mdsc->mutex);
4959
4960                mutex_lock(&session->s_mutex);
4961                __close_session(mdsc, session);
4962                if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
4963                        cleanup_session_requests(mdsc, session);
4964                        remove_session_caps(session);
4965                }
4966                mutex_unlock(&session->s_mutex);
4967                ceph_put_mds_session(session);
4968
4969                mutex_lock(&mdsc->mutex);
4970                kick_requests(mdsc, mds);
4971        }
4972        __wake_requests(mdsc, &mdsc->waiting_for_map);
4973        mutex_unlock(&mdsc->mutex);
4974}
4975
4976static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
4977{
4978        dout("stop\n");
4979        /*
4980         * Make sure the delayed work has stopped before releasing
4981         * the resources.
4982         *
4983         * cancel_delayed_work_sync() only guarantees that the work
4984         * finishes executing, but the delayed work re-arms itself
4985         * afterwards, so flush it instead of merely cancelling.
4986         */
4987        flush_delayed_work(&mdsc->delayed_work);
4988
4989        if (mdsc->mdsmap)
4990                ceph_mdsmap_destroy(mdsc->mdsmap);
4991        kfree(mdsc->sessions);
4992        ceph_caps_finalize(mdsc);
4993        ceph_pool_perm_destroy(mdsc);
4994}
4995
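/*
 * Final teardown of the MDS client, once the filesystem client no
 * longer needs it: stop the delayed work, then free the mdsmap,
 * session array, metrics and capability bookkeeping.
 */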
4996void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
4997{
4998        struct ceph_mds_client *mdsc = fsc->mdsc;
4999        dout("mdsc_destroy %p\n", mdsc);
5000
5001        if (!mdsc)
5002                return;
5003
5004        /* flush out any connection work with references to us */
5005        ceph_msgr_flush();
5006
5007        ceph_mdsc_stop(mdsc);
5008
5009        ceph_metric_destroy(&mdsc->metric);
5010
5011        fsc->mdsc = NULL;
5012        kfree(mdsc);
5013        dout("mdsc_destroy %p done\n", mdsc);
5014}
5015
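/*
 * Handle an FSMAP message from the monitor: walk the encoded list of
 * filesystems looking for the one named by the mds_namespace mount
 * option, record its fscid and subscribe to the matching mdsmap.  If
 * it isn't found, fail any map waiters with -ENOENT.
 */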
5016void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
5017{
5018        struct ceph_fs_client *fsc = mdsc->fsc;
5019        const char *mds_namespace = fsc->mount_options->mds_namespace;
5020        void *p = msg->front.iov_base;
5021        void *end = p + msg->front.iov_len;
5022        u32 epoch;
5023        u32 num_fs;
5024        u32 mount_fscid = (u32)-1;
5025        int err = -EINVAL;
5026
5027        ceph_decode_need(&p, end, sizeof(u32), bad);
5028        epoch = ceph_decode_32(&p);
5029
5030        dout("handle_fsmap epoch %u\n", epoch);
5031
5032        /* struct_v, struct_cv, map_len, epoch, legacy_client_fscid */
5033        ceph_decode_skip_n(&p, end, 2 + sizeof(u32) * 3, bad);
5034
5035        ceph_decode_32_safe(&p, end, num_fs, bad);
5036        while (num_fs-- > 0) {
5037                void *info_p, *info_end;
5038                u32 info_len;
5039                u32 fscid, namelen;
5040
5041                ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
5042                p += 2;         // info_v, info_cv
5043                info_len = ceph_decode_32(&p);
5044                ceph_decode_need(&p, end, info_len, bad);
5045                info_p = p;
5046                info_end = p + info_len;
5047                p = info_end;
5048
5049                ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
5050                fscid = ceph_decode_32(&info_p);
5051                namelen = ceph_decode_32(&info_p);
5052                ceph_decode_need(&info_p, info_end, namelen, bad);
5053
5054                if (mds_namespace &&
5055                    strlen(mds_namespace) == namelen &&
5056                    !strncmp(mds_namespace, (char *)info_p, namelen)) {
5057                        mount_fscid = fscid;
5058                        break;
5059                }
5060        }
5061
5062        ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
5063        if (mount_fscid != (u32)-1) {
5064                fsc->client->monc.fs_cluster_id = mount_fscid;
5065                ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
5066                                   0, true);
5067                ceph_monc_renew_subs(&fsc->client->monc);
5068        } else {
5069                err = -ENOENT;
5070                goto err_out;
5071        }
5072        return;
5073
5074bad:
5075        pr_err("error decoding fsmap\n");
5076err_out:
5077        mutex_lock(&mdsc->mutex);
5078        mdsc->mdsmap_err = err;
5079        __wake_requests(mdsc, &mdsc->waiting_for_map);
5080        mutex_unlock(&mdsc->mutex);
5081}
5082
5083/*
5084 * handle mds map update.
5085 */
5086void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
5087{
5088        u32 epoch;
5089        u32 maplen;
5090        void *p = msg->front.iov_base;
5091        void *end = p + msg->front.iov_len;
5092        struct ceph_mdsmap *newmap, *oldmap;
5093        struct ceph_fsid fsid;
5094        int err = -EINVAL;
5095
5096        ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
5097        ceph_decode_copy(&p, &fsid, sizeof(fsid));
5098        if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
5099                return;
5100        epoch = ceph_decode_32(&p);
5101        maplen = ceph_decode_32(&p);
5102        dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
5103
5104        /* do we need it? */
5105        mutex_lock(&mdsc->mutex);
5106        if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
5107                dout("handle_map epoch %u <= our %u\n",
5108                     epoch, mdsc->mdsmap->m_epoch);
5109                mutex_unlock(&mdsc->mutex);
5110                return;
5111        }
5112
5113        newmap = ceph_mdsmap_decode(&p, end, ceph_msgr2(mdsc->fsc->client));
5114        if (IS_ERR(newmap)) {
5115                err = PTR_ERR(newmap);
5116                goto bad_unlock;
5117        }
5118
5119        /* swap into place */
5120        if (mdsc->mdsmap) {
5121                oldmap = mdsc->mdsmap;
5122                mdsc->mdsmap = newmap;
5123                check_new_map(mdsc, newmap, oldmap);
5124                ceph_mdsmap_destroy(oldmap);
5125        } else {
5126                mdsc->mdsmap = newmap;  /* first mds map */
5127        }
5128        mdsc->fsc->max_file_size = min((loff_t)mdsc->mdsmap->m_max_file_size,
5129                                        MAX_LFS_FILESIZE);
5130
5131        __wake_requests(mdsc, &mdsc->waiting_for_map);
5132        ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
5133                          mdsc->mdsmap->m_epoch);
5134
5135        mutex_unlock(&mdsc->mutex);
5136        schedule_delayed(mdsc, 0);
5137        return;
5138
5139bad_unlock:
5140        mutex_unlock(&mdsc->mutex);
5141bad:
5142        pr_err("error decoding mdsmap %d\n", err);
5143        return;
5144}
5145
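/*
 * The connection's private pointer holds a session reference; the
 * con get/put callbacks simply pin and unpin that session.
 */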
5146static struct ceph_connection *mds_get_con(struct ceph_connection *con)
5147{
5148        struct ceph_mds_session *s = con->private;
5149
5150        if (ceph_get_mds_session(s))
5151                return con;
5152        return NULL;
5153}
5154
5155static void mds_put_con(struct ceph_connection *con)
5156{
5157        struct ceph_mds_session *s = con->private;
5158
5159        ceph_put_mds_session(s);
5160}
5161
5162/*
5163 * if the client is unresponsive for long enough, the mds will kill
5164 * the session entirely.
5165 */
5166static void mds_peer_reset(struct ceph_connection *con)
5167{
5168        struct ceph_mds_session *s = con->private;
5169        struct ceph_mds_client *mdsc = s->s_mdsc;
5170
5171        pr_warn("mds%d closed our session\n", s->s_mds);
5172        send_mds_reconnect(mdsc, s);
5173}
5174
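/*
 * Dispatch an incoming message to the appropriate handler, after
 * verifying that the session is still registered with the mdsc.
 * Consumes the message reference either way.
 */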
5175static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
5176{
5177        struct ceph_mds_session *s = con->private;
5178        struct ceph_mds_client *mdsc = s->s_mdsc;
5179        int type = le16_to_cpu(msg->hdr.type);
5180
5181        mutex_lock(&mdsc->mutex);
5182        if (__verify_registered_session(mdsc, s) < 0) {
5183                mutex_unlock(&mdsc->mutex);
5184                goto out;
5185        }
5186        mutex_unlock(&mdsc->mutex);
5187
5188        switch (type) {
5189        case CEPH_MSG_MDS_MAP:
5190                ceph_mdsc_handle_mdsmap(mdsc, msg);
5191                break;
5192        case CEPH_MSG_FS_MAP_USER:
5193                ceph_mdsc_handle_fsmap(mdsc, msg);
5194                break;
5195        case CEPH_MSG_CLIENT_SESSION:
5196                handle_session(s, msg);
5197                break;
5198        case CEPH_MSG_CLIENT_REPLY:
5199                handle_reply(s, msg);
5200                break;
5201        case CEPH_MSG_CLIENT_REQUEST_FORWARD:
5202                handle_forward(mdsc, s, msg);
5203                break;
5204        case CEPH_MSG_CLIENT_CAPS:
5205                ceph_handle_caps(s, msg);
5206                break;
5207        case CEPH_MSG_CLIENT_SNAP:
5208                ceph_handle_snap(mdsc, s, msg);
5209                break;
5210        case CEPH_MSG_CLIENT_LEASE:
5211                handle_lease(mdsc, s, msg);
5212                break;
5213        case CEPH_MSG_CLIENT_QUOTA:
5214                ceph_handle_quota(mdsc, s, msg);
5215                break;
5216
5217        default:
5218                pr_err("received unknown message type %d %s\n", type,
5219                       ceph_msg_type_name(type));
5220        }
5221out:
5222        ceph_msg_put(msg);
5223}
5224
5225/*
5226 * authentication
5227 */
5228
5229/*
5230 * Note: returned pointer is the address of a structure that's
5231 * managed separately.  Caller must *not* attempt to free it.
5232 */
5233static struct ceph_auth_handshake *
5234mds_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
5235{
5236        struct ceph_mds_session *s = con->private;
5237        struct ceph_mds_client *mdsc = s->s_mdsc;
5238        struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5239        struct ceph_auth_handshake *auth = &s->s_auth;
5240        int ret;
5241
5242        ret = __ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_MDS,
5243                                         force_new, proto, NULL, NULL);
5244        if (ret)
5245                return ERR_PTR(ret);
5246
5247        return auth;
5248}
5249
5250static int mds_add_authorizer_challenge(struct ceph_connection *con,
5251                                    void *challenge_buf, int challenge_buf_len)
5252{
5253        struct ceph_mds_session *s = con->private;
5254        struct ceph_mds_client *mdsc = s->s_mdsc;
5255        struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5256
5257        return ceph_auth_add_authorizer_challenge(ac, s->s_auth.authorizer,
5258                                            challenge_buf, challenge_buf_len);
5259}
5260
5261static int mds_verify_authorizer_reply(struct ceph_connection *con)
5262{
5263        struct ceph_mds_session *s = con->private;
5264        struct ceph_mds_client *mdsc = s->s_mdsc;
5265        struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5266        struct ceph_auth_handshake *auth = &s->s_auth;
5267
5268        return ceph_auth_verify_authorizer_reply(ac, auth->authorizer,
5269                auth->authorizer_reply_buf, auth->authorizer_reply_buf_len,
5270                NULL, NULL, NULL, NULL);
5271}
5272
5273static int mds_invalidate_authorizer(struct ceph_connection *con)
5274{
5275        struct ceph_mds_session *s = con->private;
5276        struct ceph_mds_client *mdsc = s->s_mdsc;
5277        struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5278
5279        ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
5280
5281        return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
5282}
5283
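/*
 * The four handlers below implement the newer msgr2-style
 * authentication exchange; the get/verify/invalidate authorizer
 * hooks above serve the same role for msgr1 connections.
 */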
5284static int mds_get_auth_request(struct ceph_connection *con,
5285                                void *buf, int *buf_len,
5286                                void **authorizer, int *authorizer_len)
5287{
5288        struct ceph_mds_session *s = con->private;
5289        struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
5290        struct ceph_auth_handshake *auth = &s->s_auth;
5291        int ret;
5292
5293        ret = ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_MDS,
5294                                       buf, buf_len);
5295        if (ret)
5296                return ret;
5297
5298        *authorizer = auth->authorizer_buf;
5299        *authorizer_len = auth->authorizer_buf_len;
5300        return 0;
5301}
5302
5303static int mds_handle_auth_reply_more(struct ceph_connection *con,
5304                                      void *reply, int reply_len,
5305                                      void *buf, int *buf_len,
5306                                      void **authorizer, int *authorizer_len)
5307{
5308        struct ceph_mds_session *s = con->private;
5309        struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
5310        struct ceph_auth_handshake *auth = &s->s_auth;
5311        int ret;
5312
5313        ret = ceph_auth_handle_svc_reply_more(ac, auth, reply, reply_len,
5314                                              buf, buf_len);
5315        if (ret)
5316                return ret;
5317
5318        *authorizer = auth->authorizer_buf;
5319        *authorizer_len = auth->authorizer_buf_len;
5320        return 0;
5321}
5322
5323static int mds_handle_auth_done(struct ceph_connection *con,
5324                                u64 global_id, void *reply, int reply_len,
5325                                u8 *session_key, int *session_key_len,
5326                                u8 *con_secret, int *con_secret_len)
5327{
5328        struct ceph_mds_session *s = con->private;
5329        struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
5330        struct ceph_auth_handshake *auth = &s->s_auth;
5331
5332        return ceph_auth_handle_svc_reply_done(ac, auth, reply, reply_len,
5333                                               session_key, session_key_len,
5334                                               con_secret, con_secret_len);
5335}
5336
5337static int mds_handle_auth_bad_method(struct ceph_connection *con,
5338                                      int used_proto, int result,
5339                                      const int *allowed_protos, int proto_cnt,
5340                                      const int *allowed_modes, int mode_cnt)
5341{
5342        struct ceph_mds_session *s = con->private;
5343        struct ceph_mon_client *monc = &s->s_mdsc->fsc->client->monc;
5344        int ret;
5345
5346        if (ceph_auth_handle_bad_authorizer(monc->auth, CEPH_ENTITY_TYPE_MDS,
5347                                            used_proto, result,
5348                                            allowed_protos, proto_cnt,
5349                                            allowed_modes, mode_cnt)) {
5350                ret = ceph_monc_validate_auth(monc);
5351                if (ret)
5352                        return ret;
5353        }
5354
5355        return -EACCES;
5356}
5357
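/*
 * Allocate a buffer for an incoming message.  Reuse a preallocated
 * con->in_msg if the messenger already set one up; otherwise allocate
 * a fresh message sized for the front section.
 */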
5358static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
5359                                struct ceph_msg_header *hdr, int *skip)
5360{
5361        struct ceph_msg *msg;
5362        int type = (int) le16_to_cpu(hdr->type);
5363        int front_len = (int) le32_to_cpu(hdr->front_len);
5364
5365        if (con->in_msg)
5366                return con->in_msg;
5367
5368        *skip = 0;
5369        msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
5370        if (!msg) {
5371                pr_err("unable to allocate msg type %d len %d\n",
5372                       type, front_len);
5373                return NULL;
5374        }
5375
5376        return msg;
5377}
5378
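/*
 * Sign outgoing messages and verify incoming signatures using the
 * session's authentication handshake state.
 */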
5379static int mds_sign_message(struct ceph_msg *msg)
5380{
5381        struct ceph_mds_session *s = msg->con->private;
5382        struct ceph_auth_handshake *auth = &s->s_auth;
5383
5384        return ceph_auth_sign_message(auth, msg);
5385}
5386
5387static int mds_check_message_signature(struct ceph_msg *msg)
5388{
5389        struct ceph_mds_session *s = msg->con->private;
5390        struct ceph_auth_handshake *auth = &s->s_auth;
5391
5392        return ceph_auth_check_message_signature(auth, msg);
5393}
5394
5395static const struct ceph_connection_operations mds_con_ops = {
5396        .get = mds_get_con,
5397        .put = mds_put_con,
5398        .alloc_msg = mds_alloc_msg,
5399        .dispatch = mds_dispatch,
5400        .peer_reset = mds_peer_reset,
5401        .get_authorizer = mds_get_authorizer,
5402        .add_authorizer_challenge = mds_add_authorizer_challenge,
5403        .verify_authorizer_reply = mds_verify_authorizer_reply,
5404        .invalidate_authorizer = mds_invalidate_authorizer,
5405        .sign_message = mds_sign_message,
5406        .check_message_signature = mds_check_message_signature,
5407        .get_auth_request = mds_get_auth_request,
5408        .handle_auth_reply_more = mds_handle_auth_reply_more,
5409        .handle_auth_done = mds_handle_auth_done,
5410        .handle_auth_bad_method = mds_handle_auth_bad_method,
5411};
5412
5413/* eof */
5414