linux/net/ceph/osd_client.c
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OPREPLY_FRONT_LEN   512

static struct kmem_cache        *ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * changes, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
                        struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
                          struct ceph_osd_linger_request *lreq);
static void clear_backoffs(struct ceph_osd *osd);
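
/*
 * Lock-assertion helpers: rwsem_is_wrlocked() probes @sem with
 * down_read_trylock() -- if a read lock can be acquired, the semaphore
 * cannot currently be held for write, so the probe is dropped again and
 * false is returned.  The verify_*() wrappers WARN when the caller does
 * not hold the lock it is required to hold; the #else branch compiles
 * them out.
 */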
#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
        bool wrlocked = true;

        if (unlikely(down_read_trylock(sem))) {
                wrlocked = false;
                up_read(sem);
        }

        return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
        WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
        WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        WARN_ON(!(mutex_is_locked(&osd->lock) &&
                  rwsem_is_locked(&osdc->lock)) &&
                !rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
        WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
                        u64 *objnum, u64 *objoff, u64 *objlen)
{
        u64 orig_len = *plen;
        int r;

        /* object extent? */
        r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
                                          objoff, objlen);
        if (r < 0)
                return r;
        if (*objlen < orig_len) {
                *plen = *objlen;
                dout(" skipping last %llu, final file extent %llu~%llu\n",
                     orig_len - *plen, off, *plen);
        }

        dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

        return 0;
}
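
/*
 * Worked example (illustrative): with a simple layout of 4M objects
 * (object_size = stripe_unit = 4M, stripe_count = 1), a file extent
 * off=6M len=4M maps to objnum=1, objoff=2M, objlen=2M -- the extent
 * is cut at the object boundary and *plen is shortened to 2M.
 */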

static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
        memset(osd_data, 0, sizeof (*osd_data));
        osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
                        struct page **pages, u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
        osd_data->pages = pages;
        osd_data->length = length;
        osd_data->alignment = alignment;
        osd_data->pages_from_pool = pages_from_pool;
        osd_data->own_pages = own_pages;
}

static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
                        struct ceph_pagelist *pagelist)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
        osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
                        struct bio *bio, size_t bio_length)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
        osd_data->bio = bio;
        osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

#define osd_req_op_data(oreq, whch, typ, fld)                           \
({                                                                      \
        struct ceph_osd_request *__oreq = (oreq);                       \
        unsigned int __whch = (whch);                                   \
        BUG_ON(__whch >= __oreq->r_num_ops);                            \
        &__oreq->r_ops[__whch].typ.fld;                                 \
})
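
/*
 * Example (illustrative): osd_req_op_data(req, 0, extent, osd_data)
 * evaluates to &req->r_ops[0].extent.osd_data, BUG_ON()ing first if
 * op 0 does not exist.  The accessors below are wrappers around it.
 */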

static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
        BUG_ON(which >= osd_req->r_num_ops);

        return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
                        unsigned int which)
{
        return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_raw_data_in(osd_req, which);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
                        unsigned int which, struct bio *bio, size_t bio_length)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bio_init(osd_data, bio, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

static void osd_req_op_cls_request_info_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_info);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
        osd_req->r_ops[which].cls.indata_len += pagelist->length;
        osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
        osd_req->r_ops[which].cls.indata_len += length;
        osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, response_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
        switch (osd_data->type) {
        case CEPH_OSD_DATA_TYPE_NONE:
                return 0;
        case CEPH_OSD_DATA_TYPE_PAGES:
                return osd_data->length;
        case CEPH_OSD_DATA_TYPE_PAGELIST:
                return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
        case CEPH_OSD_DATA_TYPE_BIO:
                return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
        default:
                WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
                return 0;
        }
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
                int num_pages;

                num_pages = calc_pages_for((u64)osd_data->alignment,
                                                (u64)osd_data->length);
                ceph_release_page_vector(osd_data->pages, num_pages);
        }
        ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
                        unsigned int which)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];

        switch (op->op) {
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
                ceph_osd_data_release(&op->extent.osd_data);
                break;
        case CEPH_OSD_OP_CALL:
                ceph_osd_data_release(&op->cls.request_info);
                ceph_osd_data_release(&op->cls.request_data);
                ceph_osd_data_release(&op->cls.response_data);
                break;
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_CMPXATTR:
                ceph_osd_data_release(&op->xattr.osd_data);
                break;
        case CEPH_OSD_OP_STAT:
                ceph_osd_data_release(&op->raw_data_in);
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
                ceph_osd_data_release(&op->notify_ack.request_data);
                break;
        case CEPH_OSD_OP_NOTIFY:
                ceph_osd_data_release(&op->notify.request_data);
                ceph_osd_data_release(&op->notify.response_data);
                break;
        case CEPH_OSD_OP_LIST_WATCHERS:
                ceph_osd_data_release(&op->list_watchers.response_data);
                break;
        default:
                break;
        }
}

/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
        ceph_oid_init(&t->base_oid);
        ceph_oloc_init(&t->base_oloc);
        ceph_oid_init(&t->target_oid);
        ceph_oloc_init(&t->target_oloc);

        ceph_osds_init(&t->acting);
        ceph_osds_init(&t->up);
        t->size = -1;
        t->min_size = -1;

        t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
                        const struct ceph_osd_request_target *src)
{
        ceph_oid_copy(&dest->base_oid, &src->base_oid);
        ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
        ceph_oid_copy(&dest->target_oid, &src->target_oid);
        ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

        dest->pgid = src->pgid; /* struct */
        dest->spgid = src->spgid; /* struct */
        dest->pg_num = src->pg_num;
        dest->pg_num_mask = src->pg_num_mask;
        ceph_osds_copy(&dest->acting, &src->acting);
        ceph_osds_copy(&dest->up, &src->up);
        dest->size = src->size;
        dest->min_size = src->min_size;
        dest->sort_bitwise = src->sort_bitwise;

        dest->flags = src->flags;
        dest->paused = src->paused;

        dest->epoch = src->epoch;
        dest->last_force_resend = src->last_force_resend;

        dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
        ceph_oid_destroy(&t->base_oid);
        ceph_oloc_destroy(&t->base_oloc);
        ceph_oid_destroy(&t->target_oid);
        ceph_oloc_destroy(&t->target_oloc);
}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
        WARN_ON(!RB_EMPTY_NODE(&req->r_node));
        WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
        WARN_ON(!list_empty(&req->r_unsafe_item));
        WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
        struct ceph_osd_request *req = container_of(kref,
                                            struct ceph_osd_request, r_kref);
        unsigned int which;

        dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
             req->r_request, req->r_reply);
        request_release_checks(req);

        if (req->r_request)
                ceph_msg_put(req->r_request);
        if (req->r_reply)
                ceph_msg_put(req->r_reply);

        for (which = 0; which < req->r_num_ops; which++)
                osd_req_op_data_release(req, which);

        target_destroy(&req->r_t);
        ceph_put_snap_context(req->r_snapc);

        if (req->r_mempool)
                mempool_free(req, req->r_osdc->req_mempool);
        else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
                kmem_cache_free(ceph_osd_request_cache, req);
        else
                kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
        dout("%s %p (was %d)\n", __func__, req,
             kref_read(&req->r_kref));
        kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
        if (req) {
                dout("%s %p (was %d)\n", __func__, req,
                     kref_read(&req->r_kref));
                kref_put(&req->r_kref, ceph_osdc_release_request);
        }
}
EXPORT_SYMBOL(ceph_osdc_put_request);

static void request_init(struct ceph_osd_request *req)
{
        /* req only, each op is zeroed in _osd_req_op_init() */
        memset(req, 0, sizeof(*req));

        kref_init(&req->r_kref);
        init_completion(&req->r_completion);
        RB_CLEAR_NODE(&req->r_node);
        RB_CLEAR_NODE(&req->r_mc_node);
        INIT_LIST_HEAD(&req->r_unsafe_item);

        target_init(&req->r_t);
}

/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        bool mempool = req->r_mempool;
        unsigned int num_ops = req->r_num_ops;
        u64 snapid = req->r_snapid;
        struct ceph_snap_context *snapc = req->r_snapc;
        bool linger = req->r_linger;
        struct ceph_msg *request_msg = req->r_request;
        struct ceph_msg *reply_msg = req->r_reply;

        dout("%s req %p\n", __func__, req);
        WARN_ON(kref_read(&req->r_kref) != 1);
        request_release_checks(req);

        WARN_ON(kref_read(&request_msg->kref) != 1);
        WARN_ON(kref_read(&reply_msg->kref) != 1);
        target_destroy(&req->r_t);

        request_init(req);
        req->r_osdc = osdc;
        req->r_mempool = mempool;
        req->r_num_ops = num_ops;
        req->r_snapid = snapid;
        req->r_snapc = snapc;
        req->r_linger = linger;
        req->r_request = request_msg;
        req->r_reply = reply_msg;
}

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
                                               struct ceph_snap_context *snapc,
                                               unsigned int num_ops,
                                               bool use_mempool,
                                               gfp_t gfp_flags)
{
        struct ceph_osd_request *req;

        if (use_mempool) {
                BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
                req = mempool_alloc(osdc->req_mempool, gfp_flags);
        } else if (num_ops <= CEPH_OSD_SLAB_OPS) {
                req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
        } else {
                BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
                req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
                              gfp_flags);
        }
        if (unlikely(!req))
                return NULL;

        request_init(req);
        req->r_osdc = osdc;
        req->r_mempool = use_mempool;
        req->r_num_ops = num_ops;
        req->r_snapid = CEPH_NOSNAP;
        req->r_snapc = ceph_get_snap_context(snapc);

        dout("%s req %p\n", __func__, req);
        return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
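
/*
 * Typical usage (an illustrative sketch; the object name and pool_id
 * are hypothetical, error handling elided):
 *
 *      req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
 *      req->r_base_oloc.pool = pool_id;
 *      ceph_oid_printf(&req->r_base_oid, "%s", "some-object");
 *      osd_req_op_init(req, 0, CEPH_OSD_OP_STAT, 0);
 *      ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
 *
 * The base oid/oloc must be set before ceph_osdc_alloc_messages(),
 * which sizes the request message from them.
 */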

static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
{
        return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}

int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        struct ceph_msg *msg;
        int msg_size;

        WARN_ON(ceph_oid_empty(&req->r_base_oid));
        WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

        /* create request message */
        msg_size = CEPH_ENCODING_START_BLK_LEN +
                        CEPH_PGID_ENCODING_LEN + 1; /* spgid */
        msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
        msg_size += CEPH_ENCODING_START_BLK_LEN +
                        sizeof(struct ceph_osd_reqid); /* reqid */
        msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
        msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
        msg_size += CEPH_ENCODING_START_BLK_LEN +
                        ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
        msg_size += 4 + req->r_base_oid.name_len; /* oid */
        msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
        msg_size += 8; /* snapid */
        msg_size += 8; /* snap_seq */
        msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
        msg_size += 4 + 8; /* retry_attempt, features */

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
        else
                msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
        if (!msg)
                return -ENOMEM;

        memset(msg->front.iov_base, 0, msg->front.iov_len);
        req->r_request = msg;

        /* create reply message */
        msg_size = OSD_OPREPLY_FRONT_LEN;
        msg_size += req->r_base_oid.name_len;
        msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
        else
                msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
        if (!msg)
                return -ENOMEM;

        req->r_reply = msg;

        return 0;
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);

static bool osd_req_opcode_valid(u16 opcode)
{
        switch (opcode) {
#define GENERATE_CASE(op, opcode, str)  case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
        default:
                return false;
        }
}
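
/*
 * __CEPH_FORALL_OSD_OPS() is an X-macro over the opcode table; with
 * GENERATE_CASE each entry expands to, e.g.,
 *
 *      case CEPH_OSD_OP_READ: return true;
 *
 * so exactly the opcodes in that table are considered valid.
 */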

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
                 u16 opcode, u32 flags)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        BUG_ON(!osd_req_opcode_valid(opcode));

        op = &osd_req->r_ops[which];
        memset(op, 0, sizeof (*op));
        op->op = opcode;
        op->flags = flags;

        return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
                     unsigned int which, u16 opcode, u32 flags)
{
        (void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);
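
/*
 * Example (illustrative): a stat carries no request payload, so a
 * single data-less op is enough:
 *
 *      osd_req_op_init(req, 0, CEPH_OSD_OP_STAT, 0);
 */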

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
                                unsigned int which, u16 opcode,
                                u64 offset, u64 length,
                                u64 truncate_size, u32 truncate_seq)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        size_t payload_len = 0;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
               opcode != CEPH_OSD_OP_TRUNCATE);

        op->extent.offset = offset;
        op->extent.length = length;
        op->extent.truncate_size = truncate_size;
        op->extent.truncate_seq = truncate_seq;
        if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
                payload_len += length;

        op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
                                unsigned int which, u64 length)
{
        struct ceph_osd_req_op *op;
        u64 previous;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];
        previous = op->extent.length;

        if (length == previous)
                return;         /* Nothing to do */
        BUG_ON(length > previous);

        op->extent.length = length;
        if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
                op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
                                unsigned int which, u64 offset_inc)
{
        struct ceph_osd_req_op *op, *prev_op;

        BUG_ON(which + 1 >= osd_req->r_num_ops);

        prev_op = &osd_req->r_ops[which];
        op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
        /* dup previous one */
        op->indata_len = prev_op->indata_len;
        op->outdata_len = prev_op->outdata_len;
        op->extent = prev_op->extent;
        /* adjust offset */
        op->extent.offset += offset_inc;
        op->extent.length -= offset_inc;

        if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
                op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);

void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
                        u16 opcode, const char *class, const char *method)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        struct ceph_pagelist *pagelist;
        size_t payload_len = 0;
        size_t size;

        BUG_ON(opcode != CEPH_OSD_OP_CALL);

        pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
        BUG_ON(!pagelist);
        ceph_pagelist_init(pagelist);

        op->cls.class_name = class;
        size = strlen(class);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.class_len = size;
        ceph_pagelist_append(pagelist, class, size);
        payload_len += size;

        op->cls.method_name = method;
        size = strlen(method);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.method_len = size;
        ceph_pagelist_append(pagelist, method, size);
        payload_len += size;

        osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

        op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
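
/*
 * Example (illustrative sketch; the class/method names and in_pagelist
 * are hypothetical):
 *
 *      osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, "myclass", "mymethod");
 *      osd_req_op_cls_request_data_pagelist(req, 0, in_pagelist);
 *
 * encodes a call into the "myclass.mymethod" object class handler with
 * in_pagelist as its input.
 */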

int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
                          u16 opcode, const char *name, const void *value,
                          size_t size, u8 cmp_op, u8 cmp_mode)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        struct ceph_pagelist *pagelist;
        size_t payload_len;

        BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

        pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
        if (!pagelist)
                return -ENOMEM;

        ceph_pagelist_init(pagelist);

        payload_len = strlen(name);
        op->xattr.name_len = payload_len;
        ceph_pagelist_append(pagelist, name, payload_len);

        op->xattr.value_len = size;
        ceph_pagelist_append(pagelist, value, size);
        payload_len += size;

        op->xattr.cmp_op = cmp_op;
        op->xattr.cmp_mode = cmp_mode;

        ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
        op->indata_len = payload_len;
        return 0;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);

/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
                                  u64 cookie, u8 watch_opcode)
{
        struct ceph_osd_req_op *op;

        op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
        op->watch.cookie = cookie;
        op->watch.op = watch_opcode;
        op->watch.gen = 0;
}

void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
                                unsigned int which,
                                u64 expected_object_size,
                                u64 expected_write_size)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      CEPH_OSD_OP_SETALLOCHINT,
                                                      0);

        op->alloc_hint.expected_object_size = expected_object_size;
        op->alloc_hint.expected_write_size = expected_write_size;

        /*
         * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
         * not worth a feature bit.  Set FAILOK per-op flag to make
         * sure older osds don't trip over an unsupported opcode.
         */
        op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);

static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
                                struct ceph_osd_data *osd_data)
{
        u64 length = ceph_osd_data_length(osd_data);

        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
                BUG_ON(length > (u64) SIZE_MAX);
                if (length)
                        ceph_msg_data_add_pages(msg, osd_data->pages,
                                        length, osd_data->alignment);
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
                BUG_ON(!length);
                ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
                ceph_msg_data_add_bio(msg, osd_data->bio, length);
#endif
        } else {
                BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
        }
}

static u32 osd_req_encode_op(struct ceph_osd_op *dst,
                             const struct ceph_osd_req_op *src)
{
        if (WARN_ON(!osd_req_opcode_valid(src->op))) {
                pr_err("unrecognized osd opcode %d\n", src->op);

                return 0;
        }

        switch (src->op) {
        case CEPH_OSD_OP_STAT:
                break;
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
        case CEPH_OSD_OP_ZERO:
        case CEPH_OSD_OP_TRUNCATE:
                dst->extent.offset = cpu_to_le64(src->extent.offset);
                dst->extent.length = cpu_to_le64(src->extent.length);
                dst->extent.truncate_size =
                        cpu_to_le64(src->extent.truncate_size);
                dst->extent.truncate_seq =
                        cpu_to_le32(src->extent.truncate_seq);
                break;
        case CEPH_OSD_OP_CALL:
                dst->cls.class_len = src->cls.class_len;
                dst->cls.method_len = src->cls.method_len;
                dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
                break;
        case CEPH_OSD_OP_STARTSYNC:
                break;
        case CEPH_OSD_OP_WATCH:
                dst->watch.cookie = cpu_to_le64(src->watch.cookie);
                dst->watch.ver = cpu_to_le64(0);
                dst->watch.op = src->watch.op;
                dst->watch.gen = cpu_to_le32(src->watch.gen);
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
                break;
        case CEPH_OSD_OP_NOTIFY:
                dst->notify.cookie = cpu_to_le64(src->notify.cookie);
                break;
        case CEPH_OSD_OP_LIST_WATCHERS:
                break;
        case CEPH_OSD_OP_SETALLOCHINT:
                dst->alloc_hint.expected_object_size =
                    cpu_to_le64(src->alloc_hint.expected_object_size);
                dst->alloc_hint.expected_write_size =
                    cpu_to_le64(src->alloc_hint.expected_write_size);
                break;
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_CMPXATTR:
                dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
                dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
                dst->xattr.cmp_op = src->xattr.cmp_op;
                dst->xattr.cmp_mode = src->xattr.cmp_mode;
                break;
        case CEPH_OSD_OP_CREATE:
        case CEPH_OSD_OP_DELETE:
                break;
        default:
                pr_err("unsupported osd opcode %s\n",
                        ceph_osd_op_name(src->op));
                WARN_ON(1);

                return 0;
        }

        dst->op = cpu_to_le16(src->op);
        dst->flags = cpu_to_le32(src->flags);
        dst->payload_len = cpu_to_le32(src->indata_len);

        return src->indata_len;
}

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                                               struct ceph_file_layout *layout,
                                               struct ceph_vino vino,
                                               u64 off, u64 *plen,
                                               unsigned int which, int num_ops,
                                               int opcode, int flags,
                                               struct ceph_snap_context *snapc,
                                               u32 truncate_seq,
                                               u64 truncate_size,
                                               bool use_mempool)
{
        struct ceph_osd_request *req;
        u64 objnum = 0;
        u64 objoff = 0;
        u64 objlen = 0;
        int r;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
               opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

        req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
                                        GFP_NOFS);
        if (!req) {
                r = -ENOMEM;
                goto fail;
        }

        /* calculate max write size */
        r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
        if (r)
                goto fail;

        if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
                osd_req_op_init(req, which, opcode, 0);
        } else {
                u32 object_size = layout->object_size;
                u32 object_base = off - objoff;
                if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
                        if (truncate_size <= object_base) {
                                truncate_size = 0;
                        } else {
                                truncate_size -= object_base;
                                if (truncate_size > object_size)
                                        truncate_size = object_size;
                        }
                }
                osd_req_op_extent_init(req, which, opcode, objoff, objlen,
                                       truncate_size, truncate_seq);
        }

        req->r_abort_on_full = true;
        req->r_flags = flags;
        req->r_base_oloc.pool = layout->pool_id;
        req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
        ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

        req->r_snapid = vino.snap;
        if (flags & CEPH_OSD_FLAG_WRITE)
                req->r_data_offset = off;

        r = ceph_osdc_alloc_messages(req, GFP_NOFS);
        if (r)
                goto fail;

        return req;

fail:
        ceph_osdc_put_request(req);
        return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
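
/*
 * Typical usage (an illustrative sketch): a read of len bytes at off
 * might be set up as
 *
 *      req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
 *                                  CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *                                  NULL, truncate_seq, truncate_size,
 *                                  false);
 *
 * with len shortened to the object boundary on return; truncate_seq and
 * truncate_size come from the caller's inode state.
 */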

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
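
/*
 * DEFINE_RB_FUNCS() generates the insert_request()/lookup_request()/
 * erase_request() helpers (and their _mc counterparts) used below,
 * keyed by r_tid.
 */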

static bool osd_homeless(struct ceph_osd *osd)
{
        return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
        verify_osdc_locked(osd->o_osdc);

        return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
        refcount_set(&osd->o_ref, 1);
        RB_CLEAR_NODE(&osd->o_node);
        osd->o_requests = RB_ROOT;
        osd->o_linger_requests = RB_ROOT;
        osd->o_backoff_mappings = RB_ROOT;
        osd->o_backoffs_by_id = RB_ROOT;
        INIT_LIST_HEAD(&osd->o_osd_lru);
        INIT_LIST_HEAD(&osd->o_keepalive_item);
        osd->o_incarnation = 1;
        mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
        WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
        WARN_ON(!list_empty(&osd->o_osd_lru));
        WARN_ON(!list_empty(&osd->o_keepalive_item));

        if (osd->o_auth.authorizer) {
                WARN_ON(osd_homeless(osd));
                ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
        }
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
        struct ceph_osd *osd;

        WARN_ON(onum == CEPH_HOMELESS_OSD);

        osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
        osd_init(osd);
        osd->o_osdc = osdc;
        osd->o_osd = onum;

        ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

        return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
        if (refcount_inc_not_zero(&osd->o_ref)) {
                dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
                     refcount_read(&osd->o_ref));
                return osd;
        } else {
                dout("get_osd %p FAIL\n", osd);
                return NULL;
        }
}

static void put_osd(struct ceph_osd *osd)
{
        dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
             refcount_read(&osd->o_ref) - 1);
        if (refcount_dec_and_test(&osd->o_ref)) {
                osd_cleanup(osd);
                kfree(osd);
        }
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

static void __move_osd_to_lru(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
        BUG_ON(!list_empty(&osd->o_osd_lru));

        spin_lock(&osdc->osd_lru_lock);
        list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
        spin_unlock(&osdc->osd_lru_lock);

        osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
        if (RB_EMPTY_ROOT(&osd->o_requests) &&
            RB_EMPTY_ROOT(&osd->o_linger_requests))
                __move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        spin_lock(&osdc->osd_lru_lock);
        if (!list_empty(&osd->o_osd_lru))
                list_del_init(&osd->o_osd_lru);
        spin_unlock(&osdc->osd_lru_lock);
}

/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;
        struct rb_node *n;

        verify_osdc_wrlocked(osdc);
        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        ceph_con_close(&osd->o_con);

        for (n = rb_first(&osd->o_requests); n; ) {
                struct ceph_osd_request *req =
                    rb_entry(n, struct ceph_osd_request, r_node);

                n = rb_next(n); /* unlink_request() */

                dout(" reassigning req %p tid %llu\n", req, req->r_tid);
                unlink_request(osd, req);
                link_request(&osdc->homeless_osd, req);
        }
        for (n = rb_first(&osd->o_linger_requests); n; ) {
                struct ceph_osd_linger_request *lreq =
                    rb_entry(n, struct ceph_osd_linger_request, node);

                n = rb_next(n); /* unlink_linger() */

                dout(" reassigning lreq %p linger_id %llu\n", lreq,
                     lreq->linger_id);
                unlink_linger(osd, lreq);
                link_linger(&osdc->homeless_osd, lreq);
        }
        clear_backoffs(osd);

        __remove_osd_from_lru(osd);
        erase_osd(&osdc->osds, osd);
        put_osd(osd);
}

/*
 * reset osd connect
 */
static int reopen_osd(struct ceph_osd *osd)
{
        struct ceph_entity_addr *peer_addr;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        if (RB_EMPTY_ROOT(&osd->o_requests) &&
            RB_EMPTY_ROOT(&osd->o_linger_requests)) {
                close_osd(osd);
                return -ENODEV;
        }

        peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
        if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
                        !ceph_con_opened(&osd->o_con)) {
                struct rb_node *n;

                dout("osd addr hasn't changed and connection never opened, "
                     "letting msgr retry\n");
                /* touch each r_stamp for handle_timeout()'s benefit */
1188                for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
1189                        struct ceph_osd_request *req =
1190                            rb_entry(n, struct ceph_osd_request, r_node);
1191                        req->r_stamp = jiffies;
1192                }
1193
1194                return -EAGAIN;
1195        }
1196
1197        ceph_con_close(&osd->o_con);
1198        ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
1199        osd->o_incarnation++;
1200
1201        return 0;
1202}
1203
1204static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
1205                                          bool wrlocked)
1206{
1207        struct ceph_osd *osd;
1208
1209        if (wrlocked)
1210                verify_osdc_wrlocked(osdc);
1211        else
1212                verify_osdc_locked(osdc);
1213
1214        if (o != CEPH_HOMELESS_OSD)
1215                osd = lookup_osd(&osdc->osds, o);
1216        else
1217                osd = &osdc->homeless_osd;
1218        if (!osd) {
1219                if (!wrlocked)
1220                        return ERR_PTR(-EAGAIN);
1221
1222                osd = create_osd(osdc, o);
1223                insert_osd(&osdc->osds, osd);
1224                ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
1225                              &osdc->osdmap->osd_addr[osd->o_osd]);
1226        }
1227
1228        dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
1229        return osd;
1230}
1231
1232/*
1233 * Create request <-> OSD session relation.
1234 *
1235 * @req has to be assigned a tid, @osd may be homeless.
1236 */
1237static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
1238{
1239        verify_osd_locked(osd);
1240        WARN_ON(!req->r_tid || req->r_osd);
1241        dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
1242             req, req->r_tid);
1243
1244        if (!osd_homeless(osd))
1245                __remove_osd_from_lru(osd);
1246        else
1247                atomic_inc(&osd->o_osdc->num_homeless);
1248
1249        get_osd(osd);
1250        insert_request(&osd->o_requests, req);
1251        req->r_osd = osd;
1252}
1253
1254static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
1255{
1256        verify_osd_locked(osd);
1257        WARN_ON(req->r_osd != osd);
1258        dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
1259             req, req->r_tid);
1260
1261        req->r_osd = NULL;
1262        erase_request(&osd->o_requests, req);
1263        put_osd(osd);
1264
1265        if (!osd_homeless(osd))
1266                maybe_move_osd_to_lru(osd);
1267        else
1268                atomic_dec(&osd->o_osdc->num_homeless);
1269}
1270
1271static bool __pool_full(struct ceph_pg_pool_info *pi)
1272{
1273        return pi->flags & CEPH_POOL_FLAG_FULL;
1274}
1275
1276static bool have_pool_full(struct ceph_osd_client *osdc)
1277{
1278        struct rb_node *n;
1279
1280        for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
1281                struct ceph_pg_pool_info *pi =
1282                    rb_entry(n, struct ceph_pg_pool_info, node);
1283
1284                if (__pool_full(pi))
1285                        return true;
1286        }
1287
1288        return false;
1289}
1290
1291static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
1292{
1293        struct ceph_pg_pool_info *pi;
1294
1295        pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
1296        if (!pi)
1297                return false;
1298
1299        return __pool_full(pi);
1300}
1301
1302/*
1303 * Returns whether a request should be blocked from being sent
1304 * based on the current osdmap and osd_client settings.
1305 */
1306static bool target_should_be_paused(struct ceph_osd_client *osdc,
1307                                    const struct ceph_osd_request_target *t,
1308                                    struct ceph_pg_pool_info *pi)
1309{
1310        bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
1311        bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
1312                       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1313                       __pool_full(pi);
1314
1315        WARN_ON(pi->id != t->target_oloc.pool);
1316        return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
1317               ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
1318               (osdc->osdmap->epoch < osdc->epoch_barrier);
1319}
1320
1321enum calc_target_result {
1322        CALC_TARGET_NO_ACTION = 0,
1323        CALC_TARGET_NEED_RESEND,
1324        CALC_TARGET_POOL_DNE,
1325};
1326
1327static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1328                                           struct ceph_osd_request_target *t,
1329                                           struct ceph_connection *con,
1330                                           bool any_change)
1331{
1332        struct ceph_pg_pool_info *pi;
1333        struct ceph_pg pgid, last_pgid;
1334        struct ceph_osds up, acting;
1335        bool force_resend = false;
1336        bool unpaused = false;
1337        bool legacy_change;
1338        bool split = false;
1339        bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
1340        bool recovery_deletes = ceph_osdmap_flag(osdc,
1341                                                 CEPH_OSDMAP_RECOVERY_DELETES);
1342        enum calc_target_result ct_res;
1343        int ret;
1344
1345        t->epoch = osdc->osdmap->epoch;
1346        pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
1347        if (!pi) {
1348                t->osd = CEPH_HOMELESS_OSD;
1349                ct_res = CALC_TARGET_POOL_DNE;
1350                goto out;
1351        }
1352
1353        if (osdc->osdmap->epoch == pi->last_force_request_resend) {
1354                if (t->last_force_resend < pi->last_force_request_resend) {
1355                        t->last_force_resend = pi->last_force_request_resend;
1356                        force_resend = true;
1357                } else if (t->last_force_resend == 0) {
1358                        force_resend = true;
1359                }
1360        }
1361
1362        /* apply tiering */
1363        ceph_oid_copy(&t->target_oid, &t->base_oid);
1364        ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
1365        if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
1366                if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
1367                        t->target_oloc.pool = pi->read_tier;
1368                if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
1369                        t->target_oloc.pool = pi->write_tier;
1370
1371                pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
1372                if (!pi) {
1373                        t->osd = CEPH_HOMELESS_OSD;
1374                        ct_res = CALC_TARGET_POOL_DNE;
1375                        goto out;
1376                }
1377        }
1378
1379        ret = __ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc,
1380                                          &pgid);
1381        if (ret) {
1382                WARN_ON(ret != -ENOENT);
1383                t->osd = CEPH_HOMELESS_OSD;
1384                ct_res = CALC_TARGET_POOL_DNE;
1385                goto out;
1386        }
1387        last_pgid.pool = pgid.pool;
1388        last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);
1389
1390        ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
1391        if (any_change &&
1392            ceph_is_new_interval(&t->acting,
1393                                 &acting,
1394                                 &t->up,
1395                                 &up,
1396                                 t->size,
1397                                 pi->size,
1398                                 t->min_size,
1399                                 pi->min_size,
1400                                 t->pg_num,
1401                                 pi->pg_num,
1402                                 t->sort_bitwise,
1403                                 sort_bitwise,
1404                                 t->recovery_deletes,
1405                                 recovery_deletes,
1406                                 &last_pgid))
1407                force_resend = true;
1408
1409        if (t->paused && !target_should_be_paused(osdc, t, pi)) {
1410                t->paused = false;
1411                unpaused = true;
1412        }
1413        legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
1414                        ceph_osds_changed(&t->acting, &acting, any_change);
1415        if (t->pg_num)
1416                split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);
1417
1418        if (legacy_change || force_resend || split) {
1419                t->pgid = pgid; /* struct */
1420                ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
1421                ceph_osds_copy(&t->acting, &acting);
1422                ceph_osds_copy(&t->up, &up);
1423                t->size = pi->size;
1424                t->min_size = pi->min_size;
1425                t->pg_num = pi->pg_num;
1426                t->pg_num_mask = pi->pg_num_mask;
1427                t->sort_bitwise = sort_bitwise;
1428                t->recovery_deletes = recovery_deletes;
1429
1430                t->osd = acting.primary;
1431        }
1432
1433        if (unpaused || legacy_change || force_resend ||
1434            (split && con && CEPH_HAVE_FEATURE(con->peer_features,
1435                                               RESEND_ON_SPLIT)))
1436                ct_res = CALC_TARGET_NEED_RESEND;
1437        else
1438                ct_res = CALC_TARGET_NO_ACTION;
1439
1440out:
1441        dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
1442        return ct_res;
1443}
1444
1445static struct ceph_spg_mapping *alloc_spg_mapping(void)
1446{
1447        struct ceph_spg_mapping *spg;
1448
1449        spg = kmalloc(sizeof(*spg), GFP_NOIO);
1450        if (!spg)
1451                return NULL;
1452
1453        RB_CLEAR_NODE(&spg->node);
1454        spg->backoffs = RB_ROOT;
1455        return spg;
1456}
1457
1458static void free_spg_mapping(struct ceph_spg_mapping *spg)
1459{
1460        WARN_ON(!RB_EMPTY_NODE(&spg->node));
1461        WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));
1462
1463        kfree(spg);
1464}
1465
1466/*
1467 * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
1468 * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
1469 * defined only within a specific spgid; it does not pass anything to
1470 * children on split, or to another primary.
1471 */
1472DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
1473                 RB_BYPTR, const struct ceph_spg *, node)
1474
1475static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
1476{
1477        return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
1478}
1479
1480static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
1481                                   void **pkey, size_t *pkey_len)
1482{
1483        if (hoid->key_len) {
1484                *pkey = hoid->key;
1485                *pkey_len = hoid->key_len;
1486        } else {
1487                *pkey = hoid->oid;
1488                *pkey_len = hoid->oid_len;
1489        }
1490}
1491
1492static int compare_names(const void *name1, size_t name1_len,
1493                         const void *name2, size_t name2_len)
1494{
1495        int ret;
1496
1497        ret = memcmp(name1, name2, min(name1_len, name2_len));
1498        if (!ret) {
1499                if (name1_len < name2_len)
1500                        ret = -1;
1501                else if (name1_len > name2_len)
1502                        ret = 1;
1503        }
1504        return ret;
1505}
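/*
 * memcmp() order, with length as the tie-breaker, so e.g.
 * compare_names("ab", 2, "abc", 3) returns -1: a name sorts before
 * any of its extensions.
 */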
1506
1507static int hoid_compare(const struct ceph_hobject_id *lhs,
1508                        const struct ceph_hobject_id *rhs)
1509{
1510        void *effective_key1, *effective_key2;
1511        size_t effective_key1_len, effective_key2_len;
1512        int ret;
1513
1514        if (lhs->is_max < rhs->is_max)
1515                return -1;
1516        if (lhs->is_max > rhs->is_max)
1517                return 1;
1518
1519        if (lhs->pool < rhs->pool)
1520                return -1;
1521        if (lhs->pool > rhs->pool)
1522                return 1;
1523
1524        if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
1525                return -1;
1526        if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
1527                return 1;
1528
1529        ret = compare_names(lhs->nspace, lhs->nspace_len,
1530                            rhs->nspace, rhs->nspace_len);
1531        if (ret)
1532                return ret;
1533
1534        hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
1535        hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
1536        ret = compare_names(effective_key1, effective_key1_len,
1537                            effective_key2, effective_key2_len);
1538        if (ret)
1539                return ret;
1540
1541        ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
1542        if (ret)
1543                return ret;
1544
1545        if (lhs->snapid < rhs->snapid)
1546                return -1;
1547        if (lhs->snapid > rhs->snapid)
1548                return 1;
1549
1550        return 0;
1551}
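/*
 * In summary, hoids sort by is_max, pool, bitwise key (reversed hash
 * bits), namespace, effective key (locator key if set, oid otherwise),
 * oid and finally snapid.
 */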
1552
1553/*
1554 * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
1555 * compat stuff here.
1556 *
1557 * Assumes @hoid is zero-initialized.
1558 */
1559static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
1560{
1561        u8 struct_v;
1562        u32 struct_len;
1563        int ret;
1564
1565        ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
1566                                  &struct_len);
1567        if (ret)
1568                return ret;
1569
1570        if (struct_v < 4) {
1571                pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
1572                goto e_inval;
1573        }
1574
1575        hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
1576                                                GFP_NOIO);
1577        if (IS_ERR(hoid->key)) {
1578                ret = PTR_ERR(hoid->key);
1579                hoid->key = NULL;
1580                return ret;
1581        }
1582
1583        hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
1584                                                GFP_NOIO);
1585        if (IS_ERR(hoid->oid)) {
1586                ret = PTR_ERR(hoid->oid);
1587                hoid->oid = NULL;
1588                return ret;
1589        }
1590
1591        ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
1592        ceph_decode_32_safe(p, end, hoid->hash, e_inval);
1593        ceph_decode_8_safe(p, end, hoid->is_max, e_inval);
1594
1595        hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
1596                                                   GFP_NOIO);
1597        if (IS_ERR(hoid->nspace)) {
1598                ret = PTR_ERR(hoid->nspace);
1599                hoid->nspace = NULL;
1600                return ret;
1601        }
1602
1603        ceph_decode_64_safe(p, end, hoid->pool, e_inval);
1604
1605        ceph_hoid_build_hash_cache(hoid);
1606        return 0;
1607
1608e_inval:
1609        return -EINVAL;
1610}
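/*
 * So, after the struct_v/struct_compat/struct_len header, the v4
 * hobject_t payload consumed above is (strings are u32 length + bytes,
 * integers little-endian):
 *
 *   key (string), oid (string), snapid (u64), hash (u32), is_max (u8),
 *   nspace (string), pool (u64)
 *
 * which is exactly what hoid_encoding_size() below adds up.
 */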
1611
1612static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
1613{
1614        return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
1615               4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
1616}
1617
1618static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
1619{
1620        ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
1621        ceph_encode_string(p, end, hoid->key, hoid->key_len);
1622        ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
1623        ceph_encode_64(p, hoid->snapid);
1624        ceph_encode_32(p, hoid->hash);
1625        ceph_encode_8(p, hoid->is_max);
1626        ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
1627        ceph_encode_64(p, hoid->pool);
1628}
1629
1630static void free_hoid(struct ceph_hobject_id *hoid)
1631{
1632        if (hoid) {
1633                kfree(hoid->key);
1634                kfree(hoid->oid);
1635                kfree(hoid->nspace);
1636                kfree(hoid);
1637        }
1638}
1639
1640static struct ceph_osd_backoff *alloc_backoff(void)
1641{
1642        struct ceph_osd_backoff *backoff;
1643
1644        backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
1645        if (!backoff)
1646                return NULL;
1647
1648        RB_CLEAR_NODE(&backoff->spg_node);
1649        RB_CLEAR_NODE(&backoff->id_node);
1650        return backoff;
1651}
1652
1653static void free_backoff(struct ceph_osd_backoff *backoff)
1654{
1655        WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
1656        WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));
1657
1658        free_hoid(backoff->begin);
1659        free_hoid(backoff->end);
1660        kfree(backoff);
1661}
1662
1663/*
1664 * Within a specific spgid, backoffs are managed by ->begin hoid.
1665 */
1666DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
1667                        RB_BYVAL, spg_node);
1668
1669static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
1670                                            const struct ceph_hobject_id *hoid)
1671{
1672        struct rb_node *n = root->rb_node;
1673
1674        while (n) {
1675                struct ceph_osd_backoff *cur =
1676                    rb_entry(n, struct ceph_osd_backoff, spg_node);
1677                int cmp;
1678
1679                cmp = hoid_compare(hoid, cur->begin);
1680                if (cmp < 0) {
1681                        n = n->rb_left;
1682                } else if (cmp > 0) {
1683                        if (hoid_compare(hoid, cur->end) < 0)
1684                                return cur;
1685
1686                        n = n->rb_right;
1687                } else {
1688                        return cur;
1689                }
1690        }
1691
1692        return NULL;
1693}
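/*
 * In other words, a backoff covers the half-open range [begin, end):
 * an exact match on ->begin hits, anything strictly in between hits,
 * and ->end itself doesn't.  Descending right on cmp > 0 is safe
 * because ranges within one spgid presumably never overlap.
 */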
1694
1695/*
1696 * Each backoff has a unique id within its OSD session.
1697 */
1698DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node)
1699
1700static void clear_backoffs(struct ceph_osd *osd)
1701{
1702        while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) {
1703                struct ceph_spg_mapping *spg =
1704                    rb_entry(rb_first(&osd->o_backoff_mappings),
1705                             struct ceph_spg_mapping, node);
1706
1707                while (!RB_EMPTY_ROOT(&spg->backoffs)) {
1708                        struct ceph_osd_backoff *backoff =
1709                            rb_entry(rb_first(&spg->backoffs),
1710                                     struct ceph_osd_backoff, spg_node);
1711
1712                        erase_backoff(&spg->backoffs, backoff);
1713                        erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
1714                        free_backoff(backoff);
1715                }
1716                erase_spg_mapping(&osd->o_backoff_mappings, spg);
1717                free_spg_mapping(spg);
1718        }
1719}
1720
1721/*
1722 * Set up a temporary, non-owning view into @t.
1723 */
1724static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
1725                                  const struct ceph_osd_request_target *t)
1726{
1727        hoid->key = NULL;
1728        hoid->key_len = 0;
1729        hoid->oid = t->target_oid.name;
1730        hoid->oid_len = t->target_oid.name_len;
1731        hoid->snapid = CEPH_NOSNAP;
1732        hoid->hash = t->pgid.seed;
1733        hoid->is_max = false;
1734        if (t->target_oloc.pool_ns) {
1735                hoid->nspace = t->target_oloc.pool_ns->str;
1736                hoid->nspace_len = t->target_oloc.pool_ns->len;
1737        } else {
1738                hoid->nspace = NULL;
1739                hoid->nspace_len = 0;
1740        }
1741        hoid->pool = t->target_oloc.pool;
1742        ceph_hoid_build_hash_cache(hoid);
1743}
1744
1745static bool should_plug_request(struct ceph_osd_request *req)
1746{
1747        struct ceph_osd *osd = req->r_osd;
1748        struct ceph_spg_mapping *spg;
1749        struct ceph_osd_backoff *backoff;
1750        struct ceph_hobject_id hoid;
1751
1752        spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
1753        if (!spg)
1754                return false;
1755
1756        hoid_fill_from_target(&hoid, &req->r_t);
1757        backoff = lookup_containing_backoff(&spg->backoffs, &hoid);
1758        if (!backoff)
1759                return false;
1760
1761        dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n",
1762             __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
1763             backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id);
1764        return true;
1765}
1766
1767static void setup_request_data(struct ceph_osd_request *req,
1768                               struct ceph_msg *msg)
1769{
1770        u32 data_len = 0;
1771        int i;
1772
1773        if (!list_empty(&msg->data))
1774                return;
1775
1776        WARN_ON(msg->data_length);
1777        for (i = 0; i < req->r_num_ops; i++) {
1778                struct ceph_osd_req_op *op = &req->r_ops[i];
1779
1780                switch (op->op) {
1781                /* request */
1782                case CEPH_OSD_OP_WRITE:
1783                case CEPH_OSD_OP_WRITEFULL:
1784                        WARN_ON(op->indata_len != op->extent.length);
1785                        ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
1786                        break;
1787                case CEPH_OSD_OP_SETXATTR:
1788                case CEPH_OSD_OP_CMPXATTR:
1789                        WARN_ON(op->indata_len != op->xattr.name_len +
1790                                                  op->xattr.value_len);
1791                        ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
1792                        break;
1793                case CEPH_OSD_OP_NOTIFY_ACK:
1794                        ceph_osdc_msg_data_add(msg,
1795                                               &op->notify_ack.request_data);
1796                        break;
1797
1798                /* reply */
1799                case CEPH_OSD_OP_STAT:
1800                        ceph_osdc_msg_data_add(req->r_reply,
1801                                               &op->raw_data_in);
1802                        break;
1803                case CEPH_OSD_OP_READ:
1804                        ceph_osdc_msg_data_add(req->r_reply,
1805                                               &op->extent.osd_data);
1806                        break;
1807                case CEPH_OSD_OP_LIST_WATCHERS:
1808                        ceph_osdc_msg_data_add(req->r_reply,
1809                                               &op->list_watchers.response_data);
1810                        break;
1811
1812                /* both */
1813                case CEPH_OSD_OP_CALL:
1814                        WARN_ON(op->indata_len != op->cls.class_len +
1815                                                  op->cls.method_len +
1816                                                  op->cls.indata_len);
1817                        ceph_osdc_msg_data_add(msg, &op->cls.request_info);
1818                        /* optional, can be NONE */
1819                        ceph_osdc_msg_data_add(msg, &op->cls.request_data);
1820                        /* optional, can be NONE */
1821                        ceph_osdc_msg_data_add(req->r_reply,
1822                                               &op->cls.response_data);
1823                        break;
1824                case CEPH_OSD_OP_NOTIFY:
1825                        ceph_osdc_msg_data_add(msg,
1826                                               &op->notify.request_data);
1827                        ceph_osdc_msg_data_add(req->r_reply,
1828                                               &op->notify.response_data);
1829                        break;
1830                }
1831
1832                data_len += op->indata_len;
1833        }
1834
1835        WARN_ON(data_len != msg->data_length);
1836}
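/*
 * In short: data for ops the client sends (writes, xattrs, notify-ack)
 * is hung off @msg, data the OSD returns (stat, read, list-watchers)
 * off req->r_reply, and CALL/NOTIFY contribute to both.  The final
 * WARN_ON checks that ->indata_len of all ops adds up to what was
 * attached to @msg.
 */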
1837
1838static void encode_pgid(void **p, const struct ceph_pg *pgid)
1839{
1840        ceph_encode_8(p, 1);
1841        ceph_encode_64(p, pgid->pool);
1842        ceph_encode_32(p, pgid->seed);
1843        ceph_encode_32(p, -1); /* preferred */
1844}
1845
1846static void encode_spgid(void **p, const struct ceph_spg *spgid)
1847{
1848        ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1);
1849        encode_pgid(p, &spgid->pgid);
1850        ceph_encode_8(p, spgid->shard);
1851}
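/*
 * The legacy pg_t encoding above is a fixed-size blob -- a version
 * byte, pool (u64), seed (u32) and the unused preferred field (u32) --
 * presumably what CEPH_PGID_ENCODING_LEN accounts for.  spg_t wraps it
 * in a start-of-encoding header and appends the shard byte, hence the
 * "+ 1" in the length passed to ceph_start_encoding().
 */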
1852
1853static void encode_oloc(void **p, void *end,
1854                        const struct ceph_object_locator *oloc)
1855{
1856        ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc));
1857        ceph_encode_64(p, oloc->pool);
1858        ceph_encode_32(p, -1); /* preferred */
1859        ceph_encode_32(p, 0);  /* key len */
1860        if (oloc->pool_ns)
1861                ceph_encode_string(p, end, oloc->pool_ns->str,
1862                                   oloc->pool_ns->len);
1863        else
1864                ceph_encode_32(p, 0);
1865}
1866
1867static void encode_request_partial(struct ceph_osd_request *req,
1868                                   struct ceph_msg *msg)
1869{
1870        void *p = msg->front.iov_base;
1871        void *const end = p + msg->front_alloc_len;
1872        u32 data_len = 0;
1873        int i;
1874
1875        if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
1876                /* snapshots aren't writeable */
1877                WARN_ON(req->r_snapid != CEPH_NOSNAP);
1878        } else {
1879                WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
1880                        req->r_data_offset || req->r_snapc);
1881        }
1882
1883        setup_request_data(req, msg);
1884
1885        encode_spgid(&p, &req->r_t.spgid); /* actual spg */
1886        ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
1887        ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
1888        ceph_encode_32(&p, req->r_flags);
1889
1890        /* reqid */
1891        ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid));
1892        memset(p, 0, sizeof(struct ceph_osd_reqid));
1893        p += sizeof(struct ceph_osd_reqid);
1894
1895        /* trace */
1896        memset(p, 0, sizeof(struct ceph_blkin_trace_info));
1897        p += sizeof(struct ceph_blkin_trace_info);
1898
1899        ceph_encode_32(&p, 0); /* client_inc, always 0 */
1900        ceph_encode_timespec(p, &req->r_mtime);
1901        p += sizeof(struct ceph_timespec);
1902
1903        encode_oloc(&p, end, &req->r_t.target_oloc);
1904        ceph_encode_string(&p, end, req->r_t.target_oid.name,
1905                           req->r_t.target_oid.name_len);
1906
1907        /* ops, can imply data */
1908        ceph_encode_16(&p, req->r_num_ops);
1909        for (i = 0; i < req->r_num_ops; i++) {
1910                data_len += osd_req_encode_op(p, &req->r_ops[i]);
1911                p += sizeof(struct ceph_osd_op);
1912        }
1913
1914        ceph_encode_64(&p, req->r_snapid); /* snapid */
1915        if (req->r_snapc) {
1916                ceph_encode_64(&p, req->r_snapc->seq);
1917                ceph_encode_32(&p, req->r_snapc->num_snaps);
1918                for (i = 0; i < req->r_snapc->num_snaps; i++)
1919                        ceph_encode_64(&p, req->r_snapc->snaps[i]);
1920        } else {
1921                ceph_encode_64(&p, 0); /* snap_seq */
1922                ceph_encode_32(&p, 0); /* snaps len */
1923        }
1924
1925        ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
1926        BUG_ON(p > end - 8); /* space for features */
1927
1928        msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */
1929        /* front_len is finalized in encode_request_finish() */
1930        msg->front.iov_len = p - msg->front.iov_base;
1931        msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1932        msg->hdr.data_len = cpu_to_le32(data_len);
1933        /*
1934         * The header "data_off" is a hint to the receiver allowing it
1935         * to align received data into its buffers such that there's no
1936         * need to re-copy it before writing it to disk (direct I/O).
1937         */
1938        msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
1939
1940        dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg,
1941             req->r_t.target_oid.name, req->r_t.target_oid.name_len);
1942}
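/*
 * To recap, the v8 front assembled above is, in order: spgid, raw
 * hash, osdmap epoch, flags, reqid (zeroed), blkin trace (zeroed),
 * client_inc (0), mtime, oloc, oid, the op vector, snapid, snap
 * context and retry_attempt.  encode_request_finish() below either
 * appends peer features (luminous) or reshuffles these fields into the
 * v4 layout (pre-luminous) -- hence the 8 spare bytes asserted above.
 */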
1943
1944static void encode_request_finish(struct ceph_msg *msg)
1945{
1946        void *p = msg->front.iov_base;
1947        void *const partial_end = p + msg->front.iov_len;
1948        void *const end = p + msg->front_alloc_len;
1949
1950        if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) {
1951                /* luminous OSD -- encode features and be done */
1952                p = partial_end;
1953                ceph_encode_64(&p, msg->con->peer_features);
1954        } else {
1955                struct {
1956                        char spgid[CEPH_ENCODING_START_BLK_LEN +
1957                                   CEPH_PGID_ENCODING_LEN + 1];
1958                        __le32 hash;
1959                        __le32 epoch;
1960                        __le32 flags;
1961                        char reqid[CEPH_ENCODING_START_BLK_LEN +
1962                                   sizeof(struct ceph_osd_reqid)];
1963                        char trace[sizeof(struct ceph_blkin_trace_info)];
1964                        __le32 client_inc;
1965                        struct ceph_timespec mtime;
1966                } __packed head;
1967                struct ceph_pg pgid;
1968                void *oloc, *oid, *tail;
1969                int oloc_len, oid_len, tail_len;
1970                int len;
1971
1972                /*
1973                 * Pre-luminous OSD -- reencode v8 into v4 using @head
1974                 * as a temporary buffer.  Encode the raw PG; the rest
1975                 * is just a matter of moving oloc, oid and tail blobs
1976                 * around.
1977                 */
1978                memcpy(&head, p, sizeof(head));
1979                p += sizeof(head);
1980
1981                oloc = p;
1982                p += CEPH_ENCODING_START_BLK_LEN;
1983                pgid.pool = ceph_decode_64(&p);
1984                p += 4 + 4; /* preferred, key len */
1985                len = ceph_decode_32(&p);
1986                p += len;   /* nspace */
1987                oloc_len = p - oloc;
1988
1989                oid = p;
1990                len = ceph_decode_32(&p);
1991                p += len;
1992                oid_len = p - oid;
1993
1994                tail = p;
1995                tail_len = partial_end - p;
1996
1997                p = msg->front.iov_base;
1998                ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc));
1999                ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch));
2000                ceph_encode_copy(&p, &head.flags, sizeof(head.flags));
2001                ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime));
2002
2003                /* reassert_version */
2004                memset(p, 0, sizeof(struct ceph_eversion));
2005                p += sizeof(struct ceph_eversion);
2006
2007                BUG_ON(p >= oloc);
2008                memmove(p, oloc, oloc_len);
2009                p += oloc_len;
2010
2011                pgid.seed = le32_to_cpu(head.hash);
2012                encode_pgid(&p, &pgid); /* raw pg */
2013
2014                BUG_ON(p >= oid);
2015                memmove(p, oid, oid_len);
2016                p += oid_len;
2017
2018                /* tail -- ops, snapid, snapc, retry_attempt */
2019                BUG_ON(p >= tail);
2020                memmove(p, tail, tail_len);
2021                p += tail_len;
2022
2023                msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
2024        }
2025
2026        BUG_ON(p > end);
2027        msg->front.iov_len = p - msg->front.iov_base;
2028        msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2029
2030        dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg,
2031             le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len),
2032             le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len),
2033             le16_to_cpu(msg->hdr.version));
2034}
2035
2036/*
2037 * @req has to be assigned a tid and registered.
2038 */
2039static void send_request(struct ceph_osd_request *req)
2040{
2041        struct ceph_osd *osd = req->r_osd;
2042
2043        verify_osd_locked(osd);
2044        WARN_ON(osd->o_osd != req->r_t.osd);
2045
2046        /* backoff? */
2047        if (should_plug_request(req))
2048                return;
2049
2050        /*
2051         * We may have a previously queued request message hanging
2052         * around.  Cancel it to avoid corrupting the msgr.
2053         */
2054        if (req->r_sent)
2055                ceph_msg_revoke(req->r_request);
2056
2057        req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
2058        if (req->r_attempts)
2059                req->r_flags |= CEPH_OSD_FLAG_RETRY;
2060        else
2061                WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
2062
2063        encode_request_partial(req, req->r_request);
2064
2065        dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n",
2066             __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
2067             req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
2068             req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
2069             req->r_attempts);
2070
2071        req->r_t.paused = false;
2072        req->r_stamp = jiffies;
2073        req->r_attempts++;
2074
2075        req->r_sent = osd->o_incarnation;
2076        req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
2077        ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
2078}
2079
2080static void maybe_request_map(struct ceph_osd_client *osdc)
2081{
2082        bool continuous = false;
2083
2084        verify_osdc_locked(osdc);
2085        WARN_ON(!osdc->osdmap->epoch);
2086
2087        if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2088            ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
2089            ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2090                dout("%s osdc %p continuous\n", __func__, osdc);
2091                continuous = true;
2092        } else {
2093                dout("%s osdc %p onetime\n", __func__, osdc);
2094        }
2095
2096        if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
2097                               osdc->osdmap->epoch + 1, continuous))
2098                ceph_monc_renew_subs(&osdc->client->monc);
2099}
2100
2101static void complete_request(struct ceph_osd_request *req, int err);
2102static void send_map_check(struct ceph_osd_request *req);
2103
2104static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
2105{
2106        struct ceph_osd_client *osdc = req->r_osdc;
2107        struct ceph_osd *osd;
2108        enum calc_target_result ct_res;
2109        bool need_send = false;
2110        bool promoted = false;
2111        bool need_abort = false;
2112
2113        WARN_ON(req->r_tid);
2114        dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
2115
2116again:
2117        ct_res = calc_target(osdc, &req->r_t, NULL, false);
2118        if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
2119                goto promote;
2120
2121        osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
2122        if (IS_ERR(osd)) {
2123                WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
2124                goto promote;
2125        }
2126
2127        if (osdc->osdmap->epoch < osdc->epoch_barrier) {
2128                dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
2129                     osdc->epoch_barrier);
2130                req->r_t.paused = true;
2131                maybe_request_map(osdc);
2132        } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2133                   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2134                dout("req %p pausewr\n", req);
2135                req->r_t.paused = true;
2136                maybe_request_map(osdc);
2137        } else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
2138                   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2139                dout("req %p pauserd\n", req);
2140                req->r_t.paused = true;
2141                maybe_request_map(osdc);
2142        } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2143                   !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
2144                                     CEPH_OSD_FLAG_FULL_FORCE)) &&
2145                   (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2146                    pool_full(osdc, req->r_t.base_oloc.pool))) {
2147                dout("req %p full/pool_full\n", req);
2148                pr_warn_ratelimited("FULL or reached pool quota\n");
2149                req->r_t.paused = true;
2150                maybe_request_map(osdc);
2151                if (req->r_abort_on_full)
2152                        need_abort = true;
2153        } else if (!osd_homeless(osd)) {
2154                need_send = true;
2155        } else {
2156                maybe_request_map(osdc);
2157        }
2158
2159        mutex_lock(&osd->lock);
2160        /*
2161         * Assign the tid atomically with send_request() to protect
2162         * multiple writes to the same object from racing with each
2163         * other, which would result in out-of-order ops on the OSDs.
2164         */
2165        req->r_tid = atomic64_inc_return(&osdc->last_tid);
2166        link_request(osd, req);
2167        if (need_send)
2168                send_request(req);
2169        else if (need_abort)
2170                complete_request(req, -ENOSPC);
2171        mutex_unlock(&osd->lock);
2172
2173        if (ct_res == CALC_TARGET_POOL_DNE)
2174                send_map_check(req);
2175
2176        if (promoted)
2177                downgrade_write(&osdc->lock);
2178        return;
2179
2180promote:
2181        up_read(&osdc->lock);
2182        down_write(&osdc->lock);
2183        wrlocked = true;
2184        promoted = true;
2185        goto again;
2186}
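/*
 * Note the promotion dance in __submit_request(): the fast path runs
 * under the read lock, but creating an OSD session and starting a
 * pool-DNE map check require the write lock.  rwsem has no upgrade
 * primitive, so we drop the read lock, take the write lock and redo
 * the target calculation from scratch, downgrading again on the way
 * out if we promoted.
 */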
2187
2188static void account_request(struct ceph_osd_request *req)
2189{
2190        WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
2191        WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));
2192
2193        req->r_flags |= CEPH_OSD_FLAG_ONDISK;
2194        atomic_inc(&req->r_osdc->num_requests);
2195
2196        req->r_start_stamp = jiffies;
2197}
2198
2199static void submit_request(struct ceph_osd_request *req, bool wrlocked)
2200{
2201        ceph_osdc_get_request(req);
2202        account_request(req);
2203        __submit_request(req, wrlocked);
2204}
2205
2206static void finish_request(struct ceph_osd_request *req)
2207{
2208        struct ceph_osd_client *osdc = req->r_osdc;
2209
2210        WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
2211        dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2212
2213        if (req->r_osd)
2214                unlink_request(req->r_osd, req);
2215        atomic_dec(&osdc->num_requests);
2216
2217        /*
2218         * If an OSD has failed or returned and a request has been sent
2219         * twice, it's possible to get a reply and end up here while the
2220         * request message is queued for delivery.  We will ignore the
2221         * reply, so not a big deal, but better to try and catch it.
2222         */
2223        ceph_msg_revoke(req->r_request);
2224        ceph_msg_revoke_incoming(req->r_reply);
2225}
2226
2227static void __complete_request(struct ceph_osd_request *req)
2228{
2229        if (req->r_callback) {
2230                dout("%s req %p tid %llu cb %pf result %d\n", __func__, req,
2231                     req->r_tid, req->r_callback, req->r_result);
2232                req->r_callback(req);
2233        }
2234}
2235
2236/*
2237 * This is open-coded in handle_reply().
2238 */
2239static void complete_request(struct ceph_osd_request *req, int err)
2240{
2241        dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2242
2243        req->r_result = err;
2244        finish_request(req);
2245        __complete_request(req);
2246        complete_all(&req->r_completion);
2247        ceph_osdc_put_request(req);
2248}
2249
2250static void cancel_map_check(struct ceph_osd_request *req)
2251{
2252        struct ceph_osd_client *osdc = req->r_osdc;
2253        struct ceph_osd_request *lookup_req;
2254
2255        verify_osdc_wrlocked(osdc);
2256
2257        lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2258        if (!lookup_req)
2259                return;
2260
2261        WARN_ON(lookup_req != req);
2262        erase_request_mc(&osdc->map_checks, req);
2263        ceph_osdc_put_request(req);
2264}
2265
2266static void cancel_request(struct ceph_osd_request *req)
2267{
2268        dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2269
2270        cancel_map_check(req);
2271        finish_request(req);
2272        complete_all(&req->r_completion);
2273        ceph_osdc_put_request(req);
2274}
2275
2276static void abort_request(struct ceph_osd_request *req, int err)
2277{
2278        dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2279
2280        cancel_map_check(req);
2281        complete_request(req, err);
2282}
2283
2284static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2285{
2286        if (likely(eb > osdc->epoch_barrier)) {
2287                dout("updating epoch_barrier from %u to %u\n",
2288                                osdc->epoch_barrier, eb);
2289                osdc->epoch_barrier = eb;
2290                /* Request map if we're not at the barrier yet */
2291                if (eb > osdc->osdmap->epoch)
2292                        maybe_request_map(osdc);
2293        }
2294}
2295
2296void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2297{
2298        down_read(&osdc->lock);
2299        if (unlikely(eb > osdc->epoch_barrier)) {
2300                up_read(&osdc->lock);
2301                down_write(&osdc->lock);
2302                update_epoch_barrier(osdc, eb);
2303                up_write(&osdc->lock);
2304        } else {
2305                up_read(&osdc->lock);
2306        }
2307}
2308EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
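/*
 * The barrier is double-checked: once under the read lock to keep the
 * common no-op case cheap, and again in update_epoch_barrier() under
 * the write lock, since another thread may have raised it between
 * up_read() and down_write().
 */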
2309
2310/*
2311 * Drop all pending requests that are stalled waiting on a full condition to
2312 * clear, and complete them with ENOSPC as the return code. Set the
2313 * osdc->epoch_barrier to the latest map epoch that we've seen if any were
2314 * cancelled.
2315 */
2316static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
2317{
2318        struct rb_node *n;
2319        bool victims = false;
2320
2321        dout("enter abort_on_full\n");
2322
2323        if (!ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) && !have_pool_full(osdc))
2324                goto out;
2325
2326        /* Scan list and see if there is anything to abort */
2327        for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
2328                struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
2329                struct rb_node *m;
2330
2331                m = rb_first(&osd->o_requests);
2332                while (m) {
2333                        struct ceph_osd_request *req = rb_entry(m,
2334                                        struct ceph_osd_request, r_node);
2335                        m = rb_next(m);
2336
2337                        if (req->r_abort_on_full) {
2338                                victims = true;
2339                                break;
2340                        }
2341                }
2342                if (victims)
2343                        break;
2344        }
2345
2346        if (!victims)
2347                goto out;
2348
2349        /*
2350         * Update the barrier to current epoch if it's behind that point,
2351         * since we know we have some requests to be aborted in the tree.
2352         */
2353        update_epoch_barrier(osdc, osdc->osdmap->epoch);
2354
2355        for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
2356                struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
2357                struct rb_node *m;
2358
2359                m = rb_first(&osd->o_requests);
2360                while (m) {
2361                        struct ceph_osd_request *req = rb_entry(m,
2362                                        struct ceph_osd_request, r_node);
2363                        m = rb_next(m);
2364
2365                        if (req->r_abort_on_full &&
2366                            (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2367                             pool_full(osdc, req->r_t.target_oloc.pool)))
2368                                abort_request(req, -ENOSPC);
2369                }
2370        }
2371out:
2372        dout("return abort_on_full barrier=%u\n", osdc->epoch_barrier);
2373}
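/*
 * Note the two passes above: the first pass only looks for a victim,
 * so that the epoch barrier can be raised once, before any request is
 * actually completed with -ENOSPC.
 */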
2374
2375static void check_pool_dne(struct ceph_osd_request *req)
2376{
2377        struct ceph_osd_client *osdc = req->r_osdc;
2378        struct ceph_osdmap *map = osdc->osdmap;
2379
2380        verify_osdc_wrlocked(osdc);
2381        WARN_ON(!map->epoch);
2382
2383        if (req->r_attempts) {
2384                /*
2385                 * We sent a request earlier, which means that
2386                 * previously the pool existed, and now it does not
2387                 * (i.e., it was deleted).
2388                 */
2389                req->r_map_dne_bound = map->epoch;
2390                dout("%s req %p tid %llu pool disappeared\n", __func__, req,
2391                     req->r_tid);
2392        } else {
2393                dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
2394                     req, req->r_tid, req->r_map_dne_bound, map->epoch);
2395        }
2396
2397        if (req->r_map_dne_bound) {
2398                if (map->epoch >= req->r_map_dne_bound) {
2399                        /* we had a new enough map */
2400                        pr_info_ratelimited("tid %llu pool does not exist\n",
2401                                            req->r_tid);
2402                        complete_request(req, -ENOENT);
2403                }
2404        } else {
2405                send_map_check(req);
2406        }
2407}
2408
2409static void map_check_cb(struct ceph_mon_generic_request *greq)
2410{
2411        struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2412        struct ceph_osd_request *req;
2413        u64 tid = greq->private_data;
2414
2415        WARN_ON(greq->result || !greq->u.newest);
2416
2417        down_write(&osdc->lock);
2418        req = lookup_request_mc(&osdc->map_checks, tid);
2419        if (!req) {
2420                dout("%s tid %llu dne\n", __func__, tid);
2421                goto out_unlock;
2422        }
2423
2424        dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
2425             req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
2426        if (!req->r_map_dne_bound)
2427                req->r_map_dne_bound = greq->u.newest;
2428        erase_request_mc(&osdc->map_checks, req);
2429        check_pool_dne(req);
2430
2431        ceph_osdc_put_request(req);
2432out_unlock:
2433        up_write(&osdc->lock);
2434}
2435
2436static void send_map_check(struct ceph_osd_request *req)
2437{
2438        struct ceph_osd_client *osdc = req->r_osdc;
2439        struct ceph_osd_request *lookup_req;
2440        int ret;
2441
2442        verify_osdc_wrlocked(osdc);
2443
2444        lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2445        if (lookup_req) {
2446                WARN_ON(lookup_req != req);
2447                return;
2448        }
2449
2450        ceph_osdc_get_request(req);
2451        insert_request_mc(&osdc->map_checks, req);
2452        ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2453                                          map_check_cb, req->r_tid);
2454        WARN_ON(ret);
2455}
2456
2457/*
2458 * lingering requests, watch/notify v2 infrastructure
2459 */
2460static void linger_release(struct kref *kref)
2461{
2462        struct ceph_osd_linger_request *lreq =
2463            container_of(kref, struct ceph_osd_linger_request, kref);
2464
2465        dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
2466             lreq->reg_req, lreq->ping_req);
2467        WARN_ON(!RB_EMPTY_NODE(&lreq->node));
2468        WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
2469        WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
2470        WARN_ON(!list_empty(&lreq->scan_item));
2471        WARN_ON(!list_empty(&lreq->pending_lworks));
2472        WARN_ON(lreq->osd);
2473
2474        if (lreq->reg_req)
2475                ceph_osdc_put_request(lreq->reg_req);
2476        if (lreq->ping_req)
2477                ceph_osdc_put_request(lreq->ping_req);
2478        target_destroy(&lreq->t);
2479        kfree(lreq);
2480}
2481
2482static void linger_put(struct ceph_osd_linger_request *lreq)
2483{
2484        if (lreq)
2485                kref_put(&lreq->kref, linger_release);
2486}
2487
2488static struct ceph_osd_linger_request *
2489linger_get(struct ceph_osd_linger_request *lreq)
2490{
2491        kref_get(&lreq->kref);
2492        return lreq;
2493}
2494
2495static struct ceph_osd_linger_request *
2496linger_alloc(struct ceph_osd_client *osdc)
2497{
2498        struct ceph_osd_linger_request *lreq;
2499
2500        lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
2501        if (!lreq)
2502                return NULL;
2503
2504        kref_init(&lreq->kref);
2505        mutex_init(&lreq->lock);
2506        RB_CLEAR_NODE(&lreq->node);
2507        RB_CLEAR_NODE(&lreq->osdc_node);
2508        RB_CLEAR_NODE(&lreq->mc_node);
2509        INIT_LIST_HEAD(&lreq->scan_item);
2510        INIT_LIST_HEAD(&lreq->pending_lworks);
2511        init_completion(&lreq->reg_commit_wait);
2512        init_completion(&lreq->notify_finish_wait);
2513
2514        lreq->osdc = osdc;
2515        target_init(&lreq->t);
2516
2517        dout("%s lreq %p\n", __func__, lreq);
2518        return lreq;
2519}
2520
2521DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
2522DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
2523DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
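/*
 * A linger request can thus sit in up to three trees at once: ->node
 * in its session's o_linger_requests (link_linger()), ->osdc_node in
 * osdc->linger_requests for as long as it is registered
 * (linger_register()) and ->mc_node in osdc->linger_map_checks while
 * a map check is in flight (send_linger_map_check()).
 */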
2524
2525/*
2526 * Create linger request <-> OSD session relation.
2527 *
2528 * @lreq has to be registered, @osd may be homeless.
2529 */
2530static void link_linger(struct ceph_osd *osd,
2531                        struct ceph_osd_linger_request *lreq)
2532{
2533        verify_osd_locked(osd);
2534        WARN_ON(!lreq->linger_id || lreq->osd);
2535        dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2536             osd->o_osd, lreq, lreq->linger_id);
2537
2538        if (!osd_homeless(osd))
2539                __remove_osd_from_lru(osd);
2540        else
2541                atomic_inc(&osd->o_osdc->num_homeless);
2542
2543        get_osd(osd);
2544        insert_linger(&osd->o_linger_requests, lreq);
2545        lreq->osd = osd;
2546}
2547
2548static void unlink_linger(struct ceph_osd *osd,
2549                          struct ceph_osd_linger_request *lreq)
2550{
2551        verify_osd_locked(osd);
2552        WARN_ON(lreq->osd != osd);
2553        dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2554             osd->o_osd, lreq, lreq->linger_id);
2555
2556        lreq->osd = NULL;
2557        erase_linger(&osd->o_linger_requests, lreq);
2558        put_osd(osd);
2559
2560        if (!osd_homeless(osd))
2561                maybe_move_osd_to_lru(osd);
2562        else
2563                atomic_dec(&osd->o_osdc->num_homeless);
2564}
2565
2566static bool __linger_registered(struct ceph_osd_linger_request *lreq)
2567{
2568        verify_osdc_locked(lreq->osdc);
2569
2570        return !RB_EMPTY_NODE(&lreq->osdc_node);
2571}
2572
2573static bool linger_registered(struct ceph_osd_linger_request *lreq)
2574{
2575        struct ceph_osd_client *osdc = lreq->osdc;
2576        bool registered;
2577
2578        down_read(&osdc->lock);
2579        registered = __linger_registered(lreq);
2580        up_read(&osdc->lock);
2581
2582        return registered;
2583}
2584
2585static void linger_register(struct ceph_osd_linger_request *lreq)
2586{
2587        struct ceph_osd_client *osdc = lreq->osdc;
2588
2589        verify_osdc_wrlocked(osdc);
2590        WARN_ON(lreq->linger_id);
2591
2592        linger_get(lreq);
2593        lreq->linger_id = ++osdc->last_linger_id;
2594        insert_linger_osdc(&osdc->linger_requests, lreq);
2595}
2596
2597static void linger_unregister(struct ceph_osd_linger_request *lreq)
2598{
2599        struct ceph_osd_client *osdc = lreq->osdc;
2600
2601        verify_osdc_wrlocked(osdc);
2602
2603        erase_linger_osdc(&osdc->linger_requests, lreq);
2604        linger_put(lreq);
2605}
2606
2607static void cancel_linger_request(struct ceph_osd_request *req)
2608{
2609        struct ceph_osd_linger_request *lreq = req->r_priv;
2610
2611        WARN_ON(!req->r_linger);
2612        cancel_request(req);
2613        linger_put(lreq);
2614}
2615
2616struct linger_work {
2617        struct work_struct work;
2618        struct ceph_osd_linger_request *lreq;
2619        struct list_head pending_item;
2620        unsigned long queued_stamp;
2621
2622        union {
2623                struct {
2624                        u64 notify_id;
2625                        u64 notifier_id;
2626                        void *payload; /* points into @msg front */
2627                        size_t payload_len;
2628
2629                        struct ceph_msg *msg; /* for ceph_msg_put() */
2630                } notify;
2631                struct {
2632                        int err;
2633                } error;
2634        };
2635};
2636
2637static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
2638                                       work_func_t workfn)
2639{
2640        struct linger_work *lwork;
2641
2642        lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
2643        if (!lwork)
2644                return NULL;
2645
2646        INIT_WORK(&lwork->work, workfn);
2647        INIT_LIST_HEAD(&lwork->pending_item);
2648        lwork->lreq = linger_get(lreq);
2649
2650        return lwork;
2651}
2652
2653static void lwork_free(struct linger_work *lwork)
2654{
2655        struct ceph_osd_linger_request *lreq = lwork->lreq;
2656
2657        mutex_lock(&lreq->lock);
2658        list_del(&lwork->pending_item);
2659        mutex_unlock(&lreq->lock);
2660
2661        linger_put(lreq);
2662        kfree(lwork);
2663}
2664
2665static void lwork_queue(struct linger_work *lwork)
2666{
2667        struct ceph_osd_linger_request *lreq = lwork->lreq;
2668        struct ceph_osd_client *osdc = lreq->osdc;
2669
2670        verify_lreq_locked(lreq);
2671        WARN_ON(!list_empty(&lwork->pending_item));
2672
2673        lwork->queued_stamp = jiffies;
2674        list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2675        queue_work(osdc->notify_wq, &lwork->work);
2676}
2677
2678static void do_watch_notify(struct work_struct *w)
2679{
2680        struct linger_work *lwork = container_of(w, struct linger_work, work);
2681        struct ceph_osd_linger_request *lreq = lwork->lreq;
2682
2683        if (!linger_registered(lreq)) {
2684                dout("%s lreq %p not registered\n", __func__, lreq);
2685                goto out;
2686        }
2687
2688        WARN_ON(!lreq->is_watch);
2689        dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2690             __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2691             lwork->notify.payload_len);
2692        lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2693                  lwork->notify.notifier_id, lwork->notify.payload,
2694                  lwork->notify.payload_len);
2695
2696out:
2697        ceph_msg_put(lwork->notify.msg);
2698        lwork_free(lwork);
2699}
2700
2701static void do_watch_error(struct work_struct *w)
2702{
2703        struct linger_work *lwork = container_of(w, struct linger_work, work);
2704        struct ceph_osd_linger_request *lreq = lwork->lreq;
2705
2706        if (!linger_registered(lreq)) {
2707                dout("%s lreq %p not registered\n", __func__, lreq);
2708                goto out;
2709        }
2710
2711        dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
2712        lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
2713
2714out:
2715        lwork_free(lwork);
2716}
2717
2718static void queue_watch_error(struct ceph_osd_linger_request *lreq)
2719{
2720        struct linger_work *lwork;
2721
2722        lwork = lwork_alloc(lreq, do_watch_error);
2723        if (!lwork) {
2724                pr_err("failed to allocate error-lwork\n");
2725                return;
2726        }
2727
2728        lwork->error.err = lreq->last_error;
2729        lwork_queue(lwork);
2730}
2731
2732static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
2733                                       int result)
2734{
2735        if (!completion_done(&lreq->reg_commit_wait)) {
2736                lreq->reg_commit_error = (result <= 0 ? result : 0);
2737                complete_all(&lreq->reg_commit_wait);
2738        }
2739}
2740
2741static void linger_commit_cb(struct ceph_osd_request *req)
2742{
2743        struct ceph_osd_linger_request *lreq = req->r_priv;
2744
2745        mutex_lock(&lreq->lock);
2746        dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
2747             lreq->linger_id, req->r_result);
2748        linger_reg_commit_complete(lreq, req->r_result);
2749        lreq->committed = true;
2750
2751        if (!lreq->is_watch) {
2752                struct ceph_osd_data *osd_data =
2753                    osd_req_op_data(req, 0, notify, response_data);
2754                void *p = page_address(osd_data->pages[0]);
2755
2756                WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
2757                        osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
2758
2759                /* make note of the notify_id */
2760                if (req->r_ops[0].outdata_len >= sizeof(u64)) {
2761                        lreq->notify_id = ceph_decode_64(&p);
2762                        dout("lreq %p notify_id %llu\n", lreq,
2763                             lreq->notify_id);
2764                } else {
2765                        dout("lreq %p no notify_id\n", lreq);
2766                }
2767        }
2768
2769        mutex_unlock(&lreq->lock);
2770        linger_put(lreq);
2771}
2772
2773static int normalize_watch_error(int err)
2774{
2775        /*
2776         * Translate ENOENT -> ENOTCONN so that a delete->disconnection
2777         * notification and a failure to reconnect because we raced with
2778         * the delete appear the same to the user.
2779         */
2780        if (err == -ENOENT)
2781                err = -ENOTCONN;
2782
2783        return err;
2784}
2785
2786static void linger_reconnect_cb(struct ceph_osd_request *req)
2787{
2788        struct ceph_osd_linger_request *lreq = req->r_priv;
2789
2790        mutex_lock(&lreq->lock);
2791        dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
2792             lreq, lreq->linger_id, req->r_result, lreq->last_error);
2793        if (req->r_result < 0) {
2794                if (!lreq->last_error) {
2795                        lreq->last_error = normalize_watch_error(req->r_result);
2796                        queue_watch_error(lreq);
2797                }
2798        }
2799
2800        mutex_unlock(&lreq->lock);
2801        linger_put(lreq);
2802}
2803
2804static void send_linger(struct ceph_osd_linger_request *lreq)
2805{
2806        struct ceph_osd_request *req = lreq->reg_req;
2807        struct ceph_osd_req_op *op = &req->r_ops[0];
2808
2809        verify_osdc_wrlocked(req->r_osdc);
2810        dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2811
2812        if (req->r_osd)
2813                cancel_linger_request(req);
2814
2815        request_reinit(req);
2816        ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
2817        ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
2818        req->r_flags = lreq->t.flags;
2819        req->r_mtime = lreq->mtime;
2820
2821        mutex_lock(&lreq->lock);
2822        if (lreq->is_watch && lreq->committed) {
2823                WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2824                        op->watch.cookie != lreq->linger_id);
2825                op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
2826                op->watch.gen = ++lreq->register_gen;
2827                dout("lreq %p reconnect register_gen %u\n", lreq,
2828                     op->watch.gen);
2829                req->r_callback = linger_reconnect_cb;
2830        } else {
2831                if (!lreq->is_watch)
2832                        lreq->notify_id = 0;
2833                else
2834                        WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
2835                dout("lreq %p register\n", lreq);
2836                req->r_callback = linger_commit_cb;
2837        }
2838        mutex_unlock(&lreq->lock);
2839
2840        req->r_priv = linger_get(lreq);
2841        req->r_linger = true;
2842
2843        submit_request(req, true);
2844}
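/*
 * Note that lreq->reg_req is recycled across resends via
 * request_reinit() above.  Once the initial registration has
 * committed, a watch is re-established with RECONNECT and a bumped
 * register_gen, which is what lets linger_ping_cb() ignore pongs from
 * an older incarnation of the watch.
 */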
2845
2846static void linger_ping_cb(struct ceph_osd_request *req)
2847{
2848        struct ceph_osd_linger_request *lreq = req->r_priv;
2849
2850        mutex_lock(&lreq->lock);
2851        dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
2852             __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
2853             lreq->last_error);
2854        if (lreq->register_gen == req->r_ops[0].watch.gen) {
2855                if (!req->r_result) {
2856                        lreq->watch_valid_thru = lreq->ping_sent;
2857                } else if (!lreq->last_error) {
2858                        lreq->last_error = normalize_watch_error(req->r_result);
2859                        queue_watch_error(lreq);
2860                }
2861        } else {
2862                dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
2863                     lreq->register_gen, req->r_ops[0].watch.gen);
2864        }
2865
2866        mutex_unlock(&lreq->lock);
2867        linger_put(lreq);
2868}
2869
2870static void send_linger_ping(struct ceph_osd_linger_request *lreq)
2871{
2872        struct ceph_osd_client *osdc = lreq->osdc;
2873        struct ceph_osd_request *req = lreq->ping_req;
2874        struct ceph_osd_req_op *op = &req->r_ops[0];
2875
2876        if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2877                dout("%s PAUSERD\n", __func__);
2878                return;
2879        }
2880
2881        lreq->ping_sent = jiffies;
2882        dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
2883             __func__, lreq, lreq->linger_id, lreq->ping_sent,
2884             lreq->register_gen);
2885
2886        if (req->r_osd)
2887                cancel_linger_request(req);
2888
2889        request_reinit(req);
2890        target_copy(&req->r_t, &lreq->t);
2891
2892        WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2893                op->watch.cookie != lreq->linger_id ||
2894                op->watch.op != CEPH_OSD_WATCH_OP_PING);
2895        op->watch.gen = lreq->register_gen;
2896        req->r_callback = linger_ping_cb;
2897        req->r_priv = linger_get(lreq);
2898        req->r_linger = true;
2899
2900        ceph_osdc_get_request(req);
2901        account_request(req);
2902        req->r_tid = atomic64_inc_return(&osdc->last_tid);
2903        link_request(lreq->osd, req);
2904        send_request(req);
2905}
2906
2907static void linger_submit(struct ceph_osd_linger_request *lreq)
2908{
2909        struct ceph_osd_client *osdc = lreq->osdc;
2910        struct ceph_osd *osd;
2911
2912        calc_target(osdc, &lreq->t, NULL, false);
2913        osd = lookup_create_osd(osdc, lreq->t.osd, true);
2914        link_linger(osd, lreq);
2915
2916        send_linger(lreq);
2917}
2918
2919static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
2920{
2921        struct ceph_osd_client *osdc = lreq->osdc;
2922        struct ceph_osd_linger_request *lookup_lreq;
2923
2924        verify_osdc_wrlocked(osdc);
2925
2926        lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
2927                                       lreq->linger_id);
2928        if (!lookup_lreq)
2929                return;
2930
2931        WARN_ON(lookup_lreq != lreq);
2932        erase_linger_mc(&osdc->linger_map_checks, lreq);
2933        linger_put(lreq);
2934}
2935
2936/*
2937 * @lreq has to be both registered and linked.
2938 */
2939static void __linger_cancel(struct ceph_osd_linger_request *lreq)
2940{
2941        if (lreq->is_watch && lreq->ping_req->r_osd)
2942                cancel_linger_request(lreq->ping_req);
2943        if (lreq->reg_req->r_osd)
2944                cancel_linger_request(lreq->reg_req);
2945        cancel_linger_map_check(lreq);
2946        unlink_linger(lreq->osd, lreq);
2947        linger_unregister(lreq);
2948}
2949
2950static void linger_cancel(struct ceph_osd_linger_request *lreq)
2951{
2952        struct ceph_osd_client *osdc = lreq->osdc;
2953
2954        down_write(&osdc->lock);
2955        if (__linger_registered(lreq))
2956                __linger_cancel(lreq);
2957        up_write(&osdc->lock);
2958}
2959
2960static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
2961
2962static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
2963{
2964        struct ceph_osd_client *osdc = lreq->osdc;
2965        struct ceph_osdmap *map = osdc->osdmap;
2966
2967        verify_osdc_wrlocked(osdc);
2968        WARN_ON(!map->epoch);
2969
2970        if (lreq->register_gen) {
2971                lreq->map_dne_bound = map->epoch;
2972                dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
2973                     lreq, lreq->linger_id);
2974        } else {
2975                dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
2976                     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
2977                     map->epoch);
2978        }
2979
2980        if (lreq->map_dne_bound) {
2981                if (map->epoch >= lreq->map_dne_bound) {
2982                        /* we had a new enough map */
2983                        pr_info("linger_id %llu pool does not exist\n",
2984                                lreq->linger_id);
2985                        linger_reg_commit_complete(lreq, -ENOENT);
2986                        __linger_cancel(lreq);
2987                }
2988        } else {
2989                send_linger_map_check(lreq);
2990        }
2991}
2992
2993static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
2994{
2995        struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2996        struct ceph_osd_linger_request *lreq;
2997        u64 linger_id = greq->private_data;
2998
2999        WARN_ON(greq->result || !greq->u.newest);
3000
3001        down_write(&osdc->lock);
3002        lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
3003        if (!lreq) {
3004                dout("%s linger_id %llu dne\n", __func__, linger_id);
3005                goto out_unlock;
3006        }
3007
3008        dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
3009             __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3010             greq->u.newest);
3011        if (!lreq->map_dne_bound)
3012                lreq->map_dne_bound = greq->u.newest;
3013        erase_linger_mc(&osdc->linger_map_checks, lreq);
3014        check_linger_pool_dne(lreq);
3015
3016        linger_put(lreq);
3017out_unlock:
3018        up_write(&osdc->lock);
3019}
3020
3021static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
3022{
3023        struct ceph_osd_client *osdc = lreq->osdc;
3024        struct ceph_osd_linger_request *lookup_lreq;
3025        int ret;
3026
3027        verify_osdc_wrlocked(osdc);
3028
3029        lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3030                                       lreq->linger_id);
3031        if (lookup_lreq) {
3032                WARN_ON(lookup_lreq != lreq);
3033                return;
3034        }
3035
3036        linger_get(lreq);
3037        insert_linger_mc(&osdc->linger_map_checks, lreq);
3038        ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
3039                                          linger_map_check_cb, lreq->linger_id);
3040        WARN_ON(ret);
3041}
3042
3043static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
3044{
3045        int ret;
3046
3047        dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3048        ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
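        /* -ERESTARTSYS if interrupted, otherwise report the commit result */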
3049        return ret ?: lreq->reg_commit_error;
3050}
3051
3052static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
3053{
3054        int ret;
3055
3056        dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3057        ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
3058        return ret ?: lreq->notify_finish_error;
3059}
3060
3061/*
3062 * Timeout callback, called every N seconds.  When one or more OSD
3063 * requests on an OSD have been active for more than N seconds, we
3064 * send a keepalive (tag + timestamp) to that OSD to ensure any
3065 * communications channel reset is detected.
3066 */
3067static void handle_timeout(struct work_struct *work)
3068{
3069        struct ceph_osd_client *osdc =
3070                container_of(work, struct ceph_osd_client, timeout_work.work);
3071        struct ceph_options *opts = osdc->client->options;
3072        unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
3073        unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
3074        LIST_HEAD(slow_osds);
3075        struct rb_node *n, *p;
3076
3077        dout("%s osdc %p\n", __func__, osdc);
3078        down_write(&osdc->lock);
3079
3080        /*
3081         * ping osds that are a bit slow.  this ensures that if there
3082         * is a break in the TCP connection we will notice, and reopen
3083         * a connection with that osd (from the fault callback).
3084         */
3085        for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3086                struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3087                bool found = false;
3088
3089                for (p = rb_first(&osd->o_requests); p; ) {
3090                        struct ceph_osd_request *req =
3091                            rb_entry(p, struct ceph_osd_request, r_node);
3092
3093                        p = rb_next(p); /* abort_request() */
3094
3095                        if (time_before(req->r_stamp, cutoff)) {
3096                                dout(" req %p tid %llu on osd%d is laggy\n",
3097                                     req, req->r_tid, osd->o_osd);
3098                                found = true;
3099                        }
3100                        if (opts->osd_request_timeout &&
3101                            time_before(req->r_start_stamp, expiry_cutoff)) {
3102                                pr_err_ratelimited("tid %llu on osd%d timeout\n",
3103                                       req->r_tid, osd->o_osd);
3104                                abort_request(req, -ETIMEDOUT);
3105                        }
3106                }
3107                for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
3108                        struct ceph_osd_linger_request *lreq =
3109                            rb_entry(p, struct ceph_osd_linger_request, node);
3110
3111                        dout(" lreq %p linger_id %llu is served by osd%d\n",
3112                             lreq, lreq->linger_id, osd->o_osd);
3113                        found = true;
3114
3115                        mutex_lock(&lreq->lock);
3116                        if (lreq->is_watch && lreq->committed && !lreq->last_error)
3117                                send_linger_ping(lreq);
3118                        mutex_unlock(&lreq->lock);
3119                }
3120
3121                if (found)
3122                        list_move_tail(&osd->o_keepalive_item, &slow_osds);
3123        }
3124
3125        if (opts->osd_request_timeout) {
3126                for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
3127                        struct ceph_osd_request *req =
3128                            rb_entry(p, struct ceph_osd_request, r_node);
3129
3130                        p = rb_next(p); /* abort_request() */
3131
3132                        if (time_before(req->r_start_stamp, expiry_cutoff)) {
3133                                pr_err_ratelimited("tid %llu on osd%d timeout\n",
3134                                       req->r_tid, osdc->homeless_osd.o_osd);
3135                                abort_request(req, -ETIMEDOUT);
3136                        }
3137                }
3138        }
3139
3140        if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
3141                maybe_request_map(osdc);
3142
3143        while (!list_empty(&slow_osds)) {
3144                struct ceph_osd *osd = list_first_entry(&slow_osds,
3145                                                        struct ceph_osd,
3146                                                        o_keepalive_item);
3147                list_del_init(&osd->o_keepalive_item);
3148                ceph_con_keepalive(&osd->o_con);
3149        }
3150
3151        up_write(&osdc->lock);
3152        schedule_delayed_work(&osdc->timeout_work,
3153                              osdc->client->options->osd_keepalive_timeout);
3154}
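/*
 * A sketch (not built) of the cutoff arithmetic handle_timeout() uses
 * above.  r_stamp and the cutoffs are raw jiffies values, so they must
 * be compared with time_before(), which stays correct across jiffies
 * wraparound where a plain '<' would not.  The stamp below is
 * hypothetical.
 */
#if 0
unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
unsigned long stamp = jiffies - 2 * opts->osd_keepalive_timeout;

if (time_before(stamp, cutoff)) {
	/* older than the keepalive window -> considered laggy */
}
#endif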
3155
3156static void handle_osds_timeout(struct work_struct *work)
3157{
3158        struct ceph_osd_client *osdc =
3159                container_of(work, struct ceph_osd_client,
3160                             osds_timeout_work.work);
3161        unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
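        /* poll at a quarter of the idle TTL so expiry is noticed promptly */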
3162        struct ceph_osd *osd, *nosd;
3163
3164        dout("%s osdc %p\n", __func__, osdc);
3165        down_write(&osdc->lock);
3166        list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
3167                if (time_before(jiffies, osd->lru_ttl))
3168                        break;
3169
3170                WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
3171                WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
3172                close_osd(osd);
3173        }
3174
3175        up_write(&osdc->lock);
3176        schedule_delayed_work(&osdc->osds_timeout_work,
3177                              round_jiffies_relative(delay));
3178}
3179
3180static int ceph_oloc_decode(void **p, void *end,
3181                            struct ceph_object_locator *oloc)
3182{
3183        u8 struct_v, struct_cv;
3184        u32 len;
3185        void *struct_end;
3186        int ret = 0;
3187
3188        ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3189        struct_v = ceph_decode_8(p);
3190        struct_cv = ceph_decode_8(p);
3191        if (struct_v < 3) {
3192                pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
3193                        struct_v, struct_cv);
3194                goto e_inval;
3195        }
3196        if (struct_cv > 6) {
3197                pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
3198                        struct_v, struct_cv);
3199                goto e_inval;
3200        }
3201        len = ceph_decode_32(p);
3202        ceph_decode_need(p, end, len, e_inval);
3203        struct_end = *p + len;
3204
3205        oloc->pool = ceph_decode_64(p);
3206        *p += 4; /* skip preferred */
3207
3208        len = ceph_decode_32(p);
3209        if (len > 0) {
3210                pr_warn("ceph_object_locator::key is set\n");
3211                goto e_inval;
3212        }
3213
3214        if (struct_v >= 5) {
3215                bool changed = false;
3216
3217                len = ceph_decode_32(p);
3218                if (len > 0) {
3219                        ceph_decode_need(p, end, len, e_inval);
3220                        if (!oloc->pool_ns ||
3221                            ceph_compare_string(oloc->pool_ns, *p, len))
3222                                changed = true;
3223                        *p += len;
3224                } else {
3225                        if (oloc->pool_ns)
3226                                changed = true;
3227                }
3228                if (changed) {
3229                        /* redirect changes namespace */
3230                        pr_warn("ceph_object_locator::nspace is changed\n");
3231                        goto e_inval;
3232                }
3233        }
3234
3235        if (struct_v >= 6) {
3236                s64 hash = ceph_decode_64(p);
3237                if (hash != -1) {
3238                        pr_warn("ceph_object_locator::hash is set\n");
3239                        goto e_inval;
3240                }
3241        }
3242
3243        /* skip the rest */
3244        *p = struct_end;
3245out:
3246        return ret;
3247
3248e_inval:
3249        ret = -EINVAL;
3250        goto out;
3251}
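/*
 * A minimal sketch (not built) of the guarded-decode pattern that
 * ceph_oloc_decode() and the other decoders in this file follow:
 * reserve bytes with ceph_decode_need(), which jumps to the supplied
 * label on a short buffer, then read them.  The function and field
 * names here are hypothetical.
 */
#if 0
static int demo_decode(void **p, void *end, u64 *pool, u32 *flags)
{
	ceph_decode_need(p, end, 8 + 4, e_inval);	/* u64 + u32 */
	*pool = ceph_decode_64(p);
	*flags = ceph_decode_32(p);
	return 0;

e_inval:
	return -EINVAL;
}
#endif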
3252
3253static int ceph_redirect_decode(void **p, void *end,
3254                                struct ceph_request_redirect *redir)
3255{
3256        u8 struct_v, struct_cv;
3257        u32 len;
3258        void *struct_end;
3259        int ret;
3260
3261        ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3262        struct_v = ceph_decode_8(p);
3263        struct_cv = ceph_decode_8(p);
3264        if (struct_cv > 1) {
3265                pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
3266                        struct_v, struct_cv);
3267                goto e_inval;
3268        }
3269        len = ceph_decode_32(p);
3270        ceph_decode_need(p, end, len, e_inval);
3271        struct_end = *p + len;
3272
3273        ret = ceph_oloc_decode(p, end, &redir->oloc);
3274        if (ret)
3275                goto out;
3276
3277        len = ceph_decode_32(p);
3278        if (len > 0) {
3279                pr_warn("ceph_request_redirect::object_name is set\n");
3280                goto e_inval;
3281        }
3282
3283        len = ceph_decode_32(p);
3284        *p += len; /* skip osd_instructions */
3285
3286        /* skip the rest */
3287        *p = struct_end;
3288out:
3289        return ret;
3290
3291e_inval:
3292        ret = -EINVAL;
3293        goto out;
3294}
3295
3296struct MOSDOpReply {
3297        struct ceph_pg pgid;
3298        u64 flags;
3299        int result;
3300        u32 epoch;
3301        int num_ops;
3302        u32 outdata_len[CEPH_OSD_MAX_OPS];
3303        s32 rval[CEPH_OSD_MAX_OPS];
3304        int retry_attempt;
3305        struct ceph_eversion replay_version;
3306        u64 user_version;
3307        struct ceph_request_redirect redirect;
3308};
3309
3310static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
3311{
3312        void *p = msg->front.iov_base;
3313        void *const end = p + msg->front.iov_len;
3314        u16 version = le16_to_cpu(msg->hdr.version);
3315        struct ceph_eversion bad_replay_version;
3316        u8 decode_redir;
3317        u32 len;
3318        int ret;
3319        int i;
3320
3321        ceph_decode_32_safe(&p, end, len, e_inval);
3322        ceph_decode_need(&p, end, len, e_inval);
3323        p += len; /* skip oid */
3324
3325        ret = ceph_decode_pgid(&p, end, &m->pgid);
3326        if (ret)
3327                return ret;
3328
3329        ceph_decode_64_safe(&p, end, m->flags, e_inval);
3330        ceph_decode_32_safe(&p, end, m->result, e_inval);
3331        ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
3332        memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
3333        p += sizeof(bad_replay_version);
3334        ceph_decode_32_safe(&p, end, m->epoch, e_inval);
3335
3336        ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
3337        if (m->num_ops > ARRAY_SIZE(m->outdata_len))
3338                goto e_inval;
3339
3340        ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
3341                         e_inval);
3342        for (i = 0; i < m->num_ops; i++) {
3343                struct ceph_osd_op *op = p;
3344
3345                m->outdata_len[i] = le32_to_cpu(op->payload_len);
3346                p += sizeof(*op);
3347        }
3348
3349        ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
3350        for (i = 0; i < m->num_ops; i++)
3351                ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
3352
3353        if (version >= 5) {
3354                ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
3355                memcpy(&m->replay_version, p, sizeof(m->replay_version));
3356                p += sizeof(m->replay_version);
3357                ceph_decode_64_safe(&p, end, m->user_version, e_inval);
3358        } else {
3359                m->replay_version = bad_replay_version; /* struct */
3360                m->user_version = le64_to_cpu(m->replay_version.version);
3361        }
3362
3363        if (version >= 6) {
3364                if (version >= 7)
3365                        ceph_decode_8_safe(&p, end, decode_redir, e_inval);
3366                else
3367                        decode_redir = 1;
3368        } else {
3369                decode_redir = 0;
3370        }
3371
3372        if (decode_redir) {
3373                ret = ceph_redirect_decode(&p, end, &m->redirect);
3374                if (ret)
3375                        return ret;
3376        } else {
3377                ceph_oloc_init(&m->redirect.oloc);
3378        }
3379
3380        return 0;
3381
3382e_inval:
3383        return -EINVAL;
3384}
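/*
 * Summary of the MOSDOpReply front section decoded above (v4..v7):
 *
 *	oid			u32 len + len bytes (skipped)
 *	pgid			ceph_pg
 *	flags			u64
 *	result			s32
 *	bad_replay_version	ceph_eversion
 *	epoch			u32
 *	num_ops			u32, followed by num_ops * ceph_osd_op
 *	retry_attempt		s32
 *	per-op rval		num_ops * s32
 *	replay_version		ceph_eversion	(v5+, else bad_replay_version)
 *	user_version		u64		(v5+)
 *	redirect		ceph_request_redirect	(v6+; v7+ prefixes
 *				it with a u8 decode_redir flag)
 */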
3385
3386/*
3387 * Handle MOSDOpReply.  Set ->r_result and call the callback if it is
3388 * specified.
3389 */
3390static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
3391{
3392        struct ceph_osd_client *osdc = osd->o_osdc;
3393        struct ceph_osd_request *req;
3394        struct MOSDOpReply m;
3395        u64 tid = le64_to_cpu(msg->hdr.tid);
3396        u32 data_len = 0;
3397        int ret;
3398        int i;
3399
3400        dout("%s msg %p tid %llu\n", __func__, msg, tid);
3401
3402        down_read(&osdc->lock);
3403        if (!osd_registered(osd)) {
3404                dout("%s osd%d unknown\n", __func__, osd->o_osd);
3405                goto out_unlock_osdc;
3406        }
3407        WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
3408
3409        mutex_lock(&osd->lock);
3410        req = lookup_request(&osd->o_requests, tid);
3411        if (!req) {
3412                dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
3413                goto out_unlock_session;
3414        }
3415
3416        m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
3417        ret = decode_MOSDOpReply(msg, &m);
3418        m.redirect.oloc.pool_ns = NULL;
3419        if (ret) {
3420                pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
3421                       req->r_tid, ret);
3422                ceph_msg_dump(msg);
3423                goto fail_request;
3424        }
3425        dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
3426             __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
3427             m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
3428             le64_to_cpu(m.replay_version.version), m.user_version);
3429
3430        if (m.retry_attempt >= 0) {
3431                if (m.retry_attempt != req->r_attempts - 1) {
3432                        dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
3433                             req, req->r_tid, m.retry_attempt,
3434                             req->r_attempts - 1);
3435                        goto out_unlock_session;
3436                }
3437        } else {
3438                WARN_ON(1); /* MOSDOpReply v4 is assumed */
3439        }
3440
3441        if (!ceph_oloc_empty(&m.redirect.oloc)) {
3442                dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
3443                     m.redirect.oloc.pool);
3444                unlink_request(osd, req);
3445                mutex_unlock(&osd->lock);
3446
3447                /*
3448                 * Not ceph_oloc_copy() - changing pool_ns is not
3449                 * supported.
3450                 */
3451                req->r_t.target_oloc.pool = m.redirect.oloc.pool;
3452                req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
3453                req->r_tid = 0;
3454                __submit_request(req, false);
3455                goto out_unlock_osdc;
3456        }
3457
3458        if (m.num_ops != req->r_num_ops) {
3459                pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
3460                       req->r_num_ops, req->r_tid);
3461                goto fail_request;
3462        }
3463        for (i = 0; i < req->r_num_ops; i++) {
3464                dout(" req %p tid %llu op %d rval %d len %u\n", req,
3465                     req->r_tid, i, m.rval[i], m.outdata_len[i]);
3466                req->r_ops[i].rval = m.rval[i];
3467                req->r_ops[i].outdata_len = m.outdata_len[i];
3468                data_len += m.outdata_len[i];
3469        }
3470        if (data_len != le32_to_cpu(msg->hdr.data_len)) {
3471                pr_err("sum of lens %u != %u for tid %llu\n", data_len,
3472                       le32_to_cpu(msg->hdr.data_len), req->r_tid);
3473                goto fail_request;
3474        }
3475        dout("%s req %p tid %llu result %d data_len %u\n", __func__,
3476             req, req->r_tid, m.result, data_len);
3477
3478        /*
3479         * Since we only ever request ONDISK, we should only ever get
3480         * one (type of) reply back.
3481         */
3482        WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
3483        req->r_result = m.result ?: data_len;
3484        finish_request(req);
3485        mutex_unlock(&osd->lock);
3486        up_read(&osdc->lock);
3487
3488        __complete_request(req);
3489        complete_all(&req->r_completion);
3490        ceph_osdc_put_request(req);
3491        return;
3492
3493fail_request:
3494        complete_request(req, -EIO);
3495out_unlock_session:
3496        mutex_unlock(&osd->lock);
3497out_unlock_osdc:
3498        up_read(&osdc->lock);
3499}
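/*
 * A sketch (not built) of the lock ordering handle_reply() and the
 * other message handlers follow: the client-wide osdc->lock (shared
 * here) is taken before the per-session osd->lock, and they are
 * released in reverse order unless the request has been handed off
 * (e.g. for a redirect resubmit) first.
 */
#if 0
down_read(&osdc->lock);
mutex_lock(&osd->lock);
/* ... look up the tid, decode the reply, complete or resubmit ... */
mutex_unlock(&osd->lock);
up_read(&osdc->lock);
#endif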
3500
3501static void set_pool_was_full(struct ceph_osd_client *osdc)
3502{
3503        struct rb_node *n;
3504
3505        for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
3506                struct ceph_pg_pool_info *pi =
3507                    rb_entry(n, struct ceph_pg_pool_info, node);
3508
3509                pi->was_full = __pool_full(pi);
3510        }
3511}
3512
3513static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
3514{
3515        struct ceph_pg_pool_info *pi;
3516
3517        pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
3518        if (!pi)
3519                return false;
3520
3521        return pi->was_full && !__pool_full(pi);
3522}
3523
3524static enum calc_target_result
3525recalc_linger_target(struct ceph_osd_linger_request *lreq)
3526{
3527        struct ceph_osd_client *osdc = lreq->osdc;
3528        enum calc_target_result ct_res;
3529
3530        ct_res = calc_target(osdc, &lreq->t, NULL, true);
3531        if (ct_res == CALC_TARGET_NEED_RESEND) {
3532                struct ceph_osd *osd;
3533
3534                osd = lookup_create_osd(osdc, lreq->t.osd, true);
3535                if (osd != lreq->osd) {
3536                        unlink_linger(lreq->osd, lreq);
3537                        link_linger(osd, lreq);
3538                }
3539        }
3540
3541        return ct_res;
3542}
3543
3544/*
3545 * Requeue requests whose mapping to an OSD has changed.
3546 */
3547static void scan_requests(struct ceph_osd *osd,
3548                          bool force_resend,
3549                          bool cleared_full,
3550                          bool check_pool_cleared_full,
3551                          struct rb_root *need_resend,
3552                          struct list_head *need_resend_linger)
3553{
3554        struct ceph_osd_client *osdc = osd->o_osdc;
3555        struct rb_node *n;
3556        bool force_resend_writes;
3557
3558        for (n = rb_first(&osd->o_linger_requests); n; ) {
3559                struct ceph_osd_linger_request *lreq =
3560                    rb_entry(n, struct ceph_osd_linger_request, node);
3561                enum calc_target_result ct_res;
3562
3563                n = rb_next(n); /* recalc_linger_target() */
3564
3565                dout("%s lreq %p linger_id %llu\n", __func__, lreq,
3566                     lreq->linger_id);
3567                ct_res = recalc_linger_target(lreq);
3568                switch (ct_res) {
3569                case CALC_TARGET_NO_ACTION:
3570                        force_resend_writes = cleared_full ||
3571                            (check_pool_cleared_full &&
3572                             pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3573                        if (!force_resend && !force_resend_writes)
3574                                break;
3575
3576                        /* fall through */
3577                case CALC_TARGET_NEED_RESEND:
3578                        cancel_linger_map_check(lreq);
3579                        /*
3580                         * scan_requests() for the previous epoch(s)
3581                         * may have already added it to the list, since
3582                         * it's not unlinked here.
3583                         */
3584                        if (list_empty(&lreq->scan_item))
3585                                list_add_tail(&lreq->scan_item, need_resend_linger);
3586                        break;
3587                case CALC_TARGET_POOL_DNE:
3588                        list_del_init(&lreq->scan_item);
3589                        check_linger_pool_dne(lreq);
3590                        break;
3591                }
3592        }
3593
3594        for (n = rb_first(&osd->o_requests); n; ) {
3595                struct ceph_osd_request *req =
3596                    rb_entry(n, struct ceph_osd_request, r_node);
3597                enum calc_target_result ct_res;
3598
3599                n = rb_next(n); /* unlink_request(), check_pool_dne() */
3600
3601                dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3602                ct_res = calc_target(osdc, &req->r_t, &req->r_osd->o_con,
3603                                     false);
3604                switch (ct_res) {
3605                case CALC_TARGET_NO_ACTION:
3606                        force_resend_writes = cleared_full ||
3607                            (check_pool_cleared_full &&
3608                             pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3609                        if (!force_resend &&
3610                            (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
3611                             !force_resend_writes))
3612                                break;
3613
3614                        /* fall through */
3615                case CALC_TARGET_NEED_RESEND:
3616                        cancel_map_check(req);
3617                        unlink_request(osd, req);
3618                        insert_request(need_resend, req);
3619                        break;
3620                case CALC_TARGET_POOL_DNE:
3621                        check_pool_dne(req);
3622                        break;
3623                }
3624        }
3625}
3626
3627static int handle_one_map(struct ceph_osd_client *osdc,
3628                          void *p, void *end, bool incremental,
3629                          struct rb_root *need_resend,
3630                          struct list_head *need_resend_linger)
3631{
3632        struct ceph_osdmap *newmap;
3633        struct rb_node *n;
3634        bool skipped_map = false;
3635        bool was_full;
3636
3637        was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3638        set_pool_was_full(osdc);
3639
3640        if (incremental)
3641                newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
3642        else
3643                newmap = ceph_osdmap_decode(&p, end);
3644        if (IS_ERR(newmap))
3645                return PTR_ERR(newmap);
3646
3647        if (newmap != osdc->osdmap) {
3648                /*
3649                 * Preserve ->was_full before destroying the old map.
3650                 * For pools that weren't in the old map, ->was_full
3651                 * should be false.
3652                 */
3653                for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
3654                        struct ceph_pg_pool_info *pi =
3655                            rb_entry(n, struct ceph_pg_pool_info, node);
3656                        struct ceph_pg_pool_info *old_pi;
3657
3658                        old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
3659                        if (old_pi)
3660                                pi->was_full = old_pi->was_full;
3661                        else
3662                                WARN_ON(pi->was_full);
3663                }
3664
3665                if (osdc->osdmap->epoch &&
3666                    osdc->osdmap->epoch + 1 < newmap->epoch) {
3667                        WARN_ON(incremental);
3668                        skipped_map = true;
3669                }
3670
3671                ceph_osdmap_destroy(osdc->osdmap);
3672                osdc->osdmap = newmap;
3673        }
3674
3675        was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3676        scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3677                      need_resend, need_resend_linger);
3678
3679        for (n = rb_first(&osdc->osds); n; ) {
3680                struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3681
3682                n = rb_next(n); /* close_osd() */
3683
3684                scan_requests(osd, skipped_map, was_full, true, need_resend,
3685                              need_resend_linger);
3686                if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
3687                    memcmp(&osd->o_con.peer_addr,
3688                           ceph_osd_addr(osdc->osdmap, osd->o_osd),
3689                           sizeof(struct ceph_entity_addr)))
3690                        close_osd(osd);
3691        }
3692
3693        return 0;
3694}
3695
3696static void kick_requests(struct ceph_osd_client *osdc,
3697                          struct rb_root *need_resend,
3698                          struct list_head *need_resend_linger)
3699{
3700        struct ceph_osd_linger_request *lreq, *nlreq;
3701        enum calc_target_result ct_res;
3702        struct rb_node *n;
3703
3704        /* make sure need_resend targets reflect latest map */
3705        for (n = rb_first(need_resend); n; ) {
3706                struct ceph_osd_request *req =
3707                    rb_entry(n, struct ceph_osd_request, r_node);
3708
3709                n = rb_next(n);
3710
3711                if (req->r_t.epoch < osdc->osdmap->epoch) {
3712                        ct_res = calc_target(osdc, &req->r_t, NULL, false);
3713                        if (ct_res == CALC_TARGET_POOL_DNE) {
3714                                erase_request(need_resend, req);
3715                                check_pool_dne(req);
3716                        }
3717                }
3718        }
3719
3720        for (n = rb_first(need_resend); n; ) {
3721                struct ceph_osd_request *req =
3722                    rb_entry(n, struct ceph_osd_request, r_node);
3723                struct ceph_osd *osd;
3724
3725                n = rb_next(n);
3726                erase_request(need_resend, req); /* before link_request() */
3727
3728                osd = lookup_create_osd(osdc, req->r_t.osd, true);
3729                link_request(osd, req);
3730                if (!req->r_linger) {
3731                        if (!osd_homeless(osd) && !req->r_t.paused)
3732                                send_request(req);
3733                } else {
3734                        cancel_linger_request(req);
3735                }
3736        }
3737
3738        list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
3739                if (!osd_homeless(lreq->osd))
3740                        send_linger(lreq);
3741
3742                list_del_init(&lreq->scan_item);
3743        }
3744}
3745
3746/*
3747 * Process updated osd map.
3748 *
3749 * The message contains any number of incremental and full maps, normally
3750 * indicating some sort of topology change in the cluster.  Kick requests
3751 * off to different OSDs as needed.
3752 */
3753void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
3754{
3755        void *p = msg->front.iov_base;
3756        void *const end = p + msg->front.iov_len;
3757        u32 nr_maps, maplen;
3758        u32 epoch;
3759        struct ceph_fsid fsid;
3760        struct rb_root need_resend = RB_ROOT;
3761        LIST_HEAD(need_resend_linger);
3762        bool handled_incremental = false;
3763        bool was_pauserd, was_pausewr;
3764        bool pauserd, pausewr;
3765        int err;
3766
3767        dout("%s have %u\n", __func__, osdc->osdmap->epoch);
3768        down_write(&osdc->lock);
3769
3770        /* verify fsid */
3771        ceph_decode_need(&p, end, sizeof(fsid), bad);
3772        ceph_decode_copy(&p, &fsid, sizeof(fsid));
3773        if (ceph_check_fsid(osdc->client, &fsid) < 0)
3774                goto bad;
3775
3776        was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3777        was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3778                      ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3779                      have_pool_full(osdc);
3780
3781        /* incremental maps */
3782        ceph_decode_32_safe(&p, end, nr_maps, bad);
3783        dout(" %d inc maps\n", nr_maps);
3784        while (nr_maps > 0) {
3785                ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3786                epoch = ceph_decode_32(&p);
3787                maplen = ceph_decode_32(&p);
3788                ceph_decode_need(&p, end, maplen, bad);
3789                if (osdc->osdmap->epoch &&
3790                    osdc->osdmap->epoch + 1 == epoch) {
3791                        dout("applying incremental map %u len %d\n",
3792                             epoch, maplen);
3793                        err = handle_one_map(osdc, p, p + maplen, true,
3794                                             &need_resend, &need_resend_linger);
3795                        if (err)
3796                                goto bad;
3797                        handled_incremental = true;
3798                } else {
3799                        dout("ignoring incremental map %u len %d\n",
3800                             epoch, maplen);
3801                }
3802                p += maplen;
3803                nr_maps--;
3804        }
3805        if (handled_incremental)
3806                goto done;
3807
3808        /* full maps */
3809        ceph_decode_32_safe(&p, end, nr_maps, bad);
3810        dout(" %d full maps\n", nr_maps);
3811        while (nr_maps) {
3812                ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3813                epoch = ceph_decode_32(&p);
3814                maplen = ceph_decode_32(&p);
3815                ceph_decode_need(&p, end, maplen, bad);
3816                if (nr_maps > 1) {
3817                        dout("skipping non-latest full map %u len %d\n",
3818                             epoch, maplen);
3819                } else if (osdc->osdmap->epoch >= epoch) {
3820                        dout("skipping full map %u len %d, "
3821                             "older than our %u\n", epoch, maplen,
3822                             osdc->osdmap->epoch);
3823                } else {
3824                        dout("taking full map %u len %d\n", epoch, maplen);
3825                        err = handle_one_map(osdc, p, p + maplen, false,
3826                                             &need_resend, &need_resend_linger);
3827                        if (err)
3828                                goto bad;
3829                }
3830                p += maplen;
3831                nr_maps--;
3832        }
3833
3834done:
3835        /*
3836         * subscribe to subsequent osdmap updates if full to ensure
3837         * we find out when we are no longer full and stop returning
3838         * ENOSPC.
3839         */
3840        pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3841        pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3842                  ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3843                  have_pool_full(osdc);
3844        if (was_pauserd || was_pausewr || pauserd || pausewr ||
3845            osdc->osdmap->epoch < osdc->epoch_barrier)
3846                maybe_request_map(osdc);
3847
3848        kick_requests(osdc, &need_resend, &need_resend_linger);
3849
3850        ceph_osdc_abort_on_full(osdc);
3851        ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
3852                          osdc->osdmap->epoch);
3853        up_write(&osdc->lock);
3854        wake_up_all(&osdc->client->auth_wq);
3855        return;
3856
3857bad:
3858        pr_err("osdc handle_map corrupt msg\n");
3859        ceph_msg_dump(msg);
3860        up_write(&osdc->lock);
3861}
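/*
 * Layout of the MOSDMap front section decoded above:
 *
 *	fsid		ceph_fsid
 *	u32		nr incremental maps
 *	  nr *		{ u32 epoch, u32 maplen, maplen bytes }
 *	u32		nr full maps
 *	  nr *		{ u32 epoch, u32 maplen, maplen bytes }
 *
 * An incremental is applied only if it continues directly from the
 * current epoch; otherwise the newest (last) full map, if any is
 * newer than ours, is taken instead.
 */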
3862
3863/*
3864 * Resubmit requests pending on the given osd.
3865 */
3866static void kick_osd_requests(struct ceph_osd *osd)
3867{
3868        struct rb_node *n;
3869
3870        clear_backoffs(osd);
3871
3872        for (n = rb_first(&osd->o_requests); n; ) {
3873                struct ceph_osd_request *req =
3874                    rb_entry(n, struct ceph_osd_request, r_node);
3875
3876                n = rb_next(n); /* cancel_linger_request() */
3877
3878                if (!req->r_linger) {
3879                        if (!req->r_t.paused)
3880                                send_request(req);
3881                } else {
3882                        cancel_linger_request(req);
3883                }
3884        }
3885        for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
3886                struct ceph_osd_linger_request *lreq =
3887                    rb_entry(n, struct ceph_osd_linger_request, node);
3888
3889                send_linger(lreq);
3890        }
3891}
3892
3893/*
3894 * If the osd connection drops, we need to resubmit all requests.
3895 */
3896static void osd_fault(struct ceph_connection *con)
3897{
3898        struct ceph_osd *osd = con->private;
3899        struct ceph_osd_client *osdc = osd->o_osdc;
3900
3901        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
3902
3903        down_write(&osdc->lock);
3904        if (!osd_registered(osd)) {
3905                dout("%s osd%d unknown\n", __func__, osd->o_osd);
3906                goto out_unlock;
3907        }
3908
3909        if (!reopen_osd(osd))
3910                kick_osd_requests(osd);
3911        maybe_request_map(osdc);
3912
3913out_unlock:
3914        up_write(&osdc->lock);
3915}
3916
3917struct MOSDBackoff {
3918        struct ceph_spg spgid;
3919        u32 map_epoch;
3920        u8 op;
3921        u64 id;
3922        struct ceph_hobject_id *begin;
3923        struct ceph_hobject_id *end;
3924};
3925
3926static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
3927{
3928        void *p = msg->front.iov_base;
3929        void *const end = p + msg->front.iov_len;
3930        u8 struct_v;
3931        u32 struct_len;
3932        int ret;
3933
3934        ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
3935        if (ret)
3936                return ret;
3937
3938        ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
3939        if (ret)
3940                return ret;
3941
3942        ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);
3943        ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
3944        ceph_decode_8_safe(&p, end, m->op, e_inval);
3945        ceph_decode_64_safe(&p, end, m->id, e_inval);
3946
3947        m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
3948        if (!m->begin)
3949                return -ENOMEM;
3950
3951        ret = decode_hoid(&p, end, m->begin);
3952        if (ret) {
3953                free_hoid(m->begin);
3954                return ret;
3955        }
3956
3957        m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
3958        if (!m->end) {
3959                free_hoid(m->begin);
3960                return -ENOMEM;
3961        }
3962
3963        ret = decode_hoid(&p, end, m->end);
3964        if (ret) {
3965                free_hoid(m->begin);
3966                free_hoid(m->end);
3967                return ret;
3968        }
3969
3970        return 0;
3971
3972e_inval:
3973        return -EINVAL;
3974}
3975
3976static struct ceph_msg *create_backoff_message(
3977                                const struct ceph_osd_backoff *backoff,
3978                                u32 map_epoch)
3979{
3980        struct ceph_msg *msg;
3981        void *p, *end;
3982        int msg_size;
3983
3984        msg_size = CEPH_ENCODING_START_BLK_LEN +
3985                        CEPH_PGID_ENCODING_LEN + 1; /* spgid */
3986        msg_size += 4 + 1 + 8; /* map_epoch, op, id */
3987        msg_size += CEPH_ENCODING_START_BLK_LEN +
3988                        hoid_encoding_size(backoff->begin);
3989        msg_size += CEPH_ENCODING_START_BLK_LEN +
3990                        hoid_encoding_size(backoff->end);
3991
3992        msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
3993        if (!msg)
3994                return NULL;
3995
3996        p = msg->front.iov_base;
3997        end = p + msg->front_alloc_len;
3998
3999        encode_spgid(&p, &backoff->spgid);
4000        ceph_encode_32(&p, map_epoch);
4001        ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
4002        ceph_encode_64(&p, backoff->id);
4003        encode_hoid(&p, end, backoff->begin);
4004        encode_hoid(&p, end, backoff->end);
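        /* the BUG_ON below asserts msg_size matched the encoding exactly */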
4005        BUG_ON(p != end);
4006
4007        msg->front.iov_len = p - msg->front.iov_base;
4008        msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
4009        msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
4010
4011        return msg;
4012}
4013
4014static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
4015{
4016        struct ceph_spg_mapping *spg;
4017        struct ceph_osd_backoff *backoff;
4018        struct ceph_msg *msg;
4019
4020        dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4021             m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4022
4023        spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
4024        if (!spg) {
4025                spg = alloc_spg_mapping();
4026                if (!spg) {
4027                        pr_err("%s failed to allocate spg\n", __func__);
4028                        return;
4029                }
4030                spg->spgid = m->spgid; /* struct */
4031                insert_spg_mapping(&osd->o_backoff_mappings, spg);
4032        }
4033
4034        backoff = alloc_backoff();
4035        if (!backoff) {
4036                pr_err("%s failed to allocate backoff\n", __func__);
4037                return;
4038        }
4039        backoff->spgid = m->spgid; /* struct */
4040        backoff->id = m->id;
4041        backoff->begin = m->begin;
4042        m->begin = NULL; /* backoff now owns this */
4043        backoff->end = m->end;
4044        m->end = NULL;   /* ditto */
4045
4046        insert_backoff(&spg->backoffs, backoff);
4047        insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4048
4049        /*
4050         * Ack with original backoff's epoch so that the OSD can
4051         * discard this if there was a PG split.
4052         */
4053        msg = create_backoff_message(backoff, m->map_epoch);
4054        if (!msg) {
4055                pr_err("%s failed to allocate msg\n", __func__);
4056                return;
4057        }
4058        ceph_con_send(&osd->o_con, msg);
4059}
4060
4061static bool target_contained_by(const struct ceph_osd_request_target *t,
4062                                const struct ceph_hobject_id *begin,
4063                                const struct ceph_hobject_id *end)
4064{
4065        struct ceph_hobject_id hoid;
4066        int cmp;
4067
4068        hoid_fill_from_target(&hoid, t);
4069        cmp = hoid_compare(&hoid, begin);
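        /* contained iff begin <= hoid < end, i.e. the half-open [begin, end) */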
4070        return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
4071}
4072
4073static void handle_backoff_unblock(struct ceph_osd *osd,
4074                                   const struct MOSDBackoff *m)
4075{
4076        struct ceph_spg_mapping *spg;
4077        struct ceph_osd_backoff *backoff;
4078        struct rb_node *n;
4079
4080        dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4081             m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4082
4083        backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
4084        if (!backoff) {
4085                pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
4086                       __func__, osd->o_osd, m->spgid.pgid.pool,
4087                       m->spgid.pgid.seed, m->spgid.shard, m->id);
4088                return;
4089        }
4090
4091        if (hoid_compare(backoff->begin, m->begin) &&
4092            hoid_compare(backoff->end, m->end)) {
4093                pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
4094                       __func__, osd->o_osd, m->spgid.pgid.pool,
4095                       m->spgid.pgid.seed, m->spgid.shard, m->id);
4096                /* unblock it anyway... */
4097        }
4098
4099        spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
4100        BUG_ON(!spg);
4101
4102        erase_backoff(&spg->backoffs, backoff);
4103        erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4104        free_backoff(backoff);
4105
4106        if (RB_EMPTY_ROOT(&spg->backoffs)) {
4107                erase_spg_mapping(&osd->o_backoff_mappings, spg);
4108                free_spg_mapping(spg);
4109        }
4110
4111        for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
4112                struct ceph_osd_request *req =
4113                    rb_entry(n, struct ceph_osd_request, r_node);
4114
4115                if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
4116                        /*
4117                         * Match against @m, not @backoff -- the PG may
4118                         * have split on the OSD.
4119                         */
4120                        if (target_contained_by(&req->r_t, m->begin, m->end)) {
4121                                /*
4122                                 * If no other installed backoff applies,
4123                                 * resend.
4124                                 */
4125                                send_request(req);
4126                        }
4127                }
4128        }
4129}
4130
4131static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
4132{
4133        struct ceph_osd_client *osdc = osd->o_osdc;
4134        struct MOSDBackoff m;
4135        int ret;
4136
4137        down_read(&osdc->lock);
4138        if (!osd_registered(osd)) {
4139                dout("%s osd%d unknown\n", __func__, osd->o_osd);
4140                up_read(&osdc->lock);
4141                return;
4142        }
4143        WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
4144
4145        mutex_lock(&osd->lock);
4146        ret = decode_MOSDBackoff(msg, &m);
4147        if (ret) {
4148                pr_err("failed to decode MOSDBackoff: %d\n", ret);
4149                ceph_msg_dump(msg);
4150                goto out_unlock;
4151        }
4152
4153        switch (m.op) {
4154        case CEPH_OSD_BACKOFF_OP_BLOCK:
4155                handle_backoff_block(osd, &m);
4156                break;
4157        case CEPH_OSD_BACKOFF_OP_UNBLOCK:
4158                handle_backoff_unblock(osd, &m);
4159                break;
4160        default:
4161                pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
4162        }
4163
4164        free_hoid(m.begin);
4165        free_hoid(m.end);
4166
4167out_unlock:
4168        mutex_unlock(&osd->lock);
4169        up_read(&osdc->lock);
4170}
4171
4172/*
4173 * Process osd watch notifications
4174 */
4175static void handle_watch_notify(struct ceph_osd_client *osdc,
4176                                struct ceph_msg *msg)
4177{
4178        void *p = msg->front.iov_base;
4179        void *const end = p + msg->front.iov_len;
4180        struct ceph_osd_linger_request *lreq;
4181        struct linger_work *lwork;
4182        u8 proto_ver, opcode;
4183        u64 cookie, notify_id;
4184        u64 notifier_id = 0;
4185        s32 return_code = 0;
4186        void *payload = NULL;
4187        u32 payload_len = 0;
4188
4189        ceph_decode_8_safe(&p, end, proto_ver, bad);
4190        ceph_decode_8_safe(&p, end, opcode, bad);
4191        ceph_decode_64_safe(&p, end, cookie, bad);
4192        p += 8; /* skip ver */
4193        ceph_decode_64_safe(&p, end, notify_id, bad);
4194
4195        if (proto_ver >= 1) {
4196                ceph_decode_32_safe(&p, end, payload_len, bad);
4197                ceph_decode_need(&p, end, payload_len, bad);
4198                payload = p;
4199                p += payload_len;
4200        }
4201
4202        if (le16_to_cpu(msg->hdr.version) >= 2)
4203                ceph_decode_32_safe(&p, end, return_code, bad);
4204
4205        if (le16_to_cpu(msg->hdr.version) >= 3)
4206                ceph_decode_64_safe(&p, end, notifier_id, bad);
4207
4208        down_read(&osdc->lock);
4209        lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
4210        if (!lreq) {
4211                dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
4212                     cookie);
4213                goto out_unlock_osdc;
4214        }
4215
4216        mutex_lock(&lreq->lock);
4217        dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
4218             opcode, cookie, lreq, lreq->is_watch);
4219        if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
4220                if (!lreq->last_error) {
4221                        lreq->last_error = -ENOTCONN;
4222                        queue_watch_error(lreq);
4223                }
4224        } else if (!lreq->is_watch) {
4225                /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
4226                if (lreq->notify_id && lreq->notify_id != notify_id) {
4227                        dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
4228                             lreq->notify_id, notify_id);
4229                } else if (!completion_done(&lreq->notify_finish_wait)) {
4230                        struct ceph_msg_data *data =
4231                            list_first_entry_or_null(&msg->data,
4232                                                     struct ceph_msg_data,
4233                                                     links);
4234
4235                        if (data) {
4236                                if (lreq->preply_pages) {
4237                                        WARN_ON(data->type !=
4238                                                        CEPH_MSG_DATA_PAGES);
4239                                        *lreq->preply_pages = data->pages;
4240                                        *lreq->preply_len = data->length;
4241                                } else {
4242                                        ceph_release_page_vector(data->pages,
4243                                               calc_pages_for(0, data->length));
4244                                }
4245                        }
4246                        lreq->notify_finish_error = return_code;
4247                        complete_all(&lreq->notify_finish_wait);
4248                }
4249        } else {
4250                /* CEPH_WATCH_EVENT_NOTIFY */
4251                lwork = lwork_alloc(lreq, do_watch_notify);
4252                if (!lwork) {
4253                        pr_err("failed to allocate notify-lwork\n");
4254                        goto out_unlock_lreq;
4255                }
4256
4257                lwork->notify.notify_id = notify_id;
4258                lwork->notify.notifier_id = notifier_id;
4259                lwork->notify.payload = payload;
4260                lwork->notify.payload_len = payload_len;
4261                lwork->notify.msg = ceph_msg_get(msg);
4262                lwork_queue(lwork);
4263        }
4264
4265out_unlock_lreq:
4266        mutex_unlock(&lreq->lock);
4267out_unlock_osdc:
4268        up_read(&osdc->lock);
4269        return;
4270
4271bad:
4272        pr_err("osdc handle_watch_notify corrupt msg\n");
4273}
4274
4275/*
4276 * Register request, send initial attempt.
4277 */
4278int ceph_osdc_start_request(struct ceph_osd_client *osdc,
4279                            struct ceph_osd_request *req,
4280                            bool nofail)
4281{
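        /* note: @nofail is accepted but not consulted in this implementation */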
4282        down_read(&osdc->lock);
4283        submit_request(req, false);
4284        up_read(&osdc->lock);
4285
4286        return 0;
4287}
4288EXPORT_SYMBOL(ceph_osdc_start_request);
4289
4290/*
4291 * Unregister a registered request.  The request is not completed:
4292 * ->r_result isn't set and __complete_request() isn't called.
4293 */
4294void ceph_osdc_cancel_request(struct ceph_osd_request *req)
4295{
4296        struct ceph_osd_client *osdc = req->r_osdc;
4297
4298        down_write(&osdc->lock);
4299        if (req->r_osd)
4300                cancel_request(req);
4301        up_write(&osdc->lock);
4302}
4303EXPORT_SYMBOL(ceph_osdc_cancel_request);
4304
4305/*
4306 * @timeout: in jiffies, 0 means "wait forever"
4307 */
4308static int wait_request_timeout(struct ceph_osd_request *req,
4309                                unsigned long timeout)
4310{
4311        long left;
4312
4313        dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
4314        left = wait_for_completion_killable_timeout(&req->r_completion,
4315                                                ceph_timeout_jiffies(timeout));
4316        if (left <= 0) {
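                /* 0 == timed out, negative == interrupted by a fatal signal */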
4317                left = left ?: -ETIMEDOUT;
4318                ceph_osdc_cancel_request(req);
4319        } else {
4320                left = req->r_result; /* completed */
4321        }
4322
4323        return left;
4324}
4325
4326/*
4327 * wait for a request to complete
4328 */
4329int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
4330                           struct ceph_osd_request *req)
4331{
4332        return wait_request_timeout(req, 0);
4333}
4334EXPORT_SYMBOL(ceph_osdc_wait_request);
4335
4336/*
4337 * sync - wait for all in-flight write requests to flush; last_tid is sampled first so later submissions can't starve us.
4338 */
4339void ceph_osdc_sync(struct ceph_osd_client *osdc)
4340{
4341        struct rb_node *n, *p;
4342        u64 last_tid = atomic64_read(&osdc->last_tid);
4343
4344again:
4345        down_read(&osdc->lock);
4346        for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
4347                struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
4348
4349                mutex_lock(&osd->lock);
4350                for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
4351                        struct ceph_osd_request *req =
4352                            rb_entry(p, struct ceph_osd_request, r_node);
4353
4354                        if (req->r_tid > last_tid)
4355                                break;
4356
4357                        if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
4358                                continue;
4359
4360                        ceph_osdc_get_request(req);
4361                        mutex_unlock(&osd->lock);
4362                        up_read(&osdc->lock);
4363                        dout("%s waiting on req %p tid %llu last_tid %llu\n",
4364                             __func__, req, req->r_tid, last_tid);
4365                        wait_for_completion(&req->r_completion);
4366                        ceph_osdc_put_request(req);
4367                        goto again;
4368                }
4369
4370                mutex_unlock(&osd->lock);
4371        }
4372
4373        up_read(&osdc->lock);
4374        dout("%s done last_tid %llu\n", __func__, last_tid);
4375}
4376EXPORT_SYMBOL(ceph_osdc_sync);
4377
4378static struct ceph_osd_request *
4379alloc_linger_request(struct ceph_osd_linger_request *lreq)
4380{
4381        struct ceph_osd_request *req;
4382
4383        req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
4384        if (!req)
4385                return NULL;
4386
4387        ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4388        ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4389
4390        if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
4391                ceph_osdc_put_request(req);
4392                return NULL;
4393        }
4394
4395        return req;
4396}
4397
4398/*
4399 * Returns a handle, caller owns a ref.
4400 */
4401struct ceph_osd_linger_request *
4402ceph_osdc_watch(struct ceph_osd_client *osdc,
4403                struct ceph_object_id *oid,
4404                struct ceph_object_locator *oloc,
4405                rados_watchcb2_t wcb,
4406                rados_watcherrcb_t errcb,
4407                void *data)
4408{
4409        struct ceph_osd_linger_request *lreq;
4410        int ret;
4411
4412        lreq = linger_alloc(osdc);
4413        if (!lreq)
4414                return ERR_PTR(-ENOMEM);
4415
4416        lreq->is_watch = true;
4417        lreq->wcb = wcb;
4418        lreq->errcb = errcb;
4419        lreq->data = data;
4420        lreq->watch_valid_thru = jiffies;
4421
4422        ceph_oid_copy(&lreq->t.base_oid, oid);
4423        ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4424        lreq->t.flags = CEPH_OSD_FLAG_WRITE;
4425        ktime_get_real_ts(&lreq->mtime);
4426
4427        lreq->reg_req = alloc_linger_request(lreq);
4428        if (!lreq->reg_req) {
4429                ret = -ENOMEM;
4430                goto err_put_lreq;
4431        }
4432
4433        lreq->ping_req = alloc_linger_request(lreq);
4434        if (!lreq->ping_req) {
4435                ret = -ENOMEM;
4436                goto err_put_lreq;
4437        }
4438
4439        down_write(&osdc->lock);
4440        linger_register(lreq); /* before osd_req_op_* */
4441        osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
4442                              CEPH_OSD_WATCH_OP_WATCH);
4443        osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
4444                              CEPH_OSD_WATCH_OP_PING);
4445        linger_submit(lreq);
4446        up_write(&osdc->lock);
4447
4448        ret = linger_reg_commit_wait(lreq);
4449        if (ret) {
4450                linger_cancel(lreq);
4451                goto err_put_lreq;
4452        }
4453
4454        return lreq;
4455
4456err_put_lreq:
4457        linger_put(lreq);
4458        return ERR_PTR(ret);
4459}
4460EXPORT_SYMBOL(ceph_osdc_watch);
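/*
 * A sketch (not built) of a ceph_osdc_watch() consumer, loosely in
 * the style of rbd's watch/unwatch dance.  The function names, oid
 * and oloc setup are hypothetical; the callback signatures follow
 * rados_watchcb2_t and rados_watcherrcb_t.
 */
#if 0
static void demo_watch_cb(void *arg, u64 notify_id, u64 cookie,
			  u64 notifier_id, void *data, size_t data_len)
{
	/* invoked from the notify workqueue for CEPH_WATCH_EVENT_NOTIFY */
}

static void demo_watch_errcb(void *arg, u64 cookie, int err)
{
	/* the watch broke (e.g. -ENOTCONN); re-watch or give up */
}

static int demo_watch(struct ceph_osd_client *osdc,
		      struct ceph_object_id *oid,
		      struct ceph_object_locator *oloc)
{
	struct ceph_osd_linger_request *handle;

	/* blocks until the registration commits (or fails) */
	handle = ceph_osdc_watch(osdc, oid, oloc, demo_watch_cb,
				 demo_watch_errcb, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... notifications now arrive via demo_watch_cb() ... */

	return ceph_osdc_unwatch(osdc, handle);	/* drops the handle's ref */
}
#endif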
4461
4462/*
4463 * Releases a ref.
4464 *
4465 * Times out after mount_timeout to preserve rbd unmap behaviour
4466 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
4467 * with mount_timeout").
4468 */
4469int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
4470                      struct ceph_osd_linger_request *lreq)
4471{
4472        struct ceph_options *opts = osdc->client->options;
4473        struct ceph_osd_request *req;
4474        int ret;
4475
4476        req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4477        if (!req)
4478                return -ENOMEM;
4479
4480        ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4481        ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4482        req->r_flags = CEPH_OSD_FLAG_WRITE;
4483        ktime_get_real_ts(&req->r_mtime);
4484        osd_req_op_watch_init(req, 0, lreq->linger_id,
4485                              CEPH_OSD_WATCH_OP_UNWATCH);
4486
4487        ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4488        if (ret)
4489                goto out_put_req;
4490
4491        ceph_osdc_start_request(osdc, req, false);
4492        linger_cancel(lreq);
4493        linger_put(lreq);
4494        ret = wait_request_timeout(req, opts->mount_timeout);
4495
4496out_put_req:
4497        ceph_osdc_put_request(req);
4498        return ret;
4499}
4500EXPORT_SYMBOL(ceph_osdc_unwatch);
4501
4502static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
4503                                      u64 notify_id, u64 cookie, void *payload,
4504                                      size_t payload_len)
4505{
4506        struct ceph_osd_req_op *op;
4507        struct ceph_pagelist *pl;
4508        int ret;
4509
4510        op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
4511
4512        pl = kmalloc(sizeof(*pl), GFP_NOIO);
4513        if (!pl)
4514                return -ENOMEM;
4515
4516        ceph_pagelist_init(pl);
4517        ret = ceph_pagelist_encode_64(pl, notify_id);
4518        ret |= ceph_pagelist_encode_64(pl, cookie);
4519        if (payload) {
4520                ret |= ceph_pagelist_encode_32(pl, payload_len);
4521                ret |= ceph_pagelist_append(pl, payload, payload_len);
4522        } else {
4523                ret |= ceph_pagelist_encode_32(pl, 0);
4524        }
4525        if (ret) {
4526                ceph_pagelist_release(pl);
4527                return -ENOMEM;
4528        }
4529
4530        ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
4531        op->indata_len = pl->length;
4532        return 0;
4533}
4534
4535int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
4536                         struct ceph_object_id *oid,
4537                         struct ceph_object_locator *oloc,
4538                         u64 notify_id,
4539                         u64 cookie,
4540                         void *payload,
4541                         size_t payload_len)
4542{
4543        struct ceph_osd_request *req;
4544        int ret;
4545
4546        req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4547        if (!req)
4548                return -ENOMEM;
4549
4550        ceph_oid_copy(&req->r_base_oid, oid);
4551        ceph_oloc_copy(&req->r_base_oloc, oloc);
4552        req->r_flags = CEPH_OSD_FLAG_READ;
4553
4554        ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4555        if (ret)
4556                goto out_put_req;
4557
4558        ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
4559                                         payload_len);
4560        if (ret)
4561                goto out_put_req;
4562
4563        ceph_osdc_start_request(osdc, req, false);
4564        ret = ceph_osdc_wait_request(osdc, req);
4565
4566out_put_req:
4567        ceph_osdc_put_request(req);
4568        return ret;
4569}
4570EXPORT_SYMBOL(ceph_osdc_notify_ack);
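
/*
 * Hedged sketch: a watch callback (rados_watchcb2_t) typically acks
 * each notify it receives so that the notifier's ceph_osdc_notify()
 * can complete.  my_watch_cb and my_ctx are hypothetical:
 *
 *     static void my_watch_cb(void *arg, u64 notify_id, u64 cookie,
 *                             u64 notifier_id, void *data, size_t data_len)
 *     {
 *             struct my_ctx *ctx = arg;
 *
 *             ceph_osdc_notify_ack(ctx->osdc, &ctx->oid, &ctx->oloc,
 *                                  notify_id, cookie, NULL, 0);
 *     }
 */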
4571
4572static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
4573                                  u64 cookie, u32 prot_ver, u32 timeout,
4574                                  void *payload, size_t payload_len)
4575{
4576        struct ceph_osd_req_op *op;
4577        struct ceph_pagelist *pl;
4578        int ret;
4579
4580        op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
4581        op->notify.cookie = cookie;
4582
4583        pl = kmalloc(sizeof(*pl), GFP_NOIO);
4584        if (!pl)
4585                return -ENOMEM;
4586
4587        ceph_pagelist_init(pl);
4588        ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
4589        ret |= ceph_pagelist_encode_32(pl, timeout);
4590        ret |= ceph_pagelist_encode_32(pl, payload_len);
4591        ret |= ceph_pagelist_append(pl, payload, payload_len);
4592        if (ret) {
4593                ceph_pagelist_release(pl);
4594                return -ENOMEM;
4595        }
4596
4597        ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
4598        op->indata_len = pl->length;
4599        return 0;
4600}
4601
4602/*
4603 * @timeout: in seconds
4604 *
4605 * @preply_{pages,len} are initialized both on success and error.
4606 * The caller is responsible for releasing the reply pages returned
4607 * through them:
4608 *     ceph_release_page_vector(*preply_pages, calc_pages_for(0, *preply_len))
4609 */
4610int ceph_osdc_notify(struct ceph_osd_client *osdc,
4611                     struct ceph_object_id *oid,
4612                     struct ceph_object_locator *oloc,
4613                     void *payload,
4614                     size_t payload_len,
4615                     u32 timeout,
4616                     struct page ***preply_pages,
4617                     size_t *preply_len)
4618{
4619        struct ceph_osd_linger_request *lreq;
4620        struct page **pages;
4621        int ret;
4622
4623        WARN_ON(!timeout);
4624        if (preply_pages) {
4625                *preply_pages = NULL;
4626                *preply_len = 0;
4627        }
4628
4629        lreq = linger_alloc(osdc);
4630        if (!lreq)
4631                return -ENOMEM;
4632
4633        lreq->preply_pages = preply_pages;
4634        lreq->preply_len = preply_len;
4635
4636        ceph_oid_copy(&lreq->t.base_oid, oid);
4637        ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4638        lreq->t.flags = CEPH_OSD_FLAG_READ;
4639
4640        lreq->reg_req = alloc_linger_request(lreq);
4641        if (!lreq->reg_req) {
4642                ret = -ENOMEM;
4643                goto out_put_lreq;
4644        }
4645
4646        /* for notify_id */
4647        pages = ceph_alloc_page_vector(1, GFP_NOIO);
4648        if (IS_ERR(pages)) {
4649                ret = PTR_ERR(pages);
4650                goto out_put_lreq;
4651        }
4652
4653        down_write(&osdc->lock);
4654        linger_register(lreq); /* before osd_req_op_* */
4655        ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
4656                                     timeout, payload, payload_len);
4657        if (ret) {
4658                linger_unregister(lreq);
4659                up_write(&osdc->lock);
4660                ceph_release_page_vector(pages, 1);
4661                goto out_put_lreq;
4662        }
4663        ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
4664                                                 response_data),
4665                                 pages, PAGE_SIZE, 0, false, true);
4666        linger_submit(lreq);
4667        up_write(&osdc->lock);
4668
4669        ret = linger_reg_commit_wait(lreq);
4670        if (!ret)
4671                ret = linger_notify_finish_wait(lreq);
4672        else
4673                dout("lreq %p failed to initiate notify %d\n", lreq, ret);
4674
4675        linger_cancel(lreq);
4676out_put_lreq:
4677        linger_put(lreq);
4678        return ret;
4679}
4680EXPORT_SYMBOL(ceph_osdc_notify);
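
/*
 * Hedged caller sketch for the reply-page contract documented above;
 * reply_pages/reply_len stand for the caller's locals and the reply
 * is released unconditionally, as required:
 *
 *     struct page **reply_pages;
 *     size_t reply_len;
 *
 *     ret = ceph_osdc_notify(osdc, &oid, &oloc, payload, payload_len,
 *                            10, &reply_pages, &reply_len);
 *     if (!ret)
 *             ... inspect the reply in reply_pages ...
 *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
 */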
4681
4682/*
4683 * Return the number of milliseconds since the watch was last
4684 * confirmed, or an error.  If there is an error, the watch is no
4685 * longer valid, and should be destroyed with ceph_osdc_unwatch().
4686 */
4687int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
4688                          struct ceph_osd_linger_request *lreq)
4689{
4690        unsigned long stamp, age;
4691        int ret;
4692
4693        down_read(&osdc->lock);
4694        mutex_lock(&lreq->lock);
4695        stamp = lreq->watch_valid_thru;
4696        if (!list_empty(&lreq->pending_lworks)) {
4697                struct linger_work *lwork =
4698                    list_first_entry(&lreq->pending_lworks,
4699                                     struct linger_work,
4700                                     pending_item);
4701
4702                if (time_before(lwork->queued_stamp, stamp))
4703                        stamp = lwork->queued_stamp;
4704        }
4705        age = jiffies - stamp;
4706        dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
4707             lreq, lreq->linger_id, age, lreq->last_error);
4708        /* we are truncating to msecs, so return a safe upper bound */
4709        ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
4710
4711        mutex_unlock(&lreq->lock);
4712        up_read(&osdc->lock);
4713        return ret;
4714}
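
/*
 * Hedged sketch: a client could poll the watch periodically and tear
 * it down once an error is reported (reregistration is up to the
 * caller; names hypothetical):
 *
 *     ret = ceph_osdc_watch_check(osdc, handle);
 *     if (ret < 0) {
 *             ceph_osdc_unwatch(osdc, handle);
 *             handle = NULL;
 *     }
 */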
4715
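/*
 * Decode a single watch_item_t as encoded by the OSD: a versioned
 * header (ceph_start_decoding), then the watcher's entity name, its
 * cookie, a 32-bit timeout_seconds field (skipped here) and, from
 * struct v2 on, the watcher's address.
 */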
4716static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
4717{
4718        u8 struct_v;
4719        u32 struct_len;
4720        int ret;
4721
4722        ret = ceph_start_decoding(p, end, 2, "watch_item_t",
4723                                  &struct_v, &struct_len);
4724        if (ret)
4725                return ret;
4726
4727        ceph_decode_copy(p, &item->name, sizeof(item->name));
4728        item->cookie = ceph_decode_64(p);
4729        *p += 4; /* skip timeout_seconds */
4730        if (struct_v >= 2) {
4731                ceph_decode_copy(p, &item->addr, sizeof(item->addr));
4732                ceph_decode_addr(&item->addr);
4733        }
4734
4735        dout("%s %s%llu cookie %llu addr %s\n", __func__,
4736             ENTITY_NAME(item->name), item->cookie,
4737             ceph_pr_addr(&item->addr.in_addr));
4738        return 0;
4739}
4740
4741static int decode_watchers(void **p, void *end,
4742                           struct ceph_watch_item **watchers,
4743                           u32 *num_watchers)
4744{
4745        u8 struct_v;
4746        u32 struct_len;
4747        int i;
4748        int ret;
4749
4750        ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
4751                                  &struct_v, &struct_len);
4752        if (ret)
4753                return ret;
4754
4755        *num_watchers = ceph_decode_32(p);
4756        *watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
4757        if (!*watchers)
4758                return -ENOMEM;
4759
4760        for (i = 0; i < *num_watchers; i++) {
4761                ret = decode_watcher(p, end, *watchers + i);
4762                if (ret) {
4763                        kfree(*watchers);
4764                        return ret;
4765                }
4766        }
4767
4768        return 0;
4769}
4770
4771/*
4772 * On success, the caller is responsible for:
4773 *
4774 *     kfree(watchers);
4775 */
4776int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
4777                            struct ceph_object_id *oid,
4778                            struct ceph_object_locator *oloc,
4779                            struct ceph_watch_item **watchers,
4780                            u32 *num_watchers)
4781{
4782        struct ceph_osd_request *req;
4783        struct page **pages;
4784        int ret;
4785
4786        req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4787        if (!req)
4788                return -ENOMEM;
4789
4790        ceph_oid_copy(&req->r_base_oid, oid);
4791        ceph_oloc_copy(&req->r_base_oloc, oloc);
4792        req->r_flags = CEPH_OSD_FLAG_READ;
4793
4794        ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4795        if (ret)
4796                goto out_put_req;
4797
4798        pages = ceph_alloc_page_vector(1, GFP_NOIO);
4799        if (IS_ERR(pages)) {
4800                ret = PTR_ERR(pages);
4801                goto out_put_req;
4802        }
4803
4804        osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
4805        ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
4806                                                 response_data),
4807                                 pages, PAGE_SIZE, 0, false, true);
4808
4809        ceph_osdc_start_request(osdc, req, false);
4810        ret = ceph_osdc_wait_request(osdc, req);
4811        if (ret >= 0) {
4812                void *p = page_address(pages[0]);
4813                void *const end = p + req->r_ops[0].outdata_len;
4814
4815                ret = decode_watchers(&p, end, watchers, num_watchers);
4816        }
4817
4818out_put_req:
4819        ceph_osdc_put_request(req);
4820        return ret;
4821}
4822EXPORT_SYMBOL(ceph_osdc_list_watchers);
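
/*
 * Hedged sketch for the kfree() contract above (names hypothetical):
 *
 *     struct ceph_watch_item *watchers;
 *     u32 num_watchers, i;
 *
 *     ret = ceph_osdc_list_watchers(osdc, &oid, &oloc, &watchers,
 *                                   &num_watchers);
 *     if (ret)
 *             return ret;
 *     for (i = 0; i < num_watchers; i++)
 *             ... inspect watchers[i].name, .cookie, .addr ...
 *     kfree(watchers);
 */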
4823
4824/*
4825 * Call all pending notify callbacks - for use after a watch is
4826 * unregistered, to make sure no more callbacks for it will be invoked.
4827 */
4828void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
4829{
4830        dout("%s osdc %p\n", __func__, osdc);
4831        flush_workqueue(osdc->notify_wq);
4832}
4833EXPORT_SYMBOL(ceph_osdc_flush_notifies);
4834
4835void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
4836{
4837        down_read(&osdc->lock);
4838        maybe_request_map(osdc);
4839        up_read(&osdc->lock);
4840}
4841EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
4842
4843/*
4844 * Execute an OSD class method on an object.
4845 *
4846 * @flags: CEPH_OSD_FLAG_*
4847 * @resp_len: in/out param for reply length
4848 */
4849int ceph_osdc_call(struct ceph_osd_client *osdc,
4850                   struct ceph_object_id *oid,
4851                   struct ceph_object_locator *oloc,
4852                   const char *class, const char *method,
4853                   unsigned int flags,
4854                   struct page *req_page, size_t req_len,
4855                   struct page *resp_page, size_t *resp_len)
4856{
4857        struct ceph_osd_request *req;
4858        int ret;
4859
4860        if (req_len > PAGE_SIZE || (resp_page && *resp_len > PAGE_SIZE))
4861                return -E2BIG;
4862
4863        req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4864        if (!req)
4865                return -ENOMEM;
4866
4867        ceph_oid_copy(&req->r_base_oid, oid);
4868        ceph_oloc_copy(&req->r_base_oloc, oloc);
4869        req->r_flags = flags;
4870
4871        ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4872        if (ret)
4873                goto out_put_req;
4874
4875        osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, class, method);
4876        if (req_page)
4877                osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
4878                                                  0, false, false);
4879        if (resp_page)
4880                osd_req_op_cls_response_data_pages(req, 0, &resp_page,
4881                                                   *resp_len, 0, false, false);
4882
4883        ceph_osdc_start_request(osdc, req, false);
4884        ret = ceph_osdc_wait_request(osdc, req);
4885        if (ret >= 0) {
4886                ret = req->r_ops[0].rval;
4887                if (resp_page)
4888                        *resp_len = req->r_ops[0].outdata_len;
4889        }
4890
4891out_put_req:
4892        ceph_osdc_put_request(req);
4893        return ret;
4894}
4895EXPORT_SYMBOL(ceph_osdc_call);
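
/*
 * Hedged sketch, loosely modeled on how rbd invokes class methods;
 * the "rbd"/"get_id" pair and the page handling are illustrative
 * only.  Note the single-page cap on both request and reply enforced
 * above:
 *
 *     struct page *reply_page = alloc_page(GFP_NOIO);
 *     size_t reply_len = PAGE_SIZE;
 *
 *     ret = ceph_osdc_call(osdc, &oid, &oloc, "rbd", "get_id",
 *                          CEPH_OSD_FLAG_READ, NULL, 0,
 *                          reply_page, &reply_len);
 *     ... on success ret is the method's rval and reply_len is the
 *         number of bytes written to reply_page ...
 *     __free_page(reply_page);
 */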
4896
4897/*
4898 * init, shutdown
4899 */
4900int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
4901{
4902        int err;
4903
4904        dout("init\n");
4905        osdc->client = client;
4906        init_rwsem(&osdc->lock);
4907        osdc->osds = RB_ROOT;
4908        INIT_LIST_HEAD(&osdc->osd_lru);
4909        spin_lock_init(&osdc->osd_lru_lock);
4910        osd_init(&osdc->homeless_osd);
4911        osdc->homeless_osd.o_osdc = osdc;
4912        osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
4913        osdc->last_linger_id = CEPH_LINGER_ID_START;
4914        osdc->linger_requests = RB_ROOT;
4915        osdc->map_checks = RB_ROOT;
4916        osdc->linger_map_checks = RB_ROOT;
4917        INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
4918        INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
4919
4920        err = -ENOMEM;
4921        osdc->osdmap = ceph_osdmap_alloc();
4922        if (!osdc->osdmap)
4923                goto out;
4924
4925        osdc->req_mempool = mempool_create_slab_pool(10,
4926                                                     ceph_osd_request_cache);
4927        if (!osdc->req_mempool)
4928                goto out_map;
4929
4930        err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
4931                                PAGE_SIZE, 10, true, "osd_op");
4932        if (err < 0)
4933                goto out_mempool;
4934        err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
4935                                PAGE_SIZE, 10, true, "osd_op_reply");
4936        if (err < 0)
4937                goto out_msgpool;
4938
4939        err = -ENOMEM;
4940        osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
4941        if (!osdc->notify_wq)
4942                goto out_msgpool_reply;
4943
4944        schedule_delayed_work(&osdc->timeout_work,
4945                              osdc->client->options->osd_keepalive_timeout);
4946        schedule_delayed_work(&osdc->osds_timeout_work,
4947            round_jiffies_relative(osdc->client->options->osd_idle_ttl));
4948
4949        return 0;
4950
4951out_msgpool_reply:
4952        ceph_msgpool_destroy(&osdc->msgpool_op_reply);
4953out_msgpool:
4954        ceph_msgpool_destroy(&osdc->msgpool_op);
4955out_mempool:
4956        mempool_destroy(osdc->req_mempool);
4957out_map:
4958        ceph_osdmap_destroy(osdc->osdmap);
4959out:
4960        return err;
4961}
4962
4963void ceph_osdc_stop(struct ceph_osd_client *osdc)
4964{
4965        flush_workqueue(osdc->notify_wq);
4966        destroy_workqueue(osdc->notify_wq);
4967        cancel_delayed_work_sync(&osdc->timeout_work);
4968        cancel_delayed_work_sync(&osdc->osds_timeout_work);
4969
4970        down_write(&osdc->lock);
4971        while (!RB_EMPTY_ROOT(&osdc->osds)) {
4972                struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
4973                                                struct ceph_osd, o_node);
4974                close_osd(osd);
4975        }
4976        up_write(&osdc->lock);
4977        WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
4978        osd_cleanup(&osdc->homeless_osd);
4979
4980        WARN_ON(!list_empty(&osdc->osd_lru));
4981        WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
4982        WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
4983        WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
4984        WARN_ON(atomic_read(&osdc->num_requests));
4985        WARN_ON(atomic_read(&osdc->num_homeless));
4986
4987        ceph_osdmap_destroy(osdc->osdmap);
4988        mempool_destroy(osdc->req_mempool);
4989        ceph_msgpool_destroy(&osdc->msgpool_op);
4990        ceph_msgpool_destroy(&osdc->msgpool_op_reply);
4991}
4992
4993/*
4994 * Read some contiguous pages.  If we cross a stripe boundary, shorten
4995 * *plen.  Return number of bytes read, or error.
4996 */
4997int ceph_osdc_readpages(struct ceph_osd_client *osdc,
4998                        struct ceph_vino vino, struct ceph_file_layout *layout,
4999                        u64 off, u64 *plen,
5000                        u32 truncate_seq, u64 truncate_size,
5001                        struct page **pages, int num_pages, int page_align)
5002{
5003        struct ceph_osd_request *req;
5004        int rc = 0;
5005
5006        dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
5007             vino.snap, off, *plen);
5008        req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
5009                                    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
5010                                    NULL, truncate_seq, truncate_size,
5011                                    false);
5012        if (IS_ERR(req))
5013                return PTR_ERR(req);
5014
5015        /* it may be a short read due to an object boundary */
5016        osd_req_op_extent_osd_data_pages(req, 0,
5017                                pages, *plen, page_align, false, false);
5018
5019        dout("readpages  final extent is %llu~%llu (%llu bytes align %d)\n",
5020             off, *plen, *plen, page_align);
5021
5022        rc = ceph_osdc_start_request(osdc, req, false);
5023        if (!rc)
5024                rc = ceph_osdc_wait_request(osdc, req);
5025
5026        ceph_osdc_put_request(req);
5027        dout("readpages result %d\n", rc);
5028        return rc;
5029}
5030EXPORT_SYMBOL(ceph_osdc_readpages);
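
/*
 * Hedged sketch: reading a single page (page, off etc. are the
 * caller's; error handling elided):
 *
 *     u64 len = PAGE_SIZE;
 *
 *     ret = ceph_osdc_readpages(osdc, vino, layout, off, &len,
 *                               truncate_seq, truncate_size,
 *                               &page, 1, 0);
 *     ... ret is the number of bytes read; len may have been
 *         shortened to the object boundary ...
 */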
5031
5032/*
5033 * do a synchronous write on N pages
5034 */
5035int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
5036                         struct ceph_file_layout *layout,
5037                         struct ceph_snap_context *snapc,
5038                         u64 off, u64 len,
5039                         u32 truncate_seq, u64 truncate_size,
5040                         struct timespec *mtime,
5041                         struct page **pages, int num_pages)
5042{
5043        struct ceph_osd_request *req;
5044        int rc = 0;
5045        int page_align = off & ~PAGE_MASK;
5046
5047        req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
5048                                    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
5049                                    snapc, truncate_seq, truncate_size,
5050                                    true);
5051        if (IS_ERR(req))
5052                return PTR_ERR(req);
5053
5054        /* it may be a short write due to an object boundary */
5055        osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
5056                                false, false);
5057        dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
5058
5059        req->r_mtime = *mtime;
5060        rc = ceph_osdc_start_request(osdc, req, true);
5061        if (!rc)
5062                rc = ceph_osdc_wait_request(osdc, req);
5063
5064        ceph_osdc_put_request(req);
5065        if (rc == 0)
5066                rc = len;
5067        dout("writepages result %d\n", rc);
5068        return rc;
5069}
5070EXPORT_SYMBOL(ceph_osdc_writepages);
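
/*
 * Hedged sketch mirroring the read side; a write needs an mtime and
 * returns len (not 0) on success:
 *
 *     struct timespec mtime;
 *
 *     ktime_get_real_ts(&mtime);
 *     ret = ceph_osdc_writepages(osdc, vino, layout, snapc, off, len,
 *                                truncate_seq, truncate_size, &mtime,
 *                                pages, num_pages);
 */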
5071
5072int ceph_osdc_setup(void)
5073{
5074        size_t size = sizeof(struct ceph_osd_request) +
5075            CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
5076
5077        BUG_ON(ceph_osd_request_cache);
5078        ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
5079                                                   0, 0, NULL);
5080
5081        return ceph_osd_request_cache ? 0 : -ENOMEM;
5082}
5083EXPORT_SYMBOL(ceph_osdc_setup);
5084
5085void ceph_osdc_cleanup(void)
5086{
5087        BUG_ON(!ceph_osd_request_cache);
5088        kmem_cache_destroy(ceph_osd_request_cache);
5089        ceph_osd_request_cache = NULL;
5090}
5091EXPORT_SYMBOL(ceph_osdc_cleanup);
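
/*
 * ceph_osdc_setup()/ceph_osdc_cleanup() manage the global request
 * slab and are expected to run once at module load/unload, while
 * ceph_osdc_init()/ceph_osdc_stop() above are per-ceph_client.
 */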
5092
5093/*
5094 * handle incoming message
5095 */
5096static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
5097{
5098        struct ceph_osd *osd = con->private;
5099        struct ceph_osd_client *osdc = osd->o_osdc;
5100        int type = le16_to_cpu(msg->hdr.type);
5101
5102        switch (type) {
5103        case CEPH_MSG_OSD_MAP:
5104                ceph_osdc_handle_map(osdc, msg);
5105                break;
5106        case CEPH_MSG_OSD_OPREPLY:
5107                handle_reply(osd, msg);
5108                break;
5109        case CEPH_MSG_OSD_BACKOFF:
5110                handle_backoff(osd, msg);
5111                break;
5112        case CEPH_MSG_WATCH_NOTIFY:
5113                handle_watch_notify(osdc, msg);
5114                break;
5115
5116        default:
5117                pr_err("received unknown message type %d %s\n", type,
5118                       ceph_msg_type_name(type));
5119        }
5120
5121        ceph_msg_put(msg);
5122}
5123
5124/*
5125 * Look up and return the message for an incoming reply.  We don't
5126 * yet try to handle a data portion larger than what was
5127 * preallocated; for now, such a message is simply skipped.
5128 */
5129static struct ceph_msg *get_reply(struct ceph_connection *con,
5130                                  struct ceph_msg_header *hdr,
5131                                  int *skip)
5132{
5133        struct ceph_osd *osd = con->private;
5134        struct ceph_osd_client *osdc = osd->o_osdc;
5135        struct ceph_msg *m = NULL;
5136        struct ceph_osd_request *req;
5137        int front_len = le32_to_cpu(hdr->front_len);
5138        int data_len = le32_to_cpu(hdr->data_len);
5139        u64 tid = le64_to_cpu(hdr->tid);
5140
5141        down_read(&osdc->lock);
5142        if (!osd_registered(osd)) {
5143                dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
5144                *skip = 1;
5145                goto out_unlock_osdc;
5146        }
5147        WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));
5148
5149        mutex_lock(&osd->lock);
5150        req = lookup_request(&osd->o_requests, tid);
5151        if (!req) {
5152                dout("%s osd%d tid %llu unknown, skipping\n", __func__,
5153                     osd->o_osd, tid);
5154                *skip = 1;
5155                goto out_unlock_session;
5156        }
5157
5158        ceph_msg_revoke_incoming(req->r_reply);
5159
5160        if (front_len > req->r_reply->front_alloc_len) {
5161                pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
5162                        __func__, osd->o_osd, req->r_tid, front_len,
5163                        req->r_reply->front_alloc_len);
5164                m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
5165                                 false);
5166                if (!m)
5167                        goto out_unlock_session;
5168                ceph_msg_put(req->r_reply);
5169                req->r_reply = m;
5170        }
5171
5172        if (data_len > req->r_reply->data_length) {
5173                pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
5174                        __func__, osd->o_osd, req->r_tid, data_len,
5175                        req->r_reply->data_length);
5176                m = NULL;
5177                *skip = 1;
5178                goto out_unlock_session;
5179        }
5180
5181        m = ceph_msg_get(req->r_reply);
5182        dout("get_reply tid %lld %p\n", tid, m);
5183
5184out_unlock_session:
5185        mutex_unlock(&osd->lock);
5186out_unlock_osdc:
5187        up_read(&osdc->lock);
5188        return m;
5189}
5190
5191/*
5192 * TODO: switch to a msg-owned pagelist
5193 */
5194static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
5195{
5196        struct ceph_msg *m;
5197        int type = le16_to_cpu(hdr->type);
5198        u32 front_len = le32_to_cpu(hdr->front_len);
5199        u32 data_len = le32_to_cpu(hdr->data_len);
5200
5201        m = ceph_msg_new(type, front_len, GFP_NOIO, false);
5202        if (!m)
5203                return NULL;
5204
5205        if (data_len) {
5206                struct page **pages;
5207                struct ceph_osd_data osd_data;
5208
5209                pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
5210                                               GFP_NOIO);
5211                if (IS_ERR(pages)) {
5212                        ceph_msg_put(m);
5213                        return NULL;
5214                }
5215
5216                ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
5217                                         false);
5218                ceph_osdc_msg_data_add(m, &osd_data);
5219        }
5220
5221        return m;
5222}
5223
5224static struct ceph_msg *alloc_msg(struct ceph_connection *con,
5225                                  struct ceph_msg_header *hdr,
5226                                  int *skip)
5227{
5228        struct ceph_osd *osd = con->private;
5229        int type = le16_to_cpu(hdr->type);
5230
5231        *skip = 0;
5232        switch (type) {
5233        case CEPH_MSG_OSD_MAP:
5234        case CEPH_MSG_OSD_BACKOFF:
5235        case CEPH_MSG_WATCH_NOTIFY:
5236                return alloc_msg_with_page_vector(hdr);
5237        case CEPH_MSG_OSD_OPREPLY:
5238                return get_reply(con, hdr, skip);
5239        default:
5240                pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
5241                        osd->o_osd, type);
5242                *skip = 1;
5243                return NULL;
5244        }
5245}
5246
5247/*
5248 * Wrappers to refcount containing ceph_osd struct
5249 */
5250static struct ceph_connection *get_osd_con(struct ceph_connection *con)
5251{
5252        struct ceph_osd *osd = con->private;
5253        if (get_osd(osd))
5254                return con;
5255        return NULL;
5256}
5257
5258static void put_osd_con(struct ceph_connection *con)
5259{
5260        struct ceph_osd *osd = con->private;
5261        put_osd(osd);
5262}
5263
5264/*
5265 * authentication
5266 */
5267/*
5268 * Note: returned pointer is the address of a structure that's
5269 * managed separately.  Caller must *not* attempt to free it.
5270 */
5271static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
5272                                        int *proto, int force_new)
5273{
5274        struct ceph_osd *o = con->private;
5275        struct ceph_osd_client *osdc = o->o_osdc;
5276        struct ceph_auth_client *ac = osdc->client->monc.auth;
5277        struct ceph_auth_handshake *auth = &o->o_auth;
5278
5279        if (force_new && auth->authorizer) {
5280                ceph_auth_destroy_authorizer(auth->authorizer);
5281                auth->authorizer = NULL;
5282        }
5283        if (!auth->authorizer) {
5284                int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
5285                                                      auth);
5286                if (ret)
5287                        return ERR_PTR(ret);
5288        } else {
5289                int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
5290                                                     auth);
5291                if (ret)
5292                        return ERR_PTR(ret);
5293        }
5294        *proto = ac->protocol;
5295
5296        return auth;
5297}
5298
5299
5300static int verify_authorizer_reply(struct ceph_connection *con)
5301{
5302        struct ceph_osd *o = con->private;
5303        struct ceph_osd_client *osdc = o->o_osdc;
5304        struct ceph_auth_client *ac = osdc->client->monc.auth;
5305
5306        return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
5307}
5308
5309static int invalidate_authorizer(struct ceph_connection *con)
5310{
5311        struct ceph_osd *o = con->private;
5312        struct ceph_osd_client *osdc = o->o_osdc;
5313        struct ceph_auth_client *ac = osdc->client->monc.auth;
5314
5315        ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
5316        return ceph_monc_validate_auth(&osdc->client->monc);
5317}
5318
5319static void osd_reencode_message(struct ceph_msg *msg)
5320{
5321        int type = le16_to_cpu(msg->hdr.type);
5322
5323        if (type == CEPH_MSG_OSD_OP)
5324                encode_request_finish(msg);
5325}
5326
5327static int osd_sign_message(struct ceph_msg *msg)
5328{
5329        struct ceph_osd *o = msg->con->private;
5330        struct ceph_auth_handshake *auth = &o->o_auth;
5331
5332        return ceph_auth_sign_message(auth, msg);
5333}
5334
5335static int osd_check_message_signature(struct ceph_msg *msg)
5336{
5337        struct ceph_osd *o = msg->con->private;
5338        struct ceph_auth_handshake *auth = &o->o_auth;
5339
5340        return ceph_auth_check_message_signature(auth, msg);
5341}
5342
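/*
 * The connection operations table handed to the ceph messenger; it
 * drives dispatch, message allocation, authorization and message
 * signing for OSD sessions via the callbacks above.
 */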
5343static const struct ceph_connection_operations osd_con_ops = {
5344        .get = get_osd_con,
5345        .put = put_osd_con,
5346        .dispatch = dispatch,
5347        .get_authorizer = get_authorizer,
5348        .verify_authorizer_reply = verify_authorizer_reply,
5349        .invalidate_authorizer = invalidate_authorizer,
5350        .alloc_msg = alloc_msg,
5351        .reencode_message = osd_reencode_message,
5352        .sign_message = osd_sign_message,
5353        .check_message_signature = osd_check_message_signature,
5354        .fault = osd_fault,
5355};
5356