linux/fs/ceph/mds_client.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_MDS_CLIENT_H
#define _FS_CEPH_MDS_CLIENT_H

#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/refcount.h>
#include <linux/utsname.h>

#include <linux/ceph/types.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/mdsmap.h>
#include <linux/ceph/auth.h>

/*
 * Some lock dependencies:
 *
 * session->s_mutex
 *         mdsc->mutex
 *
 *         mdsc->snap_rwsem
 *
 *         ci->i_ceph_lock
 *                 mdsc->snap_flush_lock
 *                 mdsc->cap_delay_lock
 *
 */
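
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the nesting above means an outer lock is taken before any lock listed
 * under it, e.g. a path that needs both the session mutex and the mdsc
 * mutex does roughly:
 *
 *	mutex_lock(&session->s_mutex);
 *	mutex_lock(&mdsc->mutex);
 *	...
 *	mutex_unlock(&mdsc->mutex);
 *	mutex_unlock(&session->s_mutex);
 */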

struct ceph_fs_client;
struct ceph_cap;

/*
 * parsed info about a single inode.  pointers are into the encoded
 * on-wire structures within the mds reply message payload.
 */
struct ceph_mds_reply_info_in {
        struct ceph_mds_reply_inode *in;
        struct ceph_dir_layout dir_layout;
        u32 symlink_len;
        char *symlink;
        u32 xattr_len;
        char *xattr_data;
        u64 inline_version;
        u32 inline_len;
        char *inline_data;
        u32 pool_ns_len;
        char *pool_ns_data;
        u64 max_bytes;
        u64 max_files;
};

struct ceph_mds_reply_dir_entry {
        char                          *name;
        u32                           name_len;
        struct ceph_mds_reply_lease   *lease;
        struct ceph_mds_reply_info_in inode;
        loff_t                        offset;
};

/*
 * parsed info about an mds reply, including information about
 * either: 1) the target inode and/or its parent directory and dentry,
 * and directory contents (for readdir results), or
 * 2) the file range lock info (for fcntl F_GETLK results).
 */
struct ceph_mds_reply_info_parsed {
        struct ceph_mds_reply_head    *head;

        /* trace */
        struct ceph_mds_reply_info_in diri, targeti;
        struct ceph_mds_reply_dirfrag *dirfrag;
        char                          *dname;
        u32                           dname_len;
        struct ceph_mds_reply_lease   *dlease;

        /* extra */
        union {
                /* for fcntl F_GETLK results */
                struct ceph_filelock *filelock_reply;

                /* for readdir results */
                struct {
                        struct ceph_mds_reply_dirfrag *dir_dir;
                        size_t                        dir_buf_size;
                        int                           dir_nr;
                        bool                          dir_end;
                        bool                          dir_complete;
                        bool                          hash_order;
                        bool                          offset_hash;
                        struct ceph_mds_reply_dir_entry  *dir_entries;
                };

                /* for create results */
                struct {
                        bool has_create_ino;
                        u64 ino;
                };
        };

        /* encoded blob describing snapshot contexts for certain
           operations (e.g., open) */
        void *snapblob;
        int snapblob_len;
};


/*
 * cap releases are batched and sent to the MDS en masse.
 *
 * Account for per-message overhead of mds_cap_release header
 * and __le32 for osd epoch barrier trailing field.
 */
#define CEPH_CAPS_PER_RELEASE ((PAGE_SIZE - sizeof(u32) -               \
                                sizeof(struct ceph_mds_cap_release)) /  \
                                sizeof(struct ceph_mds_cap_item))
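
/*
 * Rough worked example (editor's illustration, assuming 4 KiB pages and
 * the on-wire layouts of struct ceph_mds_cap_release and struct
 * ceph_mds_cap_item being roughly 4 and 24 bytes respectively):
 *
 *	CEPH_CAPS_PER_RELEASE ~= (4096 - 4 - 4) / 24 = 170
 *
 * i.e. on the order of a couple hundred cap releases fit in one message.
 */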


/*
 * state associated with each MDS<->client session
 */
enum {
        CEPH_MDS_SESSION_NEW = 1,
        CEPH_MDS_SESSION_OPENING = 2,
        CEPH_MDS_SESSION_OPEN = 3,
        CEPH_MDS_SESSION_HUNG = 4,
        CEPH_MDS_SESSION_CLOSING = 5,
        CEPH_MDS_SESSION_RESTARTING = 6,
        CEPH_MDS_SESSION_RECONNECTING = 7,
        CEPH_MDS_SESSION_REJECTED = 8,
};
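
/*
 * Editor's note (informal, not from the original source): a session
 * normally moves NEW -> OPENING -> OPEN; it is marked HUNG when cap
 * renewals go unanswered, CLOSING while being torn down, RESTARTING and
 * then RECONNECTING while client state is replayed after an MDS restart,
 * and REJECTED if the MDS refuses the session.
 */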

struct ceph_mds_session {
        struct ceph_mds_client *s_mdsc;
        int               s_mds;
        int               s_state;
        unsigned long     s_ttl;      /* time until mds kills us */
        u64               s_seq;      /* incoming msg seq # */
        struct mutex      s_mutex;    /* serialize session messages */

        struct ceph_connection s_con;

        struct ceph_auth_handshake s_auth;

        /* protected by s_gen_ttl_lock */
        spinlock_t        s_gen_ttl_lock;
        u32               s_cap_gen;  /* inc each time we get mds stale msg */
        unsigned long     s_cap_ttl;  /* when session caps expire */

        /* protected by s_cap_lock */
        spinlock_t        s_cap_lock;
        struct list_head  s_caps;     /* all caps issued by this session */
        int               s_nr_caps, s_trim_caps;
        int               s_num_cap_releases;
        int               s_cap_reconnect;
        int               s_readonly;
        struct list_head  s_cap_releases; /* waiting cap_release messages */
        struct ceph_cap  *s_cap_iterator;

        /* protected by mutex */
        struct list_head  s_cap_flushing;     /* inodes w/ flushing caps */
        unsigned long     s_renew_requested; /* last time we sent a renew req */
        u64               s_renew_seq;

        refcount_t          s_ref;
        struct list_head  s_waiting;  /* waiting requests */
        struct list_head  s_unsafe;   /* unsafe requests */
};

/*
 * modes of choosing which MDS to send a request to
 */
enum {
        USE_ANY_MDS,
        USE_RANDOM_MDS,
        USE_AUTH_MDS,   /* prefer authoritative mds for this metadata item */
};

struct ceph_mds_request;
struct ceph_mds_client;

/*
 * request completion callback
 */
typedef void (*ceph_mds_request_callback_t) (struct ceph_mds_client *mdsc,
                                             struct ceph_mds_request *req);
/*
 * wait for request completion callback
 */
typedef int (*ceph_mds_request_wait_callback_t) (struct ceph_mds_client *mdsc,
                                                 struct ceph_mds_request *req);
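
/*
 * Illustrative sketch (editor's addition): callers that want asynchronous
 * notification set r_callback on the request before handing it to the mds
 * client, roughly:
 *
 *	static void my_done(struct ceph_mds_client *mdsc,
 *			    struct ceph_mds_request *req)
 *	{
 *		... inspect req->r_err and req->r_reply_info here ...
 *	}
 *	...
 *	req->r_callback = my_done;
 *	ceph_mdsc_submit_request(mdsc, req);
 *
 * "my_done" is a hypothetical name, not something defined in this tree.
 */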

/*
 * an in-flight mds request
 */
struct ceph_mds_request {
        u64 r_tid;                   /* transaction id */
        struct rb_node r_node;
        struct ceph_mds_client *r_mdsc;

        int r_op;                    /* mds op code */

        /* operation on what? */
        struct inode *r_inode;              /* arg1 */
        struct dentry *r_dentry;            /* arg1 */
        struct dentry *r_old_dentry;        /* arg2: rename from or link from */
        struct inode *r_old_dentry_dir;     /* arg2: old dentry's parent dir */
        char *r_path1, *r_path2;
        struct ceph_vino r_ino1, r_ino2;

        struct inode *r_parent;             /* parent dir inode */
        struct inode *r_target_inode;       /* resulting inode */

#define CEPH_MDS_R_DIRECT_IS_HASH       (1) /* r_direct_hash is valid */
#define CEPH_MDS_R_ABORTED              (2) /* call was aborted */
#define CEPH_MDS_R_GOT_UNSAFE           (3) /* got an unsafe reply */
#define CEPH_MDS_R_GOT_SAFE             (4) /* got a safe reply */
#define CEPH_MDS_R_GOT_RESULT           (5) /* got a result */
#define CEPH_MDS_R_DID_PREPOPULATE      (6) /* prepopulated readdir */
#define CEPH_MDS_R_PARENT_LOCKED        (7) /* is r_parent->i_rwsem wlocked? */
        unsigned long   r_req_flags;
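
        /*
         * Editor's note: the CEPH_MDS_R_* values above are bit numbers,
         * not masks; they are intended for the atomic bitops, e.g.:
         *
         *	set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
         *	if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags))
         *		...
         */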

        struct mutex r_fill_mutex;

        union ceph_mds_request_args r_args;
        int r_fmode;        /* file mode, if expecting cap */
        kuid_t r_uid;
        kgid_t r_gid;
        struct timespec r_stamp;

        /* for choosing which mds to send this request to */
        int r_direct_mode;
        u32 r_direct_hash;      /* choose dir frag based on this dentry hash */

        /* data payload is used for xattr ops */
        struct ceph_pagelist *r_pagelist;

        /* what caps shall we drop? */
        int r_inode_drop, r_inode_unless;
        int r_dentry_drop, r_dentry_unless;
        int r_old_dentry_drop, r_old_dentry_unless;
        struct inode *r_old_inode;
        int r_old_inode_drop, r_old_inode_unless;

        struct ceph_msg  *r_request;  /* original request */
        int r_request_release_offset;
        struct ceph_msg  *r_reply;
        struct ceph_mds_reply_info_parsed r_reply_info;
        struct page *r_locked_page;
        int r_err;

        unsigned long r_timeout;  /* optional.  jiffies, 0 is "wait forever" */
        unsigned long r_started;  /* start time to measure timeout against */
        unsigned long r_request_started; /* start time for mds request only,
                                            used to measure lease durations */

        /* link unsafe requests to parent directory, for fsync */
        struct inode    *r_unsafe_dir;
        struct list_head r_unsafe_dir_item;

        /* unsafe requests that modify the target inode */
        struct list_head r_unsafe_target_item;

        struct ceph_mds_session *r_session;

        int               r_attempts;   /* resend attempts */
        int               r_num_fwd;    /* number of forward attempts */
        int               r_resend_mds; /* mds to resend to next, if any */
        u32               r_sent_on_mseq; /* cap mseq request was sent at */

        struct kref       r_kref;
        struct list_head  r_wait;
        struct completion r_completion;
        struct completion r_safe_completion;
        ceph_mds_request_callback_t r_callback;
        ceph_mds_request_wait_callback_t r_wait_for_completion;
        struct list_head  r_unsafe_item;  /* per-session unsafe list item */

        long long         r_dir_release_cnt;
        long long         r_dir_ordered_cnt;
        int               r_readdir_cache_idx;
        u32               r_readdir_offset;

        struct ceph_cap_reservation r_caps_reservation;
        int r_num_caps;
};

struct ceph_pool_perm {
        struct rb_node node;
        int perm;
        s64 pool;
        size_t pool_ns_len;
        char pool_ns[];
};

/*
 * mds client state
 */
struct ceph_mds_client {
        struct ceph_fs_client  *fsc;
        struct mutex            mutex;         /* all nested structures */

        struct ceph_mdsmap      *mdsmap;
        struct completion       safe_umount_waiters;
        wait_queue_head_t       session_close_wq;
        struct list_head        waiting_for_map;
        int                     mdsmap_err;

        struct ceph_mds_session **sessions;    /* NULL if no session for that mds */
        atomic_t                num_sessions;
        int                     max_sessions;  /* len of the sessions array */
        int                     stopping;      /* true if shutting down */

        atomic64_t              quotarealms_count; /* # realms with quota */

        /*
         * snap_rwsem will cover cap linkage into snaprealms, and
         * realm snap contexts.  (later, we can do per-realm snap
         * contexts locks..)  the empty list contains realms with no
         * references (implying they contain no inodes with caps) that
         * should be destroyed.
         */
        u64                     last_snap_seq;
        struct rw_semaphore     snap_rwsem;
        struct rb_root          snap_realms;
        struct list_head        snap_empty;
        spinlock_t              snap_empty_lock;  /* protect snap_empty */

        u64                    last_tid;      /* most recent mds request */
        u64                    oldest_tid;    /* oldest incomplete mds request,
                                                 excluding setfilelock requests */
        struct rb_root         request_tree;  /* pending mds requests */
        struct delayed_work    delayed_work;  /* delayed work */
        unsigned long    last_renew_caps;  /* last time we renewed our caps */
        struct list_head cap_delay_list;   /* caps with delayed release */
        spinlock_t       cap_delay_lock;   /* protects cap_delay_list */
        struct list_head snap_flush_list;  /* cap_snaps ready to flush */
        spinlock_t       snap_flush_lock;

        u64               last_cap_flush_tid;
        struct list_head  cap_flush_list;
        struct list_head  cap_dirty;        /* inodes with dirty caps */
        struct list_head  cap_dirty_migrating; /* ...that are migrating... */
        int               num_cap_flushing; /* # caps we are flushing */
        spinlock_t        cap_dirty_lock;   /* protects above items */
        wait_queue_head_t cap_flushing_wq;

        /*
         * Cap reservations
         *
         * Maintain a global pool of preallocated struct ceph_caps, referenced
         * by struct ceph_cap_reservation contexts.  This ensures that we preallocate
         * memory needed to successfully process an MDS response.  (If an MDS
         * sends us cap information and we fail to process it, we will have
         * problems due to the client and MDS being out of sync.)
         *
         * Reservations are 'owned' by a ceph_cap_reservation context.
         */
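
        /*
         * Illustrative sketch (editor's addition; the reservation helpers
         * are declared elsewhere, in fs/ceph/super.h, and their exact
         * signatures may differ; "need" stands for the number of caps the
         * caller expects to receive):
         *
         *	struct ceph_cap_reservation rsv = { 0 };
         *
         *	ceph_reserve_caps(mdsc, &rsv, need);
         *	... process the MDS reply, consuming caps from rsv ...
         *	ceph_unreserve_caps(mdsc, &rsv);
         */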
        spinlock_t      caps_list_lock;
        struct          list_head caps_list; /* unused (reserved or
                                                unreserved) */
        int             caps_total_count;    /* total caps allocated */
        int             caps_use_count;      /* in use */
        int             caps_reserve_count;  /* unused, reserved */
        int             caps_avail_count;    /* unused, unreserved */
        int             caps_min_count;      /* keep at least this many
                                                (unreserved) */
        spinlock_t        dentry_lru_lock;
        struct list_head  dentry_lru;
        int               num_dentry;

        struct rw_semaphore     pool_perm_rwsem;
        struct rb_root          pool_perm_tree;

        char nodename[__NEW_UTS_LEN + 1];
};

extern const char *ceph_mds_op_name(int op);

extern struct ceph_mds_session *
__ceph_lookup_mds_session(struct ceph_mds_client *, int mds);

static inline struct ceph_mds_session *
ceph_get_mds_session(struct ceph_mds_session *s)
{
        refcount_inc(&s->s_ref);
        return s;
}

extern const char *ceph_session_state_name(int s);

extern void ceph_put_mds_session(struct ceph_mds_session *s);
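
/*
 * Editor's note: every reference obtained via ceph_get_mds_session()
 * (or returned by a lookup helper that takes a reference on the caller's
 * behalf) must eventually be dropped with ceph_put_mds_session().
 */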

extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc,
                             struct ceph_msg *msg, int mds);

extern int ceph_mdsc_init(struct ceph_fs_client *fsc);
extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc);
extern void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc);
extern void ceph_mdsc_destroy(struct ceph_fs_client *fsc);

extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc);

extern void ceph_invalidate_dir_request(struct ceph_mds_request *req);
extern int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
                                           struct inode *dir);
extern struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode);
extern void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
                                     struct ceph_mds_request *req);
extern int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
                                struct inode *dir,
                                struct ceph_mds_request *req);
static inline void ceph_mdsc_get_request(struct ceph_mds_request *req)
{
        kref_get(&req->r_kref);
}
extern void ceph_mdsc_release_request(struct kref *kref);
static inline void ceph_mdsc_put_request(struct ceph_mds_request *req)
{
        kref_put(&req->r_kref, ceph_mdsc_release_request);
}
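
/*
 * Illustrative sketch of a synchronous request (editor's addition; error
 * handling and request-argument setup are abbreviated, and the getattr op
 * is just an example):
 *
 *	struct ceph_mds_request *req;
 *	int err;
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR,
 *				       USE_ANY_MDS);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_inode = inode;
 *	ihold(inode);
 *	err = ceph_mdsc_do_request(mdsc, NULL, req);
 *	ceph_mdsc_put_request(req);
 */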

extern void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
                                   struct ceph_mds_session *session);

extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);

extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
                                  int stop_on_nosnap);

extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry);
extern void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
                                     struct inode *inode,
                                     struct dentry *dentry, char action,
                                     u32 seq);

extern void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc,
                                    struct ceph_msg *msg);
extern void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc,
                                   struct ceph_msg *msg);

extern struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target);
extern void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
                                          struct ceph_mds_session *session);

extern int ceph_trim_caps(struct ceph_mds_client *mdsc,
                          struct ceph_mds_session *session,
                          int max_caps);
#endif