linux/drivers/staging/lustre/lustre/llite/xattr_cache.c
/*
 * Copyright 2012 Xyratex Technology Limited
 *
 * Copyright (c) 2013, 2015, Intel Corporation.
 *
 * Author: Andrew Perepechko <Andrew_Perepechko@xyratex.com>
 *
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include "../include/obd_support.h"
#include "../include/lustre_dlm.h"
#include "../include/lustre_ver.h"
#include "llite_internal.h"

/* If we ever have hundreds of extended attributes, we might want to consider
 * using a hash or a tree structure instead of a list for faster lookups.
 */
struct ll_xattr_entry {
        struct list_head        xe_list;    /* protected with
                                             * lli_xattrs_list_rwsem
                                             */
        char                    *xe_name;   /* xattr name, \0-terminated */
        char                    *xe_value;  /* xattr value */
        unsigned                xe_namelen; /* strlen(xe_name) + 1 */
        unsigned                xe_vallen;  /* xattr value length */
};
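
/*
 * Illustration (editor's note, not part of the original source): caching
 * the xattr "user.foo" with the 3-byte value "bar" would produce an entry
 * with xe_name = "user.foo", xe_namelen = 9 (strlen() plus the trailing
 * NUL), xe_value pointing at a 3-byte copy of "bar" and xe_vallen = 3.
 * xe_value is not NUL-terminated; xe_vallen is the authoritative length.
 */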

static struct kmem_cache *xattr_kmem;
static struct lu_kmem_descr xattr_caches[] = {
        {
                .ckd_cache = &xattr_kmem,
                .ckd_name  = "xattr_kmem",
                .ckd_size  = sizeof(struct ll_xattr_entry)
        },
        {
                .ckd_cache = NULL
        }
};

int ll_xattr_init(void)
{
        return lu_kmem_init(xattr_caches);
}

void ll_xattr_fini(void)
{
        lu_kmem_fini(xattr_caches);
}

/**
 * Initializes xattr cache for an inode.
 *
 * This initializes the xattr list and marks cache presence.
 */
static void ll_xattr_cache_init(struct ll_inode_info *lli)
{
        INIT_LIST_HEAD(&lli->lli_xattrs);
        lli->lli_flags |= LLIF_XATTR_CACHE;
}

/**
 * This looks for a specific extended attribute.
 *
 * Find the @xattr_name attribute in @cache and return it in @xattr;
 * if @xattr_name is NULL, return the first cached @xattr.
 *
 * \retval 0        success
 * \retval -ENODATA if not found
 */
static int ll_xattr_cache_find(struct list_head *cache,
                               const char *xattr_name,
                               struct ll_xattr_entry **xattr)
{
        struct ll_xattr_entry *entry;

        list_for_each_entry(entry, cache, xe_list) {
                /* xattr_name == NULL means look for any entry */
                if (!xattr_name || strcmp(xattr_name, entry->xe_name) == 0) {
                        *xattr = entry;
                        CDEBUG(D_CACHE, "find: [%s]=%.*s\n",
                               entry->xe_name, entry->xe_vallen,
                               entry->xe_value);
                        return 0;
                }
        }

        return -ENODATA;
}

/**
 * This adds an xattr.
 *
 * Add the @xattr_name attribute with value @xattr_val of length
 * @xattr_val_len to @cache.
 *
 * \retval 0       success
 * \retval -ENOMEM if no memory could be allocated for the cached attr
 * \retval -EPROTO if a duplicate xattr is being added
 */
static int ll_xattr_cache_add(struct list_head *cache,
                              const char *xattr_name,
                              const char *xattr_val,
                              unsigned xattr_val_len)
{
        struct ll_xattr_entry *xattr;

        if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
                CDEBUG(D_CACHE, "duplicate xattr: [%s]\n", xattr_name);
                return -EPROTO;
        }

        xattr = kmem_cache_zalloc(xattr_kmem, GFP_NOFS);
        if (!xattr) {
                CDEBUG(D_CACHE, "failed to allocate xattr\n");
                return -ENOMEM;
        }

        /* xe_namelen covers the name plus its trailing NUL;
         * ll_xattr_cache_list() relies on it.
         */
        xattr->xe_namelen = strlen(xattr_name) + 1;

        xattr->xe_name = kstrdup(xattr_name, GFP_NOFS);
        if (!xattr->xe_name) {
                CDEBUG(D_CACHE, "failed to alloc xattr name %u\n",
                       xattr->xe_namelen);
                goto err_name;
        }
        xattr->xe_value = kmemdup(xattr_val, xattr_val_len, GFP_NOFS);
        if (!xattr->xe_value)
                goto err_value;

        xattr->xe_vallen = xattr_val_len;
        list_add(&xattr->xe_list, cache);

        CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name, xattr_val_len,
               xattr_val);

        return 0;
err_value:
        kfree(xattr->xe_name);
err_name:
        kmem_cache_free(xattr_kmem, xattr);

        return -ENOMEM;
}
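
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * list_add() prepends, so the most recently cached attribute is the first
 * one ll_xattr_cache_find() visits.  A refill loop would add entries
 * roughly like this, assuming lli_xattrs_list_rwsem is held for write:
 *
 *	rc = ll_xattr_cache_add(&lli->lli_xattrs, "user.foo", "bar", 3);
 *	if (rc == -EPROTO)
 *		// "user.foo" was already cached
 */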

/**
 * This removes an extended attribute from cache.
 *
 * Remove the @xattr_name attribute from @cache.
 * A NULL @xattr_name removes the first cached attribute.
 *
 * \retval 0        success
 * \retval -ENODATA if @xattr_name is not cached
 */
static int ll_xattr_cache_del(struct list_head *cache,
                              const char *xattr_name)
{
        struct ll_xattr_entry *xattr;

        CDEBUG(D_CACHE, "del xattr: %s\n", xattr_name);

        if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
                list_del(&xattr->xe_list);
                kfree(xattr->xe_name);
                kfree(xattr->xe_value);
                kmem_cache_free(xattr_kmem, xattr);

                return 0;
        }

        return -ENODATA;
}

/**
 * This iterates cached extended attributes.
 *
 * Walk over cached attributes in @cache and
 * fill in @xld_buffer or only calculate buffer
 * size if @xld_buffer is NULL.
 *
 * \retval >= 0     buffer list size
 * \retval -ERANGE  if the list does not fit into a buffer of @xld_size
 */
static int ll_xattr_cache_list(struct list_head *cache,
                               char *xld_buffer,
                               int xld_size)
{
        struct ll_xattr_entry *xattr, *tmp;
        int xld_tail = 0;

        list_for_each_entry_safe(xattr, tmp, cache, xe_list) {
                CDEBUG(D_CACHE, "list: buffer=%p[%d] name=%s\n",
                       xld_buffer, xld_tail, xattr->xe_name);

                if (xld_buffer) {
                        xld_size -= xattr->xe_namelen;
                        if (xld_size < 0)
                                break;
                        memcpy(&xld_buffer[xld_tail],
                               xattr->xe_name, xattr->xe_namelen);
                }
                xld_tail += xattr->xe_namelen;
        }

        if (xld_size < 0)
                return -ERANGE;

        return xld_tail;
}
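
/*
 * Editor's illustration (not from the original source): the produced
 * buffer follows the listxattr(2) convention of concatenated
 * NUL-terminated names, e.g. "user.foo\0user.bar\0", and the return
 * value is the total length in bytes.  Callers can probe the required
 * size first by passing a NULL buffer:
 *
 *	len = ll_xattr_cache_list(&lli->lli_xattrs, NULL, 0);
 *	// then allocate len bytes and call again with the real buffer
 */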

/**
 * Check if the xattr cache is initialized (filled).
 *
 * \retval 0 the cache is not initialized
 * \retval 1 the cache is initialized
 */
static int ll_xattr_cache_valid(struct ll_inode_info *lli)
{
        return !!(lli->lli_flags & LLIF_XATTR_CACHE);
}

/**
 * This finalizes the xattr cache.
 *
 * Free all xattr memory. @lli is the inode info pointer.
 *
 * \retval 0 no error occurred
 */
static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
{
        if (!ll_xattr_cache_valid(lli))
                return 0;

        while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0)
                ; /* empty loop */
        lli->lli_flags &= ~LLIF_XATTR_CACHE;

        return 0;
}

int ll_xattr_cache_destroy(struct inode *inode)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        int rc;

        down_write(&lli->lli_xattrs_list_rwsem);
        rc = ll_xattr_cache_destroy_locked(lli);
        up_write(&lli->lli_xattrs_list_rwsem);

        return rc;
}

/**
 * Match or enqueue a PR lock.
 *
 * Find or request an LDLM lock with xattr data.
 * Since LDLM does not provide an API for atomic match-or-enqueue,
 * the function serializes the two steps with a separate enqueue
 * mutex (lli_xattrs_enq_lock).
 * If successful, the function exits with lli_xattrs_list_rwsem
 * held for write.
 *
 * \retval 0       no error occurred
 * \retval -ENOMEM not enough memory
 */
static int ll_xattr_find_get_lock(struct inode *inode,
                                  struct lookup_intent *oit,
                                  struct ptlrpc_request **req)
{
        enum ldlm_mode mode;
        struct lustre_handle lockh = { 0 };
        struct md_op_data *op_data;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct ldlm_enqueue_info einfo = {
                .ei_type = LDLM_IBITS,
                .ei_mode = it_to_lock_mode(oit),
                .ei_cb_bl = &ll_md_blocking_ast,
                .ei_cb_cp = &ldlm_completion_ast,
        };
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct obd_export *exp = sbi->ll_md_exp;
        int rc;

        mutex_lock(&lli->lli_xattrs_enq_lock);
        /* The inode may have been shrunk and recreated, in which case the
         * cached data is gone; only match an existing lock when the cache
         * is still valid.
         */
        if (ll_xattr_cache_valid(lli)) {
                /* Try matching first. */
                mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0,
                                       LCK_PR);
                if (mode != 0) {
                        /* fake oit in mdc_revalidate_lock() manner */
                        oit->it_lock_handle = lockh.cookie;
                        oit->it_lock_mode = mode;
                        goto out;
                }
        }

        /* Enqueue if the lock isn't cached locally. */
        op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
                                     LUSTRE_OPC_ANY, NULL);
        if (IS_ERR(op_data)) {
                mutex_unlock(&lli->lli_xattrs_enq_lock);
                return PTR_ERR(op_data);
        }

        op_data->op_valid = OBD_MD_FLXATTR | OBD_MD_FLXATTRLS;

        rc = md_enqueue(exp, &einfo, NULL, oit, op_data, &lockh, 0);
        ll_finish_md_op_data(op_data);

        if (rc < 0) {
                CDEBUG(D_CACHE,
                       "md_intent_lock failed with %d for fid "DFID"\n",
                       rc, PFID(ll_inode2fid(inode)));
                mutex_unlock(&lli->lli_xattrs_enq_lock);
                return rc;
        }

        *req = oit->it_request;
out:
        down_write(&lli->lli_xattrs_list_rwsem);
        mutex_unlock(&lli->lli_xattrs_enq_lock);

        return 0;
}
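
/*
 * Editor's note (not part of the original file): the locking order the
 * function relies on is, in sketch form,
 *
 *	mutex_lock(&lli->lli_xattrs_enq_lock);
 *	... match an existing PR lock or enqueue a new one ...
 *	down_write(&lli->lli_xattrs_list_rwsem);
 *	mutex_unlock(&lli->lli_xattrs_enq_lock);
 *
 * so on success the caller always holds lli_xattrs_list_rwsem for write,
 * while lli_xattrs_enq_lock only serializes concurrent match-or-enqueue
 * attempts.
 */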

/**
 * Refill the xattr cache.
 *
 * Fetch and cache the whole set of xattrs for @inode, acquiring
 * a read or a write xattr lock depending on the operation in @oit.
 * The intent is dropped on exit unless the operation is setxattr.
 *
 * \retval 0       no error occurred
 * \retval -EPROTO network protocol error
 * \retval -ENOMEM not enough memory for the cache
 */
static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
{
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct ptlrpc_request *req = NULL;
        const char *xdata, *xval, *xtail, *xvtail;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct mdt_body *body;
        __u32 *xsizes;
        int rc, i;

        rc = ll_xattr_find_get_lock(inode, oit, &req);
        if (rc)
                goto out_no_unlock;

        /* Do we have the data at this point? */
        if (ll_xattr_cache_valid(lli)) {
                ll_stats_ops_tally(sbi, LPROC_LL_GETXATTR_HITS, 1);
                rc = 0;
                goto out_maybe_drop;
        }

        /* Matched but no cache? Cancelled on error by a parallel refill. */
        if (unlikely(!req)) {
                CDEBUG(D_CACHE, "cancelled by a parallel getxattr\n");
                rc = -EIO;
                goto out_maybe_drop;
        }

        if (oit->it_status < 0) {
                CDEBUG(D_CACHE, "getxattr intent returned %d for fid "DFID"\n",
                       oit->it_status, PFID(ll_inode2fid(inode)));
                rc = oit->it_status;
                /* xattr data is so large that we don't want to cache it */
                if (rc == -ERANGE)
                        rc = -EAGAIN;
                goto out_destroy;
        }

        body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
        if (!body) {
                CERROR("no MDT BODY in the refill xattr reply\n");
                rc = -EPROTO;
                goto out_destroy;
        }
        /* no need to swab xattr data */
        xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
                                             body->mbo_eadatasize);
        xval = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS,
                                            body->mbo_aclsize);
        xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS,
                                              body->mbo_max_mdsize * sizeof(__u32));
        if (!xdata || !xval || !xsizes) {
                CERROR("wrong getxattr reply\n");
                rc = -EPROTO;
                goto out_destroy;
        }

        xtail = xdata + body->mbo_eadatasize;
        xvtail = xval + body->mbo_aclsize;

        CDEBUG(D_CACHE, "caching: xdata=%p xtail=%p\n", xdata, xtail);

        ll_xattr_cache_init(lli);

        for (i = 0; i < body->mbo_max_mdsize; i++) {
                CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval);
                /* Perform consistency checks: attr names and vals in pill */
                if (!memchr(xdata, 0, xtail - xdata)) {
                        CERROR("xattr protocol violation (names are broken)\n");
                        rc = -EPROTO;
                } else if (xval + *xsizes > xvtail) {
                        CERROR("xattr protocol violation (vals are broken)\n");
                        rc = -EPROTO;
                } else if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_XATTR_ENOMEM)) {
                        rc = -ENOMEM;
                } else if (!strcmp(xdata, XATTR_NAME_ACL_ACCESS)) {
                        /* Filter out ACL ACCESS since it's cached separately */
                        CDEBUG(D_CACHE, "not caching %s\n",
                               XATTR_NAME_ACL_ACCESS);
                        rc = 0;
                } else {
                        rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval,
                                                *xsizes);
                }
                if (rc < 0) {
                        ll_xattr_cache_destroy_locked(lli);
                        goto out_destroy;
                }
                xdata += strlen(xdata) + 1;
                xval  += *xsizes;
                xsizes++;
        }

        if (xdata != xtail || xval != xvtail)
                CERROR("a hole in xattr data\n");

        ll_set_lock_data(sbi->ll_md_exp, inode, oit, NULL);

        goto out_maybe_drop;
out_maybe_drop:
        ll_intent_drop_lock(oit);

        if (rc != 0)
                up_write(&lli->lli_xattrs_list_rwsem);
out_no_unlock:
        ptlrpc_req_finished(req);

        return rc;

out_destroy:
        up_write(&lli->lli_xattrs_list_rwsem);

        ldlm_lock_decref_and_cancel((struct lustre_handle *)
                                        &oit->it_lock_handle,
                                        oit->it_lock_mode);

        goto out_no_unlock;
}
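
/*
 * Editor's illustration (not from the original source): the reply parsed
 * above carries three parallel buffers, walked in lockstep by the loop:
 *
 *	RMF_EADATA      "user.foo\0user.bar\0..."  (mbo_eadatasize bytes)
 *	RMF_EAVALS      "bar<next value>..."       (mbo_aclsize bytes)
 *	RMF_EAVALS_LENS { 3, <next len>, ... }     (mbo_max_mdsize __u32s)
 *
 * i.e. the i-th NUL-terminated name in RMF_EADATA pairs with the i-th
 * length in RMF_EAVALS_LENS, which in turn delimits its value inside the
 * packed RMF_EAVALS blob.
 */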

/**
 * Get an xattr value or list xattrs using the write-through cache.
 *
 * Get the xattr value (@valid has OBD_MD_FLXATTR set) of @name or
 * list xattr names (@valid has OBD_MD_FLXATTRLS set) for @inode.
 * The resulting value/list is stored in @buffer if it is not
 * larger than @size.
 *
 * \retval 0        no error occurred
 * \retval -EPROTO  network protocol error
 * \retval -ENOMEM  not enough memory for the cache
 * \retval -ERANGE  the buffer is not large enough
 * \retval -ENODATA no such attr or the list is empty
 */
int ll_xattr_cache_get(struct inode *inode, const char *name, char *buffer,
                       size_t size, __u64 valid)
{
        struct lookup_intent oit = { .it_op = IT_GETXATTR };
        struct ll_inode_info *lli = ll_i2info(inode);
        int rc = 0;

        LASSERT(!!(valid & OBD_MD_FLXATTR) ^ !!(valid & OBD_MD_FLXATTRLS));

        down_read(&lli->lli_xattrs_list_rwsem);
        if (!ll_xattr_cache_valid(lli)) {
                up_read(&lli->lli_xattrs_list_rwsem);
                rc = ll_xattr_cache_refill(inode, &oit);
                if (rc)
                        return rc;
                /* A successful refill returns with lli_xattrs_list_rwsem
                 * held for write; downgrade it for the lookup below.
                 */
                downgrade_write(&lli->lli_xattrs_list_rwsem);
        } else {
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR_HITS, 1);
        }

        if (valid & OBD_MD_FLXATTR) {
                struct ll_xattr_entry *xattr;

                rc = ll_xattr_cache_find(&lli->lli_xattrs, name, &xattr);
                if (rc == 0) {
                        rc = xattr->xe_vallen;
                        /* A zero @size means the caller only wants the
                         * value size returned in rc.
                         */
                        if (size != 0) {
                                if (size >= xattr->xe_vallen)
                                        memcpy(buffer, xattr->xe_value,
                                               xattr->xe_vallen);
                                else
                                        rc = -ERANGE;
                        }
                }
        } else if (valid & OBD_MD_FLXATTRLS) {
                rc = ll_xattr_cache_list(&lli->lli_xattrs,
                                         size ? buffer : NULL, size);
        }

        goto out;
out:
        up_read(&lli->lli_xattrs_list_rwsem);

        return rc;
}
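
/*
 * Usage sketch (editor's illustration; the actual callers live in llite's
 * xattr code): a getxattr handler would typically query the value size
 * first and then fetch it into a buffer of that size:
 *
 *	rc = ll_xattr_cache_get(inode, name, NULL, 0, OBD_MD_FLXATTR);
 *	if (rc > 0) {
 *		buf = kmalloc(rc, GFP_NOFS);
 *		if (buf)
 *			rc = ll_xattr_cache_get(inode, name, buf, rc,
 *						OBD_MD_FLXATTR);
 *	}
 *
 * while a listxattr handler passes OBD_MD_FLXATTRLS instead; exactly one
 * of the two flags must be set, as the LASSERT above enforces.
 */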