linux/drivers/staging/lustre/lustre/obdclass/lu_object.c
   1/*
   2 * GPL HEADER START
   3 *
   4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 only,
   8 * as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful, but
  11 * WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  13 * General Public License version 2 for more details (a copy is included
  14 * in the LICENSE file that accompanied this code).
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * version 2 along with this program; If not, see
  18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
  19 *
  20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  21 * CA 95054 USA or visit www.sun.com if you need additional information or
  22 * have any questions.
  23 *
  24 * GPL HEADER END
  25 */
  26/*
  27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  28 * Use is subject to license terms.
  29 *
  30 * Copyright (c) 2011, 2012, Intel Corporation.
  31 */
  32/*
  33 * This file is part of Lustre, http://www.lustre.org/
  34 * Lustre is a trademark of Sun Microsystems, Inc.
  35 *
  36 * lustre/obdclass/lu_object.c
  37 *
  38 * Lustre Object.
  39 * These are the only exported functions; they provide some generic
  40 * infrastructure for managing object devices.
  41 *
  42 *   Author: Nikita Danilov <nikita.danilov@sun.com>
  43 */
  44
  45#define DEBUG_SUBSYSTEM S_CLASS
  46
  47#include <linux/libcfs/libcfs.h>
  48
  49#include <linux/module.h>
  50
  51/* hash_long() */
  52#include <linux/libcfs/libcfs_hash.h>
  53#include <obd_class.h>
  54#include <obd_support.h>
  55#include <lustre_disk.h>
  56#include <lustre_fid.h>
  57#include <lu_object.h>
  58#include <lu_ref.h>
  59#include <linux/list.h>
  60
  61static void lu_object_free(const struct lu_env *env, struct lu_object *o);
  62
  63/**
  64 * Decrease reference counter on object. If last reference is freed, return
  65 * object to the cache, unless lu_object_is_dying(o) holds. In the latter
  66 * case, free object immediately.
  67 */
  68void lu_object_put(const struct lu_env *env, struct lu_object *o)
  69{
  70        struct lu_site_bkt_data *bkt;
  71        struct lu_object_header *top;
  72        struct lu_site    *site;
  73        struct lu_object        *orig;
  74        cfs_hash_bd_t       bd;
  75        const struct lu_fid     *fid;
  76
  77        top  = o->lo_header;
  78        site = o->lo_dev->ld_site;
  79        orig = o;
  80
  81        /*
  82         * Until fids-on-OST is fully implemented, anonymous objects are
  83         * possible in OSP. Such an object is not listed in the site, so
  84         * we should not remove it from the site.
  85         */
  86        fid = lu_object_fid(o);
  87        if (fid_is_zero(fid)) {
  88                LASSERT(top->loh_hash.next == NULL
  89                        && top->loh_hash.pprev == NULL);
  90                LASSERT(list_empty(&top->loh_lru));
  91                if (!atomic_dec_and_test(&top->loh_ref))
  92                        return;
  93                list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
  94                        if (o->lo_ops->loo_object_release != NULL)
  95                                o->lo_ops->loo_object_release(env, o);
  96                }
  97                lu_object_free(env, orig);
  98                return;
  99        }
 100
 101        cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
 102        bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
 103
 104        if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
 105                if (lu_object_is_dying(top)) {
 106
 107                        /*
 108                         * Somebody may be waiting for this; currently this is
 109                         * only used for cl_object, see cl_object_put_last().
 110                         */
 111                        wake_up_all(&bkt->lsb_marche_funebre);
 112                }
 113                return;
 114        }
 115
 116        LASSERT(bkt->lsb_busy > 0);
 117        bkt->lsb_busy--;
 118        /*
 119         * When the last reference is released, iterate over object
 120         * layers and notify them that the object is no longer busy.
 121         */
 122        list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
 123                if (o->lo_ops->loo_object_release != NULL)
 124                        o->lo_ops->loo_object_release(env, o);
 125        }
 126
 127        if (!lu_object_is_dying(top)) {
 128                LASSERT(list_empty(&top->loh_lru));
 129                list_add_tail(&top->loh_lru, &bkt->lsb_lru);
 130                cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
 131                return;
 132        }
 133
 134        /*
 135         * If the object is dying (will not be cached), remove it
 136         * from the hash table and LRU.
 137         *
 138         * This is done with the hash table and LRU lists locked. As the
 139         * only ways to acquire the first reference to a previously
 140         * unreferenced object are hash-table lookup (lu_object_find())
 141         * and LRU scanning (lu_site_purge()), both done under the
 142         * hash-table and LRU lock, no race with a concurrent lookup is
 143         * possible and we can safely destroy the object below.
 144         */
 145        if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags))
 146                cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
 147        cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
 148        /*
 149         * The object was already removed from the hash and LRU above,
 150         * so it can be killed now.
 151         */
 152        lu_object_free(env, orig);
 153}
 154EXPORT_SYMBOL(lu_object_put);
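/*
 * Usage sketch (illustrative, not part of the original file): the reference
 * obtained from lu_object_find() is dropped with lu_object_put() once the
 * caller is done with the object, e.g. assuming the caller already holds a
 * set-up lu_env:
 *
 *      struct lu_object *o;
 *
 *      o = lu_object_find(env, dev, fid, NULL);
 *      if (IS_ERR(o))
 *              return PTR_ERR(o);
 *      ...use the object...
 *      lu_object_put(env, o);
 *
 * If LU_OBJECT_HEARD_BANSHEE was set (see lu_object_put_nocache() and
 * lu_object_unhash() below), the final put frees the object instead of
 * returning it to the cache.
 */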
 155
 156/**
 157 * Put an object without keeping it in the cache. This is a temporary
 158 * solution for multi-site objects whose layering is not constant.
 159 */
 160void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o)
 161{
 162        set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
 163        return lu_object_put(env, o);
 164}
 165EXPORT_SYMBOL(lu_object_put_nocache);
 166
 167/**
 168 * Kill the object and take it out of the LRU cache.
 169 * Currently used by client code for layout change.
 170 */
 171void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
 172{
 173        struct lu_object_header *top;
 174
 175        top = o->lo_header;
 176        set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
 177        if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
 178                cfs_hash_t *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
 179                cfs_hash_bd_t bd;
 180
 181                cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
 182                list_del_init(&top->loh_lru);
 183                cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
 184                cfs_hash_bd_unlock(obj_hash, &bd, 1);
 185        }
 186}
 187EXPORT_SYMBOL(lu_object_unhash);
 188
 189/**
 190 * Allocate new object.
 191 *
 192 * This follows the object creation protocol described in the comment
 193 * within the struct lu_device_operations definition.
 194 */
 195static struct lu_object *lu_object_alloc(const struct lu_env *env,
 196                                         struct lu_device *dev,
 197                                         const struct lu_fid *f,
 198                                         const struct lu_object_conf *conf)
 199{
 200        struct lu_object *scan;
 201        struct lu_object *top;
 202        struct list_head *layers;
 203        int clean;
 204        int result;
 205        ENTRY;
 206
 207        /*
 208         * Create top-level object slice. This will also create
 209         * lu_object_header.
 210         */
 211        top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
 212        if (top == NULL)
 213                RETURN(ERR_PTR(-ENOMEM));
 214        if (IS_ERR(top))
 215                RETURN(top);
 216        /*
 217         * This is the only place where object fid is assigned. It's constant
 218         * after this point.
 219         */
 220        top->lo_header->loh_fid = *f;
 221        layers = &top->lo_header->loh_layers;
 222        do {
 223                /*
 224                 * Call ->loo_object_init() repeatedly, until no more new
 225                 * object slices are created.
 226                 */
 227                clean = 1;
 228                list_for_each_entry(scan, layers, lo_linkage) {
 229                        if (scan->lo_flags & LU_OBJECT_ALLOCATED)
 230                                continue;
 231                        clean = 0;
 232                        scan->lo_header = top->lo_header;
 233                        result = scan->lo_ops->loo_object_init(env, scan, conf);
 234                        if (result != 0) {
 235                                lu_object_free(env, top);
 236                                RETURN(ERR_PTR(result));
 237                        }
 238                        scan->lo_flags |= LU_OBJECT_ALLOCATED;
 239                }
 240        } while (!clean);
 241
 242        list_for_each_entry_reverse(scan, layers, lo_linkage) {
 243                if (scan->lo_ops->loo_object_start != NULL) {
 244                        result = scan->lo_ops->loo_object_start(env, scan);
 245                        if (result != 0) {
 246                                lu_object_free(env, top);
 247                                RETURN(ERR_PTR(result));
 248                        }
 249                }
 250        }
 251
 252        lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
 253        RETURN(top);
 254}
 255
 256/**
 257 * Free an object.
 258 */
 259static void lu_object_free(const struct lu_env *env, struct lu_object *o)
 260{
 261        struct lu_site_bkt_data *bkt;
 262        struct lu_site    *site;
 263        struct lu_object        *scan;
 264        struct list_head              *layers;
 265        struct list_head               splice;
 266
 267        site   = o->lo_dev->ld_site;
 268        layers = &o->lo_header->loh_layers;
 269        bkt    = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);
 270        /*
 271         * First call ->loo_object_delete() method to release all resources.
 272         */
 273        list_for_each_entry_reverse(scan, layers, lo_linkage) {
 274                if (scan->lo_ops->loo_object_delete != NULL)
 275                        scan->lo_ops->loo_object_delete(env, scan);
 276        }
 277
 278        /*
 279         * Then, splice object layers into stand-alone list, and call
 280         * ->loo_object_free() on all layers to free memory. Splice is
 281         * necessary, because lu_object_header is freed together with the
 282         * top-level slice.
 283         */
 284        INIT_LIST_HEAD(&splice);
 285        list_splice_init(layers, &splice);
 286        while (!list_empty(&splice)) {
 287                /*
 288                 * Free layers in bottom-to-top order, so that object header
 289                 * lives as long as possible and ->loo_object_free() methods
 290                 * can look at its contents.
 291                 */
 292                o = container_of0(splice.prev, struct lu_object, lo_linkage);
 293                list_del_init(&o->lo_linkage);
 294                LASSERT(o->lo_ops->loo_object_free != NULL);
 295                o->lo_ops->loo_object_free(env, o);
 296        }
 297
 298        if (waitqueue_active(&bkt->lsb_marche_funebre))
 299                wake_up_all(&bkt->lsb_marche_funebre);
 300}
 301
 302/**
 303 * Free \a nr objects from the cold end of the site LRU list.
 304 */
 305int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
 306{
 307        struct lu_object_header *h;
 308        struct lu_object_header *temp;
 309        struct lu_site_bkt_data *bkt;
 310        cfs_hash_bd_t       bd;
 311        cfs_hash_bd_t       bd2;
 312        struct list_head               dispose;
 313        int                   did_sth;
 314        int                   start;
 315        int                   count;
 316        int                   bnr;
 317        int                   i;
 318
 319        if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
 320                RETURN(0);
 321
 322        INIT_LIST_HEAD(&dispose);
 323        /*
 324         * Under LRU list lock, scan LRU list and move unreferenced objects to
 325         * the dispose list, removing them from LRU and hash table.
 326         */
 327        start = s->ls_purge_start;
 328        bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
 329 again:
 330        did_sth = 0;
 331        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
 332                if (i < start)
 333                        continue;
 334                count = bnr;
 335                cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
 336                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
 337
 338                list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
 339                        LASSERT(atomic_read(&h->loh_ref) == 0);
 340
 341                        cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
 342                        LASSERT(bd.bd_bucket == bd2.bd_bucket);
 343
 344                        cfs_hash_bd_del_locked(s->ls_obj_hash,
 345                                               &bd2, &h->loh_hash);
 346                        list_move(&h->loh_lru, &dispose);
 347                        if (did_sth == 0)
 348                                did_sth = 1;
 349
 350                        if (nr != ~0 && --nr == 0)
 351                                break;
 352
 353                        if (count > 0 && --count == 0)
 354                                break;
 355
 356                }
 357                cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
 358                cond_resched();
 359                /*
 360                 * Free everything on the dispose list. This is safe against
 361                 * races due to the reasons described in lu_object_put().
 362                 */
 363                while (!list_empty(&dispose)) {
 364                        h = container_of0(dispose.next,
 365                                          struct lu_object_header, loh_lru);
 366                        list_del_init(&h->loh_lru);
 367                        lu_object_free(env, lu_object_top(h));
 368                        lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
 369                }
 370
 371                if (nr == 0)
 372                        break;
 373        }
 374
 375        if (nr != 0 && did_sth && start != 0) {
 376                start = 0; /* restart from the first bucket */
 377                goto again;
 378        }
 379        /* race on s->ls_purge_start, but nobody cares */
 380        s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);
 381
 382        return nr;
 383}
 384EXPORT_SYMBOL(lu_site_purge);
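/*
 * Usage sketch (illustrative, not part of the original file): passing ~0
 * requests a full purge, as lu_stack_fini() does below; a memory shrinker
 * would instead pass the number of objects it wants freed and look at how
 * much of the request is left over in the return value, e.g.
 *
 *      remain = lu_site_purge(env, site, nr_to_scan);
 *
 * "remain" and "nr_to_scan" are placeholder names.
 */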
 385
 386/*
 387 * Object printing.
 388 *
 389 * The code below has to jump through certain hoops to output an object
 390 * description into a libcfs_debug_msg-based log. The problem is that
 391 * lu_object_print() composes the object description from strings that are
 392 * parts of _lines_ of output (i.e., strings that are not terminated by a
 393 * newline). This does not fit well into the libcfs_debug_msg() interface,
 394 * which assumes that each message supplied to it is a self-contained
 395 * output line.
 396 *
 397 * To work around this, strings are collected in a temporary buffer
 398 * (implemented as the value of lu_global_key), until a terminating
 399 * newline character is detected.
 400 */
 401
 402enum {
 403        /**
 404         * Maximal line size.
 405         *
 406         * XXX overflow is not handled correctly.
 407         */
 408        LU_CDEBUG_LINE = 512
 409};
 410
 411struct lu_cdebug_data {
 412        /**
 413         * Temporary buffer.
 414         */
 415        char lck_area[LU_CDEBUG_LINE];
 416};
 417
 418/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
 419LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);
 420
 421/**
 422 * Key holding the temporary buffer. This key is registered very early by
 423 * lu_global_init().
 424 */
 425struct lu_context_key lu_global_key = {
 426        .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
 427                    LCT_MG_THREAD | LCT_CL_THREAD,
 428        .lct_init = lu_global_key_init,
 429        .lct_fini = lu_global_key_fini
 430};
 431
 432/**
 433 * Printer function emitting messages through libcfs_debug_msg().
 434 */
 435int lu_cdebug_printer(const struct lu_env *env,
 436                      void *cookie, const char *format, ...)
 437{
 438        struct libcfs_debug_msg_data *msgdata = cookie;
 439        struct lu_cdebug_data   *key;
 440        int used;
 441        int complete;
 442        va_list args;
 443
 444        va_start(args, format);
 445
 446        key = lu_context_key_get(&env->le_ctx, &lu_global_key);
 447        LASSERT(key != NULL);
 448
 449        used = strlen(key->lck_area);
 450        complete = format[strlen(format) - 1] == '\n';
 451        /*
 452         * Append new chunk to the buffer.
 453         */
 454        vsnprintf(key->lck_area + used,
 455                  ARRAY_SIZE(key->lck_area) - used, format, args);
 456        if (complete) {
 457                if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
 458                        libcfs_debug_msg(msgdata, "%s", key->lck_area);
 459                key->lck_area[0] = 0;
 460        }
 461        va_end(args);
 462        return 0;
 463}
 464EXPORT_SYMBOL(lu_cdebug_printer);
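/*
 * Usage sketch (illustrative, not part of the original file): this printer
 * is meant to be passed as the \a printer argument of lu_object_print()
 * together with a struct libcfs_debug_msg_data cookie, e.g. (assuming the
 * LIBCFS_DEBUG_MSG_DATA_DECL() helper from libcfs):
 *
 *      LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_INFO, NULL);
 *
 *      lu_object_print(env, &msgdata, lu_cdebug_printer, obj);
 *
 * Each complete line accumulated in the lu_global_key buffer is then emitted
 * through libcfs_debug_msg().
 */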
 465
 466/**
 467 * Print object header.
 468 */
 469void lu_object_header_print(const struct lu_env *env, void *cookie,
 470                            lu_printer_t printer,
 471                            const struct lu_object_header *hdr)
 472{
 473        (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
 474                   hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
 475                   PFID(&hdr->loh_fid),
 476                   hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
 477                   list_empty((struct list_head *)&hdr->loh_lru) ? \
 478                   "" : " lru",
 479                   hdr->loh_attr & LOHA_EXISTS ? " exist":"");
 480}
 481EXPORT_SYMBOL(lu_object_header_print);
 482
 483/**
 484 * Print a human-readable representation of \a o to \a printer.
 485 */
 486void lu_object_print(const struct lu_env *env, void *cookie,
 487                     lu_printer_t printer, const struct lu_object *o)
 488{
 489        static const char ruler[] = "........................................";
 490        struct lu_object_header *top;
 491        int depth;
 492
 493        top = o->lo_header;
 494        lu_object_header_print(env, cookie, printer, top);
 495        (*printer)(env, cookie, "{ \n");
 496        list_for_each_entry(o, &top->loh_layers, lo_linkage) {
 497                depth = o->lo_depth + 4;
 498
 499                /*
 500                 * print `.' \a depth times followed by type name and address
 501                 */
 502                (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
 503                           o->lo_dev->ld_type->ldt_name, o);
 504                if (o->lo_ops->loo_object_print != NULL)
 505                        o->lo_ops->loo_object_print(env, cookie, printer, o);
 506                (*printer)(env, cookie, "\n");
 507        }
 508        (*printer)(env, cookie, "} header@%p\n", top);
 509}
 510EXPORT_SYMBOL(lu_object_print);
 511
 512/**
 513 * Check object consistency.
 514 */
 515int lu_object_invariant(const struct lu_object *o)
 516{
 517        struct lu_object_header *top;
 518
 519        top = o->lo_header;
 520        list_for_each_entry(o, &top->loh_layers, lo_linkage) {
 521                if (o->lo_ops->loo_object_invariant != NULL &&
 522                    !o->lo_ops->loo_object_invariant(o))
 523                        return 0;
 524        }
 525        return 1;
 526}
 527EXPORT_SYMBOL(lu_object_invariant);
 528
 529static struct lu_object *htable_lookup(struct lu_site *s,
 530                                       cfs_hash_bd_t *bd,
 531                                       const struct lu_fid *f,
 532                                       wait_queue_t *waiter,
 533                                       __u64 *version)
 534{
 535        struct lu_site_bkt_data *bkt;
 536        struct lu_object_header *h;
 537        struct hlist_node       *hnode;
 538        __u64  ver = cfs_hash_bd_version_get(bd);
 539
 540        if (*version == ver)
 541                return NULL;
 542
 543        *version = ver;
 544        bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
 545        /* cfs_hash_bd_peek_locked() is a somewhat "internal" function
 546         * of cfs_hash; it does not take a refcount on the object. */
 547        hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
 548        if (hnode == NULL) {
 549                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
 550                return NULL;
 551        }
 552
 553        h = container_of0(hnode, struct lu_object_header, loh_hash);
 554        if (likely(!lu_object_is_dying(h))) {
 555                cfs_hash_get(s->ls_obj_hash, hnode);
 556                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
 557                list_del_init(&h->loh_lru);
 558                return lu_object_top(h);
 559        }
 560
 561        /*
 562         * Lookup found an object being destroyed; this object cannot be
 563         * returned (to assure that references to dying objects are eventually
 564         * drained), and moreover, the lookup has to wait until the object is freed.
 565         */
 566
 567        init_waitqueue_entry_current(waiter);
 568        add_wait_queue(&bkt->lsb_marche_funebre, waiter);
 569        set_current_state(TASK_UNINTERRUPTIBLE);
 570        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
 571        return ERR_PTR(-EAGAIN);
 572}
 573
 574/**
 575 * Search the cache for an object with fid \a f. If such an object is
 576 * found, return it. Otherwise, create a new object, insert it into the
 577 * cache, and return it. Either way, an additional reference is acquired on it.
 578 */
 579struct lu_object *lu_object_find(const struct lu_env *env,
 580                                 struct lu_device *dev, const struct lu_fid *f,
 581                                 const struct lu_object_conf *conf)
 582{
 583        return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
 584}
 585EXPORT_SYMBOL(lu_object_find);
 586
 587static struct lu_object *lu_object_new(const struct lu_env *env,
 588                                       struct lu_device *dev,
 589                                       const struct lu_fid *f,
 590                                       const struct lu_object_conf *conf)
 591{
 592        struct lu_object        *o;
 593        cfs_hash_t            *hs;
 594        cfs_hash_bd_t       bd;
 595        struct lu_site_bkt_data *bkt;
 596
 597        o = lu_object_alloc(env, dev, f, conf);
 598        if (unlikely(IS_ERR(o)))
 599                return o;
 600
 601        hs = dev->ld_site->ls_obj_hash;
 602        cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
 603        bkt = cfs_hash_bd_extra_get(hs, &bd);
 604        cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
 605        bkt->lsb_busy++;
 606        cfs_hash_bd_unlock(hs, &bd, 1);
 607        return o;
 608}
 609
 610/**
 611 * Core logic of lu_object_find*() functions.
 612 */
 613static struct lu_object *lu_object_find_try(const struct lu_env *env,
 614                                            struct lu_device *dev,
 615                                            const struct lu_fid *f,
 616                                            const struct lu_object_conf *conf,
 617                                            wait_queue_t *waiter)
 618{
 619        struct lu_object      *o;
 620        struct lu_object      *shadow;
 621        struct lu_site  *s;
 622        cfs_hash_t          *hs;
 623        cfs_hash_bd_t     bd;
 624        __u64             version = 0;
 625
 626        /*
 627         * This uses the standard index maintenance protocol:
 628         *
 629         *     - search the index under lock, and return the object if found;
 630         *     - otherwise, unlock the index and allocate a new object;
 631         *     - lock the index and search again;
 632         *     - if nothing is found (the usual case), insert the newly
 633         *       created object into the index;
 634         *     - otherwise (race: another thread inserted the object), free
 635         *       the object just allocated;
 636         *     - unlock the index;
 637         *     - return the object.
 638         *
 639         * For the "LOC_F_NEW" case we know the object is newly created.
 640         * It is unnecessary to perform lookup-alloc-lookup-insert; instead,
 641         * just allocate and insert directly.
 642         *
 643         * If a dying object is found during the index search, add @waiter
 644         * to the site wait-queue and return ERR_PTR(-EAGAIN).
 645         */
 646        if (conf != NULL && conf->loc_flags & LOC_F_NEW)
 647                return lu_object_new(env, dev, f, conf);
 648
 649        s  = dev->ld_site;
 650        hs = s->ls_obj_hash;
 651        cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
 652        o = htable_lookup(s, &bd, f, waiter, &version);
 653        cfs_hash_bd_unlock(hs, &bd, 1);
 654        if (o != NULL)
 655                return o;
 656
 657        /*
 658         * Allocate new object. This may result in rather complicated
 659         * operations, including fld queries, inode loading, etc.
 660         */
 661        o = lu_object_alloc(env, dev, f, conf);
 662        if (unlikely(IS_ERR(o)))
 663                return o;
 664
 665        LASSERT(lu_fid_eq(lu_object_fid(o), f));
 666
 667        cfs_hash_bd_lock(hs, &bd, 1);
 668
 669        shadow = htable_lookup(s, &bd, f, waiter, &version);
 670        if (likely(shadow == NULL)) {
 671                struct lu_site_bkt_data *bkt;
 672
 673                bkt = cfs_hash_bd_extra_get(hs, &bd);
 674                cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
 675                bkt->lsb_busy++;
 676                cfs_hash_bd_unlock(hs, &bd, 1);
 677                return o;
 678        }
 679
 680        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
 681        cfs_hash_bd_unlock(hs, &bd, 1);
 682        lu_object_free(env, o);
 683        return shadow;
 684}
 685
 686/**
 687 * Much like lu_object_find(), but the top-level device of the object is
 688 * specifically \a dev rather than the top-level device of the site. This
 689 * allows objects of different "stacking" to be created within the same site.
 690 */
 691struct lu_object *lu_object_find_at(const struct lu_env *env,
 692                                    struct lu_device *dev,
 693                                    const struct lu_fid *f,
 694                                    const struct lu_object_conf *conf)
 695{
 696        struct lu_site_bkt_data *bkt;
 697        struct lu_object        *obj;
 698        wait_queue_t       wait;
 699
 700        while (1) {
 701                obj = lu_object_find_try(env, dev, f, conf, &wait);
 702                if (obj != ERR_PTR(-EAGAIN))
 703                        return obj;
 704                /*
 705                 * lu_object_find_try() already added the waiter to the
 706                 * wait queue.
 707                 */
 708                waitq_wait(&wait, TASK_UNINTERRUPTIBLE);
 709                bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
 710                remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
 711        }
 712}
 713EXPORT_SYMBOL(lu_object_find_at);
 714
 715/**
 716 * Find object with given fid, and return its slice belonging to given device.
 717 */
 718struct lu_object *lu_object_find_slice(const struct lu_env *env,
 719                                       struct lu_device *dev,
 720                                       const struct lu_fid *f,
 721                                       const struct lu_object_conf *conf)
 722{
 723        struct lu_object *top;
 724        struct lu_object *obj;
 725
 726        top = lu_object_find(env, dev, f, conf);
 727        if (!IS_ERR(top)) {
 728                obj = lu_object_locate(top->lo_header, dev->ld_type);
 729                if (obj == NULL)
 730                        lu_object_put(env, top);
 731        } else
 732                obj = top;
 733        return obj;
 734}
 735EXPORT_SYMBOL(lu_object_find_slice);
 736
 737/**
 738 * Global list of all device types.
 739 */
 740static LIST_HEAD(lu_device_types);
 741
 742int lu_device_type_init(struct lu_device_type *ldt)
 743{
 744        int result = 0;
 745
 746        INIT_LIST_HEAD(&ldt->ldt_linkage);
 747        if (ldt->ldt_ops->ldto_init)
 748                result = ldt->ldt_ops->ldto_init(ldt);
 749        if (result == 0)
 750                list_add(&ldt->ldt_linkage, &lu_device_types);
 751        return result;
 752}
 753EXPORT_SYMBOL(lu_device_type_init);
 754
 755void lu_device_type_fini(struct lu_device_type *ldt)
 756{
 757        list_del_init(&ldt->ldt_linkage);
 758        if (ldt->ldt_ops->ldto_fini)
 759                ldt->ldt_ops->ldto_fini(ldt);
 760}
 761EXPORT_SYMBOL(lu_device_type_fini);
 762
 763void lu_types_stop(void)
 764{
 765        struct lu_device_type *ldt;
 766
 767        list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
 768                if (ldt->ldt_device_nr == 0 && ldt->ldt_ops->ldto_stop)
 769                        ldt->ldt_ops->ldto_stop(ldt);
 770        }
 771}
 772EXPORT_SYMBOL(lu_types_stop);
 773
 774/**
 775 * Global list of all sites on this node
 776 */
 777static LIST_HEAD(lu_sites);
 778static DEFINE_MUTEX(lu_sites_guard);
 779
 780/**
 781 * Global environment used by site shrinker.
 782 */
 783static struct lu_env lu_shrink_env;
 784
 785struct lu_site_print_arg {
 786        struct lu_env   *lsp_env;
 787        void        *lsp_cookie;
 788        lu_printer_t     lsp_printer;
 789};
 790
 791static int
 792lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 793                  struct hlist_node *hnode, void *data)
 794{
 795        struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
 796        struct lu_object_header  *h;
 797
 798        h = hlist_entry(hnode, struct lu_object_header, loh_hash);
 799        if (!list_empty(&h->loh_layers)) {
 800                const struct lu_object *o;
 801
 802                o = lu_object_top(h);
 803                lu_object_print(arg->lsp_env, arg->lsp_cookie,
 804                                arg->lsp_printer, o);
 805        } else {
 806                lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
 807                                       arg->lsp_printer, h);
 808        }
 809        return 0;
 810}
 811
 812/**
 813 * Print all objects in \a s.
 814 */
 815void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
 816                   lu_printer_t printer)
 817{
 818        struct lu_site_print_arg arg = {
 819                .lsp_env     = (struct lu_env *)env,
 820                .lsp_cookie  = cookie,
 821                .lsp_printer = printer,
 822        };
 823
 824        cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
 825}
 826EXPORT_SYMBOL(lu_site_print);
 827
 828enum {
 829        LU_CACHE_PERCENT_MAX     = 50,
 830        LU_CACHE_PERCENT_DEFAULT = 20
 831};
 832
 833static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
 834CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
 835                "Percentage of memory to be used as lu_object cache");
 836
 837/**
 838 * Return desired hash table order.
 839 */
 840static int lu_htable_order(void)
 841{
 842        unsigned long cache_size;
 843        int bits;
 844
 845        /*
 846         * Calculate the hash table size, assuming that we want reasonable
 847         * performance when 20% of total memory is occupied by the cache of
 848         * lu_objects.
 849         *
 850         * The size of an lu_object is (arbitrarily) taken as 1K (including the inode).
 851         */
 852        cache_size = num_physpages;
 853
 854#if BITS_PER_LONG == 32
 855        /* limit hashtable size for lowmem systems to low RAM */
 856        if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
 857                cache_size = (1 << (30 - PAGE_CACHE_SHIFT)) * 3 / 4;
 858#endif
 859
 860        /* reset an unreasonable cache setting to the default. */
 861        if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
 862                CWARN("obdclass: invalid lu_cache_percent: %u, it must be in"
 863                      " the range of (0, %u]. Will use default value: %u.\n",
 864                      lu_cache_percent, LU_CACHE_PERCENT_MAX,
 865                      LU_CACHE_PERCENT_DEFAULT);
 866
 867                lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
 868        }
 869        cache_size = cache_size / 100 * lu_cache_percent *
 870                (PAGE_CACHE_SIZE / 1024);
 871
 872        for (bits = 1; (1 << bits) < cache_size; ++bits) {
 873                ;
 874        }
 875        return bits;
 876}
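/*
 * Worked example (illustrative, not part of the original file): on a 64-bit
 * node with 4 GiB of RAM and 4 KiB pages, num_physpages is about 1048576.
 * With the default lu_cache_percent of 20:
 *
 *      cache_size = 1048576 / 100 * 20 * (4096 / 1024) = 838800
 *
 * and the loop above stops at bits = 20, since 2^20 = 1048576 >= 838800.
 * lu_site_init() then clamps the result between LU_SITE_BITS_MIN and
 * LU_SITE_BITS_MAX.
 */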
 877
 878static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
 879                                const void *key, unsigned mask)
 880{
 881        struct lu_fid  *fid = (struct lu_fid *)key;
 882        __u32      hash;
 883
 884        hash = fid_flatten32(fid);
 885        hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
 886        hash = cfs_hash_long(hash, hs->hs_bkt_bits);
 887
 888        /* give me another random factor */
 889        hash -= cfs_hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);
 890
 891        hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
 892        hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);
 893
 894        return hash & mask;
 895}
 896
 897static void *lu_obj_hop_object(struct hlist_node *hnode)
 898{
 899        return hlist_entry(hnode, struct lu_object_header, loh_hash);
 900}
 901
 902static void *lu_obj_hop_key(struct hlist_node *hnode)
 903{
 904        struct lu_object_header *h;
 905
 906        h = hlist_entry(hnode, struct lu_object_header, loh_hash);
 907        return &h->loh_fid;
 908}
 909
 910static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
 911{
 912        struct lu_object_header *h;
 913
 914        h = hlist_entry(hnode, struct lu_object_header, loh_hash);
 915        return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
 916}
 917
 918static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
 919{
 920        struct lu_object_header *h;
 921
 922        h = hlist_entry(hnode, struct lu_object_header, loh_hash);
 923        if (atomic_add_return(1, &h->loh_ref) == 1) {
 924                struct lu_site_bkt_data *bkt;
 925                cfs_hash_bd_t       bd;
 926
 927                cfs_hash_bd_get(hs, &h->loh_fid, &bd);
 928                bkt = cfs_hash_bd_extra_get(hs, &bd);
 929                bkt->lsb_busy++;
 930        }
 931}
 932
 933static void lu_obj_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
 934{
 935        LBUG(); /* this should never be called */
 936}
 937
 938cfs_hash_ops_t lu_site_hash_ops = {
 939        .hs_hash        = lu_obj_hop_hash,
 940        .hs_key  = lu_obj_hop_key,
 941        .hs_keycmp      = lu_obj_hop_keycmp,
 942        .hs_object      = lu_obj_hop_object,
 943        .hs_get  = lu_obj_hop_get,
 944        .hs_put_locked  = lu_obj_hop_put_locked,
 945};
 946
 947void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
 948{
 949        spin_lock(&s->ls_ld_lock);
 950        if (list_empty(&d->ld_linkage))
 951                list_add(&d->ld_linkage, &s->ls_ld_linkage);
 952        spin_unlock(&s->ls_ld_lock);
 953}
 954EXPORT_SYMBOL(lu_dev_add_linkage);
 955
 956void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d)
 957{
 958        spin_lock(&s->ls_ld_lock);
 959        list_del_init(&d->ld_linkage);
 960        spin_unlock(&s->ls_ld_lock);
 961}
 962EXPORT_SYMBOL(lu_dev_del_linkage);
 963
 964/**
 965 * Initialize site \a s, with \a top as the top-level device.
 966 */
 967#define LU_SITE_BITS_MIN    12
 968#define LU_SITE_BITS_MAX    24
 969/**
 970 * 256 buckets in total; we don't want too many buckets because they:
 971 * - consume too much memory
 972 * - lead to unbalanced LRU lists
 973 */
 974#define LU_SITE_BKT_BITS    8
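/*
 * Illustrative numbers (not part of the original file): with, say, 20 hash
 * bits from lu_htable_order(), cfs_hash_create() below is asked for
 * bkt_bits = 20 - LU_SITE_BKT_BITS = 12; judging by how lu_obj_hop_hash()
 * above splits the hash value, that yields 2^(20 - 12) = 256 buckets, matching
 * the comment above, with each bucket covering 2^12 hash slots.
 */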
 975
 976int lu_site_init(struct lu_site *s, struct lu_device *top)
 977{
 978        struct lu_site_bkt_data *bkt;
 979        cfs_hash_bd_t bd;
 980        char name[16];
 981        int bits;
 982        int i;
 983        ENTRY;
 984
 985        memset(s, 0, sizeof *s);
 986        bits = lu_htable_order();
 987        snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
 988        for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
 989             bits >= LU_SITE_BITS_MIN; bits--) {
 990                s->ls_obj_hash = cfs_hash_create(name, bits, bits,
 991                                                 bits - LU_SITE_BKT_BITS,
 992                                                 sizeof(*bkt), 0, 0,
 993                                                 &lu_site_hash_ops,
 994                                                 CFS_HASH_SPIN_BKTLOCK |
 995                                                 CFS_HASH_NO_ITEMREF |
 996                                                 CFS_HASH_DEPTH |
 997                                                 CFS_HASH_ASSERT_EMPTY);
 998                if (s->ls_obj_hash != NULL)
 999                        break;
1000        }
1001
1002        if (s->ls_obj_hash == NULL) {
1003                CERROR("failed to create lu_site hash with bits: %d\n", bits);
1004                return -ENOMEM;
1005        }
1006
1007        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
1008                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
1009                INIT_LIST_HEAD(&bkt->lsb_lru);
1010                init_waitqueue_head(&bkt->lsb_marche_funebre);
1011        }
1012
1013        s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
1014        if (s->ls_stats == NULL) {
1015                cfs_hash_putref(s->ls_obj_hash);
1016                s->ls_obj_hash = NULL;
1017                return -ENOMEM;
1018        }
1019
1020        lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
1021                             0, "created", "created");
1022        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
1023                             0, "cache_hit", "cache_hit");
1024        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
1025                             0, "cache_miss", "cache_miss");
1026        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
1027                             0, "cache_race", "cache_race");
1028        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
1029                             0, "cache_death_race", "cache_death_race");
1030        lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
1031                             0, "lru_purged", "lru_purged");
1032
1033        INIT_LIST_HEAD(&s->ls_linkage);
1034        s->ls_top_dev = top;
1035        top->ld_site = s;
1036        lu_device_get(top);
1037        lu_ref_add(&top->ld_reference, "site-top", s);
1038
1039        INIT_LIST_HEAD(&s->ls_ld_linkage);
1040        spin_lock_init(&s->ls_ld_lock);
1041
1042        lu_dev_add_linkage(s, top);
1043
1044        RETURN(0);
1045}
1046EXPORT_SYMBOL(lu_site_init);
1047
1048/**
1049 * Finalize \a s and release its resources.
1050 */
1051void lu_site_fini(struct lu_site *s)
1052{
1053        mutex_lock(&lu_sites_guard);
1054        list_del_init(&s->ls_linkage);
1055        mutex_unlock(&lu_sites_guard);
1056
1057        if (s->ls_obj_hash != NULL) {
1058                cfs_hash_putref(s->ls_obj_hash);
1059                s->ls_obj_hash = NULL;
1060        }
1061
1062        if (s->ls_top_dev != NULL) {
1063                s->ls_top_dev->ld_site = NULL;
1064                lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
1065                lu_device_put(s->ls_top_dev);
1066                s->ls_top_dev = NULL;
1067        }
1068
1069        if (s->ls_stats != NULL)
1070                lprocfs_free_stats(&s->ls_stats);
1071}
1072EXPORT_SYMBOL(lu_site_fini);
1073
1074/**
1075 * Called when initialization of stack for this site is completed.
1076 */
1077int lu_site_init_finish(struct lu_site *s)
1078{
1079        int result;
1080        mutex_lock(&lu_sites_guard);
1081        result = lu_context_refill(&lu_shrink_env.le_ctx);
1082        if (result == 0)
1083                list_add(&s->ls_linkage, &lu_sites);
1084        mutex_unlock(&lu_sites_guard);
1085        return result;
1086}
1087EXPORT_SYMBOL(lu_site_init_finish);
1088
1089/**
1090 * Acquire additional reference on device \a d
1091 */
1092void lu_device_get(struct lu_device *d)
1093{
1094        atomic_inc(&d->ld_ref);
1095}
1096EXPORT_SYMBOL(lu_device_get);
1097
1098/**
1099 * Release reference on device \a d.
1100 */
1101void lu_device_put(struct lu_device *d)
1102{
1103        LASSERT(atomic_read(&d->ld_ref) > 0);
1104        atomic_dec(&d->ld_ref);
1105}
1106EXPORT_SYMBOL(lu_device_put);
1107
1108/**
1109 * Initialize device \a d of type \a t.
1110 */
1111int lu_device_init(struct lu_device *d, struct lu_device_type *t)
1112{
1113        if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
1114                t->ldt_ops->ldto_start(t);
1115        memset(d, 0, sizeof *d);
1116        atomic_set(&d->ld_ref, 0);
1117        d->ld_type = t;
1118        lu_ref_init(&d->ld_reference);
1119        INIT_LIST_HEAD(&d->ld_linkage);
1120        return 0;
1121}
1122EXPORT_SYMBOL(lu_device_init);
1123
1124/**
1125 * Finalize device \a d.
1126 */
1127void lu_device_fini(struct lu_device *d)
1128{
1129        struct lu_device_type *t;
1130
1131        t = d->ld_type;
1132        if (d->ld_obd != NULL) {
1133                d->ld_obd->obd_lu_dev = NULL;
1134                d->ld_obd = NULL;
1135        }
1136
1137        lu_ref_fini(&d->ld_reference);
1138        LASSERTF(atomic_read(&d->ld_ref) == 0,
1139                 "Refcount is %u\n", atomic_read(&d->ld_ref));
1140        LASSERT(t->ldt_device_nr > 0);
1141        if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
1142                t->ldt_ops->ldto_stop(t);
1143}
1144EXPORT_SYMBOL(lu_device_fini);
1145
1146/**
1147 * Initialize object \a o that is part of compound object \a h and was created
1148 * by device \a d.
1149 */
1150int lu_object_init(struct lu_object *o,
1151                   struct lu_object_header *h, struct lu_device *d)
1152{
1153        memset(o, 0, sizeof *o);
1154        o->lo_header = h;
1155        o->lo_dev    = d;
1156        lu_device_get(d);
1157        o->lo_dev_ref = lu_ref_add(&d->ld_reference, "lu_object", o);
1158        INIT_LIST_HEAD(&o->lo_linkage);
1159        return 0;
1160}
1161EXPORT_SYMBOL(lu_object_init);
1162
1163/**
1164 * Finalize object and release its resources.
1165 */
1166void lu_object_fini(struct lu_object *o)
1167{
1168        struct lu_device *dev = o->lo_dev;
1169
1170        LASSERT(list_empty(&o->lo_linkage));
1171
1172        if (dev != NULL) {
1173                lu_ref_del_at(&dev->ld_reference,
1174                              o->lo_dev_ref , "lu_object", o);
1175                lu_device_put(dev);
1176                o->lo_dev = NULL;
1177        }
1178}
1179EXPORT_SYMBOL(lu_object_fini);
1180
1181/**
1182 * Add object \a o as first layer of compound object \a h
1183 *
1184 * This is typically called by the ->ldo_object_alloc() method of top-level
1185 * device.
1186 */
1187void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
1188{
1189        list_move(&o->lo_linkage, &h->loh_layers);
1190}
1191EXPORT_SYMBOL(lu_object_add_top);
1192
1193/**
1194 * Add object \a o as a layer of compound object, going after \a before.
1195 *
1196 * This is typically called by the ->ldo_object_alloc() method of \a
1197 * before->lo_dev.
1198 */
1199void lu_object_add(struct lu_object *before, struct lu_object *o)
1200{
1201        list_move(&o->lo_linkage, &before->lo_linkage);
1202}
1203EXPORT_SYMBOL(lu_object_add);
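/*
 * Layering sketch (illustrative, not part of the original file): during
 * lu_object_alloc() the top-level device's ->ldo_object_alloc() links its
 * slice with lu_object_add_top(), and each lower device's
 * ->ldo_object_alloc() links the slice it creates after the one above it
 * with lu_object_add(), roughly:
 *
 *      lu_object_add_top(h, top_slice);
 *      ...
 *      lu_object_add(upper_slice, lower_slice);
 *
 * "top_slice", "upper_slice" and "lower_slice" are placeholder names; the
 * loop in lu_object_alloc() keeps calling ->loo_object_init() until no new
 * slices appear.
 */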
1204
1205/**
1206 * Initialize compound object.
1207 */
1208int lu_object_header_init(struct lu_object_header *h)
1209{
1210        memset(h, 0, sizeof *h);
1211        atomic_set(&h->loh_ref, 1);
1212        INIT_HLIST_NODE(&h->loh_hash);
1213        INIT_LIST_HEAD(&h->loh_lru);
1214        INIT_LIST_HEAD(&h->loh_layers);
1215        lu_ref_init(&h->loh_reference);
1216        return 0;
1217}
1218EXPORT_SYMBOL(lu_object_header_init);
1219
1220/**
1221 * Finalize compound object.
1222 */
1223void lu_object_header_fini(struct lu_object_header *h)
1224{
1225        LASSERT(list_empty(&h->loh_layers));
1226        LASSERT(list_empty(&h->loh_lru));
1227        LASSERT(hlist_unhashed(&h->loh_hash));
1228        lu_ref_fini(&h->loh_reference);
1229}
1230EXPORT_SYMBOL(lu_object_header_fini);
1231
1232/**
1233 * Given a compound object, find its slice, corresponding to the device type
1234 * \a dtype.
1235 */
1236struct lu_object *lu_object_locate(struct lu_object_header *h,
1237                                   const struct lu_device_type *dtype)
1238{
1239        struct lu_object *o;
1240
1241        list_for_each_entry(o, &h->loh_layers, lo_linkage) {
1242                if (o->lo_dev->ld_type == dtype)
1243                        return o;
1244        }
1245        return NULL;
1246}
1247EXPORT_SYMBOL(lu_object_locate);
1248
1249
1250
1251/**
1252 * Finalize and free devices in the device stack.
1253 *
1254 * Finalize the device stack by purging the object cache and calling
1255 * lu_device_type_operations::ldto_device_fini() and
1256 * lu_device_type_operations::ldto_device_free() on all devices in the stack.
1257 */
1258void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
1259{
1260        struct lu_site   *site = top->ld_site;
1261        struct lu_device *scan;
1262        struct lu_device *next;
1263
1264        lu_site_purge(env, site, ~0);
1265        for (scan = top; scan != NULL; scan = next) {
1266                next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
1267                lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
1268                lu_device_put(scan);
1269        }
1270
1271        /* purge again. */
1272        lu_site_purge(env, site, ~0);
1273
1274        for (scan = top; scan != NULL; scan = next) {
1275                const struct lu_device_type *ldt = scan->ld_type;
1276                struct obd_type      *type;
1277
1278                next = ldt->ldt_ops->ldto_device_free(env, scan);
1279                type = ldt->ldt_obd_type;
1280                if (type != NULL) {
1281                        type->typ_refcnt--;
1282                        class_put_type(type);
1283                }
1284        }
1285}
1286EXPORT_SYMBOL(lu_stack_fini);
1287
1288enum {
1289        /**
1290         * Maximal number of tld slots.
1291         */
1292        LU_CONTEXT_KEY_NR = 40
1293};
1294
1295static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
1296
1297static DEFINE_SPINLOCK(lu_keys_guard);
1298
1299/**
1300 * Global counter incremented whenever a key is registered, unregistered,
1301 * revived or quiesced. This is used to avoid unnecessary calls to
1302 * lu_context_refill(). No locking is provided, as initialization and shutdown
1303 * are supposed to be externally serialized.
1304 */
1305static unsigned key_set_version = 0;
1306
1307/**
1308 * Register new key.
1309 */
1310int lu_context_key_register(struct lu_context_key *key)
1311{
1312        int result;
1313        int i;
1314
1315        LASSERT(key->lct_init != NULL);
1316        LASSERT(key->lct_fini != NULL);
1317        LASSERT(key->lct_tags != 0);
1318        LASSERT(key->lct_owner != NULL);
1319
1320        result = -ENFILE;
1321        spin_lock(&lu_keys_guard);
1322        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1323                if (lu_keys[i] == NULL) {
1324                        key->lct_index = i;
1325                        atomic_set(&key->lct_used, 1);
1326                        lu_keys[i] = key;
1327                        lu_ref_init(&key->lct_reference);
1328                        result = 0;
1329                        ++key_set_version;
1330                        break;
1331                }
1332        }
1333        spin_unlock(&lu_keys_guard);
1334        return result;
1335}
1336EXPORT_SYMBOL(lu_context_key_register);
1337
1338static void key_fini(struct lu_context *ctx, int index)
1339{
1340        if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
1341                struct lu_context_key *key;
1342
1343                key = lu_keys[index];
1344                LASSERT(key != NULL);
1345                LASSERT(key->lct_fini != NULL);
1346                LASSERT(atomic_read(&key->lct_used) > 1);
1347
1348                key->lct_fini(ctx, key, ctx->lc_value[index]);
1349                lu_ref_del(&key->lct_reference, "ctx", ctx);
1350                atomic_dec(&key->lct_used);
1351
1352                LASSERT(key->lct_owner != NULL);
1353                if ((ctx->lc_tags & LCT_NOREF) == 0) {
1354#ifdef CONFIG_MODULE_UNLOAD
1355                        LINVRNT(module_refcount(key->lct_owner) > 0);
1356#endif
1357                        module_put(key->lct_owner);
1358                }
1359                ctx->lc_value[index] = NULL;
1360        }
1361}
1362
1363/**
1364 * Deregister key.
1365 */
1366void lu_context_key_degister(struct lu_context_key *key)
1367{
1368        LASSERT(atomic_read(&key->lct_used) >= 1);
1369        LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
1370
1371        lu_context_key_quiesce(key);
1372
1373        ++key_set_version;
1374        spin_lock(&lu_keys_guard);
1375        key_fini(&lu_shrink_env.le_ctx, key->lct_index);
1376        if (lu_keys[key->lct_index]) {
1377                lu_keys[key->lct_index] = NULL;
1378                lu_ref_fini(&key->lct_reference);
1379        }
1380        spin_unlock(&lu_keys_guard);
1381
1382        LASSERTF(atomic_read(&key->lct_used) == 1,
1383                 "key has instances: %d\n",
1384                 atomic_read(&key->lct_used));
1385}
1386EXPORT_SYMBOL(lu_context_key_degister);
1387
1388/**
1389 * Register a number of keys. This has to be called after all keys have been
1390 * initialized by a call to LU_CONTEXT_KEY_INIT().
1391 */
1392int lu_context_key_register_many(struct lu_context_key *k, ...)
1393{
1394        struct lu_context_key *key = k;
1395        va_list args;
1396        int result;
1397
1398        va_start(args, k);
1399        do {
1400                result = lu_context_key_register(key);
1401                if (result)
1402                        break;
1403                key = va_arg(args, struct lu_context_key *);
1404        } while (key != NULL);
1405        va_end(args);
1406
1407        if (result != 0) {
1408                va_start(args, k);
1409                while (k != key) {
1410                        lu_context_key_degister(k);
1411                        k = va_arg(args, struct lu_context_key *);
1412                }
1413                va_end(args);
1414        }
1415
1416        return result;
1417}
1418EXPORT_SYMBOL(lu_context_key_register_many);
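/*
 * Usage sketch (illustrative, not part of the original file): after each
 * key's owner has been set up with LU_CONTEXT_KEY_INIT(), a module can
 * register all of its keys in one NULL-terminated call, e.g.
 *
 *      rc = lu_context_key_register_many(&foo_thread_key,
 *                                        &foo_session_key, NULL);
 *
 * "foo_thread_key" and "foo_session_key" are placeholder names; on failure
 * the keys registered so far are degistered again, so no cleanup is needed
 * by the caller.
 */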
1419
1420/**
1421 * De-register a number of keys. This is the dual of
1422 * lu_context_key_register_many().
1423 */
1424void lu_context_key_degister_many(struct lu_context_key *k, ...)
1425{
1426        va_list args;
1427
1428        va_start(args, k);
1429        do {
1430                lu_context_key_degister(k);
1431                k = va_arg(args, struct lu_context_key*);
1432        } while (k != NULL);
1433        va_end(args);
1434}
1435EXPORT_SYMBOL(lu_context_key_degister_many);
1436
1437/**
1438 * Revive a number of keys.
1439 */
1440void lu_context_key_revive_many(struct lu_context_key *k, ...)
1441{
1442        va_list args;
1443
1444        va_start(args, k);
1445        do {
1446                lu_context_key_revive(k);
1447                k = va_arg(args, struct lu_context_key*);
1448        } while (k != NULL);
1449        va_end(args);
1450}
1451EXPORT_SYMBOL(lu_context_key_revive_many);
1452
1453/**
1454 * Quiesce a number of keys.
1455 */
1456void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
1457{
1458        va_list args;
1459
1460        va_start(args, k);
1461        do {
1462                lu_context_key_quiesce(k);
1463                k = va_arg(args, struct lu_context_key*);
1464        } while (k != NULL);
1465        va_end(args);
1466}
1467EXPORT_SYMBOL(lu_context_key_quiesce_many);
1468
1469/**
1470 * Return value associated with key \a key in context \a ctx.
1471 */
1472void *lu_context_key_get(const struct lu_context *ctx,
1473                         const struct lu_context_key *key)
1474{
1475        LINVRNT(ctx->lc_state == LCS_ENTERED);
1476        LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
1477        LASSERT(lu_keys[key->lct_index] == key);
1478        return ctx->lc_value[key->lct_index];
1479}
1480EXPORT_SYMBOL(lu_context_key_get);
1481
1482/**
1483 * List of remembered contexts. XXX document me.
1484 */
1485static LIST_HEAD(lu_context_remembered);
1486
1487/**
1488 * Destroy \a key in all remembered contexts. This is used to destroy key
1489 * values in "shared" contexts (like service threads), when the module owning
1490 * the key is about to be unloaded.
1491 */
1492void lu_context_key_quiesce(struct lu_context_key *key)
1493{
1494        struct lu_context *ctx;
1495
1496        if (!(key->lct_tags & LCT_QUIESCENT)) {
1497                /*
1498                 * XXX layering violation.
1499                 */
1500                key->lct_tags |= LCT_QUIESCENT;
1501                /*
1502                 * XXX memory barrier has to go here.
1503                 */
1504                spin_lock(&lu_keys_guard);
1505                list_for_each_entry(ctx, &lu_context_remembered,
1506                                        lc_remember)
1507                        key_fini(ctx, key->lct_index);
1508                spin_unlock(&lu_keys_guard);
1509                ++key_set_version;
1510        }
1511}
1512EXPORT_SYMBOL(lu_context_key_quiesce);
1513
1514void lu_context_key_revive(struct lu_context_key *key)
1515{
1516        key->lct_tags &= ~LCT_QUIESCENT;
1517        ++key_set_version;
1518}
1519EXPORT_SYMBOL(lu_context_key_revive);
1520
1521static void keys_fini(struct lu_context *ctx)
1522{
1523        int     i;
1524
1525        if (ctx->lc_value == NULL)
1526                return;
1527
1528        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
1529                key_fini(ctx, i);
1530
1531        OBD_FREE(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
1532        ctx->lc_value = NULL;
1533}
1534
1535static int keys_fill(struct lu_context *ctx)
1536{
1537        int i;
1538
1539        LINVRNT(ctx->lc_value != NULL);
1540        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1541                struct lu_context_key *key;
1542
1543                key = lu_keys[i];
1544                if (ctx->lc_value[i] == NULL && key != NULL &&
1545                    (key->lct_tags & ctx->lc_tags) &&
1546                    /*
1547                     * Don't create values for an LCT_QUIESCENT key, as this
1548                     * would pin the module owning the key.
1549                     */
1550                    !(key->lct_tags & LCT_QUIESCENT)) {
1551                        void *value;
1552
1553                        LINVRNT(key->lct_init != NULL);
1554                        LINVRNT(key->lct_index == i);
1555
1556                        value = key->lct_init(ctx, key);
1557                        if (unlikely(IS_ERR(value)))
1558                                return PTR_ERR(value);
1559
1560                        LASSERT(key->lct_owner != NULL);
1561                        if (!(ctx->lc_tags & LCT_NOREF))
1562                                try_module_get(key->lct_owner);
1563                        lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
1564                        atomic_inc(&key->lct_used);
1565                        /*
1566                         * This is the only place in the code where an
1567                         * element of the ctx->lc_value[] array is set to a
1568                         * non-NULL value.
1569                         */
1570                        ctx->lc_value[i] = value;
1571                        if (key->lct_exit != NULL)
1572                                ctx->lc_tags |= LCT_HAS_EXIT;
1573                }
1574                ctx->lc_version = key_set_version;
1575        }
1576        return 0;
1577}
1578
1579static int keys_init(struct lu_context *ctx)
1580{
1581        OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
1582        if (likely(ctx->lc_value != NULL))
1583                return keys_fill(ctx);
1584
1585        return -ENOMEM;
1586}
1587
1588/**
1589 * Initialize context data-structure. Create values for all keys.
1590 */
1591int lu_context_init(struct lu_context *ctx, __u32 tags)
1592{
1593        int     rc;
1594
1595        memset(ctx, 0, sizeof *ctx);
1596        ctx->lc_state = LCS_INITIALIZED;
1597        ctx->lc_tags = tags;
1598        if (tags & LCT_REMEMBER) {
1599                spin_lock(&lu_keys_guard);
1600                list_add(&ctx->lc_remember, &lu_context_remembered);
1601                spin_unlock(&lu_keys_guard);
1602        } else {
1603                INIT_LIST_HEAD(&ctx->lc_remember);
1604        }
1605
1606        rc = keys_init(ctx);
1607        if (rc != 0)
1608                lu_context_fini(ctx);
1609
1610        return rc;
1611}
1612EXPORT_SYMBOL(lu_context_init);
1613
1614/**
1615 * Finalize context data-structure. Destroy key values.
1616 */
1617void lu_context_fini(struct lu_context *ctx)
1618{
1619        LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1620        ctx->lc_state = LCS_FINALIZED;
1621
1622        if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
1623                LASSERT(list_empty(&ctx->lc_remember));
1624                keys_fini(ctx);
1625
1626        } else { /* could race with key degister */
1627                spin_lock(&lu_keys_guard);
1628                keys_fini(ctx);
1629                list_del_init(&ctx->lc_remember);
1630                spin_unlock(&lu_keys_guard);
1631        }
1632}
1633EXPORT_SYMBOL(lu_context_fini);
1634
1635/**
1636 * Called before entering context.
1637 */
1638void lu_context_enter(struct lu_context *ctx)
1639{
1640        LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1641        ctx->lc_state = LCS_ENTERED;
1642}
1643EXPORT_SYMBOL(lu_context_enter);
1644
1645/**
1646 * Called after exiting from \a ctx.
1647 */
1648void lu_context_exit(struct lu_context *ctx)
1649{
1650        int i;
1651
1652        LINVRNT(ctx->lc_state == LCS_ENTERED);
1653        ctx->lc_state = LCS_LEFT;
1654        if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
1655                for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1656                        if (ctx->lc_value[i] != NULL) {
1657                                struct lu_context_key *key;
1658
1659                                key = lu_keys[i];
1660                                LASSERT(key != NULL);
1661                                if (key->lct_exit != NULL)
1662                                        key->lct_exit(ctx,
1663                                                      key, ctx->lc_value[i]);
1664                        }
1665                }
1666        }
1667}
1668EXPORT_SYMBOL(lu_context_exit);
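/*
 * Lifecycle sketch (illustrative only): the expected call sequence around the
 * enter/exit pair above, as used by a long-lived service thread.  Error
 * handling is abbreviated; the tag combination is just an example.
 *
 *	struct lu_context ctx;
 *	int rc;
 *
 *	rc = lu_context_init(&ctx, LCT_REMEMBER | LCT_NOREF);
 *	if (rc == 0) {
 *		lu_context_enter(&ctx);		// LCS_ENTERED: values usable
 *		// ... lu_context_key_get(&ctx, ...) may be called here ...
 *		lu_context_exit(&ctx);		// runs ->lct_exit() callbacks
 *		lu_context_fini(&ctx);		// destroys key values
 *	}
 */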
1669
1670/**
1671 * Allocate values in the context for all missing keys that were registered
1672 * after the context was created. key_set_version only changes in the rare
1673 * cases when modules are loaded or removed.
1674 */
1675int lu_context_refill(struct lu_context *ctx)
1676{
1677        return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx);
1678}
1679EXPORT_SYMBOL(lu_context_refill);
1680
1681/**
1682 * lu_context_tags_default/lu_session_tags_default are updated when new
1683 * types of obd devices are added. Currently this is only used on the client
1684 * side, specifically by the echo device client. For other stacks (such as
1685 * ptlrpc service threads) the contexts are predefined when the lu_device
1686 * type is registered, during the module probe phase.
1687 */
1688__u32 lu_context_tags_default = 0;
1689__u32 lu_session_tags_default = 0;
1690
1691void lu_context_tags_update(__u32 tags)
1692{
1693        spin_lock(&lu_keys_guard);
1694        lu_context_tags_default |= tags;
1695        key_set_version++;
1696        spin_unlock(&lu_keys_guard);
1697}
1698EXPORT_SYMBOL(lu_context_tags_update);
1699
1700void lu_context_tags_clear(__u32 tags)
1701{
1702        spin_lock(&lu_keys_guard);
1703        lu_context_tags_default &= ~tags;
1704        key_set_version++;
1705        spin_unlock(&lu_keys_guard);
1706}
1707EXPORT_SYMBOL(lu_context_tags_clear);
1708
1709void lu_session_tags_update(__u32 tags)
1710{
1711        spin_lock(&lu_keys_guard);
1712        lu_session_tags_default |= tags;
1713        key_set_version++;
1714        spin_unlock(&lu_keys_guard);
1715}
1716EXPORT_SYMBOL(lu_session_tags_update);
1717
1718void lu_session_tags_clear(__u32 tags)
1719{
1720        spin_lock(&lu_keys_guard);
1721        lu_session_tags_default &= ~tags;
1722        key_set_version++;
1723        spin_unlock(&lu_keys_guard);
1724}
1725EXPORT_SYMBOL(lu_session_tags_clear);
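/*
 * Illustrative sketch (not in the original source): how a client-side user
 * such as the echo client might widen the default tags before creating its
 * environments and undo the change on cleanup.  LCT_DT_THREAD/LCT_SESSION
 * are assumed standard Lustre tags; substitute whatever the stack requires.
 *
 *	lu_context_tags_update(LCT_DT_THREAD);
 *	lu_session_tags_update(LCT_SESSION);
 *	// ... environments created or refilled from now on carry the tags ...
 *	lu_session_tags_clear(LCT_SESSION);
 *	lu_context_tags_clear(LCT_DT_THREAD);
 */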
1726
1727int lu_env_init(struct lu_env *env, __u32 tags)
1728{
1729        int result;
1730
1731        env->le_ses = NULL;
1732        result = lu_context_init(&env->le_ctx, tags);
1733        if (likely(result == 0))
1734                lu_context_enter(&env->le_ctx);
1735        return result;
1736}
1737EXPORT_SYMBOL(lu_env_init);
1738
1739void lu_env_fini(struct lu_env *env)
1740{
1741        lu_context_exit(&env->le_ctx);
1742        lu_context_fini(&env->le_ctx);
1743        env->le_ses = NULL;
1744}
1745EXPORT_SYMBOL(lu_env_fini);
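/*
 * Usage sketch (illustrative only): the minimal lu_env lifecycle built from
 * the two helpers above.  LCT_SHRINKER is the tag used for lu_shrink_env
 * later in this file; any appropriate tag set could be used instead.
 *
 *	struct lu_env env;
 *	int rc;
 *
 *	rc = lu_env_init(&env, LCT_SHRINKER);	// init + enter le_ctx
 *	if (rc == 0) {
 *		// ... operate on objects through &env ...
 *		lu_env_fini(&env);		// exit + fini le_ctx
 *	}
 */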
1746
1747int lu_env_refill(struct lu_env *env)
1748{
1749        int result;
1750
1751        result = lu_context_refill(&env->le_ctx);
1752        if (result == 0 && env->le_ses != NULL)
1753                result = lu_context_refill(env->le_ses);
1754        return result;
1755}
1756EXPORT_SYMBOL(lu_env_refill);
1757
1758/**
1759 * Currently this API is only used by the echo client.
1760 * Because the echo client and the normal Lustre client share
1761 * the same cl_env cache, the echo client needs to refresh
1762 * the env context after it gets one from the cache, especially
1763 * when the normal client and the echo client co-exist on the same node.
1764 */
1765int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
1766                          __u32 stags)
1767{
1768        int    result;
1769
1770        if ((env->le_ctx.lc_tags & ctags) != ctags) {
1771                env->le_ctx.lc_version = 0;
1772                env->le_ctx.lc_tags |= ctags;
1773        }
1774
1775        if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) {
1776                env->le_ses->lc_version = 0;
1777                env->le_ses->lc_tags |= stags;
1778        }
1779
1780        result = lu_env_refill(env);
1781
1782        return result;
1783}
1784EXPORT_SYMBOL(lu_env_refill_by_tags);
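/*
 * Illustrative sketch (not part of the original code): refreshing a cached
 * environment as described in the comment above.  The tags are examples only
 * (LCT_DT_THREAD/LCT_SESSION are assumed standard Lustre tags).
 *
 *	int rc;
 *
 *	rc = lu_env_refill_by_tags(env, LCT_DT_THREAD, LCT_SESSION);
 *	if (rc != 0)
 *		return rc;	// env lacks key values required by this stack
 */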
1785
1786static struct shrinker *lu_site_shrinker = NULL;
1787
1788typedef struct lu_site_stats{
1789        unsigned        lss_populated;
1790        unsigned        lss_max_search;
1791        unsigned        lss_total;
1792        unsigned        lss_busy;
1793} lu_site_stats_t;
1794
1795static void lu_site_stats_get(cfs_hash_t *hs,
1796                              lu_site_stats_t *stats, int populated)
1797{
1798        cfs_hash_bd_t bd;
1799        int        i;
1800
1801        cfs_hash_for_each_bucket(hs, &bd, i) {
1802                struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
1803                struct hlist_head       *hhead;
1804
1805                cfs_hash_bd_lock(hs, &bd, 1);
1806                stats->lss_busy  += bkt->lsb_busy;
1807                stats->lss_total += cfs_hash_bd_count_get(&bd);
1808                stats->lss_max_search = max((int)stats->lss_max_search,
1809                                            cfs_hash_bd_depmax_get(&bd));
1810                if (!populated) {
1811                        cfs_hash_bd_unlock(hs, &bd, 1);
1812                        continue;
1813                }
1814
1815                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1816                        if (!hlist_empty(hhead))
1817                                stats->lss_populated++;
1818                }
1819                cfs_hash_bd_unlock(hs, &bd, 1);
1820        }
1821}
1822
1823
1824/*
1825 * There exists a potential lock inversion deadlock scenario when using
1826 * Lustre on top of ZFS. It occurs between one of ZFS's
1827 * buf_hash_table.ht_lock locks and Lustre's lu_sites_guard lock. Essentially,
1828 * thread A takes the lu_sites_guard lock and sleeps on the ht_lock,
1829 * while thread B takes the ht_lock and sleeps on the lu_sites_guard
1830 * lock. Neither thread will ever wake and drop its respective hold
1831 * on its lock.
1832 *
1833 * To prevent this from happening we must ensure the lu_sites_guard lock is
1834 * not taken anywhere in this code path. ZFS reliably does not set the
1835 * __GFP_FS bit in its code paths, so this can be used to determine if it
1836 * is safe to take the lu_sites_guard lock.
1837 *
1838 * Ideally we should accurately return the remaining number of cached
1839 * objects without taking the lu_sites_guard lock, but this is not
1840 * possible in the current implementation.
1841 */
1842static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
1843{
1844        lu_site_stats_t stats;
1845        struct lu_site *s;
1846        struct lu_site *tmp;
1847        int cached = 0;
1848        int remain = shrink_param(sc, nr_to_scan);
1849        LIST_HEAD(splice);
1850
1851        if (!(shrink_param(sc, gfp_mask) & __GFP_FS)) {
1852                if (remain != 0)
1853                        return -1;
1854                else
1855                        /* We must not take the lu_sites_guard lock when
1856                         * __GFP_FS is *not* set because of the deadlock
1857                         * possibility detailed above. Additionally,
1858                         * since we cannot determine the number of
1859                         * objects in the cache without taking this
1860                         * lock, we're in a particularly tough spot. As
1861                         * a result, we'll just lie and say our cache is
1862                         * empty. This _should_ be ok, as we can't
1863                         * reclaim objects when __GFP_FS is *not* set
1864                         * anyway.
1865                         */
1866                        return 0;
1867        }
1868
1869        CDEBUG(D_INODE, "Shrink %d objects\n", remain);
1870
1871        mutex_lock(&lu_sites_guard);
1872        list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
1873                if (shrink_param(sc, nr_to_scan) != 0) {
1874                        remain = lu_site_purge(&lu_shrink_env, s, remain);
1875                        /*
1876                         * Move the just-shrunk site to the tail of the site
1877                         * list to ensure shrinking fairness.
1878                         */
1879                        list_move_tail(&s->ls_linkage, &splice);
1880                }
1881
1882                memset(&stats, 0, sizeof(stats));
1883                lu_site_stats_get(s->ls_obj_hash, &stats, 0);
1884                cached += stats.lss_total - stats.lss_busy;
1885                if (shrink_param(sc, nr_to_scan) && remain <= 0)
1886                        break;
1887        }
1888        list_splice(&splice, lu_sites.prev);
1889        mutex_unlock(&lu_sites_guard);
1890
1891        cached = (cached / 100) * sysctl_vfs_cache_pressure;
1892        if (shrink_param(sc, nr_to_scan) == 0)
1893                CDEBUG(D_INODE, "%d objects cached\n", cached);
1894        return cached;
1895}
1896
1897/*
1898 * Debugging stuff.
1899 */
1900
1901/**
1902 * Environment to be used in a debugger; contains all tags.
1903 */
1904struct lu_env lu_debugging_env;
1905
1906/**
1907 * Debugging printer function using printk().
1908 */
1909int lu_printk_printer(const struct lu_env *env,
1910                      void *unused, const char *format, ...)
1911{
1912        va_list args;
1913
1914        va_start(args, format);
1915        vprintk(format, args);
1916        va_end(args);
1917        return 0;
1918}
1919
1920/**
1921 * Initialization of global lu_* data.
1922 */
1923int lu_global_init(void)
1924{
1925        int result;
1926
1927        CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
1928
1929        result = lu_ref_global_init();
1930        if (result != 0)
1931                return result;
1932
1933        LU_CONTEXT_KEY_INIT(&lu_global_key);
1934        result = lu_context_key_register(&lu_global_key);
1935        if (result != 0)
1936                return result;
1937
1938        /*
1939         * At this level, we don't know what tags are needed, so allocate them
1940         * conservatively. This should not be too bad, because this
1941         * environment is global.
1942         */
1943        mutex_lock(&lu_sites_guard);
1944        result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
1945        mutex_unlock(&lu_sites_guard);
1946        if (result != 0)
1947                return result;
1948
1949        /*
1950         * Seek estimation: 3 seeks to read a record from the OI, one to read
1951         * the inode, one for the EA. Unfortunately, setting such a high value
1952         * results in the lu_object/inode cache consuming all the memory.
1953         */
1954        lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink);
1955        if (lu_site_shrinker == NULL)
1956                return -ENOMEM;
1957
1958        return result;
1959}
1960
1961/**
1962 * Dual to lu_global_init().
1963 */
1964void lu_global_fini(void)
1965{
1966        if (lu_site_shrinker != NULL) {
1967                remove_shrinker(lu_site_shrinker);
1968                lu_site_shrinker = NULL;
1969        }
1970
1971        lu_context_key_degister(&lu_global_key);
1972
1973        /*
1974         * Tear shrinker environment down _after_ de-registering
1975         * lu_global_key, because the latter has a value in the former.
1976         */
1977        mutex_lock(&lu_sites_guard);
1978        lu_env_fini(&lu_shrink_env);
1979        mutex_unlock(&lu_sites_guard);
1980
1981        lu_ref_global_fini();
1982}
1983
1984static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
1985{
1986#ifdef LPROCFS
1987        struct lprocfs_counter ret;
1988
1989        lprocfs_stats_collect(stats, idx, &ret);
1990        return (__u32)ret.lc_count;
1991#else
1992        return 0;
1993#endif
1994}
1995
1996/**
1997 * Output site statistical counters to the given seq_file. Suitable for
1998 * lprocfs_rd_*()-style functions.
1999 */
2000int lu_site_stats_print(const struct lu_site *s, struct seq_file *m)
2001{
2002        lu_site_stats_t stats;
2003
2004        memset(&stats, 0, sizeof(stats));
2005        lu_site_stats_get(s->ls_obj_hash, &stats, 1);
2006
2007        return seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
2008                        stats.lss_busy,
2009                        stats.lss_total,
2010                        stats.lss_populated,
2011                        CFS_HASH_NHLIST(s->ls_obj_hash),
2012                        stats.lss_max_search,
2013                        ls_stats_read(s->ls_stats, LU_SS_CREATED),
2014                        ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
2015                        ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
2016                        ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
2017                        ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
2018                        ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
2019}
2020EXPORT_SYMBOL(lu_site_stats_print);
2021
2022/**
2023 * Helper function to initialize a number of kmem slab caches at once.
2024 */
2025int lu_kmem_init(struct lu_kmem_descr *caches)
2026{
2027        int result;
2028        struct lu_kmem_descr *iter = caches;
2029
2030        for (result = 0; iter->ckd_cache != NULL; ++iter) {
2031                *iter->ckd_cache = kmem_cache_create(iter->ckd_name,
2032                                                        iter->ckd_size,
2033                                                        0, 0, NULL);
2034                if (*iter->ckd_cache == NULL) {
2035                        result = -ENOMEM;
2036                        /* free all previously allocated caches */
2037                        lu_kmem_fini(caches);
2038                        break;
2039                }
2040        }
2041        return result;
2042}
2043EXPORT_SYMBOL(lu_kmem_init);
2044
2045/**
2046 * Helper function to finalize a number of kmem slab caches at once. Dual to
2047 * lu_kmem_init().
2048 */
2049void lu_kmem_fini(struct lu_kmem_descr *caches)
2050{
2051        for (; caches->ckd_cache != NULL; ++caches) {
2052                if (*caches->ckd_cache != NULL) {
2053                        kmem_cache_destroy(*caches->ckd_cache);
2054                        *caches->ckd_cache = NULL;
2055                }
2056        }
2057}
2058EXPORT_SYMBOL(lu_kmem_fini);
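/*
 * Usage sketch (illustrative only): lu_kmem_init()/lu_kmem_fini() expect a
 * descriptor array terminated by a NULL ckd_cache pointer.  The cache, name
 * and structure below are hypothetical.
 *
 *	static struct kmem_cache *sample_object_kmem;
 *
 *	static struct lu_kmem_descr sample_caches[] = {
 *		{
 *			.ckd_cache = &sample_object_kmem,
 *			.ckd_name  = "sample_object_kmem",
 *			.ckd_size  = sizeof(struct sample_object)
 *		},
 *		{
 *			.ckd_cache = NULL	// terminator
 *		}
 *	};
 *
 *	rc = lu_kmem_init(sample_caches);	// module init
 *	...
 *	lu_kmem_fini(sample_caches);		// module exit
 */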
2059
2060/**
2061 * Temporary solution to allow assigning the fid in ->do_create()
2062 * until we have fully functional OST fids
2063 */
2064void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
2065                          const struct lu_fid *fid)
2066{
2067        struct lu_site          *s = o->lo_dev->ld_site;
2068        struct lu_fid           *old = &o->lo_header->loh_fid;
2069        struct lu_site_bkt_data *bkt;
2070        struct lu_object        *shadow;
2071        wait_queue_t             waiter;
2072        cfs_hash_t              *hs;
2073        cfs_hash_bd_t            bd;
2074        __u64                    version = 0;
2075
2076        LASSERT(fid_is_zero(old));
2077
2078        hs = s->ls_obj_hash;
2079        cfs_hash_bd_get_and_lock(hs, (void *)fid, &bd, 1);
2080        shadow = htable_lookup(s, &bd, fid, &waiter, &version);
2081        /* supposed to be unique */
2082        LASSERT(shadow == NULL);
2083        *old = *fid;
2084        bkt = cfs_hash_bd_extra_get(hs, &bd);
2085        cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
2086        bkt->lsb_busy++;
2087        cfs_hash_bd_unlock(hs, &bd, 1);
2088}
2089EXPORT_SYMBOL(lu_object_assign_fid);
2090
2091/**
2092 * Allocate an object with a zero (not yet assigned) fid.
2093 * XXX: temporary solution to allow assigning the fid in ->do_create()
2094 *      until we have fully functional OST fids
2095 */
2096struct lu_object *lu_object_anon(const struct lu_env *env,
2097                                 struct lu_device *dev,
2098                                 const struct lu_object_conf *conf)
2099{
2100        struct lu_fid     fid;
2101        struct lu_object *o;
2102
2103        fid_zero(&fid);
2104        o = lu_object_alloc(env, dev, &fid, conf);
2105
2106        return o;
2107}
2108EXPORT_SYMBOL(lu_object_anon);
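/*
 * Illustrative sketch (not in the original source): the intended pairing of
 * the two temporary helpers above.  An OST-side ->do_create() would allocate
 * an anonymous object first and assign the real fid once it is known;
 * "final_fid" is a hypothetical variable.
 *
 *	struct lu_object *o;
 *
 *	o = lu_object_anon(env, dev, conf);		// fid is still zero
 *	if (!IS_ERR(o)) {
 *		// ... determine final_fid, e.g. from OST precreation ...
 *		lu_object_assign_fid(env, o, &final_fid);
 *	}
 */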
2109
2110struct lu_buf LU_BUF_NULL = {
2111        .lb_buf = NULL,
2112        .lb_len = 0
2113};
2114EXPORT_SYMBOL(LU_BUF_NULL);
2115
2116void lu_buf_free(struct lu_buf *buf)
2117{
2118        LASSERT(buf);
2119        if (buf->lb_buf) {
2120                LASSERT(buf->lb_len > 0);
2121                OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
2122                buf->lb_buf = NULL;
2123                buf->lb_len = 0;
2124        }
2125}
2126EXPORT_SYMBOL(lu_buf_free);
2127
2128void lu_buf_alloc(struct lu_buf *buf, int size)
2129{
2130        LASSERT(buf);
2131        LASSERT(buf->lb_buf == NULL);
2132        LASSERT(buf->lb_len == 0);
2133        OBD_ALLOC_LARGE(buf->lb_buf, size);
2134        if (likely(buf->lb_buf))
2135                buf->lb_len = size;
2136}
2137EXPORT_SYMBOL(lu_buf_alloc);
2138
2139void lu_buf_realloc(struct lu_buf *buf, int size)
2140{
2141        lu_buf_free(buf);
2142        lu_buf_alloc(buf, size);
2143}
2144EXPORT_SYMBOL(lu_buf_realloc);
2145
2146struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, int len)
2147{
2148        if (buf->lb_buf == NULL && buf->lb_len == 0)
2149                lu_buf_alloc(buf, len);
2150
2151        if ((len > buf->lb_len) && (buf->lb_buf != NULL))
2152                lu_buf_realloc(buf, len);
2153
2154        return buf;
2155}
2156EXPORT_SYMBOL(lu_buf_check_and_alloc);
2157
2158/**
2159 * Increase the size of \a buf.
2160 * Old data in the buffer is preserved;
2161 * the old buffer remains unchanged on error.
2162 * \retval 0 or -ENOMEM
2163 */
2164int lu_buf_check_and_grow(struct lu_buf *buf, int len)
2165{
2166        char *ptr;
2167
2168        if (len <= buf->lb_len)
2169                return 0;
2170
2171        OBD_ALLOC_LARGE(ptr, len);
2172        if (ptr == NULL)
2173                return -ENOMEM;
2174
2175        /* Free the old buf */
2176        if (buf->lb_buf != NULL) {
2177                memcpy(ptr, buf->lb_buf, buf->lb_len);
2178                OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
2179        }
2180
2181        buf->lb_buf = ptr;
2182        buf->lb_len = len;
2183        return 0;
2184}
2185EXPORT_SYMBOL(lu_buf_check_and_grow);
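/*
 * Usage sketch (illustrative only): a typical lu_buf lifecycle using the
 * helpers above.  Sizes are arbitrary examples.
 *
 *	struct lu_buf buf = { .lb_buf = NULL, .lb_len = 0 };	// == LU_BUF_NULL
 *
 *	lu_buf_check_and_alloc(&buf, 512);	// first allocation
 *	if (buf.lb_buf == NULL)
 *		return -ENOMEM;
 *	// ... fill at most buf.lb_len bytes at buf.lb_buf ...
 *
 *	if (lu_buf_check_and_grow(&buf, 4096) != 0)	// old data preserved
 *		return -ENOMEM;
 *
 *	lu_buf_free(&buf);			// resets lb_buf and lb_len
 */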
2186