linux/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_extent.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

/**
 * This file contains the implementation of the EXTENT lock type.
 *
 * The EXTENT lock type is for locking a contiguous range of values,
 * represented by 64-bit starting and ending offsets (inclusive). There are
 * several extent lock modes, some of which may be mutually incompatible.
 * Extent locks are considered incompatible if their modes are incompatible
 * and their extents intersect.  See the lock mode compatibility matrix in
 * lustre_dlm.h.
 */
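
/*
 * Illustrative sketch (not part of the build): "intersect" here is plain
 * inclusive range overlap, conceptually the same test as the
 * ldlm_extent_overlap() helper used later in this file:
 *
 *	static bool extents_intersect(const struct ldlm_extent *a,
 *				      const struct ldlm_extent *b)
 *	{
 *		return a->start <= b->end && b->start <= a->end;
 *	}
 *
 * So PW locks on [0, 4095] and [4096, 8191] can coexist (no overlap),
 * while a PW lock on [0, 4095] conflicts with a PR lock on [2048, 6143].
 */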

#define DEBUG_SUBSYSTEM S_LDLM
#include "../../include/linux/libcfs/libcfs.h"
#include "../include/lustre_dlm.h"
#include "../include/obd_support.h"
#include "../include/obd.h"
#include "../include/obd_class.h"
#include "../include/lustre_lib.h"
#include "ldlm_internal.h"

/* When a lock is cancelled by a client, the KMS (known minimum size) may
 * change if the cancelled lock was the "highest lock" on the resource.
 * This function returns the new KMS value.
 * The caller must already hold lr_lock.
 *
 * NB: a lock on [x, y] protects a KMS of up to y + 1 bytes!
 */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
	struct ldlm_resource *res = lock->l_resource;
	struct list_head *tmp;
	struct ldlm_lock *lck;
	__u64 kms = 0;

	/* Don't let another thread running ldlm_extent_shift_kms() race
	 * with us: it must not take our lock into account in its KMS
	 * calculation just after we finish ours.
	 */
	ldlm_set_kms_ignore(lock);

	list_for_each(tmp, &res->lr_granted) {
		lck = list_entry(tmp, struct ldlm_lock, l_res_link);

		if (ldlm_is_kms_ignore(lck))
			continue;

		if (lck->l_policy_data.l_extent.end >= old_kms)
			return old_kms;

		/* This extent's end _has_ to be smaller than old_kms
		 * (checked above), so kms can only ever be smaller than or
		 * equal to old_kms.
		 */
		if (lck->l_policy_data.l_extent.end + 1 > kms)
			kms = lck->l_policy_data.l_extent.end + 1;
	}
	LASSERTF(kms <= old_kms, "kms %llu old_kms %llu\n", kms, old_kms);

	return kms;
}
EXPORT_SYMBOL(ldlm_extent_shift_kms);
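
/*
 * Worked example (illustrative only): suppose locks on [0, 4095] and
 * [0, 8191] are granted and old_kms == 8192.  Cancelling the [0, 8191]
 * lock leaves [0, 4095] as the highest remaining lock, so
 * ldlm_extent_shift_kms() returns 4095 + 1 == 4096.  Cancelling the
 * [0, 4095] lock instead still yields kms == 8191 + 1 == 8192, i.e. the
 * KMS is unchanged.
 */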

struct kmem_cache *ldlm_interval_slab;

/* Interval tree support for LDLM_EXTENT locks. */
static void ldlm_interval_attach(struct ldlm_interval *n, struct ldlm_lock *l)
{
	LASSERT(!l->l_tree_node);
	LASSERT(l->l_resource->lr_type == LDLM_EXTENT);

	list_add_tail(&l->l_sl_policy, &n->li_group);
	l->l_tree_node = n;
}

struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
{
	struct ldlm_interval *node;

	LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
	node = kmem_cache_zalloc(ldlm_interval_slab, GFP_NOFS);
	if (!node)
		return NULL;

	INIT_LIST_HEAD(&node->li_group);
	ldlm_interval_attach(node, lock);
	return node;
}

void ldlm_interval_free(struct ldlm_interval *node)
{
	if (node) {
		LASSERT(list_empty(&node->li_group));
		LASSERT(!interval_is_intree(&node->li_node));
		kmem_cache_free(ldlm_interval_slab, node);
	}
}

struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
{
	struct ldlm_interval *n = l->l_tree_node;

	if (!n)
		return NULL;

	LASSERT(!list_empty(&n->li_group));
	l->l_tree_node = NULL;
	list_del_init(&l->l_sl_policy);

	return list_empty(&n->li_group) ? n : NULL;
}
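
/*
 * Typical lifecycle (illustrative sketch): an interval node is allocated
 * and attached when the lock is created, may be handed off to an existing
 * policy group in ldlm_extent_add_lock() below, and is freed once the last
 * lock detaches from it:
 *
 *	struct ldlm_interval *node = ldlm_interval_alloc(lock);
 *	...
 *	node = ldlm_interval_detach(lock);
 *	ldlm_interval_free(node);
 *
 * ldlm_interval_detach() returns NULL while other locks still share the
 * node, and ldlm_interval_free() is a no-op on NULL, so this sequence is
 * safe for any member of a policy group.
 */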

static inline int lock_mode_to_index(enum ldlm_mode mode)
{
	int index;

	LASSERT(mode != 0);
	LASSERT(is_power_of_2(mode));
	for (index = -1; mode; index++)
		mode >>= 1;
	LASSERT(index < LCK_MODE_NUM);
	return index;
}
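
/*
 * Illustrative mapping, assuming the LCK_* values from lustre_dlm.h
 * (LCK_EX == 1, LCK_PW == 2, LCK_PR == 4, ...): the loop computes log2 of
 * the single mode bit, so lock_mode_to_index(LCK_PR) == 2 because
 * LCK_PR == 1 << 2.
 */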

/** Add a newly granted lock into the resource's interval tree. */
void ldlm_extent_add_lock(struct ldlm_resource *res,
			  struct ldlm_lock *lock)
{
	struct interval_node *found, **root;
	struct ldlm_interval *node;
	struct ldlm_extent *extent;
	int idx, rc;

	LASSERT(lock->l_granted_mode == lock->l_req_mode);

	node = lock->l_tree_node;
	LASSERT(node);
	LASSERT(!interval_is_intree(&node->li_node));

	idx = lock_mode_to_index(lock->l_granted_mode);
	LASSERT(lock->l_granted_mode == 1 << idx);
	LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);

	/* Initialize the node with the lock's extent. */
	extent = &lock->l_policy_data.l_extent;
	rc = interval_set(&node->li_node, extent->start, extent->end);
	LASSERT(!rc);

	root = &res->lr_itree[idx].lit_root;
	found = interval_insert(&node->li_node, root);
	if (found) { /* An existing policy group was found. */
		struct ldlm_interval *tmp;

		tmp = ldlm_interval_detach(lock);
		ldlm_interval_free(tmp);
		ldlm_interval_attach(to_ldlm_interval(found), lock);
	}
	res->lr_itree[idx].lit_size++;

	/* Even though we use an interval tree to manage extent locks, we
	 * also add them to the granted list, for debugging purposes.
	 */
	ldlm_resource_add_lock(res, &res->lr_granted, lock);

	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GRANT_CHECK)) {
		struct ldlm_lock *lck;

		list_for_each_entry_reverse(lck, &res->lr_granted,
					    l_res_link) {
			if (lck == lock)
				continue;
			if (lockmode_compat(lck->l_granted_mode,
					    lock->l_granted_mode))
				continue;
			if (ldlm_extent_overlap(&lck->l_req_extent,
						&lock->l_req_extent)) {
				CDEBUG(D_ERROR, "granting conflicting lock %p %p\n",
				       lck, lock);
				ldlm_resource_dump(D_ERROR, res);
				LBUG();
			}
		}
	}
}
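
/*
 * Note (illustrative): res->lr_itree[] holds one interval tree per lock
 * mode (LCK_MODE_NUM trees in total), with lit_mode recording which mode
 * each tree indexes.  Locks with identical extents share a single interval
 * node, chained through li_group as a "policy group", which is why
 * ldlm_extent_add_lock() above may detach a lock's private node and attach
 * the lock to an existing one.
 */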

/** Remove a cancelled lock from the resource's interval tree. */
void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
{
	struct ldlm_resource *res = lock->l_resource;
	struct ldlm_interval *node = lock->l_tree_node;
	struct ldlm_interval_tree *tree;
	int idx;

	if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
		return;

	idx = lock_mode_to_index(lock->l_granted_mode);
	LASSERT(lock->l_granted_mode == 1 << idx);
	tree = &res->lr_itree[idx];

	LASSERT(tree->lit_root); /* ensure the tree is not empty */

	tree->lit_size--;
	node = ldlm_interval_detach(lock);
	if (node) {
		interval_erase(&node->li_node, &tree->lit_root);
		ldlm_interval_free(node);
	}
}

void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
				      union ldlm_policy_data *lpolicy)
{
	lpolicy->l_extent.start = wpolicy->l_extent.start;
	lpolicy->l_extent.end = wpolicy->l_extent.end;
	lpolicy->l_extent.gid = wpolicy->l_extent.gid;
}

void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
				      union ldlm_wire_policy_data *wpolicy)
{
	memset(wpolicy, 0, sizeof(*wpolicy));
	wpolicy->l_extent.start = lpolicy->l_extent.start;
	wpolicy->l_extent.end = lpolicy->l_extent.end;
	wpolicy->l_extent.gid = lpolicy->l_extent.gid;
}
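
/*
 * Design note (an inference, not stated in the original): the memset() in
 * the local-to-wire direction clears the whole union before the extent
 * fields are copied, so any padding or unused members are zeroed rather
 * than leaking stale kernel memory onto the wire.
 */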