linux/fs/ocfs2/dlm/dlmmaster.c
<<
>>
Prefs
   1/* -*- mode: c; c-basic-offset: 8; -*-
   2 * vim: noexpandtab sw=8 ts=8 sts=0:
   3 *
   4 * dlmmod.c
   5 *
   6 * standalone DLM module
   7 *
   8 * Copyright (C) 2004 Oracle.  All rights reserved.
   9 *
  10 * This program is free software; you can redistribute it and/or
  11 * modify it under the terms of the GNU General Public
  12 * License as published by the Free Software Foundation; either
  13 * version 2 of the License, or (at your option) any later version.
  14 *
  15 * This program is distributed in the hope that it will be useful,
  16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  18 * General Public License for more details.
  19 *
  20 * You should have received a copy of the GNU General Public
  21 * License along with this program; if not, write to the
  22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  23 * Boston, MA 021110-1307, USA.
  24 *
  25 */
  26
  27
  28#include <linux/module.h>
  29#include <linux/fs.h>
  30#include <linux/types.h>
  31#include <linux/slab.h>
  32#include <linux/highmem.h>
  33#include <linux/init.h>
  34#include <linux/sysctl.h>
  35#include <linux/random.h>
  36#include <linux/blkdev.h>
  37#include <linux/socket.h>
  38#include <linux/inet.h>
  39#include <linux/spinlock.h>
  40#include <linux/delay.h>
  41
  42
  43#include "cluster/heartbeat.h"
  44#include "cluster/nodemanager.h"
  45#include "cluster/tcp.h"
  46
  47#include "dlmapi.h"
  48#include "dlmcommon.h"
  49#include "dlmdomain.h"
  50#include "dlmdebug.h"
  51
  52#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
  53#include "cluster/masklog.h"
  54
  55static void dlm_mle_node_down(struct dlm_ctxt *dlm,
  56                              struct dlm_master_list_entry *mle,
  57                              struct o2nm_node *node,
  58                              int idx);
  59static void dlm_mle_node_up(struct dlm_ctxt *dlm,
  60                            struct dlm_master_list_entry *mle,
  61                            struct o2nm_node *node,
  62                            int idx);
  63
  64static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
  65static int dlm_do_assert_master(struct dlm_ctxt *dlm,
  66                                struct dlm_lock_resource *res,
  67                                void *nodemap, u32 flags);
  68static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);
  69
  70static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
  71                                struct dlm_master_list_entry *mle,
  72                                const char *name,
  73                                unsigned int namelen)
  74{
  75        if (dlm != mle->dlm)
  76                return 0;
  77
  78        if (namelen != mle->mnamelen ||
  79            memcmp(name, mle->mname, namelen) != 0)
  80                return 0;
  81
  82        return 1;
  83}
  84
  85static struct kmem_cache *dlm_lockres_cache = NULL;
  86static struct kmem_cache *dlm_lockname_cache = NULL;
  87static struct kmem_cache *dlm_mle_cache = NULL;
  88
  89static void dlm_mle_release(struct kref *kref);
  90static void dlm_init_mle(struct dlm_master_list_entry *mle,
  91                        enum dlm_mle_type type,
  92                        struct dlm_ctxt *dlm,
  93                        struct dlm_lock_resource *res,
  94                        const char *name,
  95                        unsigned int namelen);
  96static void dlm_put_mle(struct dlm_master_list_entry *mle);
  97static void __dlm_put_mle(struct dlm_master_list_entry *mle);
  98static int dlm_find_mle(struct dlm_ctxt *dlm,
  99                        struct dlm_master_list_entry **mle,
 100                        char *name, unsigned int namelen);
 101
 102static int dlm_do_master_request(struct dlm_lock_resource *res,
 103                                 struct dlm_master_list_entry *mle, int to);
 104
 105
 106static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
 107                                     struct dlm_lock_resource *res,
 108                                     struct dlm_master_list_entry *mle,
 109                                     int *blocked);
 110static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
 111                                    struct dlm_lock_resource *res,
 112                                    struct dlm_master_list_entry *mle,
 113                                    int blocked);
 114static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
 115                                 struct dlm_lock_resource *res,
 116                                 struct dlm_master_list_entry *mle,
 117                                 struct dlm_master_list_entry **oldmle,
 118                                 const char *name, unsigned int namelen,
 119                                 u8 new_master, u8 master);
 120
 121static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
 122                                    struct dlm_lock_resource *res);
 123static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
 124                                      struct dlm_lock_resource *res);
 125static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
 126                                       struct dlm_lock_resource *res,
 127                                       u8 target);
 128static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
 129                                       struct dlm_lock_resource *res);
 130
 131
 132int dlm_is_host_down(int errno)
 133{
 134        switch (errno) {
 135                case -EBADF:
 136                case -ECONNREFUSED:
 137                case -ENOTCONN:
 138                case -ECONNRESET:
 139                case -EPIPE:
 140                case -EHOSTDOWN:
 141                case -EHOSTUNREACH:
 142                case -ETIMEDOUT:
 143                case -ECONNABORTED:
 144                case -ENETDOWN:
 145                case -ENETUNREACH:
 146                case -ENETRESET:
 147                case -ESHUTDOWN:
 148                case -ENOPROTOOPT:
 149                case -EINVAL:   /* if returned from our tcp code,
 150                                   this means there is no socket */
 151                        return 1;
 152        }
 153        return 0;
 154}
 155
 156
 157/*
 158 * MASTER LIST FUNCTIONS
 159 */
 160
 161
 162/*
 163 * regarding master list entries and heartbeat callbacks:
 164 *
 165 * in order to avoid sleeping and allocation that occurs in
 166 * heartbeat, master list entries are simply attached to the
 167 * dlm's established heartbeat callbacks.  the mle is attached
 168 * when it is created, and since the dlm->spinlock is held at
 169 * that time, any heartbeat event will be properly discovered
 170 * by the mle.  the mle needs to be detached from the
 171 * dlm->mle_hb_events list as soon as heartbeat events are no
 172 * longer useful to the mle, and before the mle is freed.
 173 *
 174 * as a general rule, heartbeat events are no longer needed by
 175 * the mle once an "answer" regarding the lock master has been
 176 * received.
 177 */
 178static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
 179                                              struct dlm_master_list_entry *mle)
 180{
 181        assert_spin_locked(&dlm->spinlock);
 182
 183        list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
 184}
 185
 186
 187static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
 188                                              struct dlm_master_list_entry *mle)
 189{
 190        if (!list_empty(&mle->hb_events))
 191                list_del_init(&mle->hb_events);
 192}
 193
 194
 195static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
 196                                            struct dlm_master_list_entry *mle)
 197{
 198        spin_lock(&dlm->spinlock);
 199        __dlm_mle_detach_hb_events(dlm, mle);
 200        spin_unlock(&dlm->spinlock);
 201}
 202
 203static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
 204{
 205        struct dlm_ctxt *dlm;
 206        dlm = mle->dlm;
 207
 208        assert_spin_locked(&dlm->spinlock);
 209        assert_spin_locked(&dlm->master_lock);
 210        mle->inuse++;
 211        kref_get(&mle->mle_refs);
 212}
 213
 214static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
 215{
 216        struct dlm_ctxt *dlm;
 217        dlm = mle->dlm;
 218
 219        spin_lock(&dlm->spinlock);
 220        spin_lock(&dlm->master_lock);
 221        mle->inuse--;
 222        __dlm_put_mle(mle);
 223        spin_unlock(&dlm->master_lock);
 224        spin_unlock(&dlm->spinlock);
 225
 226}
 227
 228/* remove from list and free */
 229static void __dlm_put_mle(struct dlm_master_list_entry *mle)
 230{
 231        struct dlm_ctxt *dlm;
 232        dlm = mle->dlm;
 233
 234        assert_spin_locked(&dlm->spinlock);
 235        assert_spin_locked(&dlm->master_lock);
 236        if (!atomic_read(&mle->mle_refs.refcount)) {
 237                /* this may or may not crash, but who cares.
 238                 * it's a BUG. */
 239                mlog(ML_ERROR, "bad mle: %p\n", mle);
 240                dlm_print_one_mle(mle);
 241                BUG();
 242        } else
 243                kref_put(&mle->mle_refs, dlm_mle_release);
 244}
 245
 246
 247/* must not have any spinlocks coming in */
 248static void dlm_put_mle(struct dlm_master_list_entry *mle)
 249{
 250        struct dlm_ctxt *dlm;
 251        dlm = mle->dlm;
 252
 253        spin_lock(&dlm->spinlock);
 254        spin_lock(&dlm->master_lock);
 255        __dlm_put_mle(mle);
 256        spin_unlock(&dlm->master_lock);
 257        spin_unlock(&dlm->spinlock);
 258}
 259
 260static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
 261{
 262        kref_get(&mle->mle_refs);
 263}
 264
 265static void dlm_init_mle(struct dlm_master_list_entry *mle,
 266                        enum dlm_mle_type type,
 267                        struct dlm_ctxt *dlm,
 268                        struct dlm_lock_resource *res,
 269                        const char *name,
 270                        unsigned int namelen)
 271{
 272        assert_spin_locked(&dlm->spinlock);
 273
 274        mle->dlm = dlm;
 275        mle->type = type;
 276        INIT_HLIST_NODE(&mle->master_hash_node);
 277        INIT_LIST_HEAD(&mle->hb_events);
 278        memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
 279        spin_lock_init(&mle->spinlock);
 280        init_waitqueue_head(&mle->wq);
 281        atomic_set(&mle->woken, 0);
 282        kref_init(&mle->mle_refs);
 283        memset(mle->response_map, 0, sizeof(mle->response_map));
 284        mle->master = O2NM_MAX_NODES;
 285        mle->new_master = O2NM_MAX_NODES;
 286        mle->inuse = 0;
 287
 288        BUG_ON(mle->type != DLM_MLE_BLOCK &&
 289               mle->type != DLM_MLE_MASTER &&
 290               mle->type != DLM_MLE_MIGRATION);
 291
 292        if (mle->type == DLM_MLE_MASTER) {
 293                BUG_ON(!res);
 294                mle->mleres = res;
 295                memcpy(mle->mname, res->lockname.name, res->lockname.len);
 296                mle->mnamelen = res->lockname.len;
 297                mle->mnamehash = res->lockname.hash;
 298        } else {
 299                BUG_ON(!name);
 300                mle->mleres = NULL;
 301                memcpy(mle->mname, name, namelen);
 302                mle->mnamelen = namelen;
 303                mle->mnamehash = dlm_lockid_hash(name, namelen);
 304        }
 305
 306        atomic_inc(&dlm->mle_tot_count[mle->type]);
 307        atomic_inc(&dlm->mle_cur_count[mle->type]);
 308
 309        /* copy off the node_map and register hb callbacks on our copy */
 310        memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
 311        memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
 312        clear_bit(dlm->node_num, mle->vote_map);
 313        clear_bit(dlm->node_num, mle->node_map);
 314
 315        /* attach the mle to the domain node up/down events */
 316        __dlm_mle_attach_hb_events(dlm, mle);
 317}
 318
 319void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
 320{
 321        assert_spin_locked(&dlm->spinlock);
 322        assert_spin_locked(&dlm->master_lock);
 323
 324        if (!hlist_unhashed(&mle->master_hash_node))
 325                hlist_del_init(&mle->master_hash_node);
 326}
 327
 328void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
 329{
 330        struct hlist_head *bucket;
 331
 332        assert_spin_locked(&dlm->master_lock);
 333
 334        bucket = dlm_master_hash(dlm, mle->mnamehash);
 335        hlist_add_head(&mle->master_hash_node, bucket);
 336}
 337
 338/* returns 1 if found, 0 if not */
 339static int dlm_find_mle(struct dlm_ctxt *dlm,
 340                        struct dlm_master_list_entry **mle,
 341                        char *name, unsigned int namelen)
 342{
 343        struct dlm_master_list_entry *tmpmle;
 344        struct hlist_head *bucket;
 345        unsigned int hash;
 346
 347        assert_spin_locked(&dlm->master_lock);
 348
 349        hash = dlm_lockid_hash(name, namelen);
 350        bucket = dlm_master_hash(dlm, hash);
 351        hlist_for_each_entry(tmpmle, bucket, master_hash_node) {
 352                if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
 353                        continue;
 354                dlm_get_mle(tmpmle);
 355                *mle = tmpmle;
 356                return 1;
 357        }
 358        return 0;
 359}
 360
 361void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
 362{
 363        struct dlm_master_list_entry *mle;
 364
 365        assert_spin_locked(&dlm->spinlock);
 366
 367        list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
 368                if (node_up)
 369                        dlm_mle_node_up(dlm, mle, NULL, idx);
 370                else
 371                        dlm_mle_node_down(dlm, mle, NULL, idx);
 372        }
 373}
 374
 375static void dlm_mle_node_down(struct dlm_ctxt *dlm,
 376                              struct dlm_master_list_entry *mle,
 377                              struct o2nm_node *node, int idx)
 378{
 379        spin_lock(&mle->spinlock);
 380
 381        if (!test_bit(idx, mle->node_map))
 382                mlog(0, "node %u already removed from nodemap!\n", idx);
 383        else
 384                clear_bit(idx, mle->node_map);
 385
 386        spin_unlock(&mle->spinlock);
 387}
 388
 389static void dlm_mle_node_up(struct dlm_ctxt *dlm,
 390                            struct dlm_master_list_entry *mle,
 391                            struct o2nm_node *node, int idx)
 392{
 393        spin_lock(&mle->spinlock);
 394
 395        if (test_bit(idx, mle->node_map))
 396                mlog(0, "node %u already in node map!\n", idx);
 397        else
 398                set_bit(idx, mle->node_map);
 399
 400        spin_unlock(&mle->spinlock);
 401}
 402
 403
 404int dlm_init_mle_cache(void)
 405{
 406        dlm_mle_cache = kmem_cache_create("o2dlm_mle",
 407                                          sizeof(struct dlm_master_list_entry),
 408                                          0, SLAB_HWCACHE_ALIGN,
 409                                          NULL);
 410        if (dlm_mle_cache == NULL)
 411                return -ENOMEM;
 412        return 0;
 413}
 414
 415void dlm_destroy_mle_cache(void)
 416{
 417        if (dlm_mle_cache)
 418                kmem_cache_destroy(dlm_mle_cache);
 419}
 420
 421static void dlm_mle_release(struct kref *kref)
 422{
 423        struct dlm_master_list_entry *mle;
 424        struct dlm_ctxt *dlm;
 425
 426        mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
 427        dlm = mle->dlm;
 428
 429        assert_spin_locked(&dlm->spinlock);
 430        assert_spin_locked(&dlm->master_lock);
 431
 432        mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,
 433             mle->type);
 434
 435        /* remove from list if not already */
 436        __dlm_unlink_mle(dlm, mle);
 437
 438        /* detach the mle from the domain node up/down events */
 439        __dlm_mle_detach_hb_events(dlm, mle);
 440
 441        atomic_dec(&dlm->mle_cur_count[mle->type]);
 442
 443        /* NOTE: kfree under spinlock here.
 444         * if this is bad, we can move this to a freelist. */
 445        kmem_cache_free(dlm_mle_cache, mle);
 446}
 447
 448
 449/*
 450 * LOCK RESOURCE FUNCTIONS
 451 */
 452
 453int dlm_init_master_caches(void)
 454{
 455        dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
 456                                              sizeof(struct dlm_lock_resource),
 457                                              0, SLAB_HWCACHE_ALIGN, NULL);
 458        if (!dlm_lockres_cache)
 459                goto bail;
 460
 461        dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
 462                                               DLM_LOCKID_NAME_MAX, 0,
 463                                               SLAB_HWCACHE_ALIGN, NULL);
 464        if (!dlm_lockname_cache)
 465                goto bail;
 466
 467        return 0;
 468bail:
 469        dlm_destroy_master_caches();
 470        return -ENOMEM;
 471}
 472
 473void dlm_destroy_master_caches(void)
 474{
 475        if (dlm_lockname_cache)
 476                kmem_cache_destroy(dlm_lockname_cache);
 477
 478        if (dlm_lockres_cache)
 479                kmem_cache_destroy(dlm_lockres_cache);
 480}
 481
 482static void dlm_lockres_release(struct kref *kref)
 483{
 484        struct dlm_lock_resource *res;
 485        struct dlm_ctxt *dlm;
 486
 487        res = container_of(kref, struct dlm_lock_resource, refs);
 488        dlm = res->dlm;
 489
 490        /* This should not happen -- all lockres' have a name
 491         * associated with them at init time. */
 492        BUG_ON(!res->lockname.name);
 493
 494        mlog(0, "destroying lockres %.*s\n", res->lockname.len,
 495             res->lockname.name);
 496
 497        spin_lock(&dlm->track_lock);
 498        if (!list_empty(&res->tracking))
 499                list_del_init(&res->tracking);
 500        else {
 501                mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
 502                     res->lockname.len, res->lockname.name);
 503                dlm_print_one_lock_resource(res);
 504        }
 505        spin_unlock(&dlm->track_lock);
 506
 507        atomic_dec(&dlm->res_cur_count);
 508
 509        if (!hlist_unhashed(&res->hash_node) ||
 510            !list_empty(&res->granted) ||
 511            !list_empty(&res->converting) ||
 512            !list_empty(&res->blocked) ||
 513            !list_empty(&res->dirty) ||
 514            !list_empty(&res->recovering) ||
 515            !list_empty(&res->purge)) {
 516                mlog(ML_ERROR,
 517                     "Going to BUG for resource %.*s."
 518                     "  We're on a list! [%c%c%c%c%c%c%c]\n",
 519                     res->lockname.len, res->lockname.name,
 520                     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
 521                     !list_empty(&res->granted) ? 'G' : ' ',
 522                     !list_empty(&res->converting) ? 'C' : ' ',
 523                     !list_empty(&res->blocked) ? 'B' : ' ',
 524                     !list_empty(&res->dirty) ? 'D' : ' ',
 525                     !list_empty(&res->recovering) ? 'R' : ' ',
 526                     !list_empty(&res->purge) ? 'P' : ' ');
 527
 528                dlm_print_one_lock_resource(res);
 529        }
 530
 531        /* By the time we're ready to blow this guy away, we shouldn't
 532         * be on any lists. */
 533        BUG_ON(!hlist_unhashed(&res->hash_node));
 534        BUG_ON(!list_empty(&res->granted));
 535        BUG_ON(!list_empty(&res->converting));
 536        BUG_ON(!list_empty(&res->blocked));
 537        BUG_ON(!list_empty(&res->dirty));
 538        BUG_ON(!list_empty(&res->recovering));
 539        BUG_ON(!list_empty(&res->purge));
 540
 541        kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);
 542
 543        kmem_cache_free(dlm_lockres_cache, res);
 544}
 545
 546void dlm_lockres_put(struct dlm_lock_resource *res)
 547{
 548        kref_put(&res->refs, dlm_lockres_release);
 549}
 550
 551static void dlm_init_lockres(struct dlm_ctxt *dlm,
 552                             struct dlm_lock_resource *res,
 553                             const char *name, unsigned int namelen)
 554{
 555        char *qname;
 556
 557        /* If we memset here, we lose our reference to the kmalloc'd
 558         * res->lockname.name, so be sure to init every field
 559         * correctly! */
 560
 561        qname = (char *) res->lockname.name;
 562        memcpy(qname, name, namelen);
 563
 564        res->lockname.len = namelen;
 565        res->lockname.hash = dlm_lockid_hash(name, namelen);
 566
 567        init_waitqueue_head(&res->wq);
 568        spin_lock_init(&res->spinlock);
 569        INIT_HLIST_NODE(&res->hash_node);
 570        INIT_LIST_HEAD(&res->granted);
 571        INIT_LIST_HEAD(&res->converting);
 572        INIT_LIST_HEAD(&res->blocked);
 573        INIT_LIST_HEAD(&res->dirty);
 574        INIT_LIST_HEAD(&res->recovering);
 575        INIT_LIST_HEAD(&res->purge);
 576        INIT_LIST_HEAD(&res->tracking);
 577        atomic_set(&res->asts_reserved, 0);
 578        res->migration_pending = 0;
 579        res->inflight_locks = 0;
 580
 581        res->dlm = dlm;
 582
 583        kref_init(&res->refs);
 584
 585        atomic_inc(&dlm->res_tot_count);
 586        atomic_inc(&dlm->res_cur_count);
 587
 588        /* just for consistency */
 589        spin_lock(&res->spinlock);
 590        dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
 591        spin_unlock(&res->spinlock);
 592
 593        res->state = DLM_LOCK_RES_IN_PROGRESS;
 594
 595        res->last_used = 0;
 596
 597        spin_lock(&dlm->spinlock);
 598        list_add_tail(&res->tracking, &dlm->tracking_list);
 599        spin_unlock(&dlm->spinlock);
 600
 601        memset(res->lvb, 0, DLM_LVB_LEN);
 602        memset(res->refmap, 0, sizeof(res->refmap));
 603}
 604
 605struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
 606                                   const char *name,
 607                                   unsigned int namelen)
 608{
 609        struct dlm_lock_resource *res = NULL;
 610
 611        res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
 612        if (!res)
 613                goto error;
 614
 615        res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
 616        if (!res->lockname.name)
 617                goto error;
 618
 619        dlm_init_lockres(dlm, res, name, namelen);
 620        return res;
 621
 622error:
 623        if (res && res->lockname.name)
 624                kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);
 625
 626        if (res)
 627                kmem_cache_free(dlm_lockres_cache, res);
 628        return NULL;
 629}
 630
 631void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
 632                                struct dlm_lock_resource *res, int bit)
 633{
 634        assert_spin_locked(&res->spinlock);
 635
 636        mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
 637             res->lockname.name, bit, __builtin_return_address(0));
 638
 639        set_bit(bit, res->refmap);
 640}
 641
 642void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
 643                                  struct dlm_lock_resource *res, int bit)
 644{
 645        assert_spin_locked(&res->spinlock);
 646
 647        mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
 648             res->lockname.name, bit, __builtin_return_address(0));
 649
 650        clear_bit(bit, res->refmap);
 651}
 652
 653
 654void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
 655                                   struct dlm_lock_resource *res)
 656{
 657        assert_spin_locked(&res->spinlock);
 658
 659        res->inflight_locks++;
 660
 661        mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
 662             res->lockname.len, res->lockname.name, res->inflight_locks,
 663             __builtin_return_address(0));
 664}
 665
 666void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
 667                                   struct dlm_lock_resource *res)
 668{
 669        assert_spin_locked(&res->spinlock);
 670
 671        BUG_ON(res->inflight_locks == 0);
 672
 673        res->inflight_locks--;
 674
 675        mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
 676             res->lockname.len, res->lockname.name, res->inflight_locks,
 677             __builtin_return_address(0));
 678
 679        wake_up(&res->wq);
 680}
 681
 682/*
 683 * lookup a lock resource by name.
 684 * may already exist in the hashtable.
 685 * lockid is null terminated
 686 *
 687 * if not, allocate enough for the lockres and for
 688 * the temporary structure used in doing the mastering.
 689 *
 690 * also, do a lookup in the dlm->master_list to see
 691 * if another node has begun mastering the same lock.
 692 * if so, there should be a block entry in there
 693 * for this name, and we should *not* attempt to master
 694 * the lock here.   need to wait around for that node
 695 * to assert_master (or die).
 696 *
 697 */
 698struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
 699                                          const char *lockid,
 700                                          int namelen,
 701                                          int flags)
 702{
 703        struct dlm_lock_resource *tmpres=NULL, *res=NULL;
 704        struct dlm_master_list_entry *mle = NULL;
 705        struct dlm_master_list_entry *alloc_mle = NULL;
 706        int blocked = 0;
 707        int ret, nodenum;
 708        struct dlm_node_iter iter;
 709        unsigned int hash;
 710        int tries = 0;
 711        int bit, wait_on_recovery = 0;
 712
 713        BUG_ON(!lockid);
 714
 715        hash = dlm_lockid_hash(lockid, namelen);
 716
 717        mlog(0, "get lockres %s (len %d)\n", lockid, namelen);
 718
 719lookup:
 720        spin_lock(&dlm->spinlock);
 721        tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
 722        if (tmpres) {
 723                spin_unlock(&dlm->spinlock);
 724                spin_lock(&tmpres->spinlock);
 725                /* Wait on the thread that is mastering the resource */
 726                if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
 727                        __dlm_wait_on_lockres(tmpres);
 728                        BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
 729                        spin_unlock(&tmpres->spinlock);
 730                        dlm_lockres_put(tmpres);
 731                        tmpres = NULL;
 732                        goto lookup;
 733                }
 734
 735                /* Wait on the resource purge to complete before continuing */
 736                if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
 737                        BUG_ON(tmpres->owner == dlm->node_num);
 738                        __dlm_wait_on_lockres_flags(tmpres,
 739                                                    DLM_LOCK_RES_DROPPING_REF);
 740                        spin_unlock(&tmpres->spinlock);
 741                        dlm_lockres_put(tmpres);
 742                        tmpres = NULL;
 743                        goto lookup;
 744                }
 745
 746                /* Grab inflight ref to pin the resource */
 747                dlm_lockres_grab_inflight_ref(dlm, tmpres);
 748
 749                spin_unlock(&tmpres->spinlock);
 750                if (res)
 751                        dlm_lockres_put(res);
 752                res = tmpres;
 753                goto leave;
 754        }
 755
 756        if (!res) {
 757                spin_unlock(&dlm->spinlock);
 758                mlog(0, "allocating a new resource\n");
 759                /* nothing found and we need to allocate one. */
 760                alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
 761                if (!alloc_mle)
 762                        goto leave;
 763                res = dlm_new_lockres(dlm, lockid, namelen);
 764                if (!res)
 765                        goto leave;
 766                goto lookup;
 767        }
 768
 769        mlog(0, "no lockres found, allocated our own: %p\n", res);
 770
 771        if (flags & LKM_LOCAL) {
 772                /* caller knows it's safe to assume it's not mastered elsewhere
 773                 * DONE!  return right away */
 774                spin_lock(&res->spinlock);
 775                dlm_change_lockres_owner(dlm, res, dlm->node_num);
 776                __dlm_insert_lockres(dlm, res);
 777                dlm_lockres_grab_inflight_ref(dlm, res);
 778                spin_unlock(&res->spinlock);
 779                spin_unlock(&dlm->spinlock);
 780                /* lockres still marked IN_PROGRESS */
 781                goto wake_waiters;
 782        }
 783
 784        /* check master list to see if another node has started mastering it */
 785        spin_lock(&dlm->master_lock);
 786
 787        /* if we found a block, wait for lock to be mastered by another node */
 788        blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
 789        if (blocked) {
 790                int mig;
 791                if (mle->type == DLM_MLE_MASTER) {
 792                        mlog(ML_ERROR, "master entry for nonexistent lock!\n");
 793                        BUG();
 794                }
 795                mig = (mle->type == DLM_MLE_MIGRATION);
 796                /* if there is a migration in progress, let the migration
 797                 * finish before continuing.  we can wait for the absence
 798                 * of the MIGRATION mle: either the migrate finished or
 799                 * one of the nodes died and the mle was cleaned up.
 800                 * if there is a BLOCK here, but it already has a master
 801                 * set, we are too late.  the master does not have a ref
 802                 * for us in the refmap.  detach the mle and drop it.
 803                 * either way, go back to the top and start over. */
 804                if (mig || mle->master != O2NM_MAX_NODES) {
 805                        BUG_ON(mig && mle->master == dlm->node_num);
 806                        /* we arrived too late.  the master does not
 807                         * have a ref for us. retry. */
 808                        mlog(0, "%s:%.*s: late on %s\n",
 809                             dlm->name, namelen, lockid,
 810                             mig ?  "MIGRATION" : "BLOCK");
 811                        spin_unlock(&dlm->master_lock);
 812                        spin_unlock(&dlm->spinlock);
 813
 814                        /* master is known, detach */
 815                        if (!mig)
 816                                dlm_mle_detach_hb_events(dlm, mle);
 817                        dlm_put_mle(mle);
 818                        mle = NULL;
 819                        /* this is lame, but we can't wait on either
 820                         * the mle or lockres waitqueue here */
 821                        if (mig)
 822                                msleep(100);
 823                        goto lookup;
 824                }
 825        } else {
 826                /* go ahead and try to master lock on this node */
 827                mle = alloc_mle;
 828                /* make sure this does not get freed below */
 829                alloc_mle = NULL;
 830                dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
 831                set_bit(dlm->node_num, mle->maybe_map);
 832                __dlm_insert_mle(dlm, mle);
 833
 834                /* still holding the dlm spinlock, check the recovery map
 835                 * to see if there are any nodes that still need to be
 836                 * considered.  these will not appear in the mle nodemap
 837                 * but they might own this lockres.  wait on them. */
 838                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
 839                if (bit < O2NM_MAX_NODES) {
 840                        mlog(0, "%s: res %.*s, At least one node (%d) "
 841                             "to recover before lock mastery can begin\n",
 842                             dlm->name, namelen, (char *)lockid, bit);
 843                        wait_on_recovery = 1;
 844                }
 845        }
 846
 847        /* at this point there is either a DLM_MLE_BLOCK or a
 848         * DLM_MLE_MASTER on the master list, so it's safe to add the
 849         * lockres to the hashtable.  anyone who finds the lock will
 850         * still have to wait on the IN_PROGRESS. */
 851
 852        /* finally add the lockres to its hash bucket */
 853        __dlm_insert_lockres(dlm, res);
 854
 855        /* Grab inflight ref to pin the resource */
 856        spin_lock(&res->spinlock);
 857        dlm_lockres_grab_inflight_ref(dlm, res);
 858        spin_unlock(&res->spinlock);
 859
 860        /* get an extra ref on the mle in case this is a BLOCK
 861         * if so, the creator of the BLOCK may try to put the last
 862         * ref at this time in the assert master handler, so we
 863         * need an extra one to keep from a bad ptr deref. */
 864        dlm_get_mle_inuse(mle);
 865        spin_unlock(&dlm->master_lock);
 866        spin_unlock(&dlm->spinlock);
 867
 868redo_request:
 869        while (wait_on_recovery) {
 870                /* any cluster changes that occurred after dropping the
 871                 * dlm spinlock would be detectable be a change on the mle,
 872                 * so we only need to clear out the recovery map once. */
 873                if (dlm_is_recovery_lock(lockid, namelen)) {
 874                        mlog(0, "%s: Recovery map is not empty, but must "
 875                             "master $RECOVERY lock now\n", dlm->name);
 876                        if (!dlm_pre_master_reco_lockres(dlm, res))
 877                                wait_on_recovery = 0;
 878                        else {
 879                                mlog(0, "%s: waiting 500ms for heartbeat state "
 880                                    "change\n", dlm->name);
 881                                msleep(500);
 882                        }
 883                        continue;
 884                }
 885
 886                dlm_kick_recovery_thread(dlm);
 887                msleep(1000);
 888                dlm_wait_for_recovery(dlm);
 889
 890                spin_lock(&dlm->spinlock);
 891                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
 892                if (bit < O2NM_MAX_NODES) {
 893                        mlog(0, "%s: res %.*s, At least one node (%d) "
 894                             "to recover before lock mastery can begin\n",
 895                             dlm->name, namelen, (char *)lockid, bit);
 896                        wait_on_recovery = 1;
 897                } else
 898                        wait_on_recovery = 0;
 899                spin_unlock(&dlm->spinlock);
 900
 901                if (wait_on_recovery)
 902                        dlm_wait_for_node_recovery(dlm, bit, 10000);
 903        }
 904
 905        /* must wait for lock to be mastered elsewhere */
 906        if (blocked)
 907                goto wait;
 908
 909        ret = -EINVAL;
 910        dlm_node_iter_init(mle->vote_map, &iter);
 911        while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
 912                ret = dlm_do_master_request(res, mle, nodenum);
 913                if (ret < 0)
 914                        mlog_errno(ret);
 915                if (mle->master != O2NM_MAX_NODES) {
 916                        /* found a master ! */
 917                        if (mle->master <= nodenum)
 918                                break;
 919                        /* if our master request has not reached the master
 920                         * yet, keep going until it does.  this is how the
 921                         * master will know that asserts are needed back to
 922                         * the lower nodes. */
 923                        mlog(0, "%s: res %.*s, Requests only up to %u but "
 924                             "master is %u, keep going\n", dlm->name, namelen,
 925                             lockid, nodenum, mle->master);
 926                }
 927        }
 928
 929wait:
 930        /* keep going until the response map includes all nodes */
 931        ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
 932        if (ret < 0) {
 933                wait_on_recovery = 1;
 934                mlog(0, "%s: res %.*s, Node map changed, redo the master "
 935                     "request now, blocked=%d\n", dlm->name, res->lockname.len,
 936                     res->lockname.name, blocked);
 937                if (++tries > 20) {
 938                        mlog(ML_ERROR, "%s: res %.*s, Spinning on "
 939                             "dlm_wait_for_lock_mastery, blocked = %d\n",
 940                             dlm->name, res->lockname.len,
 941                             res->lockname.name, blocked);
 942                        dlm_print_one_lock_resource(res);
 943                        dlm_print_one_mle(mle);
 944                        tries = 0;
 945                }
 946                goto redo_request;
 947        }
 948
 949        mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
 950             res->lockname.name, res->owner);
 951        /* make sure we never continue without this */
 952        BUG_ON(res->owner == O2NM_MAX_NODES);
 953
 954        /* master is known, detach if not already detached */
 955        dlm_mle_detach_hb_events(dlm, mle);
 956        dlm_put_mle(mle);
 957        /* put the extra ref */
 958        dlm_put_mle_inuse(mle);
 959
 960wake_waiters:
 961        spin_lock(&res->spinlock);
 962        res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
 963        spin_unlock(&res->spinlock);
 964        wake_up(&res->wq);
 965
 966leave:
 967        /* need to free the unused mle */
 968        if (alloc_mle)
 969                kmem_cache_free(dlm_mle_cache, alloc_mle);
 970
 971        return res;
 972}
 973
 974
 975#define DLM_MASTERY_TIMEOUT_MS   5000
 976
 977static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
 978                                     struct dlm_lock_resource *res,
 979                                     struct dlm_master_list_entry *mle,
 980                                     int *blocked)
 981{
 982        u8 m;
 983        int ret, bit;
 984        int map_changed, voting_done;
 985        int assert, sleep;
 986
 987recheck:
 988        ret = 0;
 989        assert = 0;
 990
 991        /* check if another node has already become the owner */
 992        spin_lock(&res->spinlock);
 993        if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
 994                mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
 995                     res->lockname.len, res->lockname.name, res->owner);
 996                spin_unlock(&res->spinlock);
 997                /* this will cause the master to re-assert across
 998                 * the whole cluster, freeing up mles */
 999                if (res->owner != dlm->node_num) {
1000                        ret = dlm_do_master_request(res, mle, res->owner);
1001                        if (ret < 0) {
1002                                /* give recovery a chance to run */
1003                                mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
1004                                msleep(500);
1005                                goto recheck;
1006                        }
1007                }
1008                ret = 0;
1009                goto leave;
1010        }
1011        spin_unlock(&res->spinlock);
1012
1013        spin_lock(&mle->spinlock);
1014        m = mle->master;
1015        map_changed = (memcmp(mle->vote_map, mle->node_map,
1016                              sizeof(mle->vote_map)) != 0);
1017        voting_done = (memcmp(mle->vote_map, mle->response_map,
1018                             sizeof(mle->vote_map)) == 0);
1019
1020        /* restart if we hit any errors */
1021        if (map_changed) {
1022                int b;
1023                mlog(0, "%s: %.*s: node map changed, restarting\n",
1024                     dlm->name, res->lockname.len, res->lockname.name);
1025                ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
1026                b = (mle->type == DLM_MLE_BLOCK);
1027                if ((*blocked && !b) || (!*blocked && b)) {
1028                        mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
1029                             dlm->name, res->lockname.len, res->lockname.name,
1030                             *blocked, b);
1031                        *blocked = b;
1032                }
1033                spin_unlock(&mle->spinlock);
1034                if (ret < 0) {
1035                        mlog_errno(ret);
1036                        goto leave;
1037                }
1038                mlog(0, "%s:%.*s: restart lock mastery succeeded, "
1039                     "rechecking now\n", dlm->name, res->lockname.len,
1040                     res->lockname.name);
1041                goto recheck;
1042        } else {
1043                if (!voting_done) {
1044                        mlog(0, "map not changed and voting not done "
1045                             "for %s:%.*s\n", dlm->name, res->lockname.len,
1046                             res->lockname.name);
1047                }
1048        }
1049
1050        if (m != O2NM_MAX_NODES) {
1051                /* another node has done an assert!
1052                 * all done! */
1053                sleep = 0;
1054        } else {
1055                sleep = 1;
1056                /* have all nodes responded? */
1057                if (voting_done && !*blocked) {
1058                        bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
1059                        if (dlm->node_num <= bit) {
1060                                /* my node number is lowest.
1061                                 * now tell other nodes that I am
1062                                 * mastering this. */
1063                                mle->master = dlm->node_num;
1064                                /* ref was grabbed in get_lock_resource
1065                                 * will be dropped in dlmlock_master */
1066                                assert = 1;
1067                                sleep = 0;
1068                        }
1069                        /* if voting is done, but we have not received
1070                         * an assert master yet, we must sleep */
1071                }
1072        }
1073
1074        spin_unlock(&mle->spinlock);
1075
1076        /* sleep if we haven't finished voting yet */
1077        if (sleep) {
1078                unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);
1079
1080                /*
1081                if (atomic_read(&mle->mle_refs.refcount) < 2)
1082                        mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
1083                        atomic_read(&mle->mle_refs.refcount),
1084                        res->lockname.len, res->lockname.name);
1085                */
1086                atomic_set(&mle->woken, 0);
1087                (void)wait_event_timeout(mle->wq,
1088                                         (atomic_read(&mle->woken) == 1),
1089                                         timeo);
1090                if (res->owner == O2NM_MAX_NODES) {
1091                        mlog(0, "%s:%.*s: waiting again\n", dlm->name,
1092                             res->lockname.len, res->lockname.name);
1093                        goto recheck;
1094                }
1095                mlog(0, "done waiting, master is %u\n", res->owner);
1096                ret = 0;
1097                goto leave;
1098        }
1099
1100        ret = 0;   /* done */
1101        if (assert) {
1102                m = dlm->node_num;
1103                mlog(0, "about to master %.*s here, this=%u\n",
1104                     res->lockname.len, res->lockname.name, m);
1105                ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
1106                if (ret) {
1107                        /* This is a failure in the network path,
1108                         * not in the response to the assert_master
1109                         * (any nonzero response is a BUG on this node).
1110                         * Most likely a socket just got disconnected
1111                         * due to node death. */
1112                        mlog_errno(ret);
1113                }
1114                /* no longer need to restart lock mastery.
1115                 * all living nodes have been contacted. */
1116                ret = 0;
1117        }
1118
1119        /* set the lockres owner */
1120        spin_lock(&res->spinlock);
1121        /* mastery reference obtained either during
1122         * assert_master_handler or in get_lock_resource */
1123        dlm_change_lockres_owner(dlm, res, m);
1124        spin_unlock(&res->spinlock);
1125
1126leave:
1127        return ret;
1128}
1129
1130struct dlm_bitmap_diff_iter
1131{
1132        int curnode;
1133        unsigned long *orig_bm;
1134        unsigned long *cur_bm;
1135        unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
1136};
1137
1138enum dlm_node_state_change
1139{
1140        NODE_DOWN = -1,
1141        NODE_NO_CHANGE = 0,
1142        NODE_UP
1143};
1144
1145static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
1146                                      unsigned long *orig_bm,
1147                                      unsigned long *cur_bm)
1148{
1149        unsigned long p1, p2;
1150        int i;
1151
1152        iter->curnode = -1;
1153        iter->orig_bm = orig_bm;
1154        iter->cur_bm = cur_bm;
1155
1156        for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
1157                p1 = *(iter->orig_bm + i);
1158                p2 = *(iter->cur_bm + i);
1159                iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
1160        }
1161}
1162
1163static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
1164                                     enum dlm_node_state_change *state)
1165{
1166        int bit;
1167
1168        if (iter->curnode >= O2NM_MAX_NODES)
1169                return -ENOENT;
1170
1171        bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
1172                            iter->curnode+1);
1173        if (bit >= O2NM_MAX_NODES) {
1174                iter->curnode = O2NM_MAX_NODES;
1175                return -ENOENT;
1176        }
1177
1178        /* if it was there in the original then this node died */
1179        if (test_bit(bit, iter->orig_bm))
1180                *state = NODE_DOWN;
1181        else
1182                *state = NODE_UP;
1183
1184        iter->curnode = bit;
1185        return bit;
1186}
1187
1188
1189static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
1190                                    struct dlm_lock_resource *res,
1191                                    struct dlm_master_list_entry *mle,
1192                                    int blocked)
1193{
1194        struct dlm_bitmap_diff_iter bdi;
1195        enum dlm_node_state_change sc;
1196        int node;
1197        int ret = 0;
1198
1199        mlog(0, "something happened such that the "
1200             "master process may need to be restarted!\n");
1201
1202        assert_spin_locked(&mle->spinlock);
1203
1204        dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
1205        node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1206        while (node >= 0) {
1207                if (sc == NODE_UP) {
1208                        /* a node came up.  clear any old vote from
1209                         * the response map and set it in the vote map
1210                         * then restart the mastery. */
1211                        mlog(ML_NOTICE, "node %d up while restarting\n", node);
1212
1213                        /* redo the master request, but only for the new node */
1214                        mlog(0, "sending request to new node\n");
1215                        clear_bit(node, mle->response_map);
1216                        set_bit(node, mle->vote_map);
1217                } else {
1218                        mlog(ML_ERROR, "node down! %d\n", node);
1219                        if (blocked) {
1220                                int lowest = find_next_bit(mle->maybe_map,
1221                                                       O2NM_MAX_NODES, 0);
1222
1223                                /* act like it was never there */
1224                                clear_bit(node, mle->maybe_map);
1225
1226                                if (node == lowest) {
1227                                        mlog(0, "expected master %u died"
1228                                            " while this node was blocked "
1229                                            "waiting on it!\n", node);
1230                                        lowest = find_next_bit(mle->maybe_map,
1231                                                        O2NM_MAX_NODES,
1232                                                        lowest+1);
1233                                        if (lowest < O2NM_MAX_NODES) {
1234                                                mlog(0, "%s:%.*s:still "
1235                                                     "blocked. waiting on %u "
1236                                                     "now\n", dlm->name,
1237                                                     res->lockname.len,
1238                                                     res->lockname.name,
1239                                                     lowest);
1240                                        } else {
1241                                                /* mle is an MLE_BLOCK, but
1242                                                 * there is now nothing left to
1243                                                 * block on.  we need to return
1244                                                 * all the way back out and try
1245                                                 * again with an MLE_MASTER.
1246                                                 * dlm_do_local_recovery_cleanup
1247                                                 * has already run, so the mle
1248                                                 * refcount is ok */
1249                                                mlog(0, "%s:%.*s: no "
1250                                                     "longer blocking. try to "
1251                                                     "master this here\n",
1252                                                     dlm->name,
1253                                                     res->lockname.len,
1254                                                     res->lockname.name);
1255                                                mle->type = DLM_MLE_MASTER;
1256                                                mle->mleres = res;
1257                                        }
1258                                }
1259                        }
1260
1261                        /* now blank out everything, as if we had never
1262                         * contacted anyone */
1263                        memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
1264                        memset(mle->response_map, 0, sizeof(mle->response_map));
1265                        /* reset the vote_map to the current node_map */
1266                        memcpy(mle->vote_map, mle->node_map,
1267                               sizeof(mle->node_map));
1268                        /* put myself into the maybe map */
1269                        if (mle->type != DLM_MLE_BLOCK)
1270                                set_bit(dlm->node_num, mle->maybe_map);
1271                }
1272                ret = -EAGAIN;
1273                node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1274        }
1275        return ret;
1276}
1277
1278
1279/*
1280 * DLM_MASTER_REQUEST_MSG
1281 *
1282 * returns: 0 on success,
1283 *          -errno on a network error
1284 *
1285 * on error, the caller should assume the target node is "dead"
1286 *
1287 */
1288
1289static int dlm_do_master_request(struct dlm_lock_resource *res,
1290                                 struct dlm_master_list_entry *mle, int to)
1291{
1292        struct dlm_ctxt *dlm = mle->dlm;
1293        struct dlm_master_request request;
1294        int ret, response=0, resend;
1295
1296        memset(&request, 0, sizeof(request));
1297        request.node_idx = dlm->node_num;
1298
1299        BUG_ON(mle->type == DLM_MLE_MIGRATION);
1300
1301        request.namelen = (u8)mle->mnamelen;
1302        memcpy(request.name, mle->mname, request.namelen);
1303
1304again:
1305        ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
1306                                 sizeof(request), to, &response);
1307        if (ret < 0)  {
1308                if (ret == -ESRCH) {
1309                        /* should never happen */
1310                        mlog(ML_ERROR, "TCP stack not ready!\n");
1311                        BUG();
1312                } else if (ret == -EINVAL) {
1313                        mlog(ML_ERROR, "bad args passed to o2net!\n");
1314                        BUG();
1315                } else if (ret == -ENOMEM) {
1316                        mlog(ML_ERROR, "out of memory while trying to send "
1317                             "network message!  retrying\n");
1318                        /* this is totally crude */
1319                        msleep(50);
1320                        goto again;
1321                } else if (!dlm_is_host_down(ret)) {
1322                        /* not a network error. bad. */
1323                        mlog_errno(ret);
1324                        mlog(ML_ERROR, "unhandled error!");
1325                        BUG();
1326                }
1327                /* all other errors should be network errors,
1328                 * and likely indicate node death */
1329                mlog(ML_ERROR, "link to %d went down!\n", to);
1330                goto out;
1331        }
1332
1333        ret = 0;
1334        resend = 0;
1335        spin_lock(&mle->spinlock);
1336        switch (response) {
1337                case DLM_MASTER_RESP_YES:
1338                        set_bit(to, mle->response_map);
1339                        mlog(0, "node %u is the master, response=YES\n", to);
1340                        mlog(0, "%s:%.*s: master node %u now knows I have a "
1341                             "reference\n", dlm->name, res->lockname.len,
1342                             res->lockname.name, to);
1343                        mle->master = to;
1344                        break;
1345                case DLM_MASTER_RESP_NO:
1346                        mlog(0, "node %u not master, response=NO\n", to);
1347                        set_bit(to, mle->response_map);
1348                        break;
1349                case DLM_MASTER_RESP_MAYBE:
1350                        mlog(0, "node %u not master, response=MAYBE\n", to);
1351                        set_bit(to, mle->response_map);
1352                        set_bit(to, mle->maybe_map);
1353                        break;
1354                case DLM_MASTER_RESP_ERROR:
1355                        mlog(0, "node %u hit an error, resending\n", to);
1356                        resend = 1;
1357                        response = 0;
1358                        break;
1359                default:
1360                        mlog(ML_ERROR, "bad response! %u\n", response);
1361                        BUG();
1362        }
1363        spin_unlock(&mle->spinlock);
1364        if (resend) {
1365                /* this is also totally crude */
1366                msleep(50);
1367                goto again;
1368        }
1369
1370out:
1371        return ret;
1372}
1373
1374/*
1375 * locks that can be taken here:
1376 * dlm->spinlock
1377 * res->spinlock
1378 * mle->spinlock
1379 * dlm->master_list
1380 *
1381 * if possible, TRIM THIS DOWN!!!
1382 */
1383int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
1384                               void **ret_data)
1385{
1386        u8 response = DLM_MASTER_RESP_MAYBE;
1387        struct dlm_ctxt *dlm = data;
1388        struct dlm_lock_resource *res = NULL;
1389        struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
1390        struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
1391        char *name;
1392        unsigned int namelen, hash;
1393        int found, ret;
1394        int set_maybe;
1395        int dispatch_assert = 0;
1396
1397        if (!dlm_grab(dlm))
1398                return DLM_MASTER_RESP_NO;
1399
1400        if (!dlm_domain_fully_joined(dlm)) {
1401                response = DLM_MASTER_RESP_NO;
1402                goto send_response;
1403        }
1404
1405        name = request->name;
1406        namelen = request->namelen;
1407        hash = dlm_lockid_hash(name, namelen);
1408
1409        if (namelen > DLM_LOCKID_NAME_MAX) {
1410                response = DLM_IVBUFLEN;
1411                goto send_response;
1412        }
1413
1414way_up_top:
1415        spin_lock(&dlm->spinlock);
1416        res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1417        if (res) {
1418                spin_unlock(&dlm->spinlock);
1419
1420                /* take care of the easy cases up front */
1421                spin_lock(&res->spinlock);
1422                if (res->state & (DLM_LOCK_RES_RECOVERING|
1423                                  DLM_LOCK_RES_MIGRATING)) {
1424                        spin_unlock(&res->spinlock);
1425                        mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
1426                             "being recovered/migrated\n");
1427                        response = DLM_MASTER_RESP_ERROR;
1428                        if (mle)
1429                                kmem_cache_free(dlm_mle_cache, mle);
1430                        goto send_response;
1431                }
1432
1433                if (res->owner == dlm->node_num) {
1434                        dlm_lockres_set_refmap_bit(dlm, res, request->node_idx);
1435                        spin_unlock(&res->spinlock);
1436                        response = DLM_MASTER_RESP_YES;
1437                        if (mle)
1438                                kmem_cache_free(dlm_mle_cache, mle);
1439
1440                        /* this node is the owner.
1441                         * there is some extra work that needs to
1442                         * happen now.  the requesting node has
1443                         * caused all nodes up to this one to
1444                         * create mles.  this node now needs to
1445                         * go back and clean those up. */
1446                        dispatch_assert = 1;
1447                        goto send_response;
1448                } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1449                        spin_unlock(&res->spinlock);
1450                        // mlog(0, "node %u is the master\n", res->owner);
1451                        response = DLM_MASTER_RESP_NO;
1452                        if (mle)
1453                                kmem_cache_free(dlm_mle_cache, mle);
1454                        goto send_response;
1455                }
1456
1457                /* ok, there is no owner.  either this node is
1458                 * being blocked, or it is actively trying to
1459                 * master this lock. */
1460                if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1461                        mlog(ML_ERROR, "lock with no owner should be "
1462                             "in-progress!\n");
1463                        BUG();
1464                }
1465
1466                // mlog(0, "lockres is in progress...\n");
1467                spin_lock(&dlm->master_lock);
1468                found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1469                if (!found) {
1470                        mlog(ML_ERROR, "no mle found for this lock!\n");
1471                        BUG();
1472                }
1473                set_maybe = 1;
1474                spin_lock(&tmpmle->spinlock);
1475                if (tmpmle->type == DLM_MLE_BLOCK) {
1476                        // mlog(0, "this node is waiting for "
1477                        // "lockres to be mastered\n");
1478                        response = DLM_MASTER_RESP_NO;
1479                } else if (tmpmle->type == DLM_MLE_MIGRATION) {
1480                        mlog(0, "node %u is master, but trying to migrate to "
1481                             "node %u.\n", tmpmle->master, tmpmle->new_master);
1482                        if (tmpmle->master == dlm->node_num) {
1483                                mlog(ML_ERROR, "no owner on lockres, but this "
1484                                     "node is trying to migrate it to %u?!\n",
1485                                     tmpmle->new_master);
1486                                BUG();
1487                        } else {
1488                                /* the real master can respond on its own */
1489                                response = DLM_MASTER_RESP_NO;
1490                        }
1491                } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1492                        set_maybe = 0;
1493                        if (tmpmle->master == dlm->node_num) {
1494                                response = DLM_MASTER_RESP_YES;
1495                                /* this node will be the owner.
1496                                 * go back and clean the mles on any
1497                                 * other nodes */
1498                                dispatch_assert = 1;
1499                                dlm_lockres_set_refmap_bit(dlm, res,
1500                                                           request->node_idx);
1501                        } else
1502                                response = DLM_MASTER_RESP_NO;
1503                } else {
1504                        // mlog(0, "this node is attempting to "
1505                        // "master lockres\n");
1506                        response = DLM_MASTER_RESP_MAYBE;
1507                }
1508                if (set_maybe)
1509                        set_bit(request->node_idx, tmpmle->maybe_map);
1510                spin_unlock(&tmpmle->spinlock);
1511
1512                spin_unlock(&dlm->master_lock);
1513                spin_unlock(&res->spinlock);
1514
1515                /* keep the mle attached to heartbeat events */
1516                dlm_put_mle(tmpmle);
1517                if (mle)
1518                        kmem_cache_free(dlm_mle_cache, mle);
1519                goto send_response;
1520        }
1521
1522        /*
1523         * lockres doesn't exist on this node
1524         * if there is an MLE_BLOCK, return NO
1525         * if there is an MLE_MASTER, return MAYBE
1526         * otherwise, add an MLE_BLOCK, return NO
1527         */
1528        spin_lock(&dlm->master_lock);
1529        found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1530        if (!found) {
1531                /* this lockid has never been seen on this node yet */
1532                // mlog(0, "no mle found\n");
1533                if (!mle) {
1534                        spin_unlock(&dlm->master_lock);
1535                        spin_unlock(&dlm->spinlock);
1536
1537                        mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
1538                        if (!mle) {
1539                                response = DLM_MASTER_RESP_ERROR;
1540                                mlog_errno(-ENOMEM);
1541                                goto send_response;
1542                        }
1543                        goto way_up_top;
1544                }
1545
1546                // mlog(0, "this is second time thru, already allocated, "
1547                // "add the block.\n");
1548                dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
1549                set_bit(request->node_idx, mle->maybe_map);
1550                __dlm_insert_mle(dlm, mle);
1551                response = DLM_MASTER_RESP_NO;
1552        } else {
1553                // mlog(0, "mle was found\n");
1554                set_maybe = 1;
1555                spin_lock(&tmpmle->spinlock);
1556                if (tmpmle->master == dlm->node_num) {
1557                        mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
1558                        BUG();
1559                }
1560                if (tmpmle->type == DLM_MLE_BLOCK)
1561                        response = DLM_MASTER_RESP_NO;
1562                else if (tmpmle->type == DLM_MLE_MIGRATION) {
1563                        mlog(0, "migration mle was found (%u->%u)\n",
1564                             tmpmle->master, tmpmle->new_master);
1565                        /* real master can respond on its own */
1566                        response = DLM_MASTER_RESP_NO;
1567                } else
1568                        response = DLM_MASTER_RESP_MAYBE;
1569                if (set_maybe)
1570                        set_bit(request->node_idx, tmpmle->maybe_map);
1571                spin_unlock(&tmpmle->spinlock);
1572        }
1573        spin_unlock(&dlm->master_lock);
1574        spin_unlock(&dlm->spinlock);
1575
1576        if (found) {
1577                /* keep the mle attached to heartbeat events */
1578                dlm_put_mle(tmpmle);
1579        }
1580send_response:
1581        /*
1582         * __dlm_lookup_lockres() grabbed a reference to this lockres.
1583         * The reference is released by dlm_assert_master_worker() under
1584         * the call to dlm_dispatch_assert_master().  If
1585         * dlm_assert_master_worker() isn't called, we drop it here.
1586         */
1587        if (dispatch_assert) {
1588                if (response != DLM_MASTER_RESP_YES)
1589                        mlog(ML_ERROR, "invalid response %d\n", response);
1590                if (!res) {
1591                        mlog(ML_ERROR, "bad lockres while trying to assert!\n");
1592                        BUG();
1593                }
1594                mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
1595                             dlm->node_num, res->lockname.len, res->lockname.name);
1596                ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
1597                                                 DLM_ASSERT_MASTER_MLE_CLEANUP);
1598                if (ret < 0) {
1599                        mlog(ML_ERROR, "failed to dispatch assert master work\n");
1600                        response = DLM_MASTER_RESP_ERROR;
1601                        dlm_lockres_put(res);
1602                }
1603        } else {
1604                if (res)
1605                        dlm_lockres_put(res);
1606        }
1607
1608        dlm_put(dlm);
1609        return response;
1610}
1611
1612/*
1613 * DLM_ASSERT_MASTER_MSG
1614 */
1615
1616
1617/*
1618 * NOTE: this can be used for debugging
1619 * can periodically run all locks owned by this node
1620 * and re-assert across the cluster...
1621 */
1622static int dlm_do_assert_master(struct dlm_ctxt *dlm,
1623                                struct dlm_lock_resource *res,
1624                                void *nodemap, u32 flags)
1625{
1626        struct dlm_assert_master assert;
1627        int to, tmpret;
1628        struct dlm_node_iter iter;
1629        int ret = 0;
1630        int reassert;
1631        const char *lockname = res->lockname.name;
1632        unsigned int namelen = res->lockname.len;
1633
1634        BUG_ON(namelen > O2NM_MAX_NAME_LEN);
1635
1636        spin_lock(&res->spinlock);
1637        res->state |= DLM_LOCK_RES_SETREF_INPROG;
1638        spin_unlock(&res->spinlock);
1639
1640again:
1641        reassert = 0;
1642
1643        /* note that if this nodemap is empty, it returns 0 */
1644        dlm_node_iter_init(nodemap, &iter);
1645        while ((to = dlm_node_iter_next(&iter)) >= 0) {
1646                int r = 0;
1647                struct dlm_master_list_entry *mle = NULL;
1648
1649                mlog(0, "sending assert master to %d (%.*s)\n", to,
1650                     namelen, lockname);
1651                memset(&assert, 0, sizeof(assert));
1652                assert.node_idx = dlm->node_num;
1653                assert.namelen = namelen;
1654                memcpy(assert.name, lockname, namelen);
1655                assert.flags = cpu_to_be32(flags);
1656
1657                tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
1658                                            &assert, sizeof(assert), to, &r);
1659                if (tmpret < 0) {
1660                        mlog(ML_ERROR, "Error %d when sending message %u (key "
1661                             "0x%x) to node %u\n", tmpret,
1662                             DLM_ASSERT_MASTER_MSG, dlm->key, to);
1663                        if (!dlm_is_host_down(tmpret)) {
1664                                mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
1665                                BUG();
1666                        }
1667                        /* a node died.  finish out the rest of the nodes. */
1668                        mlog(0, "link to %d went down!\n", to);
1669                        /* any nonzero status return will do */
1670                        ret = tmpret;
1671                        r = 0;
1672                } else if (r < 0) {
1673                        /* ok, something horribly messed.  kill thyself. */
1674                        mlog(ML_ERROR,"during assert master of %.*s to %u, "
1675                             "got %d.\n", namelen, lockname, to, r);
1676                        spin_lock(&dlm->spinlock);
1677                        spin_lock(&dlm->master_lock);
1678                        if (dlm_find_mle(dlm, &mle, (char *)lockname,
1679                                         namelen)) {
1680                                dlm_print_one_mle(mle);
1681                                __dlm_put_mle(mle);
1682                        }
1683                        spin_unlock(&dlm->master_lock);
1684                        spin_unlock(&dlm->spinlock);
1685                        BUG();
1686                }
1687
1688                if (r & DLM_ASSERT_RESPONSE_REASSERT &&
1689                    !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
1690                                mlog(ML_ERROR, "%.*s: very strange, "
1691                                     "master MLE but no lockres on %u\n",
1692                                     namelen, lockname, to);
1693                }
1694
1695                if (r & DLM_ASSERT_RESPONSE_REASSERT) {
1696                        mlog(0, "%.*s: node %u create mles on other "
1697                             "nodes and requests a re-assert\n",
1698                             namelen, lockname, to);
1699                        reassert = 1;
1700                }
1701                if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
1702                        mlog(0, "%.*s: node %u has a reference to this "
1703                             "lockres, set the bit in the refmap\n",
1704                             namelen, lockname, to);
1705                        spin_lock(&res->spinlock);
1706                        dlm_lockres_set_refmap_bit(dlm, res, to);
1707                        spin_unlock(&res->spinlock);
1708                }
1709        }
1710
1711        if (reassert)
1712                goto again;
1713
1714        spin_lock(&res->spinlock);
1715        res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
1716        spin_unlock(&res->spinlock);
1717        wake_up(&res->wq);
1718
1719        return ret;
1720}
1721
1722/*
1723 * locks that can be taken here:
1724 * dlm->spinlock
1725 * res->spinlock
1726 * mle->spinlock
1727 * dlm->master_list
1728 *
1729 * if possible, TRIM THIS DOWN!!!
1730 */
1731int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
1732                              void **ret_data)
1733{
1734        struct dlm_ctxt *dlm = data;
1735        struct dlm_master_list_entry *mle = NULL;
1736        struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
1737        struct dlm_lock_resource *res = NULL;
1738        char *name;
1739        unsigned int namelen, hash;
1740        u32 flags;
1741        int master_request = 0, have_lockres_ref = 0;
1742        int ret = 0;
1743
1744        if (!dlm_grab(dlm))
1745                return 0;
1746
1747        name = assert->name;
1748        namelen = assert->namelen;
1749        hash = dlm_lockid_hash(name, namelen);
1750        flags = be32_to_cpu(assert->flags);
1751
1752        if (namelen > DLM_LOCKID_NAME_MAX) {
1753                mlog(ML_ERROR, "Invalid name length!");
1754                goto done;
1755        }
1756
1757        spin_lock(&dlm->spinlock);
1758
1759        if (flags)
1760                mlog(0, "assert_master with flags: %u\n", flags);
1761
1762        /* find the MLE */
1763        spin_lock(&dlm->master_lock);
1764        if (!dlm_find_mle(dlm, &mle, name, namelen)) {
1765                /* not an error, could be master just re-asserting */
1766                mlog(0, "just got an assert_master from %u, but no "
1767                     "MLE for it! (%.*s)\n", assert->node_idx,
1768                     namelen, name);
1769        } else {
1770                int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
1771                if (bit >= O2NM_MAX_NODES) {
1772                        /* not necessarily an error, though less likely.
1773                         * could be master just re-asserting. */
1774                        mlog(0, "no bits set in the maybe_map, but %u "
1775                             "is asserting! (%.*s)\n", assert->node_idx,
1776                             namelen, name);
1777                } else if (bit != assert->node_idx) {
1778                        if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1779                                mlog(0, "master %u was found, %u should "
1780                                     "back off\n", assert->node_idx, bit);
1781                        } else {
1782                                /* with the fix for bug 569, a higher node
1783                                 * number winning the mastery will respond
1784                                 * YES to mastery requests, but this node
1785                                 * had no way of knowing.  let it pass. */
1786                                mlog(0, "%u is the lowest node, "
1787                                     "%u is asserting. (%.*s)  %u must "
1788                                     "have begun after %u won.\n", bit,
1789                                     assert->node_idx, namelen, name, bit,
1790                                     assert->node_idx);
1791                        }
1792                }
1793                if (mle->type == DLM_MLE_MIGRATION) {
1794                        if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1795                                mlog(0, "%s:%.*s: got cleanup assert"
1796                                     " from %u for migration\n",
1797                                     dlm->name, namelen, name,
1798                                     assert->node_idx);
1799                        } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
1800                                mlog(0, "%s:%.*s: got unrelated assert"
1801                                     " from %u for migration, ignoring\n",
1802                                     dlm->name, namelen, name,
1803                                     assert->node_idx);
1804                                __dlm_put_mle(mle);
1805                                spin_unlock(&dlm->master_lock);
1806                                spin_unlock(&dlm->spinlock);
1807                                goto done;
1808                        }
1809                }
1810        }
1811        spin_unlock(&dlm->master_lock);
1812
1813        /* ok everything checks out with the MLE
1814         * now check to see if there is a lockres */
1815        res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1816        if (res) {
1817                spin_lock(&res->spinlock);
1818                if (res->state & DLM_LOCK_RES_RECOVERING)  {
1819                        mlog(ML_ERROR, "%u asserting but %.*s is "
1820                             "RECOVERING!\n", assert->node_idx, namelen, name);
1821                        goto kill;
1822                }
1823                if (!mle) {
1824                        if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
1825                            res->owner != assert->node_idx) {
1826                                mlog(ML_ERROR, "DIE! Mastery assert from %u, "
1827                                     "but current owner is %u! (%.*s)\n",
1828                                     assert->node_idx, res->owner, namelen,
1829                                     name);
1830                                __dlm_print_one_lock_resource(res);
1831                                BUG();
1832                        }
1833                } else if (mle->type != DLM_MLE_MIGRATION) {
1834                        if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1835                                /* owner is just re-asserting */
1836                                if (res->owner == assert->node_idx) {
1837                                        mlog(0, "owner %u re-asserting on "
1838                                             "lock %.*s\n", assert->node_idx,
1839                                             namelen, name);
1840                                        goto ok;
1841                                }
1842                                mlog(ML_ERROR, "got assert_master from "
1843                                     "node %u, but %u is the owner! "
1844                                     "(%.*s)\n", assert->node_idx,
1845                                     res->owner, namelen, name);
1846                                goto kill;
1847                        }
1848                        if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1849                                mlog(ML_ERROR, "got assert from %u, but lock "
1850                                     "with no owner should be "
1851                                     "in-progress! (%.*s)\n",
1852                                     assert->node_idx,
1853                                     namelen, name);
1854                                goto kill;
1855                        }
1856                } else /* mle->type == DLM_MLE_MIGRATION */ {
1857                        /* should only be getting an assert from new master */
1858                        if (assert->node_idx != mle->new_master) {
1859                                mlog(ML_ERROR, "got assert from %u, but "
1860                                     "new master is %u, and old master "
1861                                     "was %u (%.*s)\n",
1862                                     assert->node_idx, mle->new_master,
1863                                     mle->master, namelen, name);
1864                                goto kill;
1865                        }
1866
1867                }
1868ok:
1869                spin_unlock(&res->spinlock);
1870        }
1871
1872        // mlog(0, "woo!  got an assert_master from node %u!\n",
1873        //           assert->node_idx);
1874        if (mle) {
1875                int extra_ref = 0;
1876                int nn = -1;
1877                int rr, err = 0;
1878
1879                spin_lock(&mle->spinlock);
1880                if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
1881                        extra_ref = 1;
1882                else {
1883                        /* MASTER mle: if any bits set in the response map
1884                         * then the calling node needs to re-assert to clear
1885                         * up nodes that this node contacted */
1886                        while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
1887                                                    nn+1)) < O2NM_MAX_NODES) {
1888                                if (nn != dlm->node_num && nn != assert->node_idx) {
1889                                        master_request = 1;
1890                                        break;
1891                                }
1892                        }
1893                }
1894                mle->master = assert->node_idx;
1895                atomic_set(&mle->woken, 1);
1896                wake_up(&mle->wq);
1897                spin_unlock(&mle->spinlock);
1898
1899                if (res) {
1900                        int wake = 0;
1901                        spin_lock(&res->spinlock);
1902                        if (mle->type == DLM_MLE_MIGRATION) {
1903                                mlog(0, "finishing off migration of lockres %.*s, "
1904                                        "from %u to %u\n",
1905                                        res->lockname.len, res->lockname.name,
1906                                        dlm->node_num, mle->new_master);
1907                                res->state &= ~DLM_LOCK_RES_MIGRATING;
1908                                wake = 1;
1909                                dlm_change_lockres_owner(dlm, res, mle->new_master);
1910                                BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
1911                        } else {
1912                                dlm_change_lockres_owner(dlm, res, mle->master);
1913                        }
1914                        spin_unlock(&res->spinlock);
1915                        have_lockres_ref = 1;
1916                        if (wake)
1917                                wake_up(&res->wq);
1918                }
1919
1920                /* master is known, detach if not already detached.
1921                 * ensures that only one assert_master call will happen
1922                 * on this mle. */
1923                spin_lock(&dlm->master_lock);
1924
1925                rr = atomic_read(&mle->mle_refs.refcount);
1926                if (mle->inuse > 0) {
1927                        if (extra_ref && rr < 3)
1928                                err = 1;
1929                        else if (!extra_ref && rr < 2)
1930                                err = 1;
1931                } else {
1932                        if (extra_ref && rr < 2)
1933                                err = 1;
1934                        else if (!extra_ref && rr < 1)
1935                                err = 1;
1936                }
1937                if (err) {
1938                        mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
1939                             "that will mess up this node, refs=%d, extra=%d, "
1940                             "inuse=%d\n", dlm->name, namelen, name,
1941                             assert->node_idx, rr, extra_ref, mle->inuse);
1942                        dlm_print_one_mle(mle);
1943                }
1944                __dlm_unlink_mle(dlm, mle);
1945                __dlm_mle_detach_hb_events(dlm, mle);
1946                __dlm_put_mle(mle);
1947                if (extra_ref) {
1948                        /* the assert master message now balances the extra
1949                         * ref given by the master / migration request message.
1950                         * if this is the last put, it will be removed
1951                         * from the list. */
1952                        __dlm_put_mle(mle);
1953                }
1954                spin_unlock(&dlm->master_lock);
1955        } else if (res) {
1956                if (res->owner != assert->node_idx) {
1957                        mlog(0, "assert_master from %u, but current "
1958                             "owner is %u (%.*s), no mle\n", assert->node_idx,
1959                             res->owner, namelen, name);
1960                }
1961        }
1962        spin_unlock(&dlm->spinlock);
1963
1964done:
1965        ret = 0;
1966        if (res) {
1967                spin_lock(&res->spinlock);
1968                res->state |= DLM_LOCK_RES_SETREF_INPROG;
1969                spin_unlock(&res->spinlock);
1970                *ret_data = (void *)res;
1971        }
1972        dlm_put(dlm);
1973        if (master_request) {
1974                mlog(0, "need to tell master to reassert\n");
1975                /* positive. negative would shoot down the node. */
1976                ret |= DLM_ASSERT_RESPONSE_REASSERT;
1977                if (!have_lockres_ref) {
1978                        mlog(ML_ERROR, "strange, got assert from %u, MASTER "
1979                             "mle present here for %s:%.*s, but no lockres!\n",
1980                             assert->node_idx, dlm->name, namelen, name);
1981                }
1982        }
1983        if (have_lockres_ref) {
1984                /* let the master know we have a reference to the lockres */
1985                ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
1986                mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
1987                     dlm->name, namelen, name, assert->node_idx);
1988        }
1989        return ret;
1990
1991kill:
1992        /* kill the caller! */
1993        mlog(ML_ERROR, "Bad message received from another node.  Dumping state "
1994             "and killing the other node now!  This node is OK and can continue.\n");
1995        __dlm_print_one_lock_resource(res);
1996        spin_unlock(&res->spinlock);
1997        spin_unlock(&dlm->spinlock);
1998        *ret_data = (void *)res;
1999        dlm_put(dlm);
2000        return -EINVAL;
2001}
2002
2003void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
2004{
2005        struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;
2006
2007        if (ret_data) {
2008                spin_lock(&res->spinlock);
2009                res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
2010                spin_unlock(&res->spinlock);
2011                wake_up(&res->wq);
2012                dlm_lockres_put(res);
2013        }
2014        return;
2015}
2016
2017int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
2018                               struct dlm_lock_resource *res,
2019                               int ignore_higher, u8 request_from, u32 flags)
2020{
2021        struct dlm_work_item *item;
2022        item = kzalloc(sizeof(*item), GFP_ATOMIC);
2023        if (!item)
2024                return -ENOMEM;
2025
2026
2027        /* queue up work for dlm_assert_master_worker */
2028        dlm_grab(dlm);  /* get an extra ref for the work item */
2029        dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
2030        item->u.am.lockres = res; /* already have a ref */
2031        /* can optionally ignore node numbers higher than this node */
2032        item->u.am.ignore_higher = ignore_higher;
2033        item->u.am.request_from = request_from;
2034        item->u.am.flags = flags;
2035
2036        if (ignore_higher)
2037                mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
2038                     res->lockname.name);
2039
2040        spin_lock(&dlm->work_lock);
2041        list_add_tail(&item->list, &dlm->work_list);
2042        spin_unlock(&dlm->work_lock);
2043
2044        queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2045        return 0;
2046}
2047
2048static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
2049{
2050        struct dlm_ctxt *dlm = data;
2051        int ret = 0;
2052        struct dlm_lock_resource *res;
2053        unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
2054        int ignore_higher;
2055        int bit;
2056        u8 request_from;
2057        u32 flags;
2058
2059        dlm = item->dlm;
2060        res = item->u.am.lockres;
2061        ignore_higher = item->u.am.ignore_higher;
2062        request_from = item->u.am.request_from;
2063        flags = item->u.am.flags;
2064
2065        spin_lock(&dlm->spinlock);
2066        memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
2067        spin_unlock(&dlm->spinlock);
2068
2069        clear_bit(dlm->node_num, nodemap);
2070        if (ignore_higher) {
2071                /* if is this just to clear up mles for nodes below
2072                 * this node, do not send the message to the original
2073                 * caller or any node number higher than this */
2074                clear_bit(request_from, nodemap);
2075                bit = dlm->node_num;
2076                while (1) {
2077                        bit = find_next_bit(nodemap, O2NM_MAX_NODES,
2078                                            bit+1);
2079                        if (bit >= O2NM_MAX_NODES)
2080                                break;
2081                        clear_bit(bit, nodemap);
2082                }
2083        }
2084
2085        /*
2086         * If we're migrating this lock to someone else, we are no
2087         * longer allowed to assert out own mastery.  OTOH, we need to
2088         * prevent migration from starting while we're still asserting
2089         * our dominance.  The reserved ast delays migration.
2090         */
2091        spin_lock(&res->spinlock);
2092        if (res->state & DLM_LOCK_RES_MIGRATING) {
2093                mlog(0, "Someone asked us to assert mastery, but we're "
2094                     "in the middle of migration.  Skipping assert, "
2095                     "the new master will handle that.\n");
2096                spin_unlock(&res->spinlock);
2097                goto put;
2098        } else
2099                __dlm_lockres_reserve_ast(res);
2100        spin_unlock(&res->spinlock);
2101
2102        /* this call now finishes out the nodemap
2103         * even if one or more nodes die */
2104        mlog(0, "worker about to master %.*s here, this=%u\n",
2105                     res->lockname.len, res->lockname.name, dlm->node_num);
2106        ret = dlm_do_assert_master(dlm, res, nodemap, flags);
2107        if (ret < 0) {
2108                /* no need to restart, we are done */
2109                if (!dlm_is_host_down(ret))
2110                        mlog_errno(ret);
2111        }
2112
2113        /* Ok, we've asserted ourselves.  Let's let migration start. */
2114        dlm_lockres_release_ast(dlm, res);
2115
2116put:
2117        dlm_lockres_put(res);
2118
2119        mlog(0, "finished with dlm_assert_master_worker\n");
2120}
2121
2122/* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
2123 * We cannot wait for node recovery to complete to begin mastering this
2124 * lockres because this lockres is used to kick off recovery! ;-)
2125 * So, do a pre-check on all living nodes to see if any of those nodes
2126 * think that $RECOVERY is currently mastered by a dead node.  If so,
2127 * we wait a short time to allow that node to get notified by its own
2128 * heartbeat stack, then check again.  All $RECOVERY lock resources
2129 * mastered by dead nodes are purged when the hearbeat callback is
2130 * fired, so we can know for sure that it is safe to continue once
2131 * the node returns a live node or no node.  */
2132static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2133                                       struct dlm_lock_resource *res)
2134{
2135        struct dlm_node_iter iter;
2136        int nodenum;
2137        int ret = 0;
2138        u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;
2139
2140        spin_lock(&dlm->spinlock);
2141        dlm_node_iter_init(dlm->domain_map, &iter);
2142        spin_unlock(&dlm->spinlock);
2143
2144        while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2145                /* do not send to self */
2146                if (nodenum == dlm->node_num)
2147                        continue;
2148                ret = dlm_do_master_requery(dlm, res, nodenum, &master);
2149                if (ret < 0) {
2150                        mlog_errno(ret);
2151                        if (!dlm_is_host_down(ret))
2152                                BUG();
2153                        /* host is down, so answer for that node would be
2154                         * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
2155                        ret = 0;
2156                }
2157
2158                if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
2159                        /* check to see if this master is in the recovery map */
2160                        spin_lock(&dlm->spinlock);
2161                        if (test_bit(master, dlm->recovery_map)) {
2162                                mlog(ML_NOTICE, "%s: node %u has not seen "
2163                                     "node %u go down yet, and thinks the "
2164                                     "dead node is mastering the recovery "
2165                                     "lock.  must wait.\n", dlm->name,
2166                                     nodenum, master);
2167                                ret = -EAGAIN;
2168                        }
2169                        spin_unlock(&dlm->spinlock);
2170                        mlog(0, "%s: reco lock master is %u\n", dlm->name,
2171                             master);
2172                        break;
2173                }
2174        }
2175        return ret;
2176}
2177
2178/*
2179 * DLM_DEREF_LOCKRES_MSG
2180 */
2181
2182int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2183{
2184        struct dlm_deref_lockres deref;
2185        int ret = 0, r;
2186        const char *lockname;
2187        unsigned int namelen;
2188
2189        lockname = res->lockname.name;
2190        namelen = res->lockname.len;
2191        BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2192
2193        memset(&deref, 0, sizeof(deref));
2194        deref.node_idx = dlm->node_num;
2195        deref.namelen = namelen;
2196        memcpy(deref.name, lockname, namelen);
2197
2198        ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
2199                                 &deref, sizeof(deref), res->owner, &r);
2200        if (ret < 0)
2201                mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
2202                     dlm->name, namelen, lockname, ret, res->owner);
2203        else if (r < 0) {
2204                /* BAD.  other node says I did not have a ref. */
2205                mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
2206                     dlm->name, namelen, lockname, res->owner, r);
2207                dlm_print_one_lock_resource(res);
2208                BUG();
2209        }
2210        return ret;
2211}
2212
2213int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
2214                              void **ret_data)
2215{
2216        struct dlm_ctxt *dlm = data;
2217        struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
2218        struct dlm_lock_resource *res = NULL;
2219        char *name;
2220        unsigned int namelen;
2221        int ret = -EINVAL;
2222        u8 node;
2223        unsigned int hash;
2224        struct dlm_work_item *item;
2225        int cleared = 0;
2226        int dispatch = 0;
2227
2228        if (!dlm_grab(dlm))
2229                return 0;
2230
2231        name = deref->name;
2232        namelen = deref->namelen;
2233        node = deref->node_idx;
2234
2235        if (namelen > DLM_LOCKID_NAME_MAX) {
2236                mlog(ML_ERROR, "Invalid name length!");
2237                goto done;
2238        }
2239        if (deref->node_idx >= O2NM_MAX_NODES) {
2240                mlog(ML_ERROR, "Invalid node number: %u\n", node);
2241                goto done;
2242        }
2243
2244        hash = dlm_lockid_hash(name, namelen);
2245
2246        spin_lock(&dlm->spinlock);
2247        res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2248        if (!res) {
2249                spin_unlock(&dlm->spinlock);
2250                mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
2251                     dlm->name, namelen, name);
2252                goto done;
2253        }
2254        spin_unlock(&dlm->spinlock);
2255
2256        spin_lock(&res->spinlock);
2257        if (res->state & DLM_LOCK_RES_SETREF_INPROG)
2258                dispatch = 1;
2259        else {
2260                BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2261                if (test_bit(node, res->refmap)) {
2262                        dlm_lockres_clear_refmap_bit(dlm, res, node);
2263                        cleared = 1;
2264                }
2265        }
2266        spin_unlock(&res->spinlock);
2267
2268        if (!dispatch) {
2269                if (cleared)
2270                        dlm_lockres_calc_usage(dlm, res);
2271                else {
2272                        mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2273                        "but it is already dropped!\n", dlm->name,
2274                        res->lockname.len, res->lockname.name, node);
2275                        dlm_print_one_lock_resource(res);
2276                }
2277                ret = 0;
2278                goto done;
2279        }
2280
2281        item = kzalloc(sizeof(*item), GFP_NOFS);
2282        if (!item) {
2283                ret = -ENOMEM;
2284                mlog_errno(ret);
2285                goto done;
2286        }
2287
2288        dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
2289        item->u.dl.deref_res = res;
2290        item->u.dl.deref_node = node;
2291
2292        spin_lock(&dlm->work_lock);
2293        list_add_tail(&item->list, &dlm->work_list);
2294        spin_unlock(&dlm->work_lock);
2295
2296        queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2297        return 0;
2298
2299done:
2300        if (res)
2301                dlm_lockres_put(res);
2302        dlm_put(dlm);
2303
2304        return ret;
2305}
2306
2307static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
2308{
2309        struct dlm_ctxt *dlm;
2310        struct dlm_lock_resource *res;
2311        u8 node;
2312        u8 cleared = 0;
2313
2314        dlm = item->dlm;
2315        res = item->u.dl.deref_res;
2316        node = item->u.dl.deref_node;
2317
2318        spin_lock(&res->spinlock);
2319        BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2320        if (test_bit(node, res->refmap)) {
2321                __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2322                dlm_lockres_clear_refmap_bit(dlm, res, node);
2323                cleared = 1;
2324        }
2325        spin_unlock(&res->spinlock);
2326
2327        if (cleared) {
2328                mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
2329                     dlm->name, res->lockname.len, res->lockname.name, node);
2330                dlm_lockres_calc_usage(dlm, res);
2331        } else {
2332                mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2333                     "but it is already dropped!\n", dlm->name,
2334                     res->lockname.len, res->lockname.name, node);
2335                dlm_print_one_lock_resource(res);
2336        }
2337
2338        dlm_lockres_put(res);
2339}
2340
2341/*
2342 * A migrateable resource is one that is :
2343 * 1. locally mastered, and,
2344 * 2. zero local locks, and,
2345 * 3. one or more non-local locks, or, one or more references
2346 * Returns 1 if yes, 0 if not.
2347 */
2348static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
2349                                      struct dlm_lock_resource *res)
2350{
2351        enum dlm_lockres_list idx;
2352        int nonlocal = 0, node_ref;
2353        struct list_head *queue;
2354        struct dlm_lock *lock;
2355        u64 cookie;
2356
2357        assert_spin_locked(&res->spinlock);
2358
2359        /* delay migration when the lockres is in MIGRATING state */
2360        if (res->state & DLM_LOCK_RES_MIGRATING)
2361                return 0;
2362
2363        if (res->owner != dlm->node_num)
2364                return 0;
2365
2366        for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
2367                queue = dlm_list_idx_to_ptr(res, idx);
2368                list_for_each_entry(lock, queue, list) {
2369                        if (lock->ml.node != dlm->node_num) {
2370                                nonlocal++;
2371                                continue;
2372                        }
2373                        cookie = be64_to_cpu(lock->ml.cookie);
2374                        mlog(0, "%s: Not migrateable res %.*s, lock %u:%llu on "
2375                             "%s list\n", dlm->name, res->lockname.len,
2376                             res->lockname.name,
2377                             dlm_get_lock_cookie_node(cookie),
2378                             dlm_get_lock_cookie_seq(cookie),
2379                             dlm_list_in_text(idx));
2380                        return 0;
2381                }
2382        }
2383
2384        if (!nonlocal) {
2385                node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
2386                if (node_ref >= O2NM_MAX_NODES)
2387                        return 0;
2388        }
2389
2390        mlog(0, "%s: res %.*s, Migrateable\n", dlm->name, res->lockname.len,
2391             res->lockname.name);
2392
2393        return 1;
2394}
2395
2396/*
2397 * DLM_MIGRATE_LOCKRES
2398 */
2399
2400
2401static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2402                               struct dlm_lock_resource *res, u8 target)
2403{
2404        struct dlm_master_list_entry *mle = NULL;
2405        struct dlm_master_list_entry *oldmle = NULL;
2406        struct dlm_migratable_lockres *mres = NULL;
2407        int ret = 0;
2408        const char *name;
2409        unsigned int namelen;
2410        int mle_added = 0;
2411        int wake = 0;
2412
2413        if (!dlm_grab(dlm))
2414                return -EINVAL;
2415
2416        BUG_ON(target == O2NM_MAX_NODES);
2417
2418        name = res->lockname.name;
2419        namelen = res->lockname.len;
2420
2421        mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name,
2422             target);
2423
2424        /* preallocate up front. if this fails, abort */
2425        ret = -ENOMEM;
2426        mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
2427        if (!mres) {
2428                mlog_errno(ret);
2429                goto leave;
2430        }
2431
2432        mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
2433        if (!mle) {
2434                mlog_errno(ret);
2435                goto leave;
2436        }
2437        ret = 0;
2438
2439        /*
2440         * clear any existing master requests and
2441         * add the migration mle to the list
2442         */
2443        spin_lock(&dlm->spinlock);
2444        spin_lock(&dlm->master_lock);
2445        ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2446                                    namelen, target, dlm->node_num);
2447        spin_unlock(&dlm->master_lock);
2448        spin_unlock(&dlm->spinlock);
2449
2450        if (ret == -EEXIST) {
2451                mlog(0, "another process is already migrating it\n");
2452                goto fail;
2453        }
2454        mle_added = 1;
2455
2456        /*
2457         * set the MIGRATING flag and flush asts
2458         * if we fail after this we need to re-dirty the lockres
2459         */
2460        if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
2461                mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
2462                     "the target went down.\n", res->lockname.len,
2463                     res->lockname.name, target);
2464                spin_lock(&res->spinlock);
2465                res->state &= ~DLM_LOCK_RES_MIGRATING;
2466                wake = 1;
2467                spin_unlock(&res->spinlock);
2468                ret = -EINVAL;
2469        }
2470
2471fail:
2472        if (oldmle) {
2473                /* master is known, detach if not already detached */
2474                dlm_mle_detach_hb_events(dlm, oldmle);
2475                dlm_put_mle(oldmle);
2476        }
2477
2478        if (ret < 0) {
2479                if (mle_added) {
2480                        dlm_mle_detach_hb_events(dlm, mle);
2481                        dlm_put_mle(mle);
2482                } else if (mle) {
2483                        kmem_cache_free(dlm_mle_cache, mle);
2484                        mle = NULL;
2485                }
2486                goto leave;
2487        }
2488
2489        /*
2490         * at this point, we have a migration target, an mle
2491         * in the master list, and the MIGRATING flag set on
2492         * the lockres
2493         */
2494
2495        /* now that remote nodes are spinning on the MIGRATING flag,
2496         * ensure that all assert_master work is flushed. */
2497        flush_workqueue(dlm->dlm_worker);
2498
2499        /* get an extra reference on the mle.
2500         * otherwise the assert_master from the new
2501         * master will destroy this.
2502         * also, make sure that all callers of dlm_get_mle
2503         * take both dlm->spinlock and dlm->master_lock */
2504        spin_lock(&dlm->spinlock);
2505        spin_lock(&dlm->master_lock);
2506        dlm_get_mle_inuse(mle);
2507        spin_unlock(&dlm->master_lock);
2508        spin_unlock(&dlm->spinlock);
2509
2510        /* notify new node and send all lock state */
2511        /* call send_one_lockres with migration flag.
2512         * this serves as notice to the target node that a
2513         * migration is starting. */
2514        ret = dlm_send_one_lockres(dlm, res, mres, target,
2515                                   DLM_MRES_MIGRATION);
2516
2517        if (ret < 0) {
2518                mlog(0, "migration to node %u failed with %d\n",
2519                     target, ret);
2520                /* migration failed, detach and clean up mle */
2521                dlm_mle_detach_hb_events(dlm, mle);
2522                dlm_put_mle(mle);
2523                dlm_put_mle_inuse(mle);
2524                spin_lock(&res->spinlock);
2525                res->state &= ~DLM_LOCK_RES_MIGRATING;
2526                wake = 1;
2527                spin_unlock(&res->spinlock);
2528                if (dlm_is_host_down(ret))
2529                        dlm_wait_for_node_death(dlm, target,
2530                                                DLM_NODE_DEATH_WAIT_MAX);
2531                goto leave;
2532        }
2533
2534        /* at this point, the target sends a message to all nodes,
2535         * (using dlm_do_migrate_request).  this node is skipped since
2536         * we had to put an mle in the list to begin the process.  this
2537         * node now waits for target to do an assert master.  this node
2538         * will be the last one notified, ensuring that the migration
2539         * is complete everywhere.  if the target dies while this is
2540         * going on, some nodes could potentially see the target as the
2541         * master, so it is important that my recovery finds the migration
2542         * mle and sets the master to UNKNOWN. */
2543
2544
2545        /* wait for new node to assert master */
2546        while (1) {
2547                ret = wait_event_interruptible_timeout(mle->wq,
2548                                        (atomic_read(&mle->woken) == 1),
2549                                        msecs_to_jiffies(5000));
2550
2551                if (ret >= 0) {
2552                        if (atomic_read(&mle->woken) == 1 ||
2553                            res->owner == target)
2554                                break;
2555
2556                        mlog(0, "%s:%.*s: timed out during migration\n",
2557                             dlm->name, res->lockname.len, res->lockname.name);
2558                        /* avoid hang during shutdown when migrating lockres
2559                         * to a node which also goes down */
2560                        if (dlm_is_node_dead(dlm, target)) {
2561                                mlog(0, "%s:%.*s: expected migration "
2562                                     "target %u is no longer up, restarting\n",
2563                                     dlm->name, res->lockname.len,
2564                                     res->lockname.name, target);
2565                                ret = -EINVAL;
2566                                /* migration failed, detach and clean up mle */
2567                                dlm_mle_detach_hb_events(dlm, mle);
2568                                dlm_put_mle(mle);
2569                                dlm_put_mle_inuse(mle);
2570                                spin_lock(&res->spinlock);
2571                                res->state &= ~DLM_LOCK_RES_MIGRATING;
2572                                wake = 1;
2573                                spin_unlock(&res->spinlock);
2574                                goto leave;
2575                        }
2576                } else
2577                        mlog(0, "%s:%.*s: caught signal during migration\n",
2578                             dlm->name, res->lockname.len, res->lockname.name);
2579        }
2580
2581        /* all done, set the owner, clear the flag */
2582        spin_lock(&res->spinlock);
2583        dlm_set_lockres_owner(dlm, res, target);
2584        res->state &= ~DLM_LOCK_RES_MIGRATING;
2585        dlm_remove_nonlocal_locks(dlm, res);
2586        spin_unlock(&res->spinlock);
2587        wake_up(&res->wq);
2588
2589        /* master is known, detach if not already detached */
2590        dlm_mle_detach_hb_events(dlm, mle);
2591        dlm_put_mle_inuse(mle);
2592        ret = 0;
2593
2594        dlm_lockres_calc_usage(dlm, res);
2595
2596leave:
2597        /* re-dirty the lockres if we failed */
2598        if (ret < 0)
2599                dlm_kick_thread(dlm, res);
2600
2601        /* wake up waiters if the MIGRATING flag got set
2602         * but migration failed */
2603        if (wake)
2604                wake_up(&res->wq);
2605
2606        if (mres)
2607                free_page((unsigned long)mres);
2608
2609        dlm_put(dlm);
2610
2611        mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen,
2612             name, target, ret);
2613        return ret;
2614}
2615
2616#define DLM_MIGRATION_RETRY_MS  100
2617
2618/*
2619 * Should be called only after beginning the domain leave process.
2620 * There should not be any remaining locks on nonlocal lock resources,
2621 * and there should be no local locks left on locally mastered resources.
2622 *
2623 * Called with the dlm spinlock held, may drop it to do migration, but
2624 * will re-acquire before exit.
2625 *
2626 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped
2627 */
2628int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2629{
2630        int ret;
2631        int lock_dropped = 0;
2632        u8 target = O2NM_MAX_NODES;
2633
2634        assert_spin_locked(&dlm->spinlock);
2635
2636        spin_lock(&res->spinlock);
2637        if (dlm_is_lockres_migrateable(dlm, res))
2638                target = dlm_pick_migration_target(dlm, res);
2639        spin_unlock(&res->spinlock);
2640
2641        if (target == O2NM_MAX_NODES)
2642                goto leave;
2643
2644        /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
2645        spin_unlock(&dlm->spinlock);
2646        lock_dropped = 1;
2647        ret = dlm_migrate_lockres(dlm, res, target);
2648        if (ret)
2649                mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n",
2650                     dlm->name, res->lockname.len, res->lockname.name,
2651                     target, ret);
2652        spin_lock(&dlm->spinlock);
2653leave:
2654        return lock_dropped;
2655}
2656
2657int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2658{
2659        int ret;
2660        spin_lock(&dlm->ast_lock);
2661        spin_lock(&lock->spinlock);
2662        ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
2663        spin_unlock(&lock->spinlock);
2664        spin_unlock(&dlm->ast_lock);
2665        return ret;
2666}
2667
2668static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2669                                     struct dlm_lock_resource *res,
2670                                     u8 mig_target)
2671{
2672        int can_proceed;
2673        spin_lock(&res->spinlock);
2674        can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2675        spin_unlock(&res->spinlock);
2676
2677        /* target has died, so make the caller break out of the
2678         * wait_event, but caller must recheck the domain_map */
2679        spin_lock(&dlm->spinlock);
2680        if (!test_bit(mig_target, dlm->domain_map))
2681                can_proceed = 1;
2682        spin_unlock(&dlm->spinlock);
2683        return can_proceed;
2684}
2685
2686static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
2687                                struct dlm_lock_resource *res)
2688{
2689        int ret;
2690        spin_lock(&res->spinlock);
2691        ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2692        spin_unlock(&res->spinlock);
2693        return ret;
2694}
2695
2696
2697static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
2698                                       struct dlm_lock_resource *res,
2699                                       u8 target)
2700{
2701        int ret = 0;
2702
2703        mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
2704               res->lockname.len, res->lockname.name, dlm->node_num,
2705               target);
2706        /* need to set MIGRATING flag on lockres.  this is done by
2707         * ensuring that all asts have been flushed for this lockres. */
2708        spin_lock(&res->spinlock);
2709        BUG_ON(res->migration_pending);
2710        res->migration_pending = 1;
2711        /* strategy is to reserve an extra ast then release
2712         * it below, letting the release do all of the work */
2713        __dlm_lockres_reserve_ast(res);
2714        spin_unlock(&res->spinlock);
2715
2716        /* now flush all the pending asts */
2717        dlm_kick_thread(dlm, res);
2718        /* before waiting on DIRTY, block processes which may
2719         * try to dirty the lockres before MIGRATING is set */
2720        spin_lock(&res->spinlock);
2721        BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
2722        res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
2723        spin_unlock(&res->spinlock);
2724        /* now wait on any pending asts and the DIRTY state */
2725        wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2726        dlm_lockres_release_ast(dlm, res);
2727
2728        mlog(0, "about to wait on migration_wq, dirty=%s\n",
2729               res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2730        /* if the extra ref we just put was the final one, this
2731         * will pass thru immediately.  otherwise, we need to wait
2732         * for the last ast to finish. */
2733again:
2734        ret = wait_event_interruptible_timeout(dlm->migration_wq,
2735                   dlm_migration_can_proceed(dlm, res, target),
2736                   msecs_to_jiffies(1000));
2737        if (ret < 0) {
2738                mlog(0, "woken again: migrating? %s, dead? %s\n",
2739                       res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2740                       test_bit(target, dlm->domain_map) ? "no":"yes");
2741        } else {
2742                mlog(0, "all is well: migrating? %s, dead? %s\n",
2743                       res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2744                       test_bit(target, dlm->domain_map) ? "no":"yes");
2745        }
2746        if (!dlm_migration_can_proceed(dlm, res, target)) {
2747                mlog(0, "trying again...\n");
2748                goto again;
2749        }
2750
2751        ret = 0;
2752        /* did the target go down or die? */
2753        spin_lock(&dlm->spinlock);
2754        if (!test_bit(target, dlm->domain_map)) {
2755                mlog(ML_ERROR, "aha. migration target %u just went down\n",
2756                     target);
2757                ret = -EHOSTDOWN;
2758        }
2759        spin_unlock(&dlm->spinlock);
2760
2761        /*
2762         * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for
2763         * another try; otherwise, we are sure the MIGRATING state is there,
2764         * drop the unneded state which blocked threads trying to DIRTY
2765         */
2766        spin_lock(&res->spinlock);
2767        BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
2768        res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
2769        if (!ret)
2770                BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
2771        spin_unlock(&res->spinlock);
2772
2773        /*
2774         * at this point:
2775         *
2776         *   o the DLM_LOCK_RES_MIGRATING flag is set if target not down
2777         *   o there are no pending asts on this lockres
2778         *   o all processes trying to reserve an ast on this
2779         *     lockres must wait for the MIGRATING flag to clear
2780         */
2781        return ret;
2782}
2783
2784/* last step in the migration process.
2785 * original master calls this to free all of the dlm_lock
2786 * structures that used to be for other nodes. */
2787static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2788                                      struct dlm_lock_resource *res)
2789{
2790        struct list_head *queue = &res->granted;
2791        int i, bit;
2792        struct dlm_lock *lock, *next;
2793
2794        assert_spin_locked(&res->spinlock);
2795
2796        BUG_ON(res->owner == dlm->node_num);
2797
2798        for (i=0; i<3; i++) {
2799                list_for_each_entry_safe(lock, next, queue, list) {
2800                        if (lock->ml.node != dlm->node_num) {
2801                                mlog(0, "putting lock for node %u\n",
2802                                     lock->ml.node);
2803                                /* be extra careful */
2804                                BUG_ON(!list_empty(&lock->ast_list));
2805                                BUG_ON(!list_empty(&lock->bast_list));
2806                                BUG_ON(lock->ast_pending);
2807                                BUG_ON(lock->bast_pending);
2808                                dlm_lockres_clear_refmap_bit(dlm, res,
2809                                                             lock->ml.node);
2810                                list_del_init(&lock->list);
2811                                dlm_lock_put(lock);
2812                                /* In a normal unlock, we would have added a
2813                                 * DLM_UNLOCK_FREE_LOCK action. Force it. */
2814                                dlm_lock_put(lock);
2815                        }
2816                }
2817                queue++;
2818        }
2819        bit = 0;
2820        while (1) {
2821                bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
2822                if (bit >= O2NM_MAX_NODES)
2823                        break;
2824                /* do not clear the local node reference, if there is a
2825                 * process holding this, let it drop the ref itself */
2826                if (bit != dlm->node_num) {
2827                        mlog(0, "%s:%.*s: node %u had a ref to this "
2828                             "migrating lockres, clearing\n", dlm->name,
2829                             res->lockname.len, res->lockname.name, bit);
2830                        dlm_lockres_clear_refmap_bit(dlm, res, bit);
2831                }
2832                bit++;
2833        }
2834}
2835
2836/*
2837 * Pick a node to migrate the lock resource to. This function selects a
2838 * potential target based first on the locks and then on refmap. It skips
2839 * nodes that are in the process of exiting the domain.
2840 */
2841static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
2842                                    struct dlm_lock_resource *res)
2843{
2844        enum dlm_lockres_list idx;
2845        struct list_head *queue = &res->granted;
2846        struct dlm_lock *lock;
2847        int noderef;
2848        u8 nodenum = O2NM_MAX_NODES;
2849
2850        assert_spin_locked(&dlm->spinlock);
2851        assert_spin_locked(&res->spinlock);
2852
2853        /* Go through all the locks */
2854        for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
2855                queue = dlm_list_idx_to_ptr(res, idx);
2856                list_for_each_entry(lock, queue, list) {
2857                        if (lock->ml.node == dlm->node_num)
2858                                continue;
2859                        if (test_bit(lock->ml.node, dlm->exit_domain_map))
2860                                continue;
2861                        nodenum = lock->ml.node;
2862                        goto bail;
2863                }
2864        }
2865
2866        /* Go thru the refmap */
2867        noderef = -1;
2868        while (1) {
2869                noderef = find_next_bit(res->refmap, O2NM_MAX_NODES,
2870                                        noderef + 1);
2871                if (noderef >= O2NM_MAX_NODES)
2872                        break;
2873                if (noderef == dlm->node_num)
2874                        continue;
2875                if (test_bit(noderef, dlm->exit_domain_map))
2876                        continue;
2877                nodenum = noderef;
2878                goto bail;
2879        }
2880
2881bail:
2882        return nodenum;
2883}
2884
2885/* this is called by the new master once all lockres
2886 * data has been received */
2887static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
2888                                  struct dlm_lock_resource *res,
2889                                  u8 master, u8 new_master,
2890                                  struct dlm_node_iter *iter)
2891{
2892        struct dlm_migrate_request migrate;
2893        int ret, skip, status = 0;
2894        int nodenum;
2895
2896        memset(&migrate, 0, sizeof(migrate));
2897        migrate.namelen = res->lockname.len;
2898        memcpy(migrate.name, res->lockname.name, migrate.namelen);
2899        migrate.new_master = new_master;
2900        migrate.master = master;
2901
2902        ret = 0;
2903
2904        /* send message to all nodes, except the master and myself */
2905        while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
2906                if (nodenum == master ||
2907                    nodenum == new_master)
2908                        continue;
2909
2910                /* We could race exit domain. If exited, skip. */
2911                spin_lock(&dlm->spinlock);
2912                skip = (!test_bit(nodenum, dlm->domain_map));
2913                spin_unlock(&dlm->spinlock);
2914                if (skip) {
2915                        clear_bit(nodenum, iter->node_map);
2916                        continue;
2917                }
2918
2919                ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
2920                                         &migrate, sizeof(migrate), nodenum,
2921                                         &status);
2922                if (ret < 0) {
2923                        mlog(ML_ERROR, "%s: res %.*s, Error %d send "
2924                             "MIGRATE_REQUEST to node %u\n", dlm->name,
2925                             migrate.namelen, migrate.name, ret, nodenum);
2926                        if (!dlm_is_host_down(ret)) {
2927                                mlog(ML_ERROR, "unhandled error=%d!\n", ret);
2928                                BUG();
2929                        }
2930                        clear_bit(nodenum, iter->node_map);
2931                        ret = 0;
2932                } else if (status < 0) {
2933                        mlog(0, "migrate request (node %u) returned %d!\n",
2934                             nodenum, status);
2935                        ret = status;
2936                } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
2937                        /* during the migration request we short-circuited
2938                         * the mastery of the lockres.  make sure we have
2939                         * a mastery ref for nodenum */
2940                        mlog(0, "%s:%.*s: need ref for node %u\n",
2941                             dlm->name, res->lockname.len, res->lockname.name,
2942                             nodenum);
2943                        spin_lock(&res->spinlock);
2944                        dlm_lockres_set_refmap_bit(dlm, res, nodenum);
2945                        spin_unlock(&res->spinlock);
2946                }
2947        }
2948
2949        if (ret < 0)
2950                mlog_errno(ret);
2951
2952        mlog(0, "returning ret=%d\n", ret);
2953        return ret;
2954}
2955
2956
2957/* if there is an existing mle for this lockres, we now know who the master is.
2958 * (the one who sent us *this* message) we can clear it up right away.
2959 * since the process that put the mle on the list still has a reference to it,
2960 * we can unhash it now, set the master and wake the process.  as a result,
2961 * we will have no mle in the list to start with.  now we can add an mle for
2962 * the migration and this should be the only one found for those scanning the
2963 * list.  */
2964int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
2965                                void **ret_data)
2966{
2967        struct dlm_ctxt *dlm = data;
2968        struct dlm_lock_resource *res = NULL;
2969        struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
2970        struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
2971        const char *name;
2972        unsigned int namelen, hash;
2973        int ret = 0;
2974
2975        if (!dlm_grab(dlm))
2976                return -EINVAL;
2977
2978        name = migrate->name;
2979        namelen = migrate->namelen;
2980        hash = dlm_lockid_hash(name, namelen);
2981
2982        /* preallocate.. if this fails, abort */
2983        mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
2984
2985        if (!mle) {
2986                ret = -ENOMEM;
2987                goto leave;
2988        }
2989
2990        /* check for pre-existing lock */
2991        spin_lock(&dlm->spinlock);
2992        res = __dlm_lookup_lockres(dlm, name, namelen, hash);
2993        if (res) {
2994                spin_lock(&res->spinlock);
2995                if (res->state & DLM_LOCK_RES_RECOVERING) {
2996                        /* if all is working ok, this can only mean that we got
2997                        * a migrate request from a node that we now see as
2998                        * dead.  what can we do here?  drop it to the floor? */
2999                        spin_unlock(&res->spinlock);
3000                        mlog(ML_ERROR, "Got a migrate request, but the "
3001                             "lockres is marked as recovering!");
3002                        kmem_cache_free(dlm_mle_cache, mle);
3003                        ret = -EINVAL; /* need a better solution */
3004                        goto unlock;
3005                }
3006                res->state |= DLM_LOCK_RES_MIGRATING;
3007                spin_unlock(&res->spinlock);
3008        }
3009
3010        spin_lock(&dlm->master_lock);
3011        /* ignore status.  only nonzero status would BUG. */
3012        ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3013                                    name, namelen,
3014                                    migrate->new_master,
3015                                    migrate->master);
3016
3017        spin_unlock(&dlm->master_lock);
3018unlock:
3019        spin_unlock(&dlm->spinlock);
3020
3021        if (oldmle) {
3022                /* master is known, detach if not already detached */
3023                dlm_mle_detach_hb_events(dlm, oldmle);
3024                dlm_put_mle(oldmle);
3025        }
3026
3027        if (res)
3028                dlm_lockres_put(res);
3029leave:
3030        dlm_put(dlm);
3031        return ret;
3032}
3033
3034/* must be holding dlm->spinlock and dlm->master_lock
3035 * when adding a migration mle, we can clear any other mles
3036 * in the master list because we know with certainty that
3037 * the master is "master".  so we remove any old mle from
3038 * the list after setting it's master field, and then add
3039 * the new migration mle.  this way we can hold with the rule
3040 * of having only one mle for a given lock name at all times. */
3041static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
3042                                 struct dlm_lock_resource *res,
3043                                 struct dlm_master_list_entry *mle,
3044                                 struct dlm_master_list_entry **oldmle,
3045                                 const char *name, unsigned int namelen,
3046                                 u8 new_master, u8 master)
3047{
3048        int found;
3049        int ret = 0;
3050
3051        *oldmle = NULL;
3052
3053        assert_spin_locked(&dlm->spinlock);
3054        assert_spin_locked(&dlm->master_lock);
3055
3056        /* caller is responsible for any ref taken here on oldmle */
3057        found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
3058        if (found) {
3059                struct dlm_master_list_entry *tmp = *oldmle;
3060                spin_lock(&tmp->spinlock);
3061                if (tmp->type == DLM_MLE_MIGRATION) {
3062                        if (master == dlm->node_num) {
3063                                /* ah another process raced me to it */
3064                                mlog(0, "tried to migrate %.*s, but some "
3065                                     "process beat me to it\n",
3066                                     namelen, name);
3067                                ret = -EEXIST;
3068                        } else {
3069                                /* bad.  2 NODES are trying to migrate! */
3070                                mlog(ML_ERROR, "migration error  mle: "
3071                                     "master=%u new_master=%u // request: "
3072                                     "master=%u new_master=%u // "
3073                                     "lockres=%.*s\n",
3074                                     tmp->master, tmp->new_master,
3075                                     master, new_master,
3076                                     namelen, name);
3077                                BUG();
3078                        }
3079                } else {
3080                        /* this is essentially what assert_master does */
3081                        tmp->master = master;
3082                        atomic_set(&tmp->woken, 1);
3083                        wake_up(&tmp->wq);
3084                        /* remove it so that only one mle will be found */
3085                        __dlm_unlink_mle(dlm, tmp);
3086                        __dlm_mle_detach_hb_events(dlm, tmp);
3087                        ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
3088                        mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
3089                            "telling master to get ref for cleared out mle "
3090                            "during migration\n", dlm->name, namelen, name,
3091                            master, new_master);
3092                }
3093                spin_unlock(&tmp->spinlock);
3094        }
3095
3096        /* now add a migration mle to the tail of the list */
3097        dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
3098        mle->new_master = new_master;
3099        /* the new master will be sending an assert master for this.
3100         * at that point we will get the refmap reference */
3101        mle->master = master;
3102        /* do this for consistency with other mle types */
3103        set_bit(new_master, mle->maybe_map);
3104        __dlm_insert_mle(dlm, mle);
3105
3106        return ret;
3107}
3108
3109/*
3110 * Sets the owner of the lockres, associated to the mle, to UNKNOWN
3111 */
3112static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm,
3113                                        struct dlm_master_list_entry *mle)
3114{
3115        struct dlm_lock_resource *res;
3116
3117        /* Find the lockres associated to the mle and set its owner to UNK */
3118        res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen,
3119                                   mle->mnamehash);
3120        if (res) {
3121                spin_unlock(&dlm->master_lock);
3122
3123                /* move lockres onto recovery list */
3124                spin_lock(&res->spinlock);
3125                dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
3126                dlm_move_lockres_to_recovery_list(dlm, res);
3127                spin_unlock(&res->spinlock);
3128                dlm_lockres_put(res);
3129
3130                /* about to get rid of mle, detach from heartbeat */
3131                __dlm_mle_detach_hb_events(dlm, mle);
3132
3133                /* dump the mle */
3134                spin_lock(&dlm->master_lock);
3135                __dlm_put_mle(mle);
3136                spin_unlock(&dlm->master_lock);
3137        }
3138
3139        return res;
3140}
3141
3142static void dlm_clean_migration_mle(struct dlm_ctxt *dlm,
3143                                    struct dlm_master_list_entry *mle)
3144{
3145        __dlm_mle_detach_hb_events(dlm, mle);
3146
3147        spin_lock(&mle->spinlock);
3148        __dlm_unlink_mle(dlm, mle);
3149        atomic_set(&mle->woken, 1);
3150        spin_unlock(&mle->spinlock);
3151
3152        wake_up(&mle->wq);
3153}
3154
3155static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
3156                                struct dlm_master_list_entry *mle, u8 dead_node)
3157{
3158        int bit;
3159
3160        BUG_ON(mle->type != DLM_MLE_BLOCK);
3161
3162        spin_lock(&mle->spinlock);
3163        bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
3164        if (bit != dead_node) {
3165                mlog(0, "mle found, but dead node %u would not have been "
3166                     "master\n", dead_node);
3167                spin_unlock(&mle->spinlock);
3168        } else {
3169                /* Must drop the refcount by one since the assert_master will
3170                 * never arrive. This may result in the mle being unlinked and
3171                 * freed, but there may still be a process waiting in the
3172                 * dlmlock path which is fine. */
3173                mlog(0, "node %u was expected master\n", dead_node);
3174                atomic_set(&mle->woken, 1);
3175                spin_unlock(&mle->spinlock);
3176                wake_up(&mle->wq);
3177
3178                /* Do not need events any longer, so detach from heartbeat */
3179                __dlm_mle_detach_hb_events(dlm, mle);
3180                __dlm_put_mle(mle);
3181        }
3182}
3183
3184void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
3185{
3186        struct dlm_master_list_entry *mle;
3187        struct dlm_lock_resource *res;
3188        struct hlist_head *bucket;
3189        struct hlist_node *tmp;
3190        unsigned int i;
3191
3192        mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node);
3193top:
3194        assert_spin_locked(&dlm->spinlock);
3195
3196        /* clean the master list */
3197        spin_lock(&dlm->master_lock);
3198        for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3199                bucket = dlm_master_hash(dlm, i);
3200                hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
3201                        BUG_ON(mle->type != DLM_MLE_BLOCK &&
3202                               mle->type != DLM_MLE_MASTER &&
3203                               mle->type != DLM_MLE_MIGRATION);
3204
3205                        /* MASTER mles are initiated locally. The waiting
3206                         * process will notice the node map change shortly.
3207                         * Let that happen as normal. */
3208                        if (mle->type == DLM_MLE_MASTER)
3209                                continue;
3210
3211                        /* BLOCK mles are initiated by other nodes. Need to
3212                         * clean up if the dead node would have been the
3213                         * master. */
3214                        if (mle->type == DLM_MLE_BLOCK) {
3215                                dlm_clean_block_mle(dlm, mle, dead_node);
3216                                continue;
3217                        }
3218
3219                        /* Everything else is a MIGRATION mle */
3220
3221                        /* The rule for MIGRATION mles is that the master
3222                         * becomes UNKNOWN if *either* the original or the new
3223                         * master dies. All UNKNOWN lockres' are sent to
3224                         * whichever node becomes the recovery master. The new
3225                         * master is responsible for determining if there is
3226                         * still a master for this lockres, or if he needs to
3227                         * take over mastery. Either way, this node should
3228                         * expect another message to resolve this. */
3229
3230                        if (mle->master != dead_node &&
3231                            mle->new_master != dead_node)
3232                                continue;
3233
3234                        /* If we have reached this point, this mle needs to be
3235                         * removed from the list and freed. */
3236                        dlm_clean_migration_mle(dlm, mle);
3237
3238                        mlog(0, "%s: node %u died during migration from "
3239                             "%u to %u!\n", dlm->name, dead_node, mle->master,
3240                             mle->new_master);
3241
3242                        /* If we find a lockres associated with the mle, we've
3243                         * hit this rare case that messes up our lock ordering.
3244                         * If so, we need to drop the master lock so that we can
3245                         * take the lockres lock, meaning that we will have to
3246                         * restart from the head of list. */
3247                        res = dlm_reset_mleres_owner(dlm, mle);
3248                        if (res)
3249                                /* restart */
3250                                goto top;
3251
3252                        /* This may be the last reference */
3253                        __dlm_put_mle(mle);
3254                }
3255        }
3256        spin_unlock(&dlm->master_lock);
3257}
3258
3259int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
3260                         u8 old_master)
3261{
3262        struct dlm_node_iter iter;
3263        int ret = 0;
3264
3265        spin_lock(&dlm->spinlock);
3266        dlm_node_iter_init(dlm->domain_map, &iter);
3267        clear_bit(old_master, iter.node_map);
3268        clear_bit(dlm->node_num, iter.node_map);
3269        spin_unlock(&dlm->spinlock);
3270
3271        /* ownership of the lockres is changing.  account for the
3272         * mastery reference here since old_master will briefly have
3273         * a reference after the migration completes */
3274        spin_lock(&res->spinlock);
3275        dlm_lockres_set_refmap_bit(dlm, res, old_master);
3276        spin_unlock(&res->spinlock);
3277
3278        mlog(0, "now time to do a migrate request to other nodes\n");
3279        ret = dlm_do_migrate_request(dlm, res, old_master,
3280                                     dlm->node_num, &iter);
3281        if (ret < 0) {
3282                mlog_errno(ret);
3283                goto leave;
3284        }
3285
3286        mlog(0, "doing assert master of %.*s to all except the original node\n",
3287             res->lockname.len, res->lockname.name);
3288        /* this call now finishes out the nodemap
3289         * even if one or more nodes die */
3290        ret = dlm_do_assert_master(dlm, res, iter.node_map,
3291                                   DLM_ASSERT_MASTER_FINISH_MIGRATION);
3292        if (ret < 0) {
3293                /* no longer need to retry.  all living nodes contacted. */
3294                mlog_errno(ret);
3295                ret = 0;
3296        }
3297
3298        memset(iter.node_map, 0, sizeof(iter.node_map));
3299        set_bit(old_master, iter.node_map);
3300        mlog(0, "doing assert master of %.*s back to %u\n",
3301             res->lockname.len, res->lockname.name, old_master);
3302        ret = dlm_do_assert_master(dlm, res, iter.node_map,
3303                                   DLM_ASSERT_MASTER_FINISH_MIGRATION);
3304        if (ret < 0) {
3305                mlog(0, "assert master to original master failed "
3306                     "with %d.\n", ret);
3307                /* the only nonzero status here would be because of
3308                 * a dead original node.  we're done. */
3309                ret = 0;
3310        }
3311
3312        /* all done, set the owner, clear the flag */
3313        spin_lock(&res->spinlock);
3314        dlm_set_lockres_owner(dlm, res, dlm->node_num);
3315        res->state &= ~DLM_LOCK_RES_MIGRATING;
3316        spin_unlock(&res->spinlock);
3317        /* re-dirty it on the new master */
3318        dlm_kick_thread(dlm, res);
3319        wake_up(&res->wq);
3320leave:
3321        return ret;
3322}
3323
3324/*
3325 * LOCKRES AST REFCOUNT
3326 * this is integral to migration
3327 */
3328
3329/* for future intent to call an ast, reserve one ahead of time.
3330 * this should be called only after waiting on the lockres
3331 * with dlm_wait_on_lockres, and while still holding the
3332 * spinlock after the call. */
3333void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
3334{
3335        assert_spin_locked(&res->spinlock);
3336        if (res->state & DLM_LOCK_RES_MIGRATING) {
3337                __dlm_print_one_lock_resource(res);
3338        }
3339        BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3340
3341        atomic_inc(&res->asts_reserved);
3342}
3343
3344/*
3345 * used to drop the reserved ast, either because it went unused,
3346 * or because the ast/bast was actually called.
3347 *
3348 * also, if there is a pending migration on this lockres,
3349 * and this was the last pending ast on the lockres,
3350 * atomically set the MIGRATING flag before we drop the lock.
3351 * this is how we ensure that migration can proceed with no
3352 * asts in progress.  note that it is ok if the state of the
3353 * queues is such that a lock should be granted in the future
3354 * or that a bast should be fired, because the new master will
3355 * shuffle the lists on this lockres as soon as it is migrated.
3356 */
3357void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
3358                             struct dlm_lock_resource *res)
3359{
3360        if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
3361                return;
3362
3363        if (!res->migration_pending) {
3364                spin_unlock(&res->spinlock);
3365                return;
3366        }
3367
3368        BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3369        res->migration_pending = 0;
3370        res->state |= DLM_LOCK_RES_MIGRATING;
3371        spin_unlock(&res->spinlock);
3372        wake_up(&res->wq);
3373        wake_up(&dlm->migration_wq);
3374}
3375
3376void dlm_force_free_mles(struct dlm_ctxt *dlm)
3377{
3378        int i;
3379        struct hlist_head *bucket;
3380        struct dlm_master_list_entry *mle;
3381        struct hlist_node *tmp;
3382
3383        /*
3384         * We notified all other nodes that we are exiting the domain and
3385         * marked the dlm state to DLM_CTXT_LEAVING. If any mles are still
3386         * around we force free them and wake any processes that are waiting
3387         * on the mles
3388         */
3389        spin_lock(&dlm->spinlock);
3390        spin_lock(&dlm->master_lock);
3391
3392        BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
3393        BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));
3394
3395        for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3396                bucket = dlm_master_hash(dlm, i);
3397                hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
3398                        if (mle->type != DLM_MLE_BLOCK) {
3399                                mlog(ML_ERROR, "bad mle: %p\n", mle);
3400                                dlm_print_one_mle(mle);
3401                        }
3402                        atomic_set(&mle->woken, 1);
3403                        wake_up(&mle->wq);
3404
3405                        __dlm_unlink_mle(dlm, mle);
3406                        __dlm_mle_detach_hb_events(dlm, mle);
3407                        __dlm_put_mle(mle);
3408                }
3409        }
3410        spin_unlock(&dlm->master_lock);
3411        spin_unlock(&dlm->spinlock);
3412}
3413