linux/fs/ocfs2/dlm/dlmmaster.c
   1/* -*- mode: c; c-basic-offset: 8; -*-
   2 * vim: noexpandtab sw=8 ts=8 sts=0:
   3 *
   4 * dlmmaster.c
   5 *
   6 * standalone DLM module
   7 *
   8 * Copyright (C) 2004 Oracle.  All rights reserved.
   9 *
  10 * This program is free software; you can redistribute it and/or
  11 * modify it under the terms of the GNU General Public
  12 * License as published by the Free Software Foundation; either
  13 * version 2 of the License, or (at your option) any later version.
  14 *
  15 * This program is distributed in the hope that it will be useful,
  16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  18 * General Public License for more details.
  19 *
  20 * You should have received a copy of the GNU General Public
  21 * License along with this program; if not, write to the
  22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  23 * Boston, MA 02111-1307, USA.
  24 *
  25 */
  26
  27
  28#include <linux/module.h>
  29#include <linux/fs.h>
  30#include <linux/types.h>
  31#include <linux/slab.h>
  32#include <linux/highmem.h>
  33#include <linux/init.h>
  34#include <linux/sysctl.h>
  35#include <linux/random.h>
  36#include <linux/blkdev.h>
  37#include <linux/socket.h>
  38#include <linux/inet.h>
  39#include <linux/spinlock.h>
  40#include <linux/delay.h>
  41
  42
  43#include "cluster/heartbeat.h"
  44#include "cluster/nodemanager.h"
  45#include "cluster/tcp.h"
  46
  47#include "dlmapi.h"
  48#include "dlmcommon.h"
  49#include "dlmdomain.h"
  50#include "dlmdebug.h"
  51
  52#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
  53#include "cluster/masklog.h"
  54
  55static void dlm_mle_node_down(struct dlm_ctxt *dlm,
  56                              struct dlm_master_list_entry *mle,
  57                              struct o2nm_node *node,
  58                              int idx);
  59static void dlm_mle_node_up(struct dlm_ctxt *dlm,
  60                            struct dlm_master_list_entry *mle,
  61                            struct o2nm_node *node,
  62                            int idx);
  63
  64static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
  65static int dlm_do_assert_master(struct dlm_ctxt *dlm,
  66                                struct dlm_lock_resource *res,
  67                                void *nodemap, u32 flags);
  68static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);
  69
  70static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
  71                                struct dlm_master_list_entry *mle,
  72                                const char *name,
  73                                unsigned int namelen)
  74{
  75        if (dlm != mle->dlm)
  76                return 0;
  77
  78        if (namelen != mle->mnamelen ||
  79            memcmp(name, mle->mname, namelen) != 0)
  80                return 0;
  81
  82        return 1;
  83}
  84
  85static struct kmem_cache *dlm_lockres_cache;
  86static struct kmem_cache *dlm_lockname_cache;
  87static struct kmem_cache *dlm_mle_cache;
  88
  89static void dlm_mle_release(struct kref *kref);
  90static void dlm_init_mle(struct dlm_master_list_entry *mle,
  91                        enum dlm_mle_type type,
  92                        struct dlm_ctxt *dlm,
  93                        struct dlm_lock_resource *res,
  94                        const char *name,
  95                        unsigned int namelen);
  96static void dlm_put_mle(struct dlm_master_list_entry *mle);
  97static void __dlm_put_mle(struct dlm_master_list_entry *mle);
  98static int dlm_find_mle(struct dlm_ctxt *dlm,
  99                        struct dlm_master_list_entry **mle,
 100                        char *name, unsigned int namelen);
 101
 102static int dlm_do_master_request(struct dlm_lock_resource *res,
 103                                 struct dlm_master_list_entry *mle, int to);
 104
 105
 106static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
 107                                     struct dlm_lock_resource *res,
 108                                     struct dlm_master_list_entry *mle,
 109                                     int *blocked);
 110static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
 111                                    struct dlm_lock_resource *res,
 112                                    struct dlm_master_list_entry *mle,
 113                                    int blocked);
 114static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
 115                                 struct dlm_lock_resource *res,
 116                                 struct dlm_master_list_entry *mle,
 117                                 struct dlm_master_list_entry **oldmle,
 118                                 const char *name, unsigned int namelen,
 119                                 u8 new_master, u8 master);
 120
 121static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
 122                                    struct dlm_lock_resource *res);
 123static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
 124                                      struct dlm_lock_resource *res);
 125static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
 126                                       struct dlm_lock_resource *res,
 127                                       u8 target);
 128static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
 129                                       struct dlm_lock_resource *res);
 130
 131
 132int dlm_is_host_down(int errno)
 133{
 134        switch (errno) {
 135                case -EBADF:
 136                case -ECONNREFUSED:
 137                case -ENOTCONN:
 138                case -ECONNRESET:
 139                case -EPIPE:
 140                case -EHOSTDOWN:
 141                case -EHOSTUNREACH:
 142                case -ETIMEDOUT:
 143                case -ECONNABORTED:
 144                case -ENETDOWN:
 145                case -ENETUNREACH:
 146                case -ENETRESET:
 147                case -ESHUTDOWN:
 148                case -ENOPROTOOPT:
 149                case -EINVAL:   /* if returned from our tcp code,
 150                                   this means there is no socket */
 151                        return 1;
 152        }
 153        return 0;
 154}
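/* illustrative sketch of how callers here use dlm_is_host_down(); modeled
 * loosely on dlm_do_master_request() below:
 *
 *	ret = o2net_send_message(type, dlm->key, &request,
 *				 sizeof(request), to, &response);
 *	if (ret < 0) {
 *		if (!dlm_is_host_down(ret))
 *			BUG();		// unexpected non-network error
 *		// network error: assume the target node is dead and
 *		// let recovery handle it
 *	}
 */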
 155
 156
 157/*
 158 * MASTER LIST FUNCTIONS
 159 */
 160
 161
 162/*
 163 * regarding master list entries and heartbeat callbacks:
 164 *
 165 * in order to avoid sleeping and allocation that occurs in
 166 * heartbeat, master list entries are simply attached to the
 167 * dlm's established heartbeat callbacks.  the mle is attached
 168 * when it is created, and since the dlm->spinlock is held at
 169 * that time, any heartbeat event will be properly discovered
 170 * by the mle.  the mle needs to be detached from the
 171 * dlm->mle_hb_events list as soon as heartbeat events are no
 172 * longer useful to the mle, and before the mle is freed.
 173 *
 174 * as a general rule, heartbeat events are no longer needed by
 175 * the mle once an "answer" regarding the lock master has been
 176 * received.
 177 */
 178static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
 179                                              struct dlm_master_list_entry *mle)
 180{
 181        assert_spin_locked(&dlm->spinlock);
 182
 183        list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
 184}
 185
 186
 187static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
 188                                              struct dlm_master_list_entry *mle)
 189{
 190        if (!list_empty(&mle->hb_events))
 191                list_del_init(&mle->hb_events);
 192}
 193
 194
 195static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
 196                                            struct dlm_master_list_entry *mle)
 197{
 198        spin_lock(&dlm->spinlock);
 199        __dlm_mle_detach_hb_events(dlm, mle);
 200        spin_unlock(&dlm->spinlock);
 201}
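/* rough sketch of the usual mle heartbeat-event lifecycle (see
 * dlm_get_lock_resource() below for the real sequence):
 *
 *	spin_lock(&dlm->spinlock);
 *	...
 *	dlm_init_mle(mle, ...);		(attaches via __dlm_mle_attach_hb_events)
 *	...
 *	spin_unlock(&dlm->spinlock);
 *	...
 *	dlm_mle_detach_hb_events(dlm, mle);	(once the master is known)
 *	dlm_put_mle(mle);
 */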
 202
 203static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
 204{
 205        struct dlm_ctxt *dlm;
 206        dlm = mle->dlm;
 207
 208        assert_spin_locked(&dlm->spinlock);
 209        assert_spin_locked(&dlm->master_lock);
 210        mle->inuse++;
 211        kref_get(&mle->mle_refs);
 212}
 213
 214static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
 215{
 216        struct dlm_ctxt *dlm;
 217        dlm = mle->dlm;
 218
 219        spin_lock(&dlm->spinlock);
 220        spin_lock(&dlm->master_lock);
 221        mle->inuse--;
 222        __dlm_put_mle(mle);
 223        spin_unlock(&dlm->master_lock);
 224        spin_unlock(&dlm->spinlock);
 225
 226}
 227
 228/* remove from list and free */
 229static void __dlm_put_mle(struct dlm_master_list_entry *mle)
 230{
 231        struct dlm_ctxt *dlm;
 232        dlm = mle->dlm;
 233
 234        assert_spin_locked(&dlm->spinlock);
 235        assert_spin_locked(&dlm->master_lock);
 236        if (!atomic_read(&mle->mle_refs.refcount)) {
 237                /* this may or may not crash, but who cares.
 238                 * it's a BUG. */
 239                mlog(ML_ERROR, "bad mle: %p\n", mle);
 240                dlm_print_one_mle(mle);
 241                BUG();
 242        } else
 243                kref_put(&mle->mle_refs, dlm_mle_release);
 244}
 245
 246
 247/* must not have any spinlocks coming in */
 248static void dlm_put_mle(struct dlm_master_list_entry *mle)
 249{
 250        struct dlm_ctxt *dlm;
 251        dlm = mle->dlm;
 252
 253        spin_lock(&dlm->spinlock);
 254        spin_lock(&dlm->master_lock);
 255        __dlm_put_mle(mle);
 256        spin_unlock(&dlm->master_lock);
 257        spin_unlock(&dlm->spinlock);
 258}
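/* note: the lock ordering used by the mle refcount helpers above is
 * dlm->spinlock, then dlm->master_lock; __dlm_put_mle() asserts that its
 * caller holds both. */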
 259
 260static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
 261{
 262        kref_get(&mle->mle_refs);
 263}
 264
 265static void dlm_init_mle(struct dlm_master_list_entry *mle,
 266                        enum dlm_mle_type type,
 267                        struct dlm_ctxt *dlm,
 268                        struct dlm_lock_resource *res,
 269                        const char *name,
 270                        unsigned int namelen)
 271{
 272        assert_spin_locked(&dlm->spinlock);
 273
 274        mle->dlm = dlm;
 275        mle->type = type;
 276        INIT_HLIST_NODE(&mle->master_hash_node);
 277        INIT_LIST_HEAD(&mle->hb_events);
 278        memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
 279        spin_lock_init(&mle->spinlock);
 280        init_waitqueue_head(&mle->wq);
 281        atomic_set(&mle->woken, 0);
 282        kref_init(&mle->mle_refs);
 283        memset(mle->response_map, 0, sizeof(mle->response_map));
 284        mle->master = O2NM_MAX_NODES;
 285        mle->new_master = O2NM_MAX_NODES;
 286        mle->inuse = 0;
 287
 288        BUG_ON(mle->type != DLM_MLE_BLOCK &&
 289               mle->type != DLM_MLE_MASTER &&
 290               mle->type != DLM_MLE_MIGRATION);
 291
 292        if (mle->type == DLM_MLE_MASTER) {
 293                BUG_ON(!res);
 294                mle->mleres = res;
 295                memcpy(mle->mname, res->lockname.name, res->lockname.len);
 296                mle->mnamelen = res->lockname.len;
 297                mle->mnamehash = res->lockname.hash;
 298        } else {
 299                BUG_ON(!name);
 300                mle->mleres = NULL;
 301                memcpy(mle->mname, name, namelen);
 302                mle->mnamelen = namelen;
 303                mle->mnamehash = dlm_lockid_hash(name, namelen);
 304        }
 305
 306        atomic_inc(&dlm->mle_tot_count[mle->type]);
 307        atomic_inc(&dlm->mle_cur_count[mle->type]);
 308
 309        /* copy off the node_map and register hb callbacks on our copy */
 310        memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
 311        memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
 312        clear_bit(dlm->node_num, mle->vote_map);
 313        clear_bit(dlm->node_num, mle->node_map);
 314
 315        /* attach the mle to the domain node up/down events */
 316        __dlm_mle_attach_hb_events(dlm, mle);
 317}
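/* illustrative usage, as in dlm_get_lock_resource() below (both
 * dlm->spinlock and dlm->master_lock held):
 *
 *	dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
 *	set_bit(dlm->node_num, mle->maybe_map);
 *	__dlm_insert_mle(dlm, mle);
 */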
 318
 319void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
 320{
 321        assert_spin_locked(&dlm->spinlock);
 322        assert_spin_locked(&dlm->master_lock);
 323
 324        if (!hlist_unhashed(&mle->master_hash_node))
 325                hlist_del_init(&mle->master_hash_node);
 326}
 327
 328void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
 329{
 330        struct hlist_head *bucket;
 331
 332        assert_spin_locked(&dlm->master_lock);
 333
 334        bucket = dlm_master_hash(dlm, mle->mnamehash);
 335        hlist_add_head(&mle->master_hash_node, bucket);
 336}
 337
 338/* returns 1 if found, 0 if not */
 339static int dlm_find_mle(struct dlm_ctxt *dlm,
 340                        struct dlm_master_list_entry **mle,
 341                        char *name, unsigned int namelen)
 342{
 343        struct dlm_master_list_entry *tmpmle;
 344        struct hlist_head *bucket;
 345        unsigned int hash;
 346
 347        assert_spin_locked(&dlm->master_lock);
 348
 349        hash = dlm_lockid_hash(name, namelen);
 350        bucket = dlm_master_hash(dlm, hash);
 351        hlist_for_each_entry(tmpmle, bucket, master_hash_node) {
 352                if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
 353                        continue;
 354                dlm_get_mle(tmpmle);
 355                *mle = tmpmle;
 356                return 1;
 357        }
 358        return 0;
 359}
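/* note: dlm_find_mle() takes a reference (dlm_get_mle) on the entry it
 * returns, so the caller must drop it with dlm_put_mle() when done, as
 * dlm_get_lock_resource() does below. */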
 360
 361void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
 362{
 363        struct dlm_master_list_entry *mle;
 364
 365        assert_spin_locked(&dlm->spinlock);
 366
 367        list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
 368                if (node_up)
 369                        dlm_mle_node_up(dlm, mle, NULL, idx);
 370                else
 371                        dlm_mle_node_down(dlm, mle, NULL, idx);
 372        }
 373}
 374
 375static void dlm_mle_node_down(struct dlm_ctxt *dlm,
 376                              struct dlm_master_list_entry *mle,
 377                              struct o2nm_node *node, int idx)
 378{
 379        spin_lock(&mle->spinlock);
 380
 381        if (!test_bit(idx, mle->node_map))
 382                mlog(0, "node %u already removed from nodemap!\n", idx);
 383        else
 384                clear_bit(idx, mle->node_map);
 385
 386        spin_unlock(&mle->spinlock);
 387}
 388
 389static void dlm_mle_node_up(struct dlm_ctxt *dlm,
 390                            struct dlm_master_list_entry *mle,
 391                            struct o2nm_node *node, int idx)
 392{
 393        spin_lock(&mle->spinlock);
 394
 395        if (test_bit(idx, mle->node_map))
 396                mlog(0, "node %u already in node map!\n", idx);
 397        else
 398                set_bit(idx, mle->node_map);
 399
 400        spin_unlock(&mle->spinlock);
 401}
 402
 403
 404int dlm_init_mle_cache(void)
 405{
 406        dlm_mle_cache = kmem_cache_create("o2dlm_mle",
 407                                          sizeof(struct dlm_master_list_entry),
 408                                          0, SLAB_HWCACHE_ALIGN,
 409                                          NULL);
 410        if (dlm_mle_cache == NULL)
 411                return -ENOMEM;
 412        return 0;
 413}
 414
 415void dlm_destroy_mle_cache(void)
 416{
 417        if (dlm_mle_cache)
 418                kmem_cache_destroy(dlm_mle_cache);
 419}
 420
 421static void dlm_mle_release(struct kref *kref)
 422{
 423        struct dlm_master_list_entry *mle;
 424        struct dlm_ctxt *dlm;
 425
 426        mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
 427        dlm = mle->dlm;
 428
 429        assert_spin_locked(&dlm->spinlock);
 430        assert_spin_locked(&dlm->master_lock);
 431
 432        mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,
 433             mle->type);
 434
 435        /* remove from list if not already */
 436        __dlm_unlink_mle(dlm, mle);
 437
 438        /* detach the mle from the domain node up/down events */
 439        __dlm_mle_detach_hb_events(dlm, mle);
 440
 441        atomic_dec(&dlm->mle_cur_count[mle->type]);
 442
 443        /* NOTE: kfree under spinlock here.
 444         * if this is bad, we can move this to a freelist. */
 445        kmem_cache_free(dlm_mle_cache, mle);
 446}
 447
 448
 449/*
 450 * LOCK RESOURCE FUNCTIONS
 451 */
 452
 453int dlm_init_master_caches(void)
 454{
 455        dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
 456                                              sizeof(struct dlm_lock_resource),
 457                                              0, SLAB_HWCACHE_ALIGN, NULL);
 458        if (!dlm_lockres_cache)
 459                goto bail;
 460
 461        dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
 462                                               DLM_LOCKID_NAME_MAX, 0,
 463                                               SLAB_HWCACHE_ALIGN, NULL);
 464        if (!dlm_lockname_cache)
 465                goto bail;
 466
 467        return 0;
 468bail:
 469        dlm_destroy_master_caches();
 470        return -ENOMEM;
 471}
 472
 473void dlm_destroy_master_caches(void)
 474{
 475        if (dlm_lockname_cache) {
 476                kmem_cache_destroy(dlm_lockname_cache);
 477                dlm_lockname_cache = NULL;
 478        }
 479
 480        if (dlm_lockres_cache) {
 481                kmem_cache_destroy(dlm_lockres_cache);
 482                dlm_lockres_cache = NULL;
 483        }
 484}
 485
 486static void dlm_lockres_release(struct kref *kref)
 487{
 488        struct dlm_lock_resource *res;
 489        struct dlm_ctxt *dlm;
 490
 491        res = container_of(kref, struct dlm_lock_resource, refs);
 492        dlm = res->dlm;
 493
 494        /* This should not happen -- all lockres' have a name
 495         * associated with them at init time. */
 496        BUG_ON(!res->lockname.name);
 497
 498        mlog(0, "destroying lockres %.*s\n", res->lockname.len,
 499             res->lockname.name);
 500
 501        spin_lock(&dlm->track_lock);
 502        if (!list_empty(&res->tracking))
 503                list_del_init(&res->tracking);
 504        else {
 505                mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
 506                     res->lockname.len, res->lockname.name);
 507                dlm_print_one_lock_resource(res);
 508        }
 509        spin_unlock(&dlm->track_lock);
 510
 511        atomic_dec(&dlm->res_cur_count);
 512
 513        if (!hlist_unhashed(&res->hash_node) ||
 514            !list_empty(&res->granted) ||
 515            !list_empty(&res->converting) ||
 516            !list_empty(&res->blocked) ||
 517            !list_empty(&res->dirty) ||
 518            !list_empty(&res->recovering) ||
 519            !list_empty(&res->purge)) {
 520                mlog(ML_ERROR,
 521                     "Going to BUG for resource %.*s."
 522                     "  We're on a list! [%c%c%c%c%c%c%c]\n",
 523                     res->lockname.len, res->lockname.name,
 524                     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
 525                     !list_empty(&res->granted) ? 'G' : ' ',
 526                     !list_empty(&res->converting) ? 'C' : ' ',
 527                     !list_empty(&res->blocked) ? 'B' : ' ',
 528                     !list_empty(&res->dirty) ? 'D' : ' ',
 529                     !list_empty(&res->recovering) ? 'R' : ' ',
 530                     !list_empty(&res->purge) ? 'P' : ' ');
 531
 532                dlm_print_one_lock_resource(res);
 533        }
 534
 535        /* By the time we're ready to blow this guy away, we shouldn't
 536         * be on any lists. */
 537        BUG_ON(!hlist_unhashed(&res->hash_node));
 538        BUG_ON(!list_empty(&res->granted));
 539        BUG_ON(!list_empty(&res->converting));
 540        BUG_ON(!list_empty(&res->blocked));
 541        BUG_ON(!list_empty(&res->dirty));
 542        BUG_ON(!list_empty(&res->recovering));
 543        BUG_ON(!list_empty(&res->purge));
 544
 545        kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);
 546
 547        kmem_cache_free(dlm_lockres_cache, res);
 548}
 549
 550void dlm_lockres_put(struct dlm_lock_resource *res)
 551{
 552        kref_put(&res->refs, dlm_lockres_release);
 553}
 554
 555static void dlm_init_lockres(struct dlm_ctxt *dlm,
 556                             struct dlm_lock_resource *res,
 557                             const char *name, unsigned int namelen)
 558{
 559        char *qname;
 560
 561        /* If we memset here, we lose our reference to the kmalloc'd
 562         * res->lockname.name, so be sure to init every field
 563         * correctly! */
 564
 565        qname = (char *) res->lockname.name;
 566        memcpy(qname, name, namelen);
 567
 568        res->lockname.len = namelen;
 569        res->lockname.hash = dlm_lockid_hash(name, namelen);
 570
 571        init_waitqueue_head(&res->wq);
 572        spin_lock_init(&res->spinlock);
 573        INIT_HLIST_NODE(&res->hash_node);
 574        INIT_LIST_HEAD(&res->granted);
 575        INIT_LIST_HEAD(&res->converting);
 576        INIT_LIST_HEAD(&res->blocked);
 577        INIT_LIST_HEAD(&res->dirty);
 578        INIT_LIST_HEAD(&res->recovering);
 579        INIT_LIST_HEAD(&res->purge);
 580        INIT_LIST_HEAD(&res->tracking);
 581        atomic_set(&res->asts_reserved, 0);
 582        res->migration_pending = 0;
 583        res->inflight_locks = 0;
 584        res->inflight_assert_workers = 0;
 585
 586        res->dlm = dlm;
 587
 588        kref_init(&res->refs);
 589
 590        atomic_inc(&dlm->res_tot_count);
 591        atomic_inc(&dlm->res_cur_count);
 592
 593        /* just for consistency */
 594        spin_lock(&res->spinlock);
 595        dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
 596        spin_unlock(&res->spinlock);
 597
 598        res->state = DLM_LOCK_RES_IN_PROGRESS;
 599
 600        res->last_used = 0;
 601
 602        spin_lock(&dlm->spinlock);
 603        list_add_tail(&res->tracking, &dlm->tracking_list);
 604        spin_unlock(&dlm->spinlock);
 605
 606        memset(res->lvb, 0, DLM_LVB_LEN);
 607        memset(res->refmap, 0, sizeof(res->refmap));
 608}
 609
 610struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
 611                                   const char *name,
 612                                   unsigned int namelen)
 613{
 614        struct dlm_lock_resource *res = NULL;
 615
 616        res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
 617        if (!res)
 618                goto error;
 619
 620        res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
 621        if (!res->lockname.name)
 622                goto error;
 623
 624        dlm_init_lockres(dlm, res, name, namelen);
 625        return res;
 626
 627error:
 628        if (res && res->lockname.name)
 629                kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);
 630
 631        if (res)
 632                kmem_cache_free(dlm_lockres_cache, res);
 633        return NULL;
 634}
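/* illustrative usage, as in dlm_get_lock_resource() below:
 *
 *	res = dlm_new_lockres(dlm, lockid, namelen);
 *	if (!res)
 *		goto leave;	(allocation failed)
 *
 * a new lockres starts out with owner DLM_LOCK_RES_OWNER_UNKNOWN and state
 * DLM_LOCK_RES_IN_PROGRESS (see dlm_init_lockres() above). */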
 635
 636void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
 637                                struct dlm_lock_resource *res, int bit)
 638{
 639        assert_spin_locked(&res->spinlock);
 640
 641        mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
 642             res->lockname.name, bit, __builtin_return_address(0));
 643
 644        set_bit(bit, res->refmap);
 645}
 646
 647void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
 648                                  struct dlm_lock_resource *res, int bit)
 649{
 650        assert_spin_locked(&res->spinlock);
 651
 652        mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
 653             res->lockname.name, bit, __builtin_return_address(0));
 654
 655        clear_bit(bit, res->refmap);
 656}
 657
 658
 659void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
 660                                   struct dlm_lock_resource *res)
 661{
 662        assert_spin_locked(&res->spinlock);
 663
 664        res->inflight_locks++;
 665
 666        mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
 667             res->lockname.len, res->lockname.name, res->inflight_locks,
 668             __builtin_return_address(0));
 669}
 670
 671void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
 672                                   struct dlm_lock_resource *res)
 673{
 674        assert_spin_locked(&res->spinlock);
 675
 676        BUG_ON(res->inflight_locks == 0);
 677
 678        res->inflight_locks--;
 679
 680        mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
 681             res->lockname.len, res->lockname.name, res->inflight_locks,
 682             __builtin_return_address(0));
 683
 684        wake_up(&res->wq);
 685}
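/* note: inflight references are taken and dropped with res->spinlock held;
 * the drop path wakes res->wq so anyone waiting for the count to fall can
 * re-check it. */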
 686
 687void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
 688                struct dlm_lock_resource *res)
 689{
 690        assert_spin_locked(&res->spinlock);
 691        res->inflight_assert_workers++;
 692        mlog(0, "%s:%.*s: inflight assert worker++: now %u\n",
 693                        dlm->name, res->lockname.len, res->lockname.name,
 694                        res->inflight_assert_workers);
 695}
 696
 697static void dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
 698                struct dlm_lock_resource *res)
 699{
 700        spin_lock(&res->spinlock);
 701        __dlm_lockres_grab_inflight_worker(dlm, res);
 702        spin_unlock(&res->spinlock);
 703}
 704
 705static void __dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
 706                struct dlm_lock_resource *res)
 707{
 708        assert_spin_locked(&res->spinlock);
 709        BUG_ON(res->inflight_assert_workers == 0);
 710        res->inflight_assert_workers--;
 711        mlog(0, "%s:%.*s: inflight assert worker--: now %u\n",
 712                        dlm->name, res->lockname.len, res->lockname.name,
 713                        res->inflight_assert_workers);
 714}
 715
 716static void dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
 717                struct dlm_lock_resource *res)
 718{
 719        spin_lock(&res->spinlock);
 720        __dlm_lockres_drop_inflight_worker(dlm, res);
 721        spin_unlock(&res->spinlock);
 722}
 723
 724/*
 725 * lookup a lock resource by name.
 726 * may already exist in the hashtable.
 727 * lockid is null terminated
 728 *
 729 * if not, allocate enough for the lockres and for
 730 * the temporary structure used in doing the mastering.
 731 *
 732 * also, do a lookup in the dlm->master_list to see
 733 * if another node has begun mastering the same lock.
 734 * if so, there should be a block entry in there
 735 * for this name, and we should *not* attempt to master
 736 * the lock here.   need to wait around for that node
 737 * to assert_master (or die).
 738 *
 739 */
 740struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
 741                                          const char *lockid,
 742                                          int namelen,
 743                                          int flags)
 744{
 745        struct dlm_lock_resource *tmpres=NULL, *res=NULL;
 746        struct dlm_master_list_entry *mle = NULL;
 747        struct dlm_master_list_entry *alloc_mle = NULL;
 748        int blocked = 0;
 749        int ret, nodenum;
 750        struct dlm_node_iter iter;
 751        unsigned int hash;
 752        int tries = 0;
 753        int bit, wait_on_recovery = 0;
 754
 755        BUG_ON(!lockid);
 756
 757        hash = dlm_lockid_hash(lockid, namelen);
 758
 759        mlog(0, "get lockres %s (len %d)\n", lockid, namelen);
 760
 761lookup:
 762        spin_lock(&dlm->spinlock);
 763        tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
 764        if (tmpres) {
 765                spin_unlock(&dlm->spinlock);
 766                spin_lock(&tmpres->spinlock);
 767                /* Wait on the thread that is mastering the resource */
 768                if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
 769                        __dlm_wait_on_lockres(tmpres);
 770                        BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
 771                        spin_unlock(&tmpres->spinlock);
 772                        dlm_lockres_put(tmpres);
 773                        tmpres = NULL;
 774                        goto lookup;
 775                }
 776
 777                /* Wait on the resource purge to complete before continuing */
 778                if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
 779                        BUG_ON(tmpres->owner == dlm->node_num);
 780                        __dlm_wait_on_lockres_flags(tmpres,
 781                                                    DLM_LOCK_RES_DROPPING_REF);
 782                        spin_unlock(&tmpres->spinlock);
 783                        dlm_lockres_put(tmpres);
 784                        tmpres = NULL;
 785                        goto lookup;
 786                }
 787
 788                /* Grab inflight ref to pin the resource */
 789                dlm_lockres_grab_inflight_ref(dlm, tmpres);
 790
 791                spin_unlock(&tmpres->spinlock);
 792                if (res)
 793                        dlm_lockres_put(res);
 794                res = tmpres;
 795                goto leave;
 796        }
 797
 798        if (!res) {
 799                spin_unlock(&dlm->spinlock);
 800                mlog(0, "allocating a new resource\n");
 801                /* nothing found and we need to allocate one. */
 802                alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
 803                if (!alloc_mle)
 804                        goto leave;
 805                res = dlm_new_lockres(dlm, lockid, namelen);
 806                if (!res)
 807                        goto leave;
 808                goto lookup;
 809        }
 810
 811        mlog(0, "no lockres found, allocated our own: %p\n", res);
 812
 813        if (flags & LKM_LOCAL) {
 814                /* caller knows it's safe to assume it's not mastered elsewhere
 815                 * DONE!  return right away */
 816                spin_lock(&res->spinlock);
 817                dlm_change_lockres_owner(dlm, res, dlm->node_num);
 818                __dlm_insert_lockres(dlm, res);
 819                dlm_lockres_grab_inflight_ref(dlm, res);
 820                spin_unlock(&res->spinlock);
 821                spin_unlock(&dlm->spinlock);
 822                /* lockres still marked IN_PROGRESS */
 823                goto wake_waiters;
 824        }
 825
 826        /* check master list to see if another node has started mastering it */
 827        spin_lock(&dlm->master_lock);
 828
 829        /* if we found a block, wait for lock to be mastered by another node */
 830        blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
 831        if (blocked) {
 832                int mig;
 833                if (mle->type == DLM_MLE_MASTER) {
 834                        mlog(ML_ERROR, "master entry for nonexistent lock!\n");
 835                        BUG();
 836                }
 837                mig = (mle->type == DLM_MLE_MIGRATION);
 838                /* if there is a migration in progress, let the migration
 839                 * finish before continuing.  we can wait for the absence
 840                 * of the MIGRATION mle: either the migrate finished or
 841                 * one of the nodes died and the mle was cleaned up.
 842                 * if there is a BLOCK here, but it already has a master
 843                 * set, we are too late.  the master does not have a ref
 844                 * for us in the refmap.  detach the mle and drop it.
 845                 * either way, go back to the top and start over. */
 846                if (mig || mle->master != O2NM_MAX_NODES) {
 847                        BUG_ON(mig && mle->master == dlm->node_num);
 848                        /* we arrived too late.  the master does not
 849                         * have a ref for us. retry. */
 850                        mlog(0, "%s:%.*s: late on %s\n",
 851                             dlm->name, namelen, lockid,
 852                             mig ?  "MIGRATION" : "BLOCK");
 853                        spin_unlock(&dlm->master_lock);
 854                        spin_unlock(&dlm->spinlock);
 855
 856                        /* master is known, detach */
 857                        if (!mig)
 858                                dlm_mle_detach_hb_events(dlm, mle);
 859                        dlm_put_mle(mle);
 860                        mle = NULL;
 861                        /* this is lame, but we can't wait on either
 862                         * the mle or lockres waitqueue here */
 863                        if (mig)
 864                                msleep(100);
 865                        goto lookup;
 866                }
 867        } else {
 868                /* go ahead and try to master lock on this node */
 869                mle = alloc_mle;
 870                /* make sure this does not get freed below */
 871                alloc_mle = NULL;
 872                dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
 873                set_bit(dlm->node_num, mle->maybe_map);
 874                __dlm_insert_mle(dlm, mle);
 875
 876                /* still holding the dlm spinlock, check the recovery map
 877                 * to see if there are any nodes that still need to be
 878                 * considered.  these will not appear in the mle nodemap
 879                 * but they might own this lockres.  wait on them. */
 880                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
 881                if (bit < O2NM_MAX_NODES) {
 882                        mlog(0, "%s: res %.*s, At least one node (%d) "
 883                             "to recover before lock mastery can begin\n",
 884                             dlm->name, namelen, (char *)lockid, bit);
 885                        wait_on_recovery = 1;
 886                }
 887        }
 888
 889        /* at this point there is either a DLM_MLE_BLOCK or a
 890         * DLM_MLE_MASTER on the master list, so it's safe to add the
 891         * lockres to the hashtable.  anyone who finds the lock will
 892         * still have to wait on the IN_PROGRESS. */
 893
 894        /* finally add the lockres to its hash bucket */
 895        __dlm_insert_lockres(dlm, res);
 896
 897        /* Grab inflight ref to pin the resource */
 898        spin_lock(&res->spinlock);
 899        dlm_lockres_grab_inflight_ref(dlm, res);
 900        spin_unlock(&res->spinlock);
 901
  902        /* get an extra ref on the mle in case this is a BLOCK;
 903         * if so, the creator of the BLOCK may try to put the last
 904         * ref at this time in the assert master handler, so we
 905         * need an extra one to keep from a bad ptr deref. */
 906        dlm_get_mle_inuse(mle);
 907        spin_unlock(&dlm->master_lock);
 908        spin_unlock(&dlm->spinlock);
 909
 910redo_request:
 911        while (wait_on_recovery) {
 912                /* any cluster changes that occurred after dropping the
  913         * dlm spinlock would be detectable by a change on the mle,
 914                 * so we only need to clear out the recovery map once. */
 915                if (dlm_is_recovery_lock(lockid, namelen)) {
 916                        mlog(0, "%s: Recovery map is not empty, but must "
 917                             "master $RECOVERY lock now\n", dlm->name);
 918                        if (!dlm_pre_master_reco_lockres(dlm, res))
 919                                wait_on_recovery = 0;
 920                        else {
 921                                mlog(0, "%s: waiting 500ms for heartbeat state "
 922                                    "change\n", dlm->name);
 923                                msleep(500);
 924                        }
 925                        continue;
 926                }
 927
 928                dlm_kick_recovery_thread(dlm);
 929                msleep(1000);
 930                dlm_wait_for_recovery(dlm);
 931
 932                spin_lock(&dlm->spinlock);
 933                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
 934                if (bit < O2NM_MAX_NODES) {
 935                        mlog(0, "%s: res %.*s, At least one node (%d) "
 936                             "to recover before lock mastery can begin\n",
 937                             dlm->name, namelen, (char *)lockid, bit);
 938                        wait_on_recovery = 1;
 939                } else
 940                        wait_on_recovery = 0;
 941                spin_unlock(&dlm->spinlock);
 942
 943                if (wait_on_recovery)
 944                        dlm_wait_for_node_recovery(dlm, bit, 10000);
 945        }
 946
 947        /* must wait for lock to be mastered elsewhere */
 948        if (blocked)
 949                goto wait;
 950
 951        ret = -EINVAL;
 952        dlm_node_iter_init(mle->vote_map, &iter);
 953        while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
 954                ret = dlm_do_master_request(res, mle, nodenum);
 955                if (ret < 0)
 956                        mlog_errno(ret);
 957                if (mle->master != O2NM_MAX_NODES) {
 958                        /* found a master ! */
 959                        if (mle->master <= nodenum)
 960                                break;
 961                        /* if our master request has not reached the master
 962                         * yet, keep going until it does.  this is how the
 963                         * master will know that asserts are needed back to
 964                         * the lower nodes. */
 965                        mlog(0, "%s: res %.*s, Requests only up to %u but "
 966                             "master is %u, keep going\n", dlm->name, namelen,
 967                             lockid, nodenum, mle->master);
 968                }
 969        }
 970
 971wait:
 972        /* keep going until the response map includes all nodes */
 973        ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
 974        if (ret < 0) {
 975                wait_on_recovery = 1;
 976                mlog(0, "%s: res %.*s, Node map changed, redo the master "
 977                     "request now, blocked=%d\n", dlm->name, res->lockname.len,
 978                     res->lockname.name, blocked);
 979                if (++tries > 20) {
 980                        mlog(ML_ERROR, "%s: res %.*s, Spinning on "
 981                             "dlm_wait_for_lock_mastery, blocked = %d\n",
 982                             dlm->name, res->lockname.len,
 983                             res->lockname.name, blocked);
 984                        dlm_print_one_lock_resource(res);
 985                        dlm_print_one_mle(mle);
 986                        tries = 0;
 987                }
 988                goto redo_request;
 989        }
 990
 991        mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
 992             res->lockname.name, res->owner);
 993        /* make sure we never continue without this */
 994        BUG_ON(res->owner == O2NM_MAX_NODES);
 995
 996        /* master is known, detach if not already detached */
 997        dlm_mle_detach_hb_events(dlm, mle);
 998        dlm_put_mle(mle);
 999        /* put the extra ref */
1000        dlm_put_mle_inuse(mle);
1001
1002wake_waiters:
1003        spin_lock(&res->spinlock);
1004        res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
1005        spin_unlock(&res->spinlock);
1006        wake_up(&res->wq);
1007
1008leave:
1009        /* need to free the unused mle */
1010        if (alloc_mle)
1011                kmem_cache_free(dlm_mle_cache, alloc_mle);
1012
1013        return res;
1014}
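/* illustrative caller sketch (the error handling shown is hypothetical;
 * real callers live outside this file):
 *
 *	res = dlm_get_lock_resource(dlm, lockid, namelen, flags);
 *	if (!res)
 *		return DLM_IVLOCKID;
 *	...
 *	dlm_lockres_put(res);	(drop the reference when done)
 *
 * on success the resource comes back pinned by an inflight reference (see
 * dlm_lockres_grab_inflight_ref() above) with its master resolved. */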
1015
1016
1017#define DLM_MASTERY_TIMEOUT_MS   5000
1018
1019static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
1020                                     struct dlm_lock_resource *res,
1021                                     struct dlm_master_list_entry *mle,
1022                                     int *blocked)
1023{
1024        u8 m;
1025        int ret, bit;
1026        int map_changed, voting_done;
1027        int assert, sleep;
1028
1029recheck:
1030        ret = 0;
1031        assert = 0;
1032
1033        /* check if another node has already become the owner */
1034        spin_lock(&res->spinlock);
1035        if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1036                mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
1037                     res->lockname.len, res->lockname.name, res->owner);
1038                spin_unlock(&res->spinlock);
1039                /* this will cause the master to re-assert across
1040                 * the whole cluster, freeing up mles */
1041                if (res->owner != dlm->node_num) {
1042                        ret = dlm_do_master_request(res, mle, res->owner);
1043                        if (ret < 0) {
1044                                /* give recovery a chance to run */
1045                                mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
1046                                msleep(500);
1047                                goto recheck;
1048                        }
1049                }
1050                ret = 0;
1051                goto leave;
1052        }
1053        spin_unlock(&res->spinlock);
1054
1055        spin_lock(&mle->spinlock);
1056        m = mle->master;
1057        map_changed = (memcmp(mle->vote_map, mle->node_map,
1058                              sizeof(mle->vote_map)) != 0);
1059        voting_done = (memcmp(mle->vote_map, mle->response_map,
1060                             sizeof(mle->vote_map)) == 0);
1061
1062        /* restart if we hit any errors */
1063        if (map_changed) {
1064                int b;
1065                mlog(0, "%s: %.*s: node map changed, restarting\n",
1066                     dlm->name, res->lockname.len, res->lockname.name);
1067                ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
1068                b = (mle->type == DLM_MLE_BLOCK);
1069                if ((*blocked && !b) || (!*blocked && b)) {
1070                        mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
1071                             dlm->name, res->lockname.len, res->lockname.name,
1072                             *blocked, b);
1073                        *blocked = b;
1074                }
1075                spin_unlock(&mle->spinlock);
1076                if (ret < 0) {
1077                        mlog_errno(ret);
1078                        goto leave;
1079                }
1080                mlog(0, "%s:%.*s: restart lock mastery succeeded, "
1081                     "rechecking now\n", dlm->name, res->lockname.len,
1082                     res->lockname.name);
1083                goto recheck;
1084        } else {
1085                if (!voting_done) {
1086                        mlog(0, "map not changed and voting not done "
1087                             "for %s:%.*s\n", dlm->name, res->lockname.len,
1088                             res->lockname.name);
1089                }
1090        }
1091
1092        if (m != O2NM_MAX_NODES) {
1093                /* another node has done an assert!
1094                 * all done! */
1095                sleep = 0;
1096        } else {
1097                sleep = 1;
1098                /* have all nodes responded? */
1099                if (voting_done && !*blocked) {
1100                        bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
1101                        if (dlm->node_num <= bit) {
1102                                /* my node number is lowest.
1103                                 * now tell other nodes that I am
1104                                 * mastering this. */
1105                                mle->master = dlm->node_num;
1106                                /* ref was grabbed in get_lock_resource
1107                                 * will be dropped in dlmlock_master */
1108                                assert = 1;
1109                                sleep = 0;
1110                        }
1111                        /* if voting is done, but we have not received
1112                         * an assert master yet, we must sleep */
1113                }
1114        }
1115
1116        spin_unlock(&mle->spinlock);
1117
1118        /* sleep if we haven't finished voting yet */
1119        if (sleep) {
1120                unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);
1121
1122                /*
1123                if (atomic_read(&mle->mle_refs.refcount) < 2)
1124                        mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
1125                        atomic_read(&mle->mle_refs.refcount),
1126                        res->lockname.len, res->lockname.name);
1127                */
1128                atomic_set(&mle->woken, 0);
1129                (void)wait_event_timeout(mle->wq,
1130                                         (atomic_read(&mle->woken) == 1),
1131                                         timeo);
1132                if (res->owner == O2NM_MAX_NODES) {
1133                        mlog(0, "%s:%.*s: waiting again\n", dlm->name,
1134                             res->lockname.len, res->lockname.name);
1135                        goto recheck;
1136                }
1137                mlog(0, "done waiting, master is %u\n", res->owner);
1138                ret = 0;
1139                goto leave;
1140        }
1141
1142        ret = 0;   /* done */
1143        if (assert) {
1144                m = dlm->node_num;
1145                mlog(0, "about to master %.*s here, this=%u\n",
1146                     res->lockname.len, res->lockname.name, m);
1147                ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
1148                if (ret) {
1149                        /* This is a failure in the network path,
1150                         * not in the response to the assert_master
1151                         * (any nonzero response is a BUG on this node).
1152                         * Most likely a socket just got disconnected
1153                         * due to node death. */
1154                        mlog_errno(ret);
1155                }
1156                /* no longer need to restart lock mastery.
1157                 * all living nodes have been contacted. */
1158                ret = 0;
1159        }
1160
1161        /* set the lockres owner */
1162        spin_lock(&res->spinlock);
1163        /* mastery reference obtained either during
1164         * assert_master_handler or in get_lock_resource */
1165        dlm_change_lockres_owner(dlm, res, m);
1166        spin_unlock(&res->spinlock);
1167
1168leave:
1169        return ret;
1170}
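/* in short: the recheck loop above exits once the owner is already known
 * (possibly re-sending a master request to it), restarts mastery when the
 * node map changes, and asserts master itself once voting is done and this
 * node is the lowest-numbered node in maybe_map; otherwise it sleeps for
 * up to DLM_MASTERY_TIMEOUT_MS and rechecks. */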
1171
1172struct dlm_bitmap_diff_iter
1173{
1174        int curnode;
1175        unsigned long *orig_bm;
1176        unsigned long *cur_bm;
1177        unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
1178};
1179
1180enum dlm_node_state_change
1181{
1182        NODE_DOWN = -1,
1183        NODE_NO_CHANGE = 0,
1184        NODE_UP
1185};
1186
1187static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
1188                                      unsigned long *orig_bm,
1189                                      unsigned long *cur_bm)
1190{
1191        unsigned long p1, p2;
1192        int i;
1193
1194        iter->curnode = -1;
1195        iter->orig_bm = orig_bm;
1196        iter->cur_bm = cur_bm;
1197
1198        for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
1199                p1 = *(iter->orig_bm + i);
1200                p2 = *(iter->cur_bm + i);
1201                iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
1202        }
1203}
1204
1205static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
1206                                     enum dlm_node_state_change *state)
1207{
1208        int bit;
1209
1210        if (iter->curnode >= O2NM_MAX_NODES)
1211                return -ENOENT;
1212
1213        bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
1214                            iter->curnode+1);
1215        if (bit >= O2NM_MAX_NODES) {
1216                iter->curnode = O2NM_MAX_NODES;
1217                return -ENOENT;
1218        }
1219
1220        /* if it was there in the original then this node died */
1221        if (test_bit(bit, iter->orig_bm))
1222                *state = NODE_DOWN;
1223        else
1224                *state = NODE_UP;
1225
1226        iter->curnode = bit;
1227        return bit;
1228}
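/* worked example: with orig_bm = 0101b and cur_bm = 0011b, diff_bm ends up
 * as (p1 & ~p2) | (p2 & ~p1) = 0110b, i.e. the XOR of the two maps.
 * iterating the diff then yields bit 1 (set only in cur_bm -> NODE_UP) and
 * bit 2 (set only in orig_bm -> NODE_DOWN). */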
1229
1230
1231static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
1232                                    struct dlm_lock_resource *res,
1233                                    struct dlm_master_list_entry *mle,
1234                                    int blocked)
1235{
1236        struct dlm_bitmap_diff_iter bdi;
1237        enum dlm_node_state_change sc;
1238        int node;
1239        int ret = 0;
1240
1241        mlog(0, "something happened such that the "
1242             "master process may need to be restarted!\n");
1243
1244        assert_spin_locked(&mle->spinlock);
1245
1246        dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
1247        node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1248        while (node >= 0) {
1249                if (sc == NODE_UP) {
1250                        /* a node came up.  clear any old vote from
1251                         * the response map and set it in the vote map
1252                         * then restart the mastery. */
1253                        mlog(ML_NOTICE, "node %d up while restarting\n", node);
1254
1255                        /* redo the master request, but only for the new node */
1256                        mlog(0, "sending request to new node\n");
1257                        clear_bit(node, mle->response_map);
1258                        set_bit(node, mle->vote_map);
1259                } else {
1260                        mlog(ML_ERROR, "node down! %d\n", node);
1261                        if (blocked) {
1262                                int lowest = find_next_bit(mle->maybe_map,
1263                                                       O2NM_MAX_NODES, 0);
1264
1265                                /* act like it was never there */
1266                                clear_bit(node, mle->maybe_map);
1267
1268                                if (node == lowest) {
1269                                        mlog(0, "expected master %u died"
1270                                            " while this node was blocked "
1271                                            "waiting on it!\n", node);
1272                                        lowest = find_next_bit(mle->maybe_map,
1273                                                        O2NM_MAX_NODES,
1274                                                        lowest+1);
1275                                        if (lowest < O2NM_MAX_NODES) {
1276                                                mlog(0, "%s:%.*s:still "
1277                                                     "blocked. waiting on %u "
1278                                                     "now\n", dlm->name,
1279                                                     res->lockname.len,
1280                                                     res->lockname.name,
1281                                                     lowest);
1282                                        } else {
1283                                                /* mle is an MLE_BLOCK, but
1284                                                 * there is now nothing left to
1285                                                 * block on.  we need to return
1286                                                 * all the way back out and try
1287                                                 * again with an MLE_MASTER.
1288                                                 * dlm_do_local_recovery_cleanup
1289                                                 * has already run, so the mle
1290                                                 * refcount is ok */
1291                                                mlog(0, "%s:%.*s: no "
1292                                                     "longer blocking. try to "
1293                                                     "master this here\n",
1294                                                     dlm->name,
1295                                                     res->lockname.len,
1296                                                     res->lockname.name);
1297                                                mle->type = DLM_MLE_MASTER;
1298                                                mle->mleres = res;
1299                                        }
1300                                }
1301                        }
1302
1303                        /* now blank out everything, as if we had never
1304                         * contacted anyone */
1305                        memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
1306                        memset(mle->response_map, 0, sizeof(mle->response_map));
1307                        /* reset the vote_map to the current node_map */
1308                        memcpy(mle->vote_map, mle->node_map,
1309                               sizeof(mle->node_map));
1310                        /* put myself into the maybe map */
1311                        if (mle->type != DLM_MLE_BLOCK)
1312                                set_bit(dlm->node_num, mle->maybe_map);
1313                }
1314                ret = -EAGAIN;
1315                node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1316        }
1317        return ret;
1318}
1319
1320
1321/*
1322 * DLM_MASTER_REQUEST_MSG
1323 *
1324 * returns: 0 on success,
1325 *          -errno on a network error
1326 *
1327 * on error, the caller should assume the target node is "dead"
1328 *
1329 */
1330
1331static int dlm_do_master_request(struct dlm_lock_resource *res,
1332                                 struct dlm_master_list_entry *mle, int to)
1333{
1334        struct dlm_ctxt *dlm = mle->dlm;
1335        struct dlm_master_request request;
1336        int ret, response=0, resend;
1337
1338        memset(&request, 0, sizeof(request));
1339        request.node_idx = dlm->node_num;
1340
1341        BUG_ON(mle->type == DLM_MLE_MIGRATION);
1342
1343        request.namelen = (u8)mle->mnamelen;
1344        memcpy(request.name, mle->mname, request.namelen);
1345
1346again:
1347        ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
1348                                 sizeof(request), to, &response);
1349        if (ret < 0)  {
1350                if (ret == -ESRCH) {
1351                        /* should never happen */
1352                        mlog(ML_ERROR, "TCP stack not ready!\n");
1353                        BUG();
1354                } else if (ret == -EINVAL) {
1355                        mlog(ML_ERROR, "bad args passed to o2net!\n");
1356                        BUG();
1357                } else if (ret == -ENOMEM) {
1358                        mlog(ML_ERROR, "out of memory while trying to send "
1359                             "network message!  retrying\n");
1360                        /* this is totally crude */
1361                        msleep(50);
1362                        goto again;
1363                } else if (!dlm_is_host_down(ret)) {
1364                        /* not a network error. bad. */
1365                        mlog_errno(ret);
1366                        mlog(ML_ERROR, "unhandled error!");
1367                        BUG();
1368                }
1369                /* all other errors should be network errors,
1370                 * and likely indicate node death */
1371                mlog(ML_ERROR, "link to %d went down!\n", to);
1372                goto out;
1373        }
1374
1375        ret = 0;
1376        resend = 0;
1377        spin_lock(&mle->spinlock);
1378        switch (response) {
1379                case DLM_MASTER_RESP_YES:
1380                        set_bit(to, mle->response_map);
1381                        mlog(0, "node %u is the master, response=YES\n", to);
1382                        mlog(0, "%s:%.*s: master node %u now knows I have a "
1383                             "reference\n", dlm->name, res->lockname.len,
1384                             res->lockname.name, to);
1385                        mle->master = to;
1386                        break;
1387                case DLM_MASTER_RESP_NO:
1388                        mlog(0, "node %u not master, response=NO\n", to);
1389                        set_bit(to, mle->response_map);
1390                        break;
1391                case DLM_MASTER_RESP_MAYBE:
1392                        mlog(0, "node %u not master, response=MAYBE\n", to);
1393                        set_bit(to, mle->response_map);
1394                        set_bit(to, mle->maybe_map);
1395                        break;
1396                case DLM_MASTER_RESP_ERROR:
1397                        mlog(0, "node %u hit an error, resending\n", to);
1398                        resend = 1;
1399                        response = 0;
1400                        break;
1401                default:
1402                        mlog(ML_ERROR, "bad response! %u\n", response);
1403                        BUG();
1404        }
1405        spin_unlock(&mle->spinlock);
1406        if (resend) {
1407                /* this is also totally crude */
1408                msleep(50);
1409                goto again;
1410        }
1411
1412out:
1413        return ret;
1414}
1415
1416/*
1417 * locks that can be taken here:
1418 * dlm->spinlock
1419 * res->spinlock
1420 * mle->spinlock
1421 * dlm->master_list
1422 *
1423 * if possible, TRIM THIS DOWN!!!
1424 */
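/*
 * Reply summary:
 *   RESP_YES   - this node owns the lockres; an assert_master is then
 *                dispatched to clean up the mles created on other nodes
 *   RESP_NO    - another node owns it, or this node is merely blocked on it
 *   RESP_MAYBE - this node is itself in the middle of mastering the lockres
 *   RESP_ERROR - the lockres is recovering/migrating or an allocation
 *                failed; the requester will resend
 */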
1425int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
1426                               void **ret_data)
1427{
1428        u8 response = DLM_MASTER_RESP_MAYBE;
1429        struct dlm_ctxt *dlm = data;
1430        struct dlm_lock_resource *res = NULL;
1431        struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
1432        struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
1433        char *name;
1434        unsigned int namelen, hash;
1435        int found, ret;
1436        int set_maybe;
1437        int dispatch_assert = 0;
1438
1439        if (!dlm_grab(dlm))
1440                return DLM_MASTER_RESP_NO;
1441
1442        if (!dlm_domain_fully_joined(dlm)) {
1443                response = DLM_MASTER_RESP_NO;
1444                goto send_response;
1445        }
1446
1447        name = request->name;
1448        namelen = request->namelen;
1449        hash = dlm_lockid_hash(name, namelen);
1450
1451        if (namelen > DLM_LOCKID_NAME_MAX) {
1452                response = DLM_IVBUFLEN;
1453                goto send_response;
1454        }
1455
1456way_up_top:
1457        spin_lock(&dlm->spinlock);
1458        res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1459        if (res) {
1460                spin_unlock(&dlm->spinlock);
1461
1462                /* take care of the easy cases up front */
1463                spin_lock(&res->spinlock);
1464                if (res->state & (DLM_LOCK_RES_RECOVERING|
1465                                  DLM_LOCK_RES_MIGRATING)) {
1466                        spin_unlock(&res->spinlock);
1467                        mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
1468                             "being recovered/migrated\n");
1469                        response = DLM_MASTER_RESP_ERROR;
1470                        if (mle)
1471                                kmem_cache_free(dlm_mle_cache, mle);
1472                        goto send_response;
1473                }
1474
1475                if (res->owner == dlm->node_num) {
1476                        dlm_lockres_set_refmap_bit(dlm, res, request->node_idx);
1477                        spin_unlock(&res->spinlock);
1478                        response = DLM_MASTER_RESP_YES;
1479                        if (mle)
1480                                kmem_cache_free(dlm_mle_cache, mle);
1481
1482                        /* this node is the owner.
1483                         * there is some extra work that needs to
1484                         * happen now.  the requesting node has
1485                         * caused all nodes up to this one to
1486                         * create mles.  this node now needs to
1487                         * go back and clean those up. */
1488                        dispatch_assert = 1;
1489                        goto send_response;
1490                } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1491                        spin_unlock(&res->spinlock);
1492                        // mlog(0, "node %u is the master\n", res->owner);
1493                        response = DLM_MASTER_RESP_NO;
1494                        if (mle)
1495                                kmem_cache_free(dlm_mle_cache, mle);
1496                        goto send_response;
1497                }
1498
1499                /* ok, there is no owner.  either this node is
1500                 * being blocked, or it is actively trying to
1501                 * master this lock. */
1502                if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1503                        mlog(ML_ERROR, "lock with no owner should be "
1504                             "in-progress!\n");
1505                        BUG();
1506                }
1507
1508                // mlog(0, "lockres is in progress...\n");
1509                spin_lock(&dlm->master_lock);
1510                found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1511                if (!found) {
1512                        mlog(ML_ERROR, "no mle found for this lock!\n");
1513                        BUG();
1514                }
1515                set_maybe = 1;
1516                spin_lock(&tmpmle->spinlock);
1517                if (tmpmle->type == DLM_MLE_BLOCK) {
1518                        // mlog(0, "this node is waiting for "
1519                        // "lockres to be mastered\n");
1520                        response = DLM_MASTER_RESP_NO;
1521                } else if (tmpmle->type == DLM_MLE_MIGRATION) {
1522                        mlog(0, "node %u is master, but trying to migrate to "
1523                             "node %u.\n", tmpmle->master, tmpmle->new_master);
1524                        if (tmpmle->master == dlm->node_num) {
1525                                mlog(ML_ERROR, "no owner on lockres, but this "
1526                                     "node is trying to migrate it to %u?!\n",
1527                                     tmpmle->new_master);
1528                                BUG();
1529                        } else {
1530                                /* the real master can respond on its own */
1531                                response = DLM_MASTER_RESP_NO;
1532                        }
1533                } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1534                        set_maybe = 0;
1535                        if (tmpmle->master == dlm->node_num) {
1536                                response = DLM_MASTER_RESP_YES;
1537                                /* this node will be the owner.
1538                                 * go back and clean the mles on any
1539                                 * other nodes */
1540                                dispatch_assert = 1;
1541                                dlm_lockres_set_refmap_bit(dlm, res,
1542                                                           request->node_idx);
1543                        } else
1544                                response = DLM_MASTER_RESP_NO;
1545                } else {
1546                        // mlog(0, "this node is attempting to "
1547                        // "master lockres\n");
1548                        response = DLM_MASTER_RESP_MAYBE;
1549                }
1550                if (set_maybe)
1551                        set_bit(request->node_idx, tmpmle->maybe_map);
1552                spin_unlock(&tmpmle->spinlock);
1553
1554                spin_unlock(&dlm->master_lock);
1555                spin_unlock(&res->spinlock);
1556
1557                /* keep the mle attached to heartbeat events */
1558                dlm_put_mle(tmpmle);
1559                if (mle)
1560                        kmem_cache_free(dlm_mle_cache, mle);
1561                goto send_response;
1562        }
1563
1564        /*
1565         * lockres doesn't exist on this node
1566         * if there is an MLE_BLOCK, return NO
1567         * if there is an MLE_MASTER, return MAYBE
1568         * otherwise, add an MLE_BLOCK, return NO
1569         */
1570        spin_lock(&dlm->master_lock);
1571        found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1572        if (!found) {
1573                /* this lockid has never been seen on this node yet */
1574                // mlog(0, "no mle found\n");
1575                if (!mle) {
1576                        spin_unlock(&dlm->master_lock);
1577                        spin_unlock(&dlm->spinlock);
1578
1579                        mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
1580                        if (!mle) {
1581                                response = DLM_MASTER_RESP_ERROR;
1582                                mlog_errno(-ENOMEM);
1583                                goto send_response;
1584                        }
1585                        goto way_up_top;
1586                }
1587
1588                // mlog(0, "this is second time thru, already allocated, "
1589                // "add the block.\n");
1590                dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
1591                set_bit(request->node_idx, mle->maybe_map);
1592                __dlm_insert_mle(dlm, mle);
1593                response = DLM_MASTER_RESP_NO;
1594        } else {
1595                // mlog(0, "mle was found\n");
1596                set_maybe = 1;
1597                spin_lock(&tmpmle->spinlock);
1598                if (tmpmle->master == dlm->node_num) {
1599                        mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
1600                        BUG();
1601                }
1602                if (tmpmle->type == DLM_MLE_BLOCK)
1603                        response = DLM_MASTER_RESP_NO;
1604                else if (tmpmle->type == DLM_MLE_MIGRATION) {
1605                        mlog(0, "migration mle was found (%u->%u)\n",
1606                             tmpmle->master, tmpmle->new_master);
1607                        /* real master can respond on its own */
1608                        response = DLM_MASTER_RESP_NO;
1609                } else
1610                        response = DLM_MASTER_RESP_MAYBE;
1611                if (set_maybe)
1612                        set_bit(request->node_idx, tmpmle->maybe_map);
1613                spin_unlock(&tmpmle->spinlock);
1614        }
1615        spin_unlock(&dlm->master_lock);
1616        spin_unlock(&dlm->spinlock);
1617
1618        if (found) {
1619                /* keep the mle attached to heartbeat events */
1620                dlm_put_mle(tmpmle);
1621        }
1622send_response:
1623        /*
1624         * __dlm_lookup_lockres() grabbed a reference to this lockres.
1625         * The reference is released by dlm_assert_master_worker() under
1626         * the call to dlm_dispatch_assert_master().  If
1627         * dlm_assert_master_worker() isn't called, we drop it here.
1628         */
1629        if (dispatch_assert) {
1630                if (response != DLM_MASTER_RESP_YES)
1631                        mlog(ML_ERROR, "invalid response %d\n", response);
1632                if (!res) {
1633                        mlog(ML_ERROR, "bad lockres while trying to assert!\n");
1634                        BUG();
1635                }
1636                mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
1637                             dlm->node_num, res->lockname.len, res->lockname.name);
1638                ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
1639                                                 DLM_ASSERT_MASTER_MLE_CLEANUP);
1640                if (ret < 0) {
1641                        mlog(ML_ERROR, "failed to dispatch assert master work\n");
1642                        response = DLM_MASTER_RESP_ERROR;
1643                        dlm_lockres_put(res);
1644                } else
1645                        dlm_lockres_grab_inflight_worker(dlm, res);
1646        } else {
1647                if (res)
1648                        dlm_lockres_put(res);
1649        }
1650
1651        dlm_put(dlm);
1652        return response;
1653}
1654
1655/*
1656 * DLM_ASSERT_MASTER_MSG
1657 */
1658
1659
1660/*
1661 * NOTE: this can be used for debugging
1662 * can periodically run all locks owned by this node
1663 * and re-assert across the cluster...
1664 */
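/*
 * Each node answers with a small bitmask:
 *   DLM_ASSERT_RESPONSE_REASSERT    - the receiver had responses from other
 *                                     nodes recorded in its mle, so this
 *                                     node must run the assert pass again
 *   DLM_ASSERT_RESPONSE_MASTERY_REF - the receiver holds a reference to the
 *                                     lockres; set its bit in the refmap
 * A negative status means the receiver considered the assert invalid, and
 * this node BUG()s.
 */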
1665static int dlm_do_assert_master(struct dlm_ctxt *dlm,
1666                                struct dlm_lock_resource *res,
1667                                void *nodemap, u32 flags)
1668{
1669        struct dlm_assert_master assert;
1670        int to, tmpret;
1671        struct dlm_node_iter iter;
1672        int ret = 0;
1673        int reassert;
1674        const char *lockname = res->lockname.name;
1675        unsigned int namelen = res->lockname.len;
1676
1677        BUG_ON(namelen > O2NM_MAX_NAME_LEN);
1678
1679        spin_lock(&res->spinlock);
1680        res->state |= DLM_LOCK_RES_SETREF_INPROG;
1681        spin_unlock(&res->spinlock);
1682
1683again:
1684        reassert = 0;
1685
1686        /* note that if this nodemap is empty, it returns 0 */
1687        dlm_node_iter_init(nodemap, &iter);
1688        while ((to = dlm_node_iter_next(&iter)) >= 0) {
1689                int r = 0;
1690                struct dlm_master_list_entry *mle = NULL;
1691
1692                mlog(0, "sending assert master to %d (%.*s)\n", to,
1693                     namelen, lockname);
1694                memset(&assert, 0, sizeof(assert));
1695                assert.node_idx = dlm->node_num;
1696                assert.namelen = namelen;
1697                memcpy(assert.name, lockname, namelen);
1698                assert.flags = cpu_to_be32(flags);
1699
1700                tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
1701                                            &assert, sizeof(assert), to, &r);
1702                if (tmpret < 0) {
1703                        mlog(ML_ERROR, "Error %d when sending message %u (key "
1704                             "0x%x) to node %u\n", tmpret,
1705                             DLM_ASSERT_MASTER_MSG, dlm->key, to);
1706                        if (!dlm_is_host_down(tmpret)) {
1707                                mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
1708                                BUG();
1709                        }
1710                        /* a node died.  finish out the rest of the nodes. */
1711                        mlog(0, "link to %d went down!\n", to);
1712                        /* any nonzero status return will do */
1713                        ret = tmpret;
1714                        r = 0;
1715                } else if (r < 0) {
1716                        /* ok, something is horribly messed up.  kill thyself. */
1717                        mlog(ML_ERROR,"during assert master of %.*s to %u, "
1718                             "got %d.\n", namelen, lockname, to, r);
1719                        spin_lock(&dlm->spinlock);
1720                        spin_lock(&dlm->master_lock);
1721                        if (dlm_find_mle(dlm, &mle, (char *)lockname,
1722                                         namelen)) {
1723                                dlm_print_one_mle(mle);
1724                                __dlm_put_mle(mle);
1725                        }
1726                        spin_unlock(&dlm->master_lock);
1727                        spin_unlock(&dlm->spinlock);
1728                        BUG();
1729                }
1730
1731                if (r & DLM_ASSERT_RESPONSE_REASSERT &&
1732                    !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
1733                        mlog(ML_ERROR, "%.*s: very strange, "
1734                             "master MLE but no lockres on %u\n",
1735                             namelen, lockname, to);
1736                }
1737
1738                if (r & DLM_ASSERT_RESPONSE_REASSERT) {
1739                        mlog(0, "%.*s: node %u created mles on other "
1740                             "nodes and requests a re-assert\n",
1741                             namelen, lockname, to);
1742                        reassert = 1;
1743                }
1744                if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
1745                        mlog(0, "%.*s: node %u has a reference to this "
1746                             "lockres, set the bit in the refmap\n",
1747                             namelen, lockname, to);
1748                        spin_lock(&res->spinlock);
1749                        dlm_lockres_set_refmap_bit(dlm, res, to);
1750                        spin_unlock(&res->spinlock);
1751                }
1752        }
1753
1754        if (reassert)
1755                goto again;
1756
1757        spin_lock(&res->spinlock);
1758        res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
1759        spin_unlock(&res->spinlock);
1760        wake_up(&res->wq);
1761
1762        return ret;
1763}
1764
1765/*
1766 * locks that can be taken here:
1767 * dlm->spinlock
1768 * res->spinlock
1769 * mle->spinlock
1770 * dlm->master_list
1771 *
1772 * if possible, TRIM THIS DOWN!!!
1773 */
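/*
 * Returns 0, or a positive mask of DLM_ASSERT_RESPONSE_REASSERT and/or
 * DLM_ASSERT_RESPONSE_MASTERY_REF, or -EINVAL when the assert is bad
 * enough that the asserting node must be killed (the "kill" path below).
 */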
1774int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
1775                              void **ret_data)
1776{
1777        struct dlm_ctxt *dlm = data;
1778        struct dlm_master_list_entry *mle = NULL;
1779        struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
1780        struct dlm_lock_resource *res = NULL;
1781        char *name;
1782        unsigned int namelen, hash;
1783        u32 flags;
1784        int master_request = 0, have_lockres_ref = 0;
1785        int ret = 0;
1786
1787        if (!dlm_grab(dlm))
1788                return 0;
1789
1790        name = assert->name;
1791        namelen = assert->namelen;
1792        hash = dlm_lockid_hash(name, namelen);
1793        flags = be32_to_cpu(assert->flags);
1794
1795        if (namelen > DLM_LOCKID_NAME_MAX) {
1796                mlog(ML_ERROR, "Invalid name length!");
1797                goto done;
1798        }
1799
1800        spin_lock(&dlm->spinlock);
1801
1802        if (flags)
1803                mlog(0, "assert_master with flags: %u\n", flags);
1804
1805        /* find the MLE */
1806        spin_lock(&dlm->master_lock);
1807        if (!dlm_find_mle(dlm, &mle, name, namelen)) {
1808                /* not an error, could be master just re-asserting */
1809                mlog(0, "just got an assert_master from %u, but no "
1810                     "MLE for it! (%.*s)\n", assert->node_idx,
1811                     namelen, name);
1812        } else {
1813                int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
1814                if (bit >= O2NM_MAX_NODES) {
1815                        /* not necessarily an error, though less likely.
1816                         * could be master just re-asserting. */
1817                        mlog(0, "no bits set in the maybe_map, but %u "
1818                             "is asserting! (%.*s)\n", assert->node_idx,
1819                             namelen, name);
1820                } else if (bit != assert->node_idx) {
1821                        if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1822                                mlog(0, "master %u was found, %u should "
1823                                     "back off\n", assert->node_idx, bit);
1824                        } else {
1825                                /* with the fix for bug 569, a higher node
1826                                 * number winning the mastery will respond
1827                                 * YES to mastery requests, but this node
1828                                 * had no way of knowing.  let it pass. */
1829                                mlog(0, "%u is the lowest node, "
1830                                     "%u is asserting. (%.*s)  %u must "
1831                                     "have begun after %u won.\n", bit,
1832                                     assert->node_idx, namelen, name, bit,
1833                                     assert->node_idx);
1834                        }
1835                }
1836                if (mle->type == DLM_MLE_MIGRATION) {
1837                        if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1838                                mlog(0, "%s:%.*s: got cleanup assert"
1839                                     " from %u for migration\n",
1840                                     dlm->name, namelen, name,
1841                                     assert->node_idx);
1842                        } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
1843                                mlog(0, "%s:%.*s: got unrelated assert"
1844                                     " from %u for migration, ignoring\n",
1845                                     dlm->name, namelen, name,
1846                                     assert->node_idx);
1847                                __dlm_put_mle(mle);
1848                                spin_unlock(&dlm->master_lock);
1849                                spin_unlock(&dlm->spinlock);
1850                                goto done;
1851                        }
1852                }
1853        }
1854        spin_unlock(&dlm->master_lock);
1855
1856        /* ok everything checks out with the MLE
1857         * now check to see if there is a lockres */
1858        res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1859        if (res) {
1860                spin_lock(&res->spinlock);
1861                if (res->state & DLM_LOCK_RES_RECOVERING)  {
1862                        mlog(ML_ERROR, "%u asserting but %.*s is "
1863                             "RECOVERING!\n", assert->node_idx, namelen, name);
1864                        goto kill;
1865                }
1866                if (!mle) {
1867                        if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
1868                            res->owner != assert->node_idx) {
1869                                mlog(ML_ERROR, "DIE! Mastery assert from %u, "
1870                                     "but current owner is %u! (%.*s)\n",
1871                                     assert->node_idx, res->owner, namelen,
1872                                     name);
1873                                __dlm_print_one_lock_resource(res);
1874                                BUG();
1875                        }
1876                } else if (mle->type != DLM_MLE_MIGRATION) {
1877                        if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1878                                /* owner is just re-asserting */
1879                                if (res->owner == assert->node_idx) {
1880                                        mlog(0, "owner %u re-asserting on "
1881                                             "lock %.*s\n", assert->node_idx,
1882                                             namelen, name);
1883                                        goto ok;
1884                                }
1885                                mlog(ML_ERROR, "got assert_master from "
1886                                     "node %u, but %u is the owner! "
1887                                     "(%.*s)\n", assert->node_idx,
1888                                     res->owner, namelen, name);
1889                                goto kill;
1890                        }
1891                        if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1892                                mlog(ML_ERROR, "got assert from %u, but lock "
1893                                     "with no owner should be "
1894                                     "in-progress! (%.*s)\n",
1895                                     assert->node_idx,
1896                                     namelen, name);
1897                                goto kill;
1898                        }
1899                } else /* mle->type == DLM_MLE_MIGRATION */ {
1900                        /* should only be getting an assert from new master */
1901                        if (assert->node_idx != mle->new_master) {
1902                                mlog(ML_ERROR, "got assert from %u, but "
1903                                     "new master is %u, and old master "
1904                                     "was %u (%.*s)\n",
1905                                     assert->node_idx, mle->new_master,
1906                                     mle->master, namelen, name);
1907                                goto kill;
1908                        }
1909
1910                }
1911ok:
1912                spin_unlock(&res->spinlock);
1913        }
1914
1915        // mlog(0, "woo!  got an assert_master from node %u!\n",
1916        //           assert->node_idx);
1917        if (mle) {
1918                int extra_ref = 0;
1919                int nn = -1;
1920                int rr, err = 0;
1921
1922                spin_lock(&mle->spinlock);
1923                if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
1924                        extra_ref = 1;
1925                else {
1926                        /* MASTER mle: if any bits are set in the response map
1927                         * then the calling node needs to re-assert to clear
1928                         * up nodes that this node contacted */
1929                        while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
1930                                                    nn+1)) < O2NM_MAX_NODES) {
1931                                if (nn != dlm->node_num && nn != assert->node_idx) {
1932                                        master_request = 1;
1933                                        break;
1934                                }
1935                        }
1936                }
1937                mle->master = assert->node_idx;
1938                atomic_set(&mle->woken, 1);
1939                wake_up(&mle->wq);
1940                spin_unlock(&mle->spinlock);
1941
1942                if (res) {
1943                        int wake = 0;
1944                        spin_lock(&res->spinlock);
1945                        if (mle->type == DLM_MLE_MIGRATION) {
1946                                mlog(0, "finishing off migration of lockres %.*s, "
1947                                        "from %u to %u\n",
1948                                        res->lockname.len, res->lockname.name,
1949                                        dlm->node_num, mle->new_master);
1950                                res->state &= ~DLM_LOCK_RES_MIGRATING;
1951                                wake = 1;
1952                                dlm_change_lockres_owner(dlm, res, mle->new_master);
1953                                BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
1954                        } else {
1955                                dlm_change_lockres_owner(dlm, res, mle->master);
1956                        }
1957                        spin_unlock(&res->spinlock);
1958                        have_lockres_ref = 1;
1959                        if (wake)
1960                                wake_up(&res->wq);
1961                }
1962
1963                /* master is known, detach if not already detached.
1964                 * ensures that only one assert_master call will happen
1965                 * on this mle. */
1966                spin_lock(&dlm->master_lock);
1967
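                /* Sanity-check the mle refcount before the puts below:
                 * with an inuse hold we expect at least two refs (three
                 * if a block/migration mle added an extra one); without
                 * it, at least one (two with the extra ref).  Anything
                 * lower means the mle is about to be over-released. */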
1968                rr = atomic_read(&mle->mle_refs.refcount);
1969                if (mle->inuse > 0) {
1970                        if (extra_ref && rr < 3)
1971                                err = 1;
1972                        else if (!extra_ref && rr < 2)
1973                                err = 1;
1974                } else {
1975                        if (extra_ref && rr < 2)
1976                                err = 1;
1977                        else if (!extra_ref && rr < 1)
1978                                err = 1;
1979                }
1980                if (err) {
1981                        mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
1982                             "that will mess up this node, refs=%d, extra=%d, "
1983                             "inuse=%d\n", dlm->name, namelen, name,
1984                             assert->node_idx, rr, extra_ref, mle->inuse);
1985                        dlm_print_one_mle(mle);
1986                }
1987                __dlm_unlink_mle(dlm, mle);
1988                __dlm_mle_detach_hb_events(dlm, mle);
1989                __dlm_put_mle(mle);
1990                if (extra_ref) {
1991                        /* the assert master message now balances the extra
1992                         * ref given by the master / migration request message.
1993                         * if this is the last put, it will be removed
1994                         * from the list. */
1995                        __dlm_put_mle(mle);
1996                }
1997                spin_unlock(&dlm->master_lock);
1998        } else if (res) {
1999                if (res->owner != assert->node_idx) {
2000                        mlog(0, "assert_master from %u, but current "
2001                             "owner is %u (%.*s), no mle\n", assert->node_idx,
2002                             res->owner, namelen, name);
2003                }
2004        }
2005        spin_unlock(&dlm->spinlock);
2006
2007done:
2008        ret = 0;
2009        if (res) {
2010                spin_lock(&res->spinlock);
2011                res->state |= DLM_LOCK_RES_SETREF_INPROG;
2012                spin_unlock(&res->spinlock);
2013                *ret_data = (void *)res;
2014        }
2015        dlm_put(dlm);
2016        if (master_request) {
2017                mlog(0, "need to tell master to reassert\n");
2018                /* positive. negative would shoot down the node. */
2019                ret |= DLM_ASSERT_RESPONSE_REASSERT;
2020                if (!have_lockres_ref) {
2021                        mlog(ML_ERROR, "strange, got assert from %u, MASTER "
2022                             "mle present here for %s:%.*s, but no lockres!\n",
2023                             assert->node_idx, dlm->name, namelen, name);
2024                }
2025        }
2026        if (have_lockres_ref) {
2027                /* let the master know we have a reference to the lockres */
2028                ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
2029                mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
2030                     dlm->name, namelen, name, assert->node_idx);
2031        }
2032        return ret;
2033
2034kill:
2035        /* kill the caller! */
2036        mlog(ML_ERROR, "Bad message received from another node.  Dumping state "
2037             "and killing the other node now!  This node is OK and can continue.\n");
2038        __dlm_print_one_lock_resource(res);
2039        spin_unlock(&res->spinlock);
2040        spin_unlock(&dlm->spinlock);
2041        *ret_data = (void *)res;
2042        dlm_put(dlm);
2043        return -EINVAL;
2044}
2045
2046void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
2047{
2048        struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;
2049
2050        if (ret_data) {
2051                spin_lock(&res->spinlock);
2052                res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
2053                spin_unlock(&res->spinlock);
2054                wake_up(&res->wq);
2055                dlm_lockres_put(res);
2056        }
2057        return;
2058}
2059
2060int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
2061                               struct dlm_lock_resource *res,
2062                               int ignore_higher, u8 request_from, u32 flags)
2063{
2064        struct dlm_work_item *item;
2065        item = kzalloc(sizeof(*item), GFP_ATOMIC);
2066        if (!item)
2067                return -ENOMEM;
2068
2069
2070        /* queue up work for dlm_assert_master_worker */
2071        dlm_grab(dlm);  /* get an extra ref for the work item */
2072        dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
2073        item->u.am.lockres = res; /* already have a ref */
2074        /* can optionally ignore node numbers higher than this node */
2075        item->u.am.ignore_higher = ignore_higher;
2076        item->u.am.request_from = request_from;
2077        item->u.am.flags = flags;
2078
2079        if (ignore_higher)
2080                mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
2081                     res->lockname.name);
2082
2083        spin_lock(&dlm->work_lock);
2084        list_add_tail(&item->list, &dlm->work_list);
2085        spin_unlock(&dlm->work_lock);
2086
2087        queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2088        return 0;
2089}
2090
2091static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
2092{
2093        struct dlm_ctxt *dlm = data;
2094        int ret = 0;
2095        struct dlm_lock_resource *res;
2096        unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
2097        int ignore_higher;
2098        int bit;
2099        u8 request_from;
2100        u32 flags;
2101
2102        dlm = item->dlm;
2103        res = item->u.am.lockres;
2104        ignore_higher = item->u.am.ignore_higher;
2105        request_from = item->u.am.request_from;
2106        flags = item->u.am.flags;
2107
2108        spin_lock(&dlm->spinlock);
2109        memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
2110        spin_unlock(&dlm->spinlock);
2111
2112        clear_bit(dlm->node_num, nodemap);
2113        if (ignore_higher) {
2114                /* if this is just to clear up mles for nodes below
2115                 * this node, do not send the message to the original
2116                 * caller or any node number higher than this */
2117                clear_bit(request_from, nodemap);
2118                bit = dlm->node_num;
2119                while (1) {
2120                        bit = find_next_bit(nodemap, O2NM_MAX_NODES,
2121                                            bit+1);
2122                        if (bit >= O2NM_MAX_NODES)
2123                                break;
2124                        clear_bit(bit, nodemap);
2125                }
2126        }
2127
2128        /*
2129         * If we're migrating this lock to someone else, we are no
2130         * longer allowed to assert our own mastery.  OTOH, we need to
2131         * prevent migration from starting while we're still asserting
2132         * our dominance.  The reserved ast delays migration.
2133         */
2134        spin_lock(&res->spinlock);
2135        if (res->state & DLM_LOCK_RES_MIGRATING) {
2136                mlog(0, "Someone asked us to assert mastery, but we're "
2137                     "in the middle of migration.  Skipping assert, "
2138                     "the new master will handle that.\n");
2139                spin_unlock(&res->spinlock);
2140                goto put;
2141        } else
2142                __dlm_lockres_reserve_ast(res);
2143        spin_unlock(&res->spinlock);
2144
2145        /* this call now finishes out the nodemap
2146         * even if one or more nodes die */
2147        mlog(0, "worker about to master %.*s here, this=%u\n",
2148                     res->lockname.len, res->lockname.name, dlm->node_num);
2149        ret = dlm_do_assert_master(dlm, res, nodemap, flags);
2150        if (ret < 0) {
2151                /* no need to restart, we are done */
2152                if (!dlm_is_host_down(ret))
2153                        mlog_errno(ret);
2154        }
2155
2156        /* Ok, we've asserted ourselves.  Let's let migration start. */
2157        dlm_lockres_release_ast(dlm, res);
2158
2159put:
2160        dlm_lockres_drop_inflight_worker(dlm, res);
2161
2162        dlm_lockres_put(res);
2163
2164        mlog(0, "finished with dlm_assert_master_worker\n");
2165}
2166
2167/* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
2168 * We cannot wait for node recovery to complete to begin mastering this
2169 * lockres because this lockres is used to kick off recovery! ;-)
2170 * So, do a pre-check on all living nodes to see if any of those nodes
2171 * think that $RECOVERY is currently mastered by a dead node.  If so,
2172 * we wait a short time to allow that node to get notified by its own
2173 * heartbeat stack, then check again.  All $RECOVERY lock resources
2174 * mastered by dead nodes are purged when the heartbeat callback is
2175 * fired, so we can know for sure that it is safe to continue once
2176 * the queried node reports a live node or no node.  */
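/*
 * Returns 0 when it is safe to proceed (no live node reports a dead node
 * as the $RECOVERY master), or -EAGAIN when some node has not yet noticed
 * the death and still points at the dead master.
 */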
2177static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2178                                       struct dlm_lock_resource *res)
2179{
2180        struct dlm_node_iter iter;
2181        int nodenum;
2182        int ret = 0;
2183        u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;
2184
2185        spin_lock(&dlm->spinlock);
2186        dlm_node_iter_init(dlm->domain_map, &iter);
2187        spin_unlock(&dlm->spinlock);
2188
2189        while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2190                /* do not send to self */
2191                if (nodenum == dlm->node_num)
2192                        continue;
2193                ret = dlm_do_master_requery(dlm, res, nodenum, &master);
2194                if (ret < 0) {
2195                        mlog_errno(ret);
2196                        if (!dlm_is_host_down(ret))
2197                                BUG();
2198                        /* host is down, so answer for that node would be
2199                         * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
2200                        ret = 0;
2201                }
2202
2203                if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
2204                        /* check to see if this master is in the recovery map */
2205                        spin_lock(&dlm->spinlock);
2206                        if (test_bit(master, dlm->recovery_map)) {
2207                                mlog(ML_NOTICE, "%s: node %u has not seen "
2208                                     "node %u go down yet, and thinks the "
2209                                     "dead node is mastering the recovery "
2210                                     "lock.  must wait.\n", dlm->name,
2211                                     nodenum, master);
2212                                ret = -EAGAIN;
2213                        }
2214                        spin_unlock(&dlm->spinlock);
2215                        mlog(0, "%s: reco lock master is %u\n", dlm->name,
2216                             master);
2217                        break;
2218                }
2219        }
2220        return ret;
2221}
2222
2223/*
2224 * DLM_DEREF_LOCKRES_MSG
2225 */
2226
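/*
 * Sent by this node to the owner of a lockres, asking the owner to clear
 * this node's bit in the lockres refmap.  A negative status back means the
 * owner did not have this node's bit set, which is treated as fatal.
 */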
2227int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2228{
2229        struct dlm_deref_lockres deref;
2230        int ret = 0, r;
2231        const char *lockname;
2232        unsigned int namelen;
2233
2234        lockname = res->lockname.name;
2235        namelen = res->lockname.len;
2236        BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2237
2238        memset(&deref, 0, sizeof(deref));
2239        deref.node_idx = dlm->node_num;
2240        deref.namelen = namelen;
2241        memcpy(deref.name, lockname, namelen);
2242
2243        ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
2244                                 &deref, sizeof(deref), res->owner, &r);
2245        if (ret < 0)
2246                mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
2247                     dlm->name, namelen, lockname, ret, res->owner);
2248        else if (r < 0) {
2249                /* BAD.  other node says I did not have a ref. */
2250                mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
2251                     dlm->name, namelen, lockname, res->owner, r);
2252                dlm_print_one_lock_resource(res);
2253                BUG();
2254        }
2255        return ret;
2256}
2257
2258int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
2259                              void **ret_data)
2260{
2261        struct dlm_ctxt *dlm = data;
2262        struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
2263        struct dlm_lock_resource *res = NULL;
2264        char *name;
2265        unsigned int namelen;
2266        int ret = -EINVAL;
2267        u8 node;
2268        unsigned int hash;
2269        struct dlm_work_item *item;
2270        int cleared = 0;
2271        int dispatch = 0;
2272
2273        if (!dlm_grab(dlm))
2274                return 0;
2275
2276        name = deref->name;
2277        namelen = deref->namelen;
2278        node = deref->node_idx;
2279
2280        if (namelen > DLM_LOCKID_NAME_MAX) {
2281                mlog(ML_ERROR, "Invalid name length!");
2282                goto done;
2283        }
2284        if (deref->node_idx >= O2NM_MAX_NODES) {
2285                mlog(ML_ERROR, "Invalid node number: %u\n", node);
2286                goto done;
2287        }
2288
2289        hash = dlm_lockid_hash(name, namelen);
2290
2291        spin_lock(&dlm->spinlock);
2292        res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2293        if (!res) {
2294                spin_unlock(&dlm->spinlock);
2295                mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
2296                     dlm->name, namelen, name);
2297                goto done;
2298        }
2299        spin_unlock(&dlm->spinlock);
2300
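        /* If an assert_master is still settling the refmap for this
         * lockres (SETREF_INPROG), defer the deref to dlm_deref_lockres_worker,
         * which waits for the flag to clear; otherwise clear the bit here. */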
2301        spin_lock(&res->spinlock);
2302        if (res->state & DLM_LOCK_RES_SETREF_INPROG)
2303                dispatch = 1;
2304        else {
2305                BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2306                if (test_bit(node, res->refmap)) {
2307                        dlm_lockres_clear_refmap_bit(dlm, res, node);
2308                        cleared = 1;
2309                }
2310        }
2311        spin_unlock(&res->spinlock);
2312
2313        if (!dispatch) {
2314                if (cleared)
2315                        dlm_lockres_calc_usage(dlm, res);
2316                else {
2317                        mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2318                             "but it is already dropped!\n", dlm->name,
2319                             res->lockname.len, res->lockname.name, node);
2320                        dlm_print_one_lock_resource(res);
2321                }
2322                ret = 0;
2323                goto done;
2324        }
2325
2326        item = kzalloc(sizeof(*item), GFP_NOFS);
2327        if (!item) {
2328                ret = -ENOMEM;
2329                mlog_errno(ret);
2330                goto done;
2331        }
2332
2333        dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
2334        item->u.dl.deref_res = res;
2335        item->u.dl.deref_node = node;
2336
2337        spin_lock(&dlm->work_lock);
2338        list_add_tail(&item->list, &dlm->work_list);
2339        spin_unlock(&dlm->work_lock);
2340
2341        queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2342        return 0;
2343
2344done:
2345        if (res)
2346                dlm_lockres_put(res);
2347        dlm_put(dlm);
2348
2349        return ret;
2350}
2351
2352static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
2353{
2354        struct dlm_ctxt *dlm;
2355        struct dlm_lock_resource *res;
2356        u8 node;
2357        u8 cleared = 0;
2358
2359        dlm = item->dlm;
2360        res = item->u.dl.deref_res;
2361        node = item->u.dl.deref_node;
2362
2363        spin_lock(&res->spinlock);
2364        BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2365        if (test_bit(node, res->refmap)) {
2366                __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2367                dlm_lockres_clear_refmap_bit(dlm, res, node);
2368                cleared = 1;
2369        }
2370        spin_unlock(&res->spinlock);
2371
2372        if (cleared) {
2373                mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
2374                     dlm->name, res->lockname.len, res->lockname.name, node);
2375                dlm_lockres_calc_usage(dlm, res);
2376        } else {
2377                mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2378                     "but it is already dropped!\n", dlm->name,
2379                     res->lockname.len, res->lockname.name, node);
2380                dlm_print_one_lock_resource(res);
2381        }
2382
2383        dlm_lockres_put(res);
2384}
2385
2386/*
2387 * A migrateable resource is one that:
2388 * 1. is locally mastered, and
2389 * 2. has zero local locks, and
2390 * 3. has one or more non-local locks, or one or more references
2391 * Returns 1 if yes, 0 if not.
2392 */
2393static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
2394                                      struct dlm_lock_resource *res)
2395{
2396        enum dlm_lockres_list idx;
2397        int nonlocal = 0, node_ref;
2398        struct list_head *queue;
2399        struct dlm_lock *lock;
2400        u64 cookie;
2401
2402        assert_spin_locked(&res->spinlock);
2403
2404        /* delay migration when the lockres is in MIGRATING state */
2405        if (res->state & DLM_LOCK_RES_MIGRATING)
2406                return 0;
2407
2408        if (res->owner != dlm->node_num)
2409                return 0;
2410
2411        for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
2412                queue = dlm_list_idx_to_ptr(res, idx);
2413                list_for_each_entry(lock, queue, list) {
2414                        if (lock->ml.node != dlm->node_num) {
2415                                nonlocal++;
2416                                continue;
2417                        }
2418                        cookie = be64_to_cpu(lock->ml.cookie);
2419                        mlog(0, "%s: Not migrateable res %.*s, lock %u:%llu on "
2420                             "%s list\n", dlm->name, res->lockname.len,
2421                             res->lockname.name,
2422                             dlm_get_lock_cookie_node(cookie),
2423                             dlm_get_lock_cookie_seq(cookie),
2424                             dlm_list_in_text(idx));
2425                        return 0;
2426                }
2427        }
2428
2429        if (!nonlocal) {
2430                node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
2431                if (node_ref >= O2NM_MAX_NODES)
2432                        return 0;
2433        }
2434
2435        mlog(0, "%s: res %.*s, Migrateable\n", dlm->name, res->lockname.len,
2436             res->lockname.name);
2437
2438        return 1;
2439}
2440
2441/*
2442 * DLM_MIGRATE_LOCKRES
2443 */
2444
2445
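/*
 * Outline of the migration performed below:
 *   1. preallocate the migratable lockres page and a migration mle
 *   2. add the migration mle (-EEXIST means a migration is already
 *      underway)
 *   3. mark the lockres MIGRATING and flush outstanding asts
 *   4. send the full lock state to the target (DLM_MRES_MIGRATION)
 *   5. wait for the target to assert mastery, then set it as the owner
 *      and drop the nonlocal locks
 * Any failure after MIGRATING is set re-dirties the lockres and wakes
 * any waiters.
 */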
2446static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2447                               struct dlm_lock_resource *res, u8 target)
2448{
2449        struct dlm_master_list_entry *mle = NULL;
2450        struct dlm_master_list_entry *oldmle = NULL;
2451        struct dlm_migratable_lockres *mres = NULL;
2452        int ret = 0;
2453        const char *name;
2454        unsigned int namelen;
2455        int mle_added = 0;
2456        int wake = 0;
2457
2458        if (!dlm_grab(dlm))
2459                return -EINVAL;
2460
2461        BUG_ON(target == O2NM_MAX_NODES);
2462
2463        name = res->lockname.name;
2464        namelen = res->lockname.len;
2465
2466        mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name,
2467             target);
2468
2469        /* preallocate up front. if this fails, abort */
2470        ret = -ENOMEM;
2471        mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
2472        if (!mres) {
2473                mlog_errno(ret);
2474                goto leave;
2475        }
2476
2477        mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
2478        if (!mle) {
2479                mlog_errno(ret);
2480                goto leave;
2481        }
2482        ret = 0;
2483
2484        /*
2485         * clear any existing master requests and
2486         * add the migration mle to the list
2487         */
2488        spin_lock(&dlm->spinlock);
2489        spin_lock(&dlm->master_lock);
2490        ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2491                                    namelen, target, dlm->node_num);
2492        spin_unlock(&dlm->master_lock);
2493        spin_unlock(&dlm->spinlock);
2494
2495        if (ret == -EEXIST) {
2496                mlog(0, "another process is already migrating it\n");
2497                goto fail;
2498        }
2499        mle_added = 1;
2500
2501        /*
2502         * set the MIGRATING flag and flush asts
2503         * if we fail after this we need to re-dirty the lockres
2504         */
2505        if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
2506                mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
2507                     "the target went down.\n", res->lockname.len,
2508                     res->lockname.name, target);
2509                spin_lock(&res->spinlock);
2510                res->state &= ~DLM_LOCK_RES_MIGRATING;
2511                wake = 1;
2512                spin_unlock(&res->spinlock);
2513                ret = -EINVAL;
2514        }
2515
2516fail:
2517        if (oldmle) {
2518                /* master is known, detach if not already detached */
2519                dlm_mle_detach_hb_events(dlm, oldmle);
2520                dlm_put_mle(oldmle);
2521        }
2522
2523        if (ret < 0) {
2524                if (mle_added) {
2525                        dlm_mle_detach_hb_events(dlm, mle);
2526                        dlm_put_mle(mle);
2527                } else if (mle) {
2528                        kmem_cache_free(dlm_mle_cache, mle);
2529                        mle = NULL;
2530                }
2531                goto leave;
2532        }
2533
2534        /*
2535         * at this point, we have a migration target, an mle
2536         * in the master list, and the MIGRATING flag set on
2537         * the lockres
2538         */
2539
2540        /* now that remote nodes are spinning on the MIGRATING flag,
2541         * ensure that all assert_master work is flushed. */
2542        flush_workqueue(dlm->dlm_worker);
2543
2544        /* get an extra reference on the mle.
2545         * otherwise the assert_master from the new
2546         * master will destroy this.
2547         * also, make sure that all callers of dlm_get_mle
2548         * take both dlm->spinlock and dlm->master_lock */
2549        spin_lock(&dlm->spinlock);
2550        spin_lock(&dlm->master_lock);
2551        dlm_get_mle_inuse(mle);
2552        spin_unlock(&dlm->master_lock);
2553        spin_unlock(&dlm->spinlock);
2554
2555        /* notify new node and send all lock state */
2556        /* call send_one_lockres with migration flag.
2557         * this serves as notice to the target node that a
2558         * migration is starting. */
2559        ret = dlm_send_one_lockres(dlm, res, mres, target,
2560                                   DLM_MRES_MIGRATION);
2561
2562        if (ret < 0) {
2563                mlog(0, "migration to node %u failed with %d\n",
2564                     target, ret);
2565                /* migration failed, detach and clean up mle */
2566                dlm_mle_detach_hb_events(dlm, mle);
2567                dlm_put_mle(mle);
2568                dlm_put_mle_inuse(mle);
2569                spin_lock(&res->spinlock);
2570                res->state &= ~DLM_LOCK_RES_MIGRATING;
2571                wake = 1;
2572                spin_unlock(&res->spinlock);
2573                if (dlm_is_host_down(ret))
2574                        dlm_wait_for_node_death(dlm, target,
2575                                                DLM_NODE_DEATH_WAIT_MAX);
2576                goto leave;
2577        }
2578
2579        /* at this point, the target sends a message to all nodes
2580         * (using dlm_do_migrate_request).  this node is skipped since
2581         * we had to put an mle in the list to begin the process.  this
2582         * node now waits for target to do an assert master.  this node
2583         * will be the last one notified, ensuring that the migration
2584         * is complete everywhere.  if the target dies while this is
2585         * going on, some nodes could potentially see the target as the
2586         * master, so it is important that my recovery finds the migration
2587         * mle and sets the master to UNKNOWN. */
2588
2589
2590        /* wait for new node to assert master */
2591        while (1) {
2592                ret = wait_event_interruptible_timeout(mle->wq,
2593                                        (atomic_read(&mle->woken) == 1),
2594                                        msecs_to_jiffies(5000));
2595
2596                if (ret >= 0) {
2597                        if (atomic_read(&mle->woken) == 1 ||
2598                            res->owner == target)
2599                                break;
2600
2601                        mlog(0, "%s:%.*s: timed out during migration\n",
2602                             dlm->name, res->lockname.len, res->lockname.name);
2603                        /* avoid hang during shutdown when migrating lockres
2604                         * to a node which also goes down */
2605                        if (dlm_is_node_dead(dlm, target)) {
2606                                mlog(0, "%s:%.*s: expected migration "
2607                                     "target %u is no longer up, restarting\n",
2608                                     dlm->name, res->lockname.len,
2609                                     res->lockname.name, target);
2610                                ret = -EINVAL;
2611                                /* migration failed, detach and clean up mle */
2612                                dlm_mle_detach_hb_events(dlm, mle);
2613                                dlm_put_mle(mle);
2614                                dlm_put_mle_inuse(mle);
2615                                spin_lock(&res->spinlock);
2616                                res->state &= ~DLM_LOCK_RES_MIGRATING;
2617                                wake = 1;
2618                                spin_unlock(&res->spinlock);
2619                                goto leave;
2620                        }
2621                } else
2622                        mlog(0, "%s:%.*s: caught signal during migration\n",
2623                             dlm->name, res->lockname.len, res->lockname.name);
2624        }
2625
2626        /* all done, set the owner, clear the flag */
2627        spin_lock(&res->spinlock);
2628        dlm_set_lockres_owner(dlm, res, target);
2629        res->state &= ~DLM_LOCK_RES_MIGRATING;
2630        dlm_remove_nonlocal_locks(dlm, res);
2631        spin_unlock(&res->spinlock);
2632        wake_up(&res->wq);
2633
2634        /* master is known, detach if not already detached */
2635        dlm_mle_detach_hb_events(dlm, mle);
2636        dlm_put_mle_inuse(mle);
2637        ret = 0;
2638
2639        dlm_lockres_calc_usage(dlm, res);
2640
2641leave:
2642        /* re-dirty the lockres if we failed */
2643        if (ret < 0)
2644                dlm_kick_thread(dlm, res);
2645
2646        /* wake up waiters if the MIGRATING flag got set
2647         * but migration failed */
2648        if (wake)
2649                wake_up(&res->wq);
2650
2651        if (mres)
2652                free_page((unsigned long)mres);
2653
2654        dlm_put(dlm);
2655
2656        mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen,
2657             name, target, ret);
2658        return ret;
2659}
2660
2661#define DLM_MIGRATION_RETRY_MS  100
2662
2663/*
2664 * Should be called only after beginning the domain leave process.
2665 * There should not be any remaining locks on nonlocal lock resources,
2666 * and there should be no local locks left on locally mastered resources.
2667 *
2668 * Called with the dlm spinlock held, may drop it to do migration, but
2669 * will re-acquire before exit.
2670 *
2671 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped
2672 */
2673int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2674{
2675        int ret;
2676        int lock_dropped = 0;
2677        u8 target = O2NM_MAX_NODES;
2678
2679        assert_spin_locked(&dlm->spinlock);
2680
2681        spin_lock(&res->spinlock);
2682        if (dlm_is_lockres_migrateable(dlm, res))
2683                target = dlm_pick_migration_target(dlm, res);
2684        spin_unlock(&res->spinlock);
2685
2686        if (target == O2NM_MAX_NODES)
2687                goto leave;
2688
2689        /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
2690        spin_unlock(&dlm->spinlock);
2691        lock_dropped = 1;
2692        ret = dlm_migrate_lockres(dlm, res, target);
2693        if (ret)
2694                mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n",
2695                     dlm->name, res->lockname.len, res->lockname.name,
2696                     target, ret);
2697        spin_lock(&dlm->spinlock);
2698leave:
2699        return lock_dropped;
2700}
2701
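    /* check, under dlm->ast_lock and the lock's spinlock, that no basts are
     * queued or in flight for this lock.  suitable as a wait_event()
     * condition for callers that must not race an outstanding bast. */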
2702int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2703{
2704        int ret;
2705        spin_lock(&dlm->ast_lock);
2706        spin_lock(&lock->spinlock);
2707        ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
2708        spin_unlock(&lock->spinlock);
2709        spin_unlock(&dlm->ast_lock);
2710        return ret;
2711}
2712
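    /* migration may proceed once DLM_LOCK_RES_MIGRATING has been set on the
     * lockres.  if the target has left the domain, return nonzero anyway so
     * the waiter breaks out and rechecks the domain map. */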
2713static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2714                                     struct dlm_lock_resource *res,
2715                                     u8 mig_target)
2716{
2717        int can_proceed;
2718        spin_lock(&res->spinlock);
2719        can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2720        spin_unlock(&res->spinlock);
2721
2722        /* target has died, so make the caller break out of the
2723         * wait_event, but caller must recheck the domain_map */
2724        spin_lock(&dlm->spinlock);
2725        if (!test_bit(mig_target, dlm->domain_map))
2726                can_proceed = 1;
2727        spin_unlock(&dlm->spinlock);
2728        return can_proceed;
2729}
2730
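    /* test DLM_LOCK_RES_DIRTY under the lockres spinlock.  used below as the
     * wait condition while the dlm thread flushes pending asts prior to
     * migration. */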
2731static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
2732                                struct dlm_lock_resource *res)
2733{
2734        int ret;
2735        spin_lock(&res->spinlock);
2736        ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2737        spin_unlock(&res->spinlock);
2738        return ret;
2739}
2740
2741
2742static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
2743                                       struct dlm_lock_resource *res,
2744                                       u8 target)
2745{
2746        int ret = 0;
2747
2748        mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
2749               res->lockname.len, res->lockname.name, dlm->node_num,
2750               target);
2751        /* need to set MIGRATING flag on lockres.  this is done by
2752         * ensuring that all asts have been flushed for this lockres. */
2753        spin_lock(&res->spinlock);
2754        BUG_ON(res->migration_pending);
2755        res->migration_pending = 1;
2756        /* strategy is to reserve an extra ast then release
2757         * it below, letting the release do all of the work */
2758        __dlm_lockres_reserve_ast(res);
2759        spin_unlock(&res->spinlock);
2760
2761        /* now flush all the pending asts */
2762        dlm_kick_thread(dlm, res);
2763        /* before waiting on DIRTY, block processes which may
2764         * try to dirty the lockres before MIGRATING is set */
2765        spin_lock(&res->spinlock);
2766        BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
2767        res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
2768        spin_unlock(&res->spinlock);
2769        /* now wait on any pending asts and the DIRTY state */
2770        wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2771        dlm_lockres_release_ast(dlm, res);
2772
2773        mlog(0, "about to wait on migration_wq, dirty=%s\n",
2774               res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2775        /* if the extra ref we just put was the final one, this
2776         * will pass thru immediately.  otherwise, we need to wait
2777         * for the last ast to finish. */
2778again:
2779        ret = wait_event_interruptible_timeout(dlm->migration_wq,
2780                   dlm_migration_can_proceed(dlm, res, target),
2781                   msecs_to_jiffies(1000));
2782        if (ret < 0) {
2783                mlog(0, "woken again: migrating? %s, dead? %s\n",
2784                       res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2785                       test_bit(target, dlm->domain_map) ? "no":"yes");
2786        } else {
2787                mlog(0, "all is well: migrating? %s, dead? %s\n",
2788                       res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2789                       test_bit(target, dlm->domain_map) ? "no":"yes");
2790        }
2791        if (!dlm_migration_can_proceed(dlm, res, target)) {
2792                mlog(0, "trying again...\n");
2793                goto again;
2794        }
2795
2796        ret = 0;
2797        /* did the target go down or die? */
2798        spin_lock(&dlm->spinlock);
2799        if (!test_bit(target, dlm->domain_map)) {
2800                mlog(ML_ERROR, "aha. migration target %u just went down\n",
2801                     target);
2802                ret = -EHOSTDOWN;
2803        }
2804        spin_unlock(&dlm->spinlock);
2805
2806        /*
2807         * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for
2808         * another try; otherwise, we are sure the MIGRATING state is there,
2809         * drop the unneeded state which blocked threads trying to DIRTY
2810         */
2811        spin_lock(&res->spinlock);
2812        BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
2813        res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
2814        if (!ret)
2815                BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
2816        spin_unlock(&res->spinlock);
2817
2818        /*
2819         * at this point:
2820         *
2821         *   o the DLM_LOCK_RES_MIGRATING flag is set if target not down
2822         *   o there are no pending asts on this lockres
2823         *   o all processes trying to reserve an ast on this
2824         *     lockres must wait for the MIGRATING flag to clear
2825         */
2826        return ret;
2827}
2828
2829/* last step in the migration process.
2830 * original master calls this to free all of the dlm_lock
2831 * structures that used to be for other nodes. */
2832static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2833                                      struct dlm_lock_resource *res)
2834{
2835        struct list_head *queue = &res->granted;
2836        int i, bit;
2837        struct dlm_lock *lock, *next;
2838
2839        assert_spin_locked(&res->spinlock);
2840
2841        BUG_ON(res->owner == dlm->node_num);
2842
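            /* the granted, converting and blocked lists are laid out
             * consecutively in the lockres, so bumping the queue pointer
             * walks all three in turn */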
2843        for (i=0; i<3; i++) {
2844                list_for_each_entry_safe(lock, next, queue, list) {
2845                        if (lock->ml.node != dlm->node_num) {
2846                                mlog(0, "putting lock for node %u\n",
2847                                     lock->ml.node);
2848                                /* be extra careful */
2849                                BUG_ON(!list_empty(&lock->ast_list));
2850                                BUG_ON(!list_empty(&lock->bast_list));
2851                                BUG_ON(lock->ast_pending);
2852                                BUG_ON(lock->bast_pending);
2853                                dlm_lockres_clear_refmap_bit(dlm, res,
2854                                                             lock->ml.node);
2855                                list_del_init(&lock->list);
2856                                dlm_lock_put(lock);
2857                                /* In a normal unlock, we would have added a
2858                                 * DLM_UNLOCK_FREE_LOCK action. Force it. */
2859                                dlm_lock_put(lock);
2860                        }
2861                }
2862                queue++;
2863        }
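            /* clear any remaining remote references from the refmap */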
2864        bit = 0;
2865        while (1) {
2866                bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
2867                if (bit >= O2NM_MAX_NODES)
2868                        break;
2869                /* do not clear the local node reference, if there is a
2870                 * process holding this, let it drop the ref itself */
2871                if (bit != dlm->node_num) {
2872                        mlog(0, "%s:%.*s: node %u had a ref to this "
2873                             "migrating lockres, clearing\n", dlm->name,
2874                             res->lockname.len, res->lockname.name, bit);
2875                        dlm_lockres_clear_refmap_bit(dlm, res, bit);
2876                }
2877                bit++;
2878        }
2879}
2880
2881/*
2882 * Pick a node to migrate the lock resource to. This function selects a
2883 * potential target based first on the locks and then on refmap. It skips
2884 * nodes that are in the process of exiting the domain.
2885 */
2886static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
2887                                    struct dlm_lock_resource *res)
2888{
2889        enum dlm_lockres_list idx;
2890        struct list_head *queue = &res->granted;
2891        struct dlm_lock *lock;
2892        int noderef;
2893        u8 nodenum = O2NM_MAX_NODES;
2894
2895        assert_spin_locked(&dlm->spinlock);
2896        assert_spin_locked(&res->spinlock);
2897
2898        /* Go through all the locks */
2899        for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
2900                queue = dlm_list_idx_to_ptr(res, idx);
2901                list_for_each_entry(lock, queue, list) {
2902                        if (lock->ml.node == dlm->node_num)
2903                                continue;
2904                        if (test_bit(lock->ml.node, dlm->exit_domain_map))
2905                                continue;
2906                        nodenum = lock->ml.node;
2907                        goto bail;
2908                }
2909        }
2910
2911        /* Go thru the refmap */
2912        noderef = -1;
2913        while (1) {
2914                noderef = find_next_bit(res->refmap, O2NM_MAX_NODES,
2915                                        noderef + 1);
2916                if (noderef >= O2NM_MAX_NODES)
2917                        break;
2918                if (noderef == dlm->node_num)
2919                        continue;
2920                if (test_bit(noderef, dlm->exit_domain_map))
2921                        continue;
2922                nodenum = noderef;
2923                goto bail;
2924        }
2925
2926bail:
2927        return nodenum;
2928}
2929
2930/* this is called by the new master once all lockres
2931 * data has been received */
2932static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
2933                                  struct dlm_lock_resource *res,
2934                                  u8 master, u8 new_master,
2935                                  struct dlm_node_iter *iter)
2936{
2937        struct dlm_migrate_request migrate;
2938        int ret, skip, status = 0;
2939        int nodenum;
2940
2941        memset(&migrate, 0, sizeof(migrate));
2942        migrate.namelen = res->lockname.len;
2943        memcpy(migrate.name, res->lockname.name, migrate.namelen);
2944        migrate.new_master = new_master;
2945        migrate.master = master;
2946
2947        ret = 0;
2948
2949        /* send message to all nodes, except the master and myself */
2950        while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
2951                if (nodenum == master ||
2952                    nodenum == new_master)
2953                        continue;
2954
2955                /* We could race exit domain. If exited, skip. */
2956                spin_lock(&dlm->spinlock);
2957                skip = (!test_bit(nodenum, dlm->domain_map));
2958                spin_unlock(&dlm->spinlock);
2959                if (skip) {
2960                        clear_bit(nodenum, iter->node_map);
2961                        continue;
2962                }
2963
2964                ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
2965                                         &migrate, sizeof(migrate), nodenum,
2966                                         &status);
2967                if (ret < 0) {
2968                        mlog(ML_ERROR, "%s: res %.*s, Error %d send "
2969                             "MIGRATE_REQUEST to node %u\n", dlm->name,
2970                             migrate.namelen, migrate.name, ret, nodenum);
2971                        if (!dlm_is_host_down(ret)) {
2972                                mlog(ML_ERROR, "unhandled error=%d!\n", ret);
2973                                BUG();
2974                        }
2975                        clear_bit(nodenum, iter->node_map);
2976                        ret = 0;
2977                } else if (status < 0) {
2978                        mlog(0, "migrate request (node %u) returned %d!\n",
2979                             nodenum, status);
2980                        ret = status;
2981                } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
2982                        /* during the migration request we short-circuited
2983                         * the mastery of the lockres.  make sure we have
2984                         * a mastery ref for nodenum */
2985                        mlog(0, "%s:%.*s: need ref for node %u\n",
2986                             dlm->name, res->lockname.len, res->lockname.name,
2987                             nodenum);
2988                        spin_lock(&res->spinlock);
2989                        dlm_lockres_set_refmap_bit(dlm, res, nodenum);
2990                        spin_unlock(&res->spinlock);
2991                }
2992        }
2993
2994        if (ret < 0)
2995                mlog_errno(ret);
2996
2997        mlog(0, "returning ret=%d\n", ret);
2998        return ret;
2999}
3000
3001
3002/* if there is an existing mle for this lockres, we now know who the master is.
3003 * (the one who sent us *this* message) we can clear it up right away.
3004 * since the process that put the mle on the list still has a reference to it,
3005 * we can unhash it now, set the master and wake the process.  as a result,
3006 * we will have no mle in the list to start with.  now we can add an mle for
3007 * the migration and this should be the only one found for those scanning the
3008 * list.  */
3009int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
3010                                void **ret_data)
3011{
3012        struct dlm_ctxt *dlm = data;
3013        struct dlm_lock_resource *res = NULL;
3014        struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
3015        struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
3016        const char *name;
3017        unsigned int namelen, hash;
3018        int ret = 0;
3019
3020        if (!dlm_grab(dlm))
3021                return -EINVAL;
3022
3023        name = migrate->name;
3024        namelen = migrate->namelen;
3025        hash = dlm_lockid_hash(name, namelen);
3026
3027        /* preallocate.. if this fails, abort */
3028        mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
3029
3030        if (!mle) {
3031                ret = -ENOMEM;
3032                goto leave;
3033        }
3034
3035        /* check for pre-existing lock */
3036        spin_lock(&dlm->spinlock);
3037        res = __dlm_lookup_lockres(dlm, name, namelen, hash);
3038        if (res) {
3039                spin_lock(&res->spinlock);
3040                if (res->state & DLM_LOCK_RES_RECOVERING) {
3041                        /* if all is working ok, this can only mean that we got
3042                         * a migrate request from a node that we now see as
3043                         * dead.  what can we do here?  drop it to the floor? */
3044                        spin_unlock(&res->spinlock);
3045                        mlog(ML_ERROR, "Got a migrate request, but the "
3046                             "lockres is marked as recovering!");
3047                        kmem_cache_free(dlm_mle_cache, mle);
3048                        ret = -EINVAL; /* need a better solution */
3049                        goto unlock;
3050                }
3051                res->state |= DLM_LOCK_RES_MIGRATING;
3052                spin_unlock(&res->spinlock);
3053        }
3054
3055        spin_lock(&dlm->master_lock);
3056        /* ignore status.  only nonzero status would BUG. */
3057        ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3058                                    name, namelen,
3059                                    migrate->new_master,
3060                                    migrate->master);
3061
3062        spin_unlock(&dlm->master_lock);
3063unlock:
3064        spin_unlock(&dlm->spinlock);
3065
3066        if (oldmle) {
3067                /* master is known, detach if not already detached */
3068                dlm_mle_detach_hb_events(dlm, oldmle);
3069                dlm_put_mle(oldmle);
3070        }
3071
3072        if (res)
3073                dlm_lockres_put(res);
3074leave:
3075        dlm_put(dlm);
3076        return ret;
3077}
3078
3079/* must be holding dlm->spinlock and dlm->master_lock
3080 * when adding a migration mle, we can clear any other mles
3081 * in the master list because we know with certainty that
3082 * the master is "master".  so we remove any old mle from
3083 * the list after setting its master field, and then add
3084 * the new migration mle.  this way we can hold to the rule
3085 * of having only one mle for a given lock name at all times. */
3086static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
3087                                 struct dlm_lock_resource *res,
3088                                 struct dlm_master_list_entry *mle,
3089                                 struct dlm_master_list_entry **oldmle,
3090                                 const char *name, unsigned int namelen,
3091                                 u8 new_master, u8 master)
3092{
3093        int found;
3094        int ret = 0;
3095
3096        *oldmle = NULL;
3097
3098        assert_spin_locked(&dlm->spinlock);
3099        assert_spin_locked(&dlm->master_lock);
3100
3101        /* caller is responsible for any ref taken here on oldmle */
3102        found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
3103        if (found) {
3104                struct dlm_master_list_entry *tmp = *oldmle;
3105                spin_lock(&tmp->spinlock);
3106                if (tmp->type == DLM_MLE_MIGRATION) {
3107                        if (master == dlm->node_num) {
3108                                /* ah another process raced me to it */
3109                                mlog(0, "tried to migrate %.*s, but some "
3110                                     "process beat me to it\n",
3111                                     namelen, name);
3112                                ret = -EEXIST;
3113                        } else {
3114                                /* bad.  2 NODES are trying to migrate! */
3115                                mlog(ML_ERROR, "migration error  mle: "
3116                                     "master=%u new_master=%u // request: "
3117                                     "master=%u new_master=%u // "
3118                                     "lockres=%.*s\n",
3119                                     tmp->master, tmp->new_master,
3120                                     master, new_master,
3121                                     namelen, name);
3122                                BUG();
3123                        }
3124                } else {
3125                        /* this is essentially what assert_master does */
3126                        tmp->master = master;
3127                        atomic_set(&tmp->woken, 1);
3128                        wake_up(&tmp->wq);
3129                        /* remove it so that only one mle will be found */
3130                        __dlm_unlink_mle(dlm, tmp);
3131                        __dlm_mle_detach_hb_events(dlm, tmp);
3132                        if (tmp->type == DLM_MLE_MASTER) {
3133                                ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
3134                                mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
3135                                                "telling master to get ref "
3136                                                "for cleared out mle during "
3137                                                "migration\n", dlm->name,
3138                                                namelen, name, master,
3139                                                new_master);
3140                        }
3141                }
3142                spin_unlock(&tmp->spinlock);
3143        }
3144
3145        /* now add a migration mle to the tail of the list */
3146        dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
3147        mle->new_master = new_master;
3148        /* the new master will be sending an assert master for this.
3149         * at that point we will get the refmap reference */
3150        mle->master = master;
3151        /* do this for consistency with other mle types */
3152        set_bit(new_master, mle->maybe_map);
3153        __dlm_insert_mle(dlm, mle);
3154
3155        return ret;
3156}
3157
3158/*
3159 * Sets the owner of the lockres, associated with the mle, to UNKNOWN
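     * and moves it onto the recovery list.  Note that dlm->master_lock is
     * dropped when a lockres is found, so the caller must restart its scan.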
3160 */
3161static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm,
3162                                        struct dlm_master_list_entry *mle)
3163{
3164        struct dlm_lock_resource *res;
3165
3166        /* Find the lockres associated with the mle and set its owner to UNK */
3167        res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen,
3168                                   mle->mnamehash);
3169        if (res) {
3170                spin_unlock(&dlm->master_lock);
3171
3172                /* move lockres onto recovery list */
3173                spin_lock(&res->spinlock);
3174                dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
3175                dlm_move_lockres_to_recovery_list(dlm, res);
3176                spin_unlock(&res->spinlock);
3177                dlm_lockres_put(res);
3178
3179                /* about to get rid of mle, detach from heartbeat */
3180                __dlm_mle_detach_hb_events(dlm, mle);
3181
3182                /* dump the mle */
3183                spin_lock(&dlm->master_lock);
3184                __dlm_put_mle(mle);
3185                spin_unlock(&dlm->master_lock);
3186        }
3187
3188        return res;
3189}
3190
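    /* unlink a MIGRATION mle whose source or target died.  caller holds
     * dlm->spinlock and dlm->master_lock; the mle is detached from heartbeat,
     * pulled off the master hash and any waiter on it is woken. */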
3191static void dlm_clean_migration_mle(struct dlm_ctxt *dlm,
3192                                    struct dlm_master_list_entry *mle)
3193{
3194        __dlm_mle_detach_hb_events(dlm, mle);
3195
3196        spin_lock(&mle->spinlock);
3197        __dlm_unlink_mle(dlm, mle);
3198        atomic_set(&mle->woken, 1);
3199        spin_unlock(&mle->spinlock);
3200
3201        wake_up(&mle->wq);
3202}
3203
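    /* a BLOCK mle only needs cleanup if the dead node is the one that would
     * have asserted mastery.  in that case drop the ref that the assert_master
     * would have dropped and wake anyone stuck in the dlmlock path. */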
3204static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
3205                                struct dlm_master_list_entry *mle, u8 dead_node)
3206{
3207        int bit;
3208
3209        BUG_ON(mle->type != DLM_MLE_BLOCK);
3210
3211        spin_lock(&mle->spinlock);
3212        bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
3213        if (bit != dead_node) {
3214                mlog(0, "mle found, but dead node %u would not have been "
3215                     "master\n", dead_node);
3216                spin_unlock(&mle->spinlock);
3217        } else {
3218                /* Must drop the refcount by one since the assert_master will
3219                 * never arrive. This may result in the mle being unlinked and
3220                 * freed, but there may still be a process waiting in the
3221                 * dlmlock path which is fine. */
3222                mlog(0, "node %u was expected master\n", dead_node);
3223                atomic_set(&mle->woken, 1);
3224                spin_unlock(&mle->spinlock);
3225                wake_up(&mle->wq);
3226
3227                /* Do not need events any longer, so detach from heartbeat */
3228                __dlm_mle_detach_hb_events(dlm, mle);
3229                __dlm_put_mle(mle);
3230        }
3231}
3232
3233void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
3234{
3235        struct dlm_master_list_entry *mle;
3236        struct dlm_lock_resource *res;
3237        struct hlist_head *bucket;
3238        struct hlist_node *tmp;
3239        unsigned int i;
3240
3241        mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node);
3242top:
3243        assert_spin_locked(&dlm->spinlock);
3244
3245        /* clean the master list */
3246        spin_lock(&dlm->master_lock);
3247        for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3248                bucket = dlm_master_hash(dlm, i);
3249                hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
3250                        BUG_ON(mle->type != DLM_MLE_BLOCK &&
3251                               mle->type != DLM_MLE_MASTER &&
3252                               mle->type != DLM_MLE_MIGRATION);
3253
3254                        /* MASTER mles are initiated locally. The waiting
3255                         * process will notice the node map change shortly.
3256                         * Let that happen as normal. */
3257                        if (mle->type == DLM_MLE_MASTER)
3258                                continue;
3259
3260                        /* BLOCK mles are initiated by other nodes. Need to
3261                         * clean up if the dead node would have been the
3262                         * master. */
3263                        if (mle->type == DLM_MLE_BLOCK) {
3264                                dlm_clean_block_mle(dlm, mle, dead_node);
3265                                continue;
3266                        }
3267
3268                        /* Everything else is a MIGRATION mle */
3269
3270                        /* The rule for MIGRATION mles is that the master
3271                         * becomes UNKNOWN if *either* the original or the new
3272                         * master dies. All UNKNOWN lockreses are sent to
3273                         * whichever node becomes the recovery master. The new
3274                         * master is responsible for determining if there is
3275                         * still a master for this lockres, or if it needs to
3276                         * take over mastery. Either way, this node should
3277                         * expect another message to resolve this. */
3278
3279                        if (mle->master != dead_node &&
3280                            mle->new_master != dead_node)
3281                                continue;
3282
3283                        /* If we have reached this point, this mle needs to be
3284                         * removed from the list and freed. */
3285                        dlm_clean_migration_mle(dlm, mle);
3286
3287                        mlog(0, "%s: node %u died during migration from "
3288                             "%u to %u!\n", dlm->name, dead_node, mle->master,
3289                             mle->new_master);
3290
3291                        /* If we find a lockres associated with the mle, we've
3292                         * hit this rare case that messes up our lock ordering.
3293                         * If so, we need to drop the master lock so that we can
3294                         * take the lockres lock, meaning that we will have to
3295                         * restart from the head of list. */
3296                        res = dlm_reset_mleres_owner(dlm, mle);
3297                        if (res)
3298                                /* restart */
3299                                goto top;
3300
3301                        /* This may be the last reference */
3302                        __dlm_put_mle(mle);
3303                }
3304        }
3305        spin_unlock(&dlm->master_lock);
3306}
3307
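    /* final stage of migration, run on the new master once the lockres data
     * has arrived: send migrate requests to the other live nodes, assert
     * mastery to them and then to the old master, and finally take ownership
     * and clear DLM_LOCK_RES_MIGRATING. */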
3308int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
3309                         u8 old_master)
3310{
3311        struct dlm_node_iter iter;
3312        int ret = 0;
3313
3314        spin_lock(&dlm->spinlock);
3315        dlm_node_iter_init(dlm->domain_map, &iter);
3316        clear_bit(old_master, iter.node_map);
3317        clear_bit(dlm->node_num, iter.node_map);
3318        spin_unlock(&dlm->spinlock);
3319
3320        /* ownership of the lockres is changing.  account for the
3321         * mastery reference here since old_master will briefly have
3322         * a reference after the migration completes */
3323        spin_lock(&res->spinlock);
3324        dlm_lockres_set_refmap_bit(dlm, res, old_master);
3325        spin_unlock(&res->spinlock);
3326
3327        mlog(0, "now time to do a migrate request to other nodes\n");
3328        ret = dlm_do_migrate_request(dlm, res, old_master,
3329                                     dlm->node_num, &iter);
3330        if (ret < 0) {
3331                mlog_errno(ret);
3332                goto leave;
3333        }
3334
3335        mlog(0, "doing assert master of %.*s to all except the original node\n",
3336             res->lockname.len, res->lockname.name);
3337        /* this call now finishes out the nodemap
3338         * even if one or more nodes die */
3339        ret = dlm_do_assert_master(dlm, res, iter.node_map,
3340                                   DLM_ASSERT_MASTER_FINISH_MIGRATION);
3341        if (ret < 0) {
3342                /* no longer need to retry.  all living nodes contacted. */
3343                mlog_errno(ret);
3344                ret = 0;
3345        }
3346
3347        memset(iter.node_map, 0, sizeof(iter.node_map));
3348        set_bit(old_master, iter.node_map);
3349        mlog(0, "doing assert master of %.*s back to %u\n",
3350             res->lockname.len, res->lockname.name, old_master);
3351        ret = dlm_do_assert_master(dlm, res, iter.node_map,
3352                                   DLM_ASSERT_MASTER_FINISH_MIGRATION);
3353        if (ret < 0) {
3354                mlog(0, "assert master to original master failed "
3355                     "with %d.\n", ret);
3356                /* the only nonzero status here would be because of
3357                 * a dead original node.  we're done. */
3358                ret = 0;
3359        }
3360
3361        /* all done, set the owner, clear the flag */
3362        spin_lock(&res->spinlock);
3363        dlm_set_lockres_owner(dlm, res, dlm->node_num);
3364        res->state &= ~DLM_LOCK_RES_MIGRATING;
3365        spin_unlock(&res->spinlock);
3366        /* re-dirty it on the new master */
3367        dlm_kick_thread(dlm, res);
3368        wake_up(&res->wq);
3369leave:
3370        return ret;
3371}
3372
3373/*
3374 * LOCKRES AST REFCOUNT
3375 * this is integral to migration
3376 */
3377
3378/* for future intent to call an ast, reserve one ahead of time.
3379 * this should be called only after waiting on the lockres
3380 * with dlm_wait_on_lockres, and while still holding the
3381 * spinlock after the call. */
3382void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
3383{
3384        assert_spin_locked(&res->spinlock);
3385        if (res->state & DLM_LOCK_RES_MIGRATING) {
3386                __dlm_print_one_lock_resource(res);
3387        }
3388        BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3389
3390        atomic_inc(&res->asts_reserved);
3391}
3392
3393/*
3394 * used to drop the reserved ast, either because it went unused,
3395 * or because the ast/bast was actually called.
3396 *
3397 * also, if there is a pending migration on this lockres,
3398 * and this was the last pending ast on the lockres,
3399 * atomically set the MIGRATING flag before we drop the lock.
3400 * this is how we ensure that migration can proceed with no
3401 * asts in progress.  note that it is ok if the state of the
3402 * queues is such that a lock should be granted in the future
3403 * or that a bast should be fired, because the new master will
3404 * shuffle the lists on this lockres as soon as it is migrated.
3405 */
3406void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
3407                             struct dlm_lock_resource *res)
3408{
3409        if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
3410                return;
3411
3412        if (!res->migration_pending) {
3413                spin_unlock(&res->spinlock);
3414                return;
3415        }
3416
3417        BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3418        res->migration_pending = 0;
3419        res->state |= DLM_LOCK_RES_MIGRATING;
3420        spin_unlock(&res->spinlock);
3421        wake_up(&res->wq);
3422        wake_up(&dlm->migration_wq);
3423}
3424
3425void dlm_force_free_mles(struct dlm_ctxt *dlm)
3426{
3427        int i;
3428        struct hlist_head *bucket;
3429        struct dlm_master_list_entry *mle;
3430        struct hlist_node *tmp;
3431
3432        /*
3433         * We notified all other nodes that we are exiting the domain and
3434         * set the dlm state to DLM_CTXT_LEAVING. If any mles are still
3435         * around we force free them and wake any processes that are waiting
3436         * on the mles
3437         */
3438        spin_lock(&dlm->spinlock);
3439        spin_lock(&dlm->master_lock);
3440
3441        BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
3442        BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));
3443
3444        for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3445                bucket = dlm_master_hash(dlm, i);
3446                hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
3447                        if (mle->type != DLM_MLE_BLOCK) {
3448                                mlog(ML_ERROR, "bad mle: %p\n", mle);
3449                                dlm_print_one_mle(mle);
3450                        }
3451                        atomic_set(&mle->woken, 1);
3452                        wake_up(&mle->wq);
3453
3454                        __dlm_unlink_mle(dlm, mle);
3455                        __dlm_mle_detach_hb_events(dlm, mle);
3456                        __dlm_put_mle(mle);
3457                }
3458        }
3459        spin_unlock(&dlm->master_lock);
3460        spin_unlock(&dlm->spinlock);
3461}
3462