linux/fs/dlm/ast.c
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2010 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lock.h"
#include "user.h"
#include "ast.h"

static uint64_t dlm_cb_seq;
static DEFINE_SPINLOCK(dlm_cb_seq_spin);

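/*
 * Dump the last delivered cast and bast plus every queued entry in
 * lkb_callbacks[] for an lkb; called from the error paths below when the
 * callback state looks inconsistent.
 */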
static void dlm_dump_lkb_callbacks(struct dlm_lkb *lkb)
{
        int i;

        log_print("last_bast %x %llu flags %x mode %d sb %d %x",
                  lkb->lkb_id,
                  (unsigned long long)lkb->lkb_last_bast.seq,
                  lkb->lkb_last_bast.flags,
                  lkb->lkb_last_bast.mode,
                  lkb->lkb_last_bast.sb_status,
                  lkb->lkb_last_bast.sb_flags);

        log_print("last_cast %x %llu flags %x mode %d sb %d %x",
                  lkb->lkb_id,
                  (unsigned long long)lkb->lkb_last_cast.seq,
                  lkb->lkb_last_cast.flags,
                  lkb->lkb_last_cast.mode,
                  lkb->lkb_last_cast.sb_status,
                  lkb->lkb_last_cast.sb_flags);

        for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
                log_print("cb %x %llu flags %x mode %d sb %d %x",
                          lkb->lkb_id,
                          (unsigned long long)lkb->lkb_callbacks[i].seq,
                          lkb->lkb_callbacks[i].flags,
                          lkb->lkb_callbacks[i].mode,
                          lkb->lkb_callbacks[i].sb_status,
                          lkb->lkb_callbacks[i].sb_flags);
        }
}

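/*
 * Queue a callback in the first free slot of lkb_callbacks[].  Some
 * redundant basts are suppressed here: a bast is dropped if the entry
 * just before it is a bast for the same or a more restrictive mode
 * (more suppression happens at removal time).  Returns 0 on success,
 * -1 if all DLM_CALLBACKS_SIZE slots are already in use.
 */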
int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
                         int status, uint32_t sbflags, uint64_t seq)
{
        struct dlm_ls *ls = lkb->lkb_resource->res_ls;
        uint64_t prev_seq;
        int prev_mode;
        int i, rv;

        for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
                if (lkb->lkb_callbacks[i].seq)
                        continue;

                /*
                 * Suppress some redundant basts here, do more on removal.
                 * Don't even add a bast if the callback just before it
                 * is a bast for the same mode or a more restrictive mode.
                 * (the additional > PR check is needed for PR/CW inversion)
                 */

                if ((i > 0) && (flags & DLM_CB_BAST) &&
                    (lkb->lkb_callbacks[i-1].flags & DLM_CB_BAST)) {

                        prev_seq = lkb->lkb_callbacks[i-1].seq;
                        prev_mode = lkb->lkb_callbacks[i-1].mode;

                        if ((prev_mode == mode) ||
                            (prev_mode > mode && prev_mode > DLM_LOCK_PR)) {

                                log_debug(ls, "skip %x add bast %llu mode %d "
                                          "for bast %llu mode %d",
                                          lkb->lkb_id,
                                          (unsigned long long)seq,
                                          mode,
                                          (unsigned long long)prev_seq,
                                          prev_mode);
                                rv = 0;
                                goto out;
                        }
                }

                lkb->lkb_callbacks[i].seq = seq;
                lkb->lkb_callbacks[i].flags = flags;
                lkb->lkb_callbacks[i].mode = mode;
                lkb->lkb_callbacks[i].sb_status = status;
                lkb->lkb_callbacks[i].sb_flags = (sbflags & 0x000000FF);
                rv = 0;
                break;
        }

        if (i == DLM_CALLBACKS_SIZE) {
                log_error(ls, "no callbacks %x %llu flags %x mode %d sb %d %x",
                          lkb->lkb_id, (unsigned long long)seq,
                          flags, mode, status, sbflags);
                dlm_dump_lkb_callbacks(lkb);
                rv = -1;
                goto out;
        }
 out:
        return rv;
}

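/*
 * Remove the oldest queued callback (lkb_callbacks[0]) into *cb and shift
 * the remaining entries down; *resid returns the number of callbacks still
 * queued.  A bast whose blocking mode is compatible with the mode of the
 * last completed cast is marked DLM_CB_SKIP instead of being delivered.
 * Delivered casts and basts are recorded in lkb_last_cast/lkb_last_bast.
 * Returns -ENOENT if nothing is queued.
 */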
int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb,
                         struct dlm_callback *cb, int *resid)
{
        int i, rv;

        *resid = 0;

        if (!lkb->lkb_callbacks[0].seq) {
                rv = -ENOENT;
                goto out;
        }

        /* oldest undelivered cb is callbacks[0] */

        memcpy(cb, &lkb->lkb_callbacks[0], sizeof(struct dlm_callback));
        memset(&lkb->lkb_callbacks[0], 0, sizeof(struct dlm_callback));

        /* shift others down */

        for (i = 1; i < DLM_CALLBACKS_SIZE; i++) {
                if (!lkb->lkb_callbacks[i].seq)
                        break;
                memcpy(&lkb->lkb_callbacks[i-1], &lkb->lkb_callbacks[i],
                       sizeof(struct dlm_callback));
                memset(&lkb->lkb_callbacks[i], 0, sizeof(struct dlm_callback));
                (*resid)++;
        }

        /* if cb is a bast, it should be skipped if the blocking mode is
           compatible with the last granted mode */

        if ((cb->flags & DLM_CB_BAST) && lkb->lkb_last_cast.seq) {
                if (dlm_modes_compat(cb->mode, lkb->lkb_last_cast.mode)) {
                        cb->flags |= DLM_CB_SKIP;

                        log_debug(ls, "skip %x bast %llu mode %d "
                                  "for cast %llu mode %d",
                                  lkb->lkb_id,
                                  (unsigned long long)cb->seq,
                                  cb->mode,
                                  (unsigned long long)lkb->lkb_last_cast.seq,
                                  lkb->lkb_last_cast.mode);
                        rv = 0;
                        goto out;
                }
        }

        if (cb->flags & DLM_CB_CAST) {
                memcpy(&lkb->lkb_last_cast, cb, sizeof(struct dlm_callback));
                lkb->lkb_last_cast_time = ktime_get();
        }

        if (cb->flags & DLM_CB_BAST) {
                memcpy(&lkb->lkb_last_bast, cb, sizeof(struct dlm_callback));
                lkb->lkb_last_bast_time = ktime_get();
        }
        rv = 0;
 out:
        return rv;
}

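/*
 * Queue a new cast/bast for an lkb, tagged with a globally increasing
 * (never zero) sequence number.  User-space locks (DLM_IFL_USER) are
 * handed to dlm_user_add_ast().  For kernel locks, the first queued
 * callback takes an lkb reference and either queues the lkb on the
 * lockspace callback workqueue or, while LSFL_CB_DELAY is set, parks it
 * on ls_cb_delay; dlm_callback_work() drops the reference.
 */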
void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
                uint32_t sbflags)
{
        struct dlm_ls *ls = lkb->lkb_resource->res_ls;
        uint64_t new_seq, prev_seq;
        int rv;

        spin_lock(&dlm_cb_seq_spin);
        new_seq = ++dlm_cb_seq;
        if (!dlm_cb_seq)
                new_seq = ++dlm_cb_seq;
        spin_unlock(&dlm_cb_seq_spin);

        if (lkb->lkb_flags & DLM_IFL_USER) {
                dlm_user_add_ast(lkb, flags, mode, status, sbflags, new_seq);
                return;
        }

        mutex_lock(&lkb->lkb_cb_mutex);
        prev_seq = lkb->lkb_callbacks[0].seq;

        rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, new_seq);
        if (rv < 0)
                goto out;

        if (!prev_seq) {
                kref_get(&lkb->lkb_ref);

                if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
                        mutex_lock(&ls->ls_cb_mutex);
                        list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay);
                        mutex_unlock(&ls->ls_cb_mutex);
                } else {
                        queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
                }
        }
 out:
        mutex_unlock(&lkb->lkb_cb_mutex);
}

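/*
 * Workqueue handler: drain all queued callbacks for the lkb under
 * lkb_cb_mutex, then invoke the lock's ast/bast functions outside the
 * mutex.  Drops the lkb reference taken in dlm_add_cb(), which may free
 * the lkb.
 */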
void dlm_callback_work(struct work_struct *work)
{
        struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work);
        struct dlm_ls *ls = lkb->lkb_resource->res_ls;
        void (*castfn) (void *astparam);
        void (*bastfn) (void *astparam, int mode);
        struct dlm_callback callbacks[DLM_CALLBACKS_SIZE];
        int i, rv, resid;

        memset(&callbacks, 0, sizeof(callbacks));

        mutex_lock(&lkb->lkb_cb_mutex);
        if (!lkb->lkb_callbacks[0].seq) {
                /* no callback work exists, shouldn't happen */
                log_error(ls, "dlm_callback_work %x no work", lkb->lkb_id);
                dlm_print_lkb(lkb);
                dlm_dump_lkb_callbacks(lkb);
        }

        for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
                rv = dlm_rem_lkb_callback(ls, lkb, &callbacks[i], &resid);
                if (rv < 0)
                        break;
        }

        if (resid) {
                /* cbs remain, loop should have removed all, shouldn't happen */
                log_error(ls, "dlm_callback_work %x resid %d", lkb->lkb_id,
                          resid);
                dlm_print_lkb(lkb);
                dlm_dump_lkb_callbacks(lkb);
        }
        mutex_unlock(&lkb->lkb_cb_mutex);

        castfn = lkb->lkb_astfn;
        bastfn = lkb->lkb_bastfn;

        for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
                if (!callbacks[i].seq)
                        break;
                if (callbacks[i].flags & DLM_CB_SKIP) {
                        continue;
                } else if (callbacks[i].flags & DLM_CB_BAST) {
                        bastfn(lkb->lkb_astparam, callbacks[i].mode);
                } else if (callbacks[i].flags & DLM_CB_CAST) {
                        lkb->lkb_lksb->sb_status = callbacks[i].sb_status;
                        lkb->lkb_lksb->sb_flags = callbacks[i].sb_flags;
                        castfn(lkb->lkb_astparam);
                }
        }

        /* undo kref_get from dlm_add_cb, may cause lkb to be freed */
        dlm_put_lkb(lkb);
}

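/* Create and destroy the per-lockspace callback workqueue. */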
int dlm_callback_start(struct dlm_ls *ls)
{
        ls->ls_callback_wq = alloc_workqueue("dlm_callback",
                                             WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
        if (!ls->ls_callback_wq) {
                log_print("can't start dlm_callback workqueue");
                return -ENOMEM;
        }
        return 0;
}

void dlm_callback_stop(struct dlm_ls *ls)
{
        if (ls->ls_callback_wq)
                destroy_workqueue(ls->ls_callback_wq);
}

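/*
 * Suspend callback delivery: LSFL_CB_DELAY makes dlm_add_cb() park lkbs
 * on ls_cb_delay instead of queueing work, and any work already queued
 * is flushed out here.
 */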
void dlm_callback_suspend(struct dlm_ls *ls)
{
        set_bit(LSFL_CB_DELAY, &ls->ls_flags);

        if (ls->ls_callback_wq)
                flush_workqueue(ls->ls_callback_wq);
}

#define MAX_CB_QUEUE 25

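/*
 * Resume callback delivery: clear LSFL_CB_DELAY and requeue the delayed
 * lkbs to the callback workqueue in batches of MAX_CB_QUEUE, rescheduling
 * between batches.
 */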
void dlm_callback_resume(struct dlm_ls *ls)
{
        struct dlm_lkb *lkb, *safe;
        int count = 0;

        clear_bit(LSFL_CB_DELAY, &ls->ls_flags);

        if (!ls->ls_callback_wq)
                return;

more:
        mutex_lock(&ls->ls_cb_mutex);
        list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
                list_del_init(&lkb->lkb_cb_list);
                queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
                count++;
                if (count == MAX_CB_QUEUE)
                        break;
        }
        mutex_unlock(&ls->ls_cb_mutex);

        if (count)
                log_rinfo(ls, "dlm_callback_resume %d", count);
        if (count == MAX_CB_QUEUE) {
                count = 0;
                cond_resched();
                goto more;
        }
}