linux/fs/afs/callback.c
/*
 * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved.
 *
 * This software may be freely redistributed under the terms of the
 * GNU General Public License.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 *          David Howells <dhowells@redhat.com>
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/circ_buf.h>
#include <linux/sched.h>
#include "internal.h"

#if 0
unsigned afs_vnode_update_timeout = 10;
#endif  /*  0  */

#define afs_breakring_space(server) \
        CIRC_SPACE((server)->cb_break_head, (server)->cb_break_tail,    \
                   ARRAY_SIZE((server)->cb_break))
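
/*
 * Note: CIRC_SPACE() (from <linux/circ_buf.h>) gives the number of free slots
 * between the break ring's producer head and consumer tail; its index masking
 * assumes ARRAY_SIZE(server->cb_break) is a power of two.
 */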

//static void afs_callback_updater(struct work_struct *);

static struct workqueue_struct *afs_callback_update_worker;

/*
 * allow the fileserver to request callback state (re-)initialisation
 */
void afs_init_callback_state(struct afs_server *server)
{
        struct afs_vnode *vnode;

        _enter("{%p}", server);

        spin_lock(&server->cb_lock);

        /* kill all the promises on record from this server */
        while (!RB_EMPTY_ROOT(&server->cb_promises)) {
                vnode = rb_entry(server->cb_promises.rb_node,
                                 struct afs_vnode, cb_promise);
                _debug("UNPROMISE { vid=%x:%u uq=%u}",
                       vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
                rb_erase(&vnode->cb_promise, &server->cb_promises);
                vnode->cb_promised = false;
        }

        spin_unlock(&server->cb_lock);
        _leave("");
}

/*
 * handle the data invalidation side of a callback being broken
 */
void afs_broken_callback_work(struct work_struct *work)
{
        struct afs_vnode *vnode =
                container_of(work, struct afs_vnode, cb_broken_work);

        _enter("");

        if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
                return;

        /* we're only interested in dealing with a broken callback on *this*
         * vnode and only if no-one else has dealt with it yet */
        if (!mutex_trylock(&vnode->validate_lock))
                return; /* someone else is dealing with it */

        if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) {
                if (S_ISDIR(vnode->vfs_inode.i_mode))
                        afs_clear_permits(vnode);

                if (afs_vnode_fetch_status(vnode, NULL, NULL) < 0)
                        goto out;

                if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
                        goto out;

                /* if the vnode's data version number changed then its contents
                 * are different */
                if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
                        afs_zap_data(vnode);
        }

out:
        mutex_unlock(&vnode->validate_lock);

        /* avoid the potential race whereby the mutex_trylock() in this
         * function happens again between the clear_bit() and the
         * mutex_unlock() */
        if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) {
                _debug("requeue");
                queue_work(afs_callback_update_worker, &vnode->cb_broken_work);
        }
        _leave("");
}

/*
 * actually break a callback
 */
static void afs_break_callback(struct afs_server *server,
                               struct afs_vnode *vnode)
{
        _enter("");

        set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);

        if (vnode->cb_promised) {
                spin_lock(&vnode->lock);

                _debug("break callback");

                spin_lock(&server->cb_lock);
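                /* recheck the promise under cb_lock - it may have been
                 * discarded since the unlocked check above */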
                if (vnode->cb_promised) {
                        rb_erase(&vnode->cb_promise, &server->cb_promises);
                        vnode->cb_promised = false;
                }
                spin_unlock(&server->cb_lock);

                queue_work(afs_callback_update_worker, &vnode->cb_broken_work);
                if (list_empty(&vnode->granted_locks) &&
                    !list_empty(&vnode->pending_locks))
                        afs_lock_may_be_available(vnode);
                spin_unlock(&vnode->lock);
        }
}

/*
 * allow the fileserver to explicitly break one callback
 * - happens when
 *   - the backing file is changed
 *   - a lock is released
 */
static void afs_break_one_callback(struct afs_server *server,
                                   struct afs_fid *fid)
{
        struct afs_vnode *vnode;
        struct rb_node *p;

        _debug("find");
        spin_lock(&server->fs_lock);
        p = server->fs_vnodes.rb_node;
        while (p) {
                vnode = rb_entry(p, struct afs_vnode, server_rb);
                if (fid->vid < vnode->fid.vid)
                        p = p->rb_left;
                else if (fid->vid > vnode->fid.vid)
                        p = p->rb_right;
                else if (fid->vnode < vnode->fid.vnode)
                        p = p->rb_left;
                else if (fid->vnode > vnode->fid.vnode)
                        p = p->rb_right;
                else if (fid->unique < vnode->fid.unique)
                        p = p->rb_left;
                else if (fid->unique > vnode->fid.unique)
                        p = p->rb_right;
                else
                        goto found;
        }

        /* not found so we just ignore it (it may have moved to another
         * server) */
not_available:
        _debug("not avail");
        spin_unlock(&server->fs_lock);
        _leave("");
        return;

found:
        _debug("found");
        ASSERTCMP(server, ==, vnode->server);

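        /* take a ref on the inode so that it cannot be evicted while the
         * callback is broken outside of fs_lock; it is released again by the
         * iput() below */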
        if (!igrab(AFS_VNODE_TO_I(vnode)))
                goto not_available;
        spin_unlock(&server->fs_lock);

        afs_break_callback(server, vnode);
        iput(&vnode->vfs_inode);
        _leave("");
}

/*
 * allow the fileserver to break callback promises
 */
void afs_break_callbacks(struct afs_server *server, size_t count,
                         struct afs_callback callbacks[])
{
        _enter("%p,%zu,", server, count);

        ASSERT(server != NULL);
        ASSERTCMP(count, <=, AFSCBMAX);

        for (; count > 0; callbacks++, count--) {
                _debug("- Fid { vl=%08x n=%u u=%u }  CB { v=%u x=%u t=%u }",
                       callbacks->fid.vid,
                       callbacks->fid.vnode,
                       callbacks->fid.unique,
                       callbacks->version,
                       callbacks->expiry,
                       callbacks->type
                       );
                afs_break_one_callback(server, &callbacks->fid);
        }

        _leave("");
        return;
}

/*
 * record the callback for breaking
 * - the caller must hold server->cb_lock
 */
static void afs_do_give_up_callback(struct afs_server *server,
                                    struct afs_vnode *vnode)
{
        struct afs_callback *cb;

        _enter("%p,%p", server, vnode);

        cb = &server->cb_break[server->cb_break_head];
        cb->fid         = vnode->fid;
        cb->version     = vnode->cb_version;
        cb->expiry      = vnode->cb_expiry;
        cb->type        = vnode->cb_type;
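        /* make sure the record is fully written before the producer head is
         * advanced; the masking below relies on the break ring size being a
         * power of two */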
        smp_wmb();
        server->cb_break_head =
                (server->cb_break_head + 1) &
                (ARRAY_SIZE(server->cb_break) - 1);

        /* defer the breaking of callbacks to try and collect as many as
         * possible to ship in one operation */
        switch (atomic_inc_return(&server->cb_break_n)) {
        case 1 ... AFSCBMAX - 1:
                queue_delayed_work(afs_callback_update_worker,
                                   &server->cb_break_work, HZ * 2);
                break;
        case AFSCBMAX:
                afs_flush_callback_breaks(server);
                break;
        default:
                break;
        }

        ASSERT(server->cb_promises.rb_node != NULL);
        rb_erase(&vnode->cb_promise, &server->cb_promises);
        vnode->cb_promised = false;
        _leave("");
}

/*
 * discard the callback on a deleted item
 */
void afs_discard_callback_on_delete(struct afs_vnode *vnode)
{
        struct afs_server *server = vnode->server;

        _enter("%d", vnode->cb_promised);

        if (!vnode->cb_promised) {
                _leave(" [not promised]");
                return;
        }

        ASSERT(server != NULL);

        spin_lock(&server->cb_lock);
        if (vnode->cb_promised) {
                ASSERT(server->cb_promises.rb_node != NULL);
                rb_erase(&vnode->cb_promise, &server->cb_promises);
                vnode->cb_promised = false;
        }
        spin_unlock(&server->cb_lock);
        _leave("");
}

/*
 * give up the callback registered for a vnode on the file server when the
 * inode is being cleared
 */
void afs_give_up_callback(struct afs_vnode *vnode)
{
        struct afs_server *server = vnode->server;

        DECLARE_WAITQUEUE(myself, current);

        _enter("%d", vnode->cb_promised);

        _debug("GIVE UP INODE %p", &vnode->vfs_inode);

        if (!vnode->cb_promised) {
                _leave(" [not promised]");
                return;
        }

        ASSERT(server != NULL);

        spin_lock(&server->cb_lock);
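        /* if the break ring is full, wait either for space to appear (the
         * deferred-break dispatcher drains the ring) or for this vnode's
         * promise to be broken by the server */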
        if (vnode->cb_promised && afs_breakring_space(server) == 0) {
                add_wait_queue(&server->cb_break_waitq, &myself);
                for (;;) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (!vnode->cb_promised ||
                            afs_breakring_space(server) != 0)
                                break;
                        spin_unlock(&server->cb_lock);
                        schedule();
                        spin_lock(&server->cb_lock);
                }
                remove_wait_queue(&server->cb_break_waitq, &myself);
                __set_current_state(TASK_RUNNING);
        }

        /* of course, it's always possible for the server to break this vnode's
         * callback first... */
        if (vnode->cb_promised)
                afs_do_give_up_callback(server, vnode);

        spin_unlock(&server->cb_lock);
        _leave("");
}

/*
 * dispatch a deferred give up callbacks operation
 */
void afs_dispatch_give_up_callbacks(struct work_struct *work)
{
        struct afs_server *server =
                container_of(work, struct afs_server, cb_break_work.work);

        _enter("");

        /* tell the fileserver to discard the callback promises it has
         * - in the event of ENOMEM or some other error, we just forget that we
         *   had callbacks entirely, and the server will call us later to break
         *   them
         */
        afs_fs_give_up_callbacks(server, &afs_async_call);
}

/*
 * flush the outstanding callback breaks on a server
 */
void afs_flush_callback_breaks(struct afs_server *server)
{
        mod_delayed_work(afs_callback_update_worker, &server->cb_break_work, 0);
}

#if 0
/*
 * update a bunch of callbacks
 */
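/*
 * Note: this disabled block appears to have been adapted from the volume
 * location updater; it still refers to vl, vldb and afs_vnode_update_worker
 * and would not compile if the #if 0 were removed.
 */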
static void afs_callback_updater(struct work_struct *work)
{
        struct afs_server *server;
        struct afs_vnode *vnode, *xvnode;
        time_t now;
        long timeout;
        int ret;

        server = container_of(work, struct afs_server, updater);

        _enter("");

        now = get_seconds();

        /* find the first vnode to update */
        spin_lock(&server->cb_lock);
        for (;;) {
                if (RB_EMPTY_ROOT(&server->cb_promises)) {
                        spin_unlock(&server->cb_lock);
                        _leave(" [nothing]");
                        return;
                }

                vnode = rb_entry(rb_first(&server->cb_promises),
                                 struct afs_vnode, cb_promise);
                if (atomic_read(&vnode->usage) > 0)
                        break;
                rb_erase(&vnode->cb_promise, &server->cb_promises);
                vnode->cb_promised = false;
        }

        timeout = vnode->update_at - now;
        if (timeout > 0) {
                queue_delayed_work(afs_vnode_update_worker,
                                   &afs_vnode_update, timeout * HZ);
                spin_unlock(&server->cb_lock);
                _leave(" [nothing]");
                return;
        }

        list_del_init(&vnode->update);
        atomic_inc(&vnode->usage);
        spin_unlock(&server->cb_lock);

        /* we can now perform the update */
        _debug("update %s", vnode->vldb.name);
        vnode->state = AFS_VL_UPDATING;
        vnode->upd_rej_cnt = 0;
        vnode->upd_busy_cnt = 0;

        ret = afs_vnode_update_record(vl, &vldb);
        switch (ret) {
        case 0:
                afs_vnode_apply_update(vl, &vldb);
                vnode->state = AFS_VL_UPDATING;
                break;
        case -ENOMEDIUM:
                vnode->state = AFS_VL_VOLUME_DELETED;
                break;
        default:
                vnode->state = AFS_VL_UNCERTAIN;
                break;
        }

        /* and then reschedule */
        _debug("reschedule");
        vnode->update_at = get_seconds() + afs_vnode_update_timeout;

        spin_lock(&server->cb_lock);

        if (!list_empty(&server->cb_promises)) {
                /* next update in 10 minutes, but wait at least 1 second more
                 * than the newest record already queued so that we don't spam
                 * the VL server suddenly with lots of requests
                 */
                xvnode = list_entry(server->cb_promises.prev,
                                    struct afs_vnode, update);
                if (vnode->update_at <= xvnode->update_at)
                        vnode->update_at = xvnode->update_at + 1;
                xvnode = list_entry(server->cb_promises.next,
                                    struct afs_vnode, update);
                timeout = xvnode->update_at - now;
                if (timeout < 0)
                        timeout = 0;
        } else {
                timeout = afs_vnode_update_timeout;
        }

        list_add_tail(&vnode->update, &server->cb_promises);

        _debug("timeout %ld", timeout);
        queue_delayed_work(afs_vnode_update_worker,
                           &afs_vnode_update, timeout * HZ);
        spin_unlock(&server->cb_lock);
        afs_put_vnode(vl);
}
#endif

/*
 * initialise the callback update process
 */
int __init afs_callback_update_init(void)
{
        afs_callback_update_worker =
                create_singlethread_workqueue("kafs_callbackd");
        return afs_callback_update_worker ? 0 : -ENOMEM;
}

/*
 * shut down the callback update process
 */
void afs_callback_update_kill(void)
{
        destroy_workqueue(afs_callback_update_worker);
}