linux/fs/afs/cell.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS cell and server record management
 *
 * Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/key.h>
#include <linux/ctype.h>
#include <linux/dns_resolver.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/namei.h>
#include <keys/rxrpc-type.h>
#include "internal.h"

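/*
 * How long an unused cell record lingers before it may be garbage collected,
 * and the lower and upper bounds applied to DNS record lifetimes; all three
 * values are in seconds.
 */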
static unsigned __read_mostly afs_cell_gc_delay = 10;
static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;

static void afs_manage_cell(struct work_struct *);

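/*
 * Release one count of outstanding cell-management activity on the namespace
 * and wake anyone waiting for the count to reach zero (see afs_cell_purge()).
 */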
static void afs_dec_cells_outstanding(struct afs_net *net)
{
        if (atomic_dec_and_test(&net->cells_outstanding))
                wake_up_var(&net->cells_outstanding);
}

/*
 * Set the cell timer to fire after a given delay, assuming it's not already
 * set for an earlier time.
 */
static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
{
        if (net->live) {
                atomic_inc(&net->cells_outstanding);
                if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
                        afs_dec_cells_outstanding(net);
        }
}

/*
 * Look up and get an activation reference on a cell record under RCU
 * conditions.  The caller must hold the RCU read lock.
 */
struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
                                     const char *name, unsigned int namesz)
{
        struct afs_cell *cell = NULL;
        struct rb_node *p;
        int n, seq = 0, ret = 0;

        _enter("%*.*s", namesz, namesz, name);

        if (name && namesz == 0)
                return ERR_PTR(-EINVAL);
        if (namesz > AFS_MAXCELLNAME)
                return ERR_PTR(-ENAMETOOLONG);

        do {
                /* Unfortunately, rbtree walking doesn't give reliable results
                 * under just the RCU read lock, so we have to check for
                 * changes.
                 */
                if (cell)
                        afs_put_cell(net, cell);
                cell = NULL;
                ret = -ENOENT;

                read_seqbegin_or_lock(&net->cells_lock, &seq);

                if (!name) {
                        cell = rcu_dereference_raw(net->ws_cell);
                        if (cell) {
                                afs_get_cell(cell);
                                ret = 0;
                                break;
                        }
                        ret = -EDESTADDRREQ;
                        continue;
                }

                p = rcu_dereference_raw(net->cells.rb_node);
                while (p) {
                        cell = rb_entry(p, struct afs_cell, net_node);

                        n = strncasecmp(cell->name, name,
                                        min_t(size_t, cell->name_len, namesz));
                        if (n == 0)
                                n = cell->name_len - namesz;
                        if (n < 0) {
                                p = rcu_dereference_raw(p->rb_left);
                        } else if (n > 0) {
                                p = rcu_dereference_raw(p->rb_right);
                        } else {
                                if (atomic_inc_not_zero(&cell->usage)) {
                                        ret = 0;
                                        break;
                                }
                                /* We want to repeat the search, this time with
                                 * the lock properly locked.
                                 */
                        }
                        cell = NULL;
                }

        } while (need_seqretry(&net->cells_lock, seq));

        done_seqretry(&net->cells_lock, seq);

        if (ret != 0 && cell)
                afs_put_cell(net, cell);

        return ret == 0 ? cell : ERR_PTR(ret);
}

/*
 * Set up a cell record and fill in its name and VL server address list.  The
 * anonymous key is allocated later, when the cell is activated.
 */
static struct afs_cell *afs_alloc_cell(struct afs_net *net,
                                       const char *name, unsigned int namelen,
                                       const char *addresses)
{
        struct afs_vlserver_list *vllist;
        struct afs_cell *cell;
        int i, ret;

        ASSERT(name);
        if (namelen == 0)
                return ERR_PTR(-EINVAL);
        if (namelen > AFS_MAXCELLNAME) {
                _leave(" = -ENAMETOOLONG");
                return ERR_PTR(-ENAMETOOLONG);
        }

        /* Prohibit cell names that contain unprintable chars, '/' and '@' or
         * that begin with a dot.  This also precludes "@cell".
         */
        if (name[0] == '.')
                return ERR_PTR(-EINVAL);
        for (i = 0; i < namelen; i++) {
                char ch = name[i];
                if (!isprint(ch) || ch == '/' || ch == '@')
                        return ERR_PTR(-EINVAL);
        }

        _enter("%*.*s,%s", namelen, namelen, name, addresses);

        cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
        if (!cell) {
                _leave(" = -ENOMEM");
                return ERR_PTR(-ENOMEM);
        }

        cell->net = net;
        cell->name_len = namelen;
        for (i = 0; i < namelen; i++)
                cell->name[i] = tolower(name[i]);

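        /* Two references: one for the caller and one to be held by the cells
         * tree once the record has been inserted.
         */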
        atomic_set(&cell->usage, 2);
        INIT_WORK(&cell->manager, afs_manage_cell);
        INIT_LIST_HEAD(&cell->proc_volumes);
        rwlock_init(&cell->proc_lock);
        rwlock_init(&cell->vl_servers_lock);

        /* Provide a VL server list, filling it in if we were given a list of
         * addresses to use.
         */
        if (addresses) {
                vllist = afs_parse_text_addrs(net,
                                              addresses, strlen(addresses), ':',
                                              VL_SERVICE, AFS_VL_PORT);
                if (IS_ERR(vllist)) {
                        ret = PTR_ERR(vllist);
                        goto parse_failed;
                }

                vllist->source = DNS_RECORD_FROM_CONFIG;
                vllist->status = DNS_LOOKUP_NOT_DONE;
                cell->dns_expiry = TIME64_MAX;
        } else {
                ret = -ENOMEM;
                vllist = afs_alloc_vlserver_list(0);
                if (!vllist)
                        goto error;
                vllist->source = DNS_RECORD_UNAVAILABLE;
                vllist->status = DNS_LOOKUP_NOT_DONE;
                cell->dns_expiry = ktime_get_real_seconds();
        }

        rcu_assign_pointer(cell->vl_servers, vllist);

        cell->dns_source = vllist->source;
        cell->dns_status = vllist->status;
        smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */

        _leave(" = %p", cell);
        return cell;

parse_failed:
        if (ret == -EINVAL)
                printk(KERN_ERR "kAFS: bad VL server IP address\n");
error:
        kfree(cell);
        _leave(" = %d", ret);
        return ERR_PTR(ret);
}

/*
 * afs_lookup_cell - Look up or create a cell record.
 * @net:        The network namespace
 * @name:       The name of the cell.
 * @namesz:     The strlen of the cell name.
 * @vllist:     A colon/comma separated list of numeric IP addresses or NULL.
 * @excl:       T if an error should be given if the cell name already exists.
 *
 * Look up a cell record by name and query the DNS for VL server addresses if
 * needed.  Note that the actual DNS query is punted off to the manager thread
 * so that this function can return immediately if interrupted whilst allowing
 * cell records to be shared even if not yet fully constructed.
 */
struct afs_cell *afs_lookup_cell(struct afs_net *net,
                                 const char *name, unsigned int namesz,
                                 const char *vllist, bool excl)
{
        struct afs_cell *cell, *candidate, *cursor;
        struct rb_node *parent, **pp;
        enum afs_cell_state state;
        int ret, n;

        _enter("%s,%s", name, vllist);

        if (!excl) {
                rcu_read_lock();
                cell = afs_lookup_cell_rcu(net, name, namesz);
                rcu_read_unlock();
                if (!IS_ERR(cell))
                        goto wait_for_cell;
        }

        /* Assume we're probably going to create a cell and preallocate and
         * mostly set up a candidate record.  We can then use this to stash the
         * name, the net namespace and VL server addresses.
         *
         * We also want to do this before we hold any locks as it may involve
         * upcalling to userspace to make DNS queries.
         */
        candidate = afs_alloc_cell(net, name, namesz, vllist);
        if (IS_ERR(candidate)) {
                _leave(" = %ld", PTR_ERR(candidate));
                return candidate;
        }

        /* Find the insertion point and check to see if someone else added a
         * cell whilst we were allocating.
         */
        write_seqlock(&net->cells_lock);

        pp = &net->cells.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                cursor = rb_entry(parent, struct afs_cell, net_node);

                n = strncasecmp(cursor->name, name,
                                min_t(size_t, cursor->name_len, namesz));
                if (n == 0)
                        n = cursor->name_len - namesz;
                if (n < 0)
                        pp = &(*pp)->rb_left;
                else if (n > 0)
                        pp = &(*pp)->rb_right;
                else
                        goto cell_already_exists;
        }

        cell = candidate;
        candidate = NULL;
        rb_link_node_rcu(&cell->net_node, parent, pp);
        rb_insert_color(&cell->net_node, &net->cells);
        atomic_inc(&net->cells_outstanding);
        write_sequnlock(&net->cells_lock);

        queue_work(afs_wq, &cell->manager);

wait_for_cell:
        _debug("wait_for_cell");
        wait_var_event(&cell->state,
                       ({
                               state = smp_load_acquire(&cell->state); /* vs error */
                               state == AFS_CELL_ACTIVE || state == AFS_CELL_FAILED;
                       }));

        /* Check the state obtained from the wait check. */
        if (state == AFS_CELL_FAILED) {
                ret = cell->error;
                goto error;
        }

        _leave(" = %p [cell]", cell);
        return cell;

cell_already_exists:
        _debug("cell exists");
        cell = cursor;
        if (excl) {
                ret = -EEXIST;
        } else {
                afs_get_cell(cursor);
                ret = 0;
        }
        write_sequnlock(&net->cells_lock);
        kfree(candidate);
        if (ret == 0)
                goto wait_for_cell;
        goto error_noput;
error:
        afs_put_cell(net, cell);
error_noput:
        _leave(" = %d [error]", ret);
        return ERR_PTR(ret);
}

/*
 * set the root cell information
 * - can be called with a module parameter string
 * - can be called from a write to /proc/fs/afs/rootcell
 */
int afs_cell_init(struct afs_net *net, const char *rootcell)
{
        struct afs_cell *old_root, *new_root;
        const char *cp, *vllist;
        size_t len;

        _enter("");

        if (!rootcell) {
                /* module is loaded with no parameters, or built statically.
                 * - in the future we might initialize cell DB here.
                 */
                _leave(" = 0 [no root]");
                return 0;
        }

        cp = strchr(rootcell, ':');
        if (!cp) {
                _debug("kAFS: no VL server IP addresses specified");
                vllist = NULL;
                len = strlen(rootcell);
        } else {
                vllist = cp + 1;
                len = cp - rootcell;
        }

        /* allocate a cell record for the root cell */
        new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
        if (IS_ERR(new_root)) {
                _leave(" = %ld", PTR_ERR(new_root));
                return PTR_ERR(new_root);
        }

        if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
                afs_get_cell(new_root);

        /* install the new cell */
        write_seqlock(&net->cells_lock);
        old_root = rcu_access_pointer(net->ws_cell);
        rcu_assign_pointer(net->ws_cell, new_root);
        write_sequnlock(&net->cells_lock);

        afs_put_cell(net, old_root);
        _leave(" = 0");
        return 0;
}

/*
 * Update a cell's VL server address list from the DNS.
 */
static int afs_update_cell(struct afs_cell *cell)
{
        struct afs_vlserver_list *vllist, *old = NULL, *p;
        unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl);
        unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl);
        time64_t now, expiry = 0;
        int ret = 0;

        _enter("%s", cell->name);

        vllist = afs_dns_query(cell, &expiry);
        if (IS_ERR(vllist)) {
                ret = PTR_ERR(vllist);

                _debug("%s: fail %d", cell->name, ret);
                if (ret == -ENOMEM)
                        goto out_wake;

                vllist = afs_alloc_vlserver_list(0);
                if (!vllist) {
                        ret = -ENOMEM;
                        goto out_wake;
                }

                switch (ret) {
                case -ENODATA:
                case -EDESTADDRREQ:
                        vllist->status = DNS_LOOKUP_GOT_NOT_FOUND;
                        break;
                case -EAGAIN:
                case -ECONNREFUSED:
                        vllist->status = DNS_LOOKUP_GOT_TEMP_FAILURE;
                        break;
                default:
                        vllist->status = DNS_LOOKUP_GOT_LOCAL_FAILURE;
                        break;
                }
        }

        _debug("%s: got list %d %d", cell->name, vllist->source, vllist->status);
        cell->dns_status = vllist->status;

        now = ktime_get_real_seconds();
        if (min_ttl > max_ttl)
                max_ttl = min_ttl;
        if (expiry < now + min_ttl)
                expiry = now + min_ttl;
        else if (expiry > now + max_ttl)
                expiry = now + max_ttl;

        _debug("%s: status %d", cell->name, vllist->status);
        if (vllist->source == DNS_RECORD_UNAVAILABLE) {
                switch (vllist->status) {
                case DNS_LOOKUP_GOT_NOT_FOUND:
                        /* The DNS said that the cell does not exist or there
                         * weren't any addresses to be had.
                         */
                        cell->dns_expiry = expiry;
                        break;

                case DNS_LOOKUP_BAD:
                case DNS_LOOKUP_GOT_LOCAL_FAILURE:
                case DNS_LOOKUP_GOT_TEMP_FAILURE:
                case DNS_LOOKUP_GOT_NS_FAILURE:
                default:
                        cell->dns_expiry = now + 10;
                        break;
                }
        } else {
                cell->dns_expiry = expiry;
        }

        /* Replace the VL server list if the new record has servers or the old
         * record doesn't.
         */
        write_lock(&cell->vl_servers_lock);
        p = rcu_dereference_protected(cell->vl_servers, true);
        if (vllist->nr_servers > 0 || p->nr_servers == 0) {
                rcu_assign_pointer(cell->vl_servers, vllist);
                cell->dns_source = vllist->source;
                old = p;
        }
        write_unlock(&cell->vl_servers_lock);
        afs_put_vlserverlist(cell->net, old);

out_wake:
        smp_store_release(&cell->dns_lookup_count,
                          cell->dns_lookup_count + 1); /* vs source/status */
        wake_up_var(&cell->dns_lookup_count);
        _leave(" = %d", ret);
        return ret;
}

/*
 * Destroy a cell record
 */
static void afs_cell_destroy(struct rcu_head *rcu)
{
        struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);

        _enter("%p{%s}", cell, cell->name);

        ASSERTCMP(atomic_read(&cell->usage), ==, 0);

        afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
        key_put(cell->anonymous_key);
        kfree(cell);

        _leave(" [destroyed]");
}

/*
 * Queue the cell manager.
 */
static void afs_queue_cell_manager(struct afs_net *net)
{
        int outstanding = atomic_inc_return(&net->cells_outstanding);

        _enter("%d", outstanding);

        if (!queue_work(afs_wq, &net->cells_manager))
                afs_dec_cells_outstanding(net);
}

/*
 * Cell management timer.  We have an increment on cells_outstanding that we
 * need to pass along to the work item.
 */
void afs_cells_timer(struct timer_list *timer)
{
        struct afs_net *net = container_of(timer, struct afs_net, cells_timer);

        _enter("");
        if (!queue_work(afs_wq, &net->cells_manager))
                afs_dec_cells_outstanding(net);
}

/*
 * Get a reference on a cell record.
 */
struct afs_cell *afs_get_cell(struct afs_cell *cell)
{
        atomic_inc(&cell->usage);
        return cell;
}

/*
 * Drop a reference on a cell record.
 */
void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
{
        time64_t now, expire_delay;

        if (!cell)
                return;

        _enter("%s", cell->name);

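        /* Note the time of last use and work out the expiry delay before
         * dropping our reference: once the reference is gone the manager may
         * dispose of the cell and it must not be touched again.
         */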
        now = ktime_get_real_seconds();
        cell->last_inactive = now;
        expire_delay = 0;
        if (cell->vl_servers->nr_servers)
                expire_delay = afs_cell_gc_delay;

        if (atomic_dec_return(&cell->usage) > 1)
                return;

        /* 'cell' may now be garbage collected. */
        afs_set_cell_timer(net, expire_delay);
}

/*
 * Allocate a key to use as a placeholder for anonymous user security.
 */
static int afs_alloc_anon_key(struct afs_cell *cell)
{
        struct key *key;
        char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp;

        /* Create a key to represent an anonymous user. */
        memcpy(keyname, "afs@", 4);
        dp = keyname + 4;
        cp = cell->name;
        do {
                *dp++ = tolower(*cp);
        } while (*cp++);

        key = rxrpc_get_null_key(keyname);
        if (IS_ERR(key))
                return PTR_ERR(key);

        cell->anonymous_key = key;

        _debug("anon key %p{%x}",
               cell->anonymous_key, key_serial(cell->anonymous_key));
        return 0;
}

/*
 * Activate a cell.
 */
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
        struct hlist_node **p;
        struct afs_cell *pcell;
        int ret;

        if (!cell->anonymous_key) {
                ret = afs_alloc_anon_key(cell);
                if (ret < 0)
                        return ret;
        }

#ifdef CONFIG_AFS_FSCACHE
        cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index,
                                             &afs_cell_cache_index_def,
                                             cell->name, strlen(cell->name),
                                             NULL, 0,
                                             cell, 0, true);
#endif
        ret = afs_proc_cell_setup(cell);
        if (ret < 0)
                return ret;

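        /* Add the cell to the list of cells, kept sorted by name, that is
         * used by /proc/fs/afs/cells and the dynamic root.
         */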
        mutex_lock(&net->proc_cells_lock);
        for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
                pcell = hlist_entry(*p, struct afs_cell, proc_link);
                if (strcmp(cell->name, pcell->name) < 0)
                        break;
        }

        cell->proc_link.pprev = p;
        cell->proc_link.next = *p;
        rcu_assign_pointer(*p, &cell->proc_link.next);
        if (cell->proc_link.next)
                cell->proc_link.next->pprev = &cell->proc_link.next;

        afs_dynroot_mkdir(net, cell);
        mutex_unlock(&net->proc_cells_lock);
        return 0;
}

/*
 * Deactivate a cell.
 */
static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
{
        _enter("%s", cell->name);

        afs_proc_cell_remove(cell);

        mutex_lock(&net->proc_cells_lock);
        hlist_del_rcu(&cell->proc_link);
        afs_dynroot_rmdir(net, cell);
        mutex_unlock(&net->proc_cells_lock);

#ifdef CONFIG_AFS_FSCACHE
        fscache_relinquish_cookie(cell->cache, NULL, false);
        cell->cache = NULL;
#endif

        _leave("");
}

/*
 * Manage a cell record, initialising and destroying it, maintaining its DNS
 * records.
 */
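/*
 * A cell record is driven through a simple state machine:
 *
 *   UNSET -> ACTIVATING -> ACTIVE -> DEACTIVATING -> INACTIVE
 *
 * ACTIVATING moves to FAILED instead if activation fails, DEACTIVATING
 * returns to ACTIVE if the cell gained a new user before it could be torn
 * down, and an INACTIVE cell that can't yet be removed from the tree is
 * resurrected via UNSET.  Each transition wakes anyone waiting on
 * cell->state.
 */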
static void afs_manage_cell(struct work_struct *work)
{
        struct afs_cell *cell = container_of(work, struct afs_cell, manager);
        struct afs_net *net = cell->net;
        bool deleted;
        int ret, usage;

        _enter("%s", cell->name);

again:
        _debug("state %u", cell->state);
        switch (cell->state) {
        case AFS_CELL_INACTIVE:
        case AFS_CELL_FAILED:
                write_seqlock(&net->cells_lock);
                usage = 1;
                deleted = atomic_try_cmpxchg_relaxed(&cell->usage, &usage, 0);
                if (deleted)
                        rb_erase(&cell->net_node, &net->cells);
                write_sequnlock(&net->cells_lock);
                if (deleted)
                        goto final_destruction;
                if (cell->state == AFS_CELL_FAILED)
                        goto done;
                smp_store_release(&cell->state, AFS_CELL_UNSET);
                wake_up_var(&cell->state);
                goto again;

        case AFS_CELL_UNSET:
                smp_store_release(&cell->state, AFS_CELL_ACTIVATING);
                wake_up_var(&cell->state);
                goto again;

        case AFS_CELL_ACTIVATING:
                ret = afs_activate_cell(net, cell);
                if (ret < 0)
                        goto activation_failed;

                smp_store_release(&cell->state, AFS_CELL_ACTIVE);
                wake_up_var(&cell->state);
                goto again;

        case AFS_CELL_ACTIVE:
                if (atomic_read(&cell->usage) > 1) {
                        if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
                                ret = afs_update_cell(cell);
                                if (ret < 0)
                                        cell->error = ret;
                        }
                        goto done;
                }
                smp_store_release(&cell->state, AFS_CELL_DEACTIVATING);
                wake_up_var(&cell->state);
                goto again;

        case AFS_CELL_DEACTIVATING:
                if (atomic_read(&cell->usage) > 1)
                        goto reverse_deactivation;
                afs_deactivate_cell(net, cell);
                smp_store_release(&cell->state, AFS_CELL_INACTIVE);
                wake_up_var(&cell->state);
                goto again;

        default:
                break;
        }
        _debug("bad state %u", cell->state);
        BUG(); /* Unhandled state */

activation_failed:
        cell->error = ret;
        afs_deactivate_cell(net, cell);

        smp_store_release(&cell->state, AFS_CELL_FAILED); /* vs error */
        wake_up_var(&cell->state);
        goto again;

reverse_deactivation:
        smp_store_release(&cell->state, AFS_CELL_ACTIVE);
        wake_up_var(&cell->state);
        _leave(" [deact->act]");
        return;

done:
        _leave(" [done %u]", cell->state);
        return;

final_destruction:
        call_rcu(&cell->rcu, afs_cell_destroy);
        afs_dec_cells_outstanding(net);
        _leave(" [destruct %d]", atomic_read(&net->cells_outstanding));
}

/*
 * Manage the records of cells known to a network namespace.  This includes
 * updating the DNS records and garbage collecting unused cells that were
 * automatically added.
 *
 * Note that constructed cell records may only be removed from net->cells by
 * this work item, so it is safe for this work item to stash a cursor pointing
 * into the tree and then return to the caller (provided it skips cells that
 * are still under construction).
 *
 * Note also that we were given an increment on net->cells_outstanding by
 * whoever queued us that we need to deal with before returning.
 */
void afs_manage_cells(struct work_struct *work)
{
        struct afs_net *net = container_of(work, struct afs_net, cells_manager);
        struct rb_node *cursor;
        time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
        bool purging = !net->live;

        _enter("");

        /* Trawl the cell database looking for cells that have expired from
         * lack of use and cells whose DNS results have expired and dispatch
         * their managers.
         */
        read_seqlock_excl(&net->cells_lock);

        for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
                struct afs_cell *cell =
                        rb_entry(cursor, struct afs_cell, net_node);
                unsigned usage;
                bool sched_cell = false;

                usage = atomic_read(&cell->usage);
                _debug("manage %s %u", cell->name, usage);

                ASSERTCMP(usage, >=, 1);

                if (purging) {
                        if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
                                usage = atomic_dec_return(&cell->usage);
                        ASSERTCMP(usage, ==, 1);
                }

                if (usage == 1) {
                        struct afs_vlserver_list *vllist;
                        time64_t expire_at = cell->last_inactive;

                        read_lock(&cell->vl_servers_lock);
                        vllist = rcu_dereference_protected(
                                cell->vl_servers,
                                lockdep_is_held(&cell->vl_servers_lock));
                        if (vllist->nr_servers > 0)
                                expire_at += afs_cell_gc_delay;
                        read_unlock(&cell->vl_servers_lock);
                        if (purging || expire_at <= now)
                                sched_cell = true;
                        else if (expire_at < next_manage)
                                next_manage = expire_at;
                }

                if (!purging) {
                        if (test_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags))
                                sched_cell = true;
                }

                if (sched_cell)
                        queue_work(afs_wq, &cell->manager);
        }

        read_sequnlock_excl(&net->cells_lock);

        /* Update the timer on the way out.  We have to pass an increment on
         * cells_outstanding in the namespace that we are in to the timer or
         * the work scheduler.
         */
        if (!purging && next_manage < TIME64_MAX) {
                now = ktime_get_real_seconds();

                if (next_manage - now <= 0) {
                        if (queue_work(afs_wq, &net->cells_manager))
                                atomic_inc(&net->cells_outstanding);
                } else {
                        afs_set_cell_timer(net, next_manage - now);
                }
        }

        afs_dec_cells_outstanding(net);
        _leave(" [%d]", atomic_read(&net->cells_outstanding));
}

/*
 * Purge in-memory cell database.
 */
void afs_cell_purge(struct afs_net *net)
{
        struct afs_cell *ws;

        _enter("");

        write_seqlock(&net->cells_lock);
        ws = rcu_access_pointer(net->ws_cell);
        RCU_INIT_POINTER(net->ws_cell, NULL);
        write_sequnlock(&net->cells_lock);
        afs_put_cell(net, ws);

        _debug("del timer");
        if (del_timer_sync(&net->cells_timer))
                atomic_dec(&net->cells_outstanding);

        _debug("kick mgr");
        afs_queue_cell_manager(net);

        _debug("wait");
        wait_var_event(&net->cells_outstanding,
                       !atomic_read(&net->cells_outstanding));
        _leave("");
}