linux/fs/afs/cell.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS cell and server record management
 *
 * Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/key.h>
#include <linux/ctype.h>
#include <linux/dns_resolver.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/namei.h>
#include <keys/rxrpc-type.h>
#include "internal.h"

static unsigned __read_mostly afs_cell_gc_delay = 10;
static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;

static void afs_manage_cell(struct work_struct *);

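/*
 * Drop a count on net->cells_outstanding and wake anyone waiting for it to
 * reach zero (see afs_cell_purge()).
 */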
static void afs_dec_cells_outstanding(struct afs_net *net)
{
        if (atomic_dec_and_test(&net->cells_outstanding))
                wake_up_var(&net->cells_outstanding);
}

/*
 * Set the cell timer to fire after a given delay, assuming it's not already
 * set for an earlier time.
 */
static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
{
        if (net->live) {
                atomic_inc(&net->cells_outstanding);
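                /* timer_reduce() only ever brings the expiry time forward; it
                 * returns nonzero if the timer was already pending, in which
                 * case the pending timer already accounts for a count on
                 * cells_outstanding and the one taken above is dropped again.
                 */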
                if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
                        afs_dec_cells_outstanding(net);
        }
}

/*
 * Look up and get an activation reference on a cell record under RCU
 * conditions.  The caller must hold the RCU read lock.
 */
struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
                                     const char *name, unsigned int namesz)
{
        struct afs_cell *cell = NULL;
        struct rb_node *p;
        int n, seq = 0, ret = 0;

        _enter("%*.*s", namesz, namesz, name);

        if (name && namesz == 0)
                return ERR_PTR(-EINVAL);
        if (namesz > AFS_MAXCELLNAME)
                return ERR_PTR(-ENAMETOOLONG);

        do {
                /* Unfortunately, rbtree walking doesn't give reliable results
                 * under just the RCU read lock, so we have to check for
                 * changes.
                 */
                if (cell)
                        afs_put_cell(net, cell);
                cell = NULL;
                ret = -ENOENT;

                read_seqbegin_or_lock(&net->cells_lock, &seq);

                if (!name) {
                        cell = rcu_dereference_raw(net->ws_cell);
                        if (cell) {
                                afs_get_cell(cell);
                                ret = 0;
                                break;
                        }
                        ret = -EDESTADDRREQ;
                        continue;
                }

                p = rcu_dereference_raw(net->cells.rb_node);
                while (p) {
                        cell = rb_entry(p, struct afs_cell, net_node);

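                        /* Cell names compare case-insensitively; if one name
                         * is a prefix of the other, the shorter name sorts
                         * first.
                         */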
                        n = strncasecmp(cell->name, name,
                                        min_t(size_t, cell->name_len, namesz));
                        if (n == 0)
                                n = cell->name_len - namesz;
                        if (n < 0) {
                                p = rcu_dereference_raw(p->rb_left);
                        } else if (n > 0) {
                                p = rcu_dereference_raw(p->rb_right);
                        } else {
                                if (atomic_inc_not_zero(&cell->usage)) {
                                        ret = 0;
                                        break;
                                }
                                /* We want to repeat the search, this time with
                                 * the lock properly locked.
                                 */
                        }
                        cell = NULL;
                }

        } while (need_seqretry(&net->cells_lock, seq));

        done_seqretry(&net->cells_lock, seq);

        if (ret != 0 && cell)
                afs_put_cell(net, cell);

        return ret == 0 ? cell : ERR_PTR(ret);
}

/*
 * Set up a cell record and fill in its name and VL server address list.
 */
static struct afs_cell *afs_alloc_cell(struct afs_net *net,
                                       const char *name, unsigned int namelen,
                                       const char *addresses)
{
        struct afs_vlserver_list *vllist;
        struct afs_cell *cell;
        int i, ret;

        ASSERT(name);
        if (namelen == 0)
                return ERR_PTR(-EINVAL);
        if (namelen > AFS_MAXCELLNAME) {
                _leave(" = -ENAMETOOLONG");
                return ERR_PTR(-ENAMETOOLONG);
        }

        /* Prohibit cell names that contain unprintable chars, '/' and '@' or
         * that begin with a dot.  This also precludes "@cell".
         */
        if (name[0] == '.')
                return ERR_PTR(-EINVAL);
        for (i = 0; i < namelen; i++) {
                char ch = name[i];
                if (!isprint(ch) || ch == '/' || ch == '@')
                        return ERR_PTR(-EINVAL);
        }

        _enter("%*.*s,%s", namelen, namelen, name, addresses);

        cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
        if (!cell) {
                _leave(" = -ENOMEM");
                return ERR_PTR(-ENOMEM);
        }

        cell->name = kmalloc(namelen + 1, GFP_KERNEL);
        if (!cell->name) {
                kfree(cell);
                return ERR_PTR(-ENOMEM);
        }

        cell->net = net;
        cell->name_len = namelen;
        for (i = 0; i < namelen; i++)
                cell->name[i] = tolower(name[i]);
        cell->name[i] = 0;

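        /* One ref is for the cells tree, the other is for the caller of
         * afs_lookup_cell().
         */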
        atomic_set(&cell->usage, 2);
        INIT_WORK(&cell->manager, afs_manage_cell);
        cell->volumes = RB_ROOT;
        INIT_HLIST_HEAD(&cell->proc_volumes);
        seqlock_init(&cell->volume_lock);
        cell->fs_servers = RB_ROOT;
        seqlock_init(&cell->fs_lock);
        rwlock_init(&cell->vl_servers_lock);
        cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);

        /* Provide a VL server list, filling it in if we were given a list of
         * addresses to use.
         */
        if (addresses) {
                vllist = afs_parse_text_addrs(net,
                                              addresses, strlen(addresses), ':',
                                              VL_SERVICE, AFS_VL_PORT);
                if (IS_ERR(vllist)) {
                        ret = PTR_ERR(vllist);
                        goto parse_failed;
                }

                vllist->source = DNS_RECORD_FROM_CONFIG;
                vllist->status = DNS_LOOKUP_NOT_DONE;
                cell->dns_expiry = TIME64_MAX;
        } else {
                ret = -ENOMEM;
                vllist = afs_alloc_vlserver_list(0);
                if (!vllist)
                        goto error;
                vllist->source = DNS_RECORD_UNAVAILABLE;
                vllist->status = DNS_LOOKUP_NOT_DONE;
                cell->dns_expiry = ktime_get_real_seconds();
        }

        rcu_assign_pointer(cell->vl_servers, vllist);

        cell->dns_source = vllist->source;
        cell->dns_status = vllist->status;
        smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */

        _leave(" = %p", cell);
        return cell;

parse_failed:
        if (ret == -EINVAL)
                printk(KERN_ERR "kAFS: bad VL server IP address\n");
error:
        kfree(cell->name);
        kfree(cell);
        _leave(" = %d", ret);
        return ERR_PTR(ret);
}

/*
 * afs_lookup_cell - Look up or create a cell record.
 * @net:        The network namespace
 * @name:       The name of the cell.
 * @namesz:     The strlen of the cell name.
 * @vllist:     A colon/comma separated list of numeric IP addresses or NULL.
 * @excl:       T if an error should be given if the cell name already exists.
 *
 * Look up a cell record by name and query the DNS for VL server addresses if
 * needed.  Note that the actual DNS query is punted off to the manager thread
 * so that this function can return immediately if interrupted whilst allowing
 * cell records to be shared even if not yet fully constructed.
 */
struct afs_cell *afs_lookup_cell(struct afs_net *net,
                                 const char *name, unsigned int namesz,
                                 const char *vllist, bool excl)
{
        struct afs_cell *cell, *candidate, *cursor;
        struct rb_node *parent, **pp;
        enum afs_cell_state state;
        int ret, n;

        _enter("%s,%s", name, vllist);

        if (!excl) {
                rcu_read_lock();
                cell = afs_lookup_cell_rcu(net, name, namesz);
                rcu_read_unlock();
                if (!IS_ERR(cell))
                        goto wait_for_cell;
        }

        /* Assume we're probably going to create a cell and preallocate and
         * mostly set up a candidate record.  We can then use this to stash the
         * name, the net namespace and VL server addresses.
         *
         * We also want to do this before we hold any locks as it may involve
         * upcalling to userspace to make DNS queries.
         */
        candidate = afs_alloc_cell(net, name, namesz, vllist);
        if (IS_ERR(candidate)) {
                _leave(" = %ld", PTR_ERR(candidate));
                return candidate;
        }

        /* Find the insertion point and check to see if someone else added a
         * cell whilst we were allocating.
         */
        write_seqlock(&net->cells_lock);

        pp = &net->cells.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                cursor = rb_entry(parent, struct afs_cell, net_node);

                n = strncasecmp(cursor->name, name,
                                min_t(size_t, cursor->name_len, namesz));
                if (n == 0)
                        n = cursor->name_len - namesz;
                if (n < 0)
                        pp = &(*pp)->rb_left;
                else if (n > 0)
                        pp = &(*pp)->rb_right;
                else
                        goto cell_already_exists;
        }

        cell = candidate;
        candidate = NULL;
        rb_link_node_rcu(&cell->net_node, parent, pp);
        rb_insert_color(&cell->net_node, &net->cells);
        atomic_inc(&net->cells_outstanding);
        write_sequnlock(&net->cells_lock);

        queue_work(afs_wq, &cell->manager);

wait_for_cell:
        _debug("wait_for_cell");
        wait_var_event(&cell->state,
                       ({
                               state = smp_load_acquire(&cell->state); /* vs error */
                               state == AFS_CELL_ACTIVE || state == AFS_CELL_FAILED;
                       }));

        /* Check the state obtained from the wait check. */
        if (state == AFS_CELL_FAILED) {
                ret = cell->error;
                goto error;
        }

        _leave(" = %p [cell]", cell);
        return cell;

cell_already_exists:
        _debug("cell exists");
        cell = cursor;
        if (excl) {
                ret = -EEXIST;
        } else {
                afs_get_cell(cursor);
                ret = 0;
        }
        write_sequnlock(&net->cells_lock);
        afs_put_vlserverlist(net, rcu_access_pointer(candidate->vl_servers));
        kfree(candidate->name);
        kfree(candidate);
        if (ret == 0)
                goto wait_for_cell;
        goto error_noput;
error:
        afs_put_cell(net, cell);
error_noput:
        _leave(" = %d [error]", ret);
        return ERR_PTR(ret);
}

/*
 * set the root cell information
 * - can be called with a module parameter string
 * - can be called from a write to /proc/fs/afs/rootcell
 */
int afs_cell_init(struct afs_net *net, const char *rootcell)
{
        struct afs_cell *old_root, *new_root;
        const char *cp, *vllist;
        size_t len;

        _enter("");

        if (!rootcell) {
                /* module is loaded with no parameters, or built statically.
                 * - in the future we might initialize cell DB here.
                 */
                _leave(" = 0 [no root]");
                return 0;
        }

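        /* The string has the form "<cellname>[:<VL server address>[:...]]",
         * as given by the rootcell= module parameter or written to
         * /proc/fs/afs/rootcell.
         */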
        cp = strchr(rootcell, ':');
        if (!cp) {
                _debug("kAFS: no VL server IP addresses specified");
                vllist = NULL;
                len = strlen(rootcell);
        } else {
                vllist = cp + 1;
                len = cp - rootcell;
        }

        /* allocate a cell record for the root cell */
        new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
        if (IS_ERR(new_root)) {
                _leave(" = %ld", PTR_ERR(new_root));
                return PTR_ERR(new_root);
        }

        if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
                afs_get_cell(new_root);

        /* install the new cell */
        write_seqlock(&net->cells_lock);
        old_root = rcu_access_pointer(net->ws_cell);
        rcu_assign_pointer(net->ws_cell, new_root);
        write_sequnlock(&net->cells_lock);

        afs_put_cell(net, old_root);
        _leave(" = 0");
        return 0;
}

/*
 * Update a cell's VL server address list from the DNS.
 */
static int afs_update_cell(struct afs_cell *cell)
{
        struct afs_vlserver_list *vllist, *old = NULL, *p;
        unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl);
        unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl);
        time64_t now, expiry = 0;
        int ret = 0;

        _enter("%s", cell->name);

        vllist = afs_dns_query(cell, &expiry);
        if (IS_ERR(vllist)) {
                ret = PTR_ERR(vllist);

                _debug("%s: fail %d", cell->name, ret);
                if (ret == -ENOMEM)
                        goto out_wake;

                vllist = afs_alloc_vlserver_list(0);
                if (!vllist) {
                        ret = -ENOMEM;
                        goto out_wake;
                }

                switch (ret) {
                case -ENODATA:
                case -EDESTADDRREQ:
                        vllist->status = DNS_LOOKUP_GOT_NOT_FOUND;
                        break;
                case -EAGAIN:
                case -ECONNREFUSED:
                        vllist->status = DNS_LOOKUP_GOT_TEMP_FAILURE;
                        break;
                default:
                        vllist->status = DNS_LOOKUP_GOT_LOCAL_FAILURE;
                        break;
                }
        }

        _debug("%s: got list %d %d", cell->name, vllist->source, vllist->status);
        cell->dns_status = vllist->status;

        now = ktime_get_real_seconds();
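        /* Clamp the expiry time returned by the DNS to the range
         * [now + min_ttl, now + max_ttl].
         */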
        if (min_ttl > max_ttl)
                max_ttl = min_ttl;
        if (expiry < now + min_ttl)
                expiry = now + min_ttl;
        else if (expiry > now + max_ttl)
                expiry = now + max_ttl;

        _debug("%s: status %d", cell->name, vllist->status);
        if (vllist->source == DNS_RECORD_UNAVAILABLE) {
                switch (vllist->status) {
                case DNS_LOOKUP_GOT_NOT_FOUND:
                        /* The DNS said that the cell does not exist or there
                         * weren't any addresses to be had.
                         */
                        cell->dns_expiry = expiry;
                        break;

                case DNS_LOOKUP_BAD:
                case DNS_LOOKUP_GOT_LOCAL_FAILURE:
                case DNS_LOOKUP_GOT_TEMP_FAILURE:
                case DNS_LOOKUP_GOT_NS_FAILURE:
                default:
                        cell->dns_expiry = now + 10;
                        break;
                }
        } else {
                cell->dns_expiry = expiry;
        }

        /* Replace the VL server list if the new record has servers or the old
         * record doesn't.
         */
        write_lock(&cell->vl_servers_lock);
        p = rcu_dereference_protected(cell->vl_servers, true);
        if (vllist->nr_servers > 0 || p->nr_servers == 0) {
                rcu_assign_pointer(cell->vl_servers, vllist);
                cell->dns_source = vllist->source;
                old = p;
        }
        write_unlock(&cell->vl_servers_lock);
        afs_put_vlserverlist(cell->net, old);

out_wake:
        smp_store_release(&cell->dns_lookup_count,
                          cell->dns_lookup_count + 1); /* vs source/status */
        wake_up_var(&cell->dns_lookup_count);
        _leave(" = %d", ret);
        return ret;
}

/*
 * Destroy a cell record
 */
static void afs_cell_destroy(struct rcu_head *rcu)
{
        struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);

        _enter("%p{%s}", cell, cell->name);

        ASSERTCMP(atomic_read(&cell->usage), ==, 0);

        afs_put_volume(cell->net, cell->root_volume, afs_volume_trace_put_cell_root);
        afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
        afs_put_cell(cell->net, cell->alias_of);
        key_put(cell->anonymous_key);
        kfree(cell->name);
        kfree(cell);

        _leave(" [destroyed]");
}

/*
 * Queue the cell manager.
 */
static void afs_queue_cell_manager(struct afs_net *net)
{
        int outstanding = atomic_inc_return(&net->cells_outstanding);

        _enter("%d", outstanding);

        if (!queue_work(afs_wq, &net->cells_manager))
                afs_dec_cells_outstanding(net);
}

/*
 * Cell management timer.  We have an increment on cells_outstanding that we
 * need to pass along to the work item.
 */
void afs_cells_timer(struct timer_list *timer)
{
        struct afs_net *net = container_of(timer, struct afs_net, cells_timer);

        _enter("");
        if (!queue_work(afs_wq, &net->cells_manager))
                afs_dec_cells_outstanding(net);
}

/*
 * Get a reference on a cell record.
 */
struct afs_cell *afs_get_cell(struct afs_cell *cell)
{
        atomic_inc(&cell->usage);
        return cell;
}

/*
 * Drop a reference on a cell record.
 */
void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
{
        time64_t now, expire_delay;

        if (!cell)
                return;

        _enter("%s", cell->name);

        now = ktime_get_real_seconds();
        cell->last_inactive = now;
        expire_delay = 0;
        if (cell->vl_servers->nr_servers)
                expire_delay = afs_cell_gc_delay;

        if (atomic_dec_return(&cell->usage) > 1)
                return;

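        /* The count has dropped to 1, so only the cells tree itself still
         * holds a reference.  Poke the management timer so that
         * afs_manage_cell() can do the final 1 -> 0 transition and removal
         * under cells_lock once the expiry delay has elapsed.
         */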
        /* 'cell' may now be garbage collected. */
        afs_set_cell_timer(net, expire_delay);
}

/*
 * Allocate a key to use as a placeholder for anonymous user security.
 */
static int afs_alloc_anon_key(struct afs_cell *cell)
{
        struct key *key;
        char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp;

        /* Create a key to represent an anonymous user. */
        memcpy(keyname, "afs@", 4);
        dp = keyname + 4;
        cp = cell->name;
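        /* Append the cell name, forcing it to lower case; the loop below also
         * copies the terminating NUL.
         */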
        do {
                *dp++ = tolower(*cp);
        } while (*cp++);

        key = rxrpc_get_null_key(keyname);
        if (IS_ERR(key))
                return PTR_ERR(key);

        cell->anonymous_key = key;

        _debug("anon key %p{%x}",
               cell->anonymous_key, key_serial(cell->anonymous_key));
        return 0;
}

/*
 * Activate a cell.
 */
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
        struct hlist_node **p;
        struct afs_cell *pcell;
        int ret;

        if (!cell->anonymous_key) {
                ret = afs_alloc_anon_key(cell);
                if (ret < 0)
                        return ret;
        }

#ifdef CONFIG_AFS_FSCACHE
        cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index,
                                             &afs_cell_cache_index_def,
                                             cell->name, strlen(cell->name),
                                             NULL, 0,
                                             cell, 0, true);
#endif
        ret = afs_proc_cell_setup(cell);
        if (ret < 0)
                return ret;

        mutex_lock(&net->proc_cells_lock);
        for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
                pcell = hlist_entry(*p, struct afs_cell, proc_link);
                if (strcmp(cell->name, pcell->name) < 0)
                        break;
        }

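        /* Splice the cell into the sorted position by hand.  Note that
         * &cell->proc_link.next has the same address as &cell->proc_link
         * (->next is the first member of struct hlist_node), so the
         * rcu_assign_pointer() below publishes the new node itself.
         */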
        cell->proc_link.pprev = p;
        cell->proc_link.next = *p;
        rcu_assign_pointer(*p, &cell->proc_link.next);
        if (cell->proc_link.next)
                cell->proc_link.next->pprev = &cell->proc_link.next;

        afs_dynroot_mkdir(net, cell);
        mutex_unlock(&net->proc_cells_lock);
        return 0;
}

/*
 * Deactivate a cell.
 */
static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
{
        _enter("%s", cell->name);

        afs_proc_cell_remove(cell);

        mutex_lock(&net->proc_cells_lock);
        hlist_del_rcu(&cell->proc_link);
        afs_dynroot_rmdir(net, cell);
        mutex_unlock(&net->proc_cells_lock);

#ifdef CONFIG_AFS_FSCACHE
        fscache_relinquish_cookie(cell->cache, NULL, false);
        cell->cache = NULL;
#endif

        _leave("");
}

/*
 * Manage a cell record, initialising and destroying it, maintaining its DNS
 * records.
 */
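/*
 * The state machine handled below is, in outline:
 *
 *      UNSET -> ACTIVATING -> ACTIVE -> DEACTIVATING -> INACTIVE
 *
 * ACTIVATING drops to FAILED if afs_activate_cell() fails; DEACTIVATING
 * reverts to ACTIVE if the cell gained users in the meantime; INACTIVE either
 * leads to removal and destruction (usage dropped 1 -> 0) or cycles back to
 * UNSET if the cell has been looked up again.  Each transition is published
 * with smp_store_release() and announced with wake_up_var() so that
 * afs_lookup_cell() can wait for ACTIVE or FAILED.
 */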
static void afs_manage_cell(struct work_struct *work)
{
        struct afs_cell *cell = container_of(work, struct afs_cell, manager);
        struct afs_net *net = cell->net;
        bool deleted;
        int ret, usage;

        _enter("%s", cell->name);

again:
        _debug("state %u", cell->state);
        switch (cell->state) {
        case AFS_CELL_INACTIVE:
        case AFS_CELL_FAILED:
                write_seqlock(&net->cells_lock);
                usage = 1;
                deleted = atomic_try_cmpxchg_relaxed(&cell->usage, &usage, 0);
                if (deleted)
                        rb_erase(&cell->net_node, &net->cells);
                write_sequnlock(&net->cells_lock);
                if (deleted)
                        goto final_destruction;
                if (cell->state == AFS_CELL_FAILED)
                        goto done;
                smp_store_release(&cell->state, AFS_CELL_UNSET);
                wake_up_var(&cell->state);
                goto again;

        case AFS_CELL_UNSET:
                smp_store_release(&cell->state, AFS_CELL_ACTIVATING);
                wake_up_var(&cell->state);
                goto again;

        case AFS_CELL_ACTIVATING:
                ret = afs_activate_cell(net, cell);
                if (ret < 0)
                        goto activation_failed;

                smp_store_release(&cell->state, AFS_CELL_ACTIVE);
                wake_up_var(&cell->state);
                goto again;

        case AFS_CELL_ACTIVE:
                if (atomic_read(&cell->usage) > 1) {
                        if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
                                ret = afs_update_cell(cell);
                                if (ret < 0)
                                        cell->error = ret;
                        }
                        goto done;
                }
                smp_store_release(&cell->state, AFS_CELL_DEACTIVATING);
                wake_up_var(&cell->state);
                goto again;

        case AFS_CELL_DEACTIVATING:
                if (atomic_read(&cell->usage) > 1)
                        goto reverse_deactivation;
                afs_deactivate_cell(net, cell);
                smp_store_release(&cell->state, AFS_CELL_INACTIVE);
                wake_up_var(&cell->state);
                goto again;

        default:
                break;
        }
        _debug("bad state %u", cell->state);
        BUG(); /* Unhandled state */

activation_failed:
        cell->error = ret;
        afs_deactivate_cell(net, cell);

        smp_store_release(&cell->state, AFS_CELL_FAILED); /* vs error */
        wake_up_var(&cell->state);
        goto again;

reverse_deactivation:
        smp_store_release(&cell->state, AFS_CELL_ACTIVE);
        wake_up_var(&cell->state);
        _leave(" [deact->act]");
        return;

done:
        _leave(" [done %u]", cell->state);
        return;

final_destruction:
        call_rcu(&cell->rcu, afs_cell_destroy);
        afs_dec_cells_outstanding(net);
        _leave(" [destruct %d]", atomic_read(&net->cells_outstanding));
}

/*
 * Manage the records of cells known to a network namespace.  This includes
 * updating the DNS records and garbage collecting unused cells that were
 * automatically added.
 *
 * Note that constructed cell records may only be removed from net->cells by
 * this work item, so it is safe for this work item to stash a cursor pointing
 * into the tree and then return to caller (provided it skips cells that are
 * still under construction).
 *
 * Note also that we were given an increment on net->cells_outstanding by
 * whoever queued us that we need to deal with before returning.
 */
void afs_manage_cells(struct work_struct *work)
{
        struct afs_net *net = container_of(work, struct afs_net, cells_manager);
        struct rb_node *cursor;
        time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
        bool purging = !net->live;

        _enter("");

        /* Trawl the cell database looking for cells that have expired from
         * lack of use and cells whose DNS results have expired and dispatch
         * their managers.
         */
        read_seqlock_excl(&net->cells_lock);

        for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
                struct afs_cell *cell =
                        rb_entry(cursor, struct afs_cell, net_node);
                unsigned usage;
                bool sched_cell = false;

                usage = atomic_read(&cell->usage);
                _debug("manage %s %u", cell->name, usage);

                ASSERTCMP(usage, >=, 1);

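                /* On namespace teardown, strip the pin that afs_cell_init()
                 * took on the root cell (AFS_CELL_FL_NO_GC) so that it, like
                 * every other cell at this point, is left with only the
                 * tree's reference and can be torn down.
                 */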
                if (purging) {
                        if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
                                usage = atomic_dec_return(&cell->usage);
                        ASSERTCMP(usage, ==, 1);
                }

                if (usage == 1) {
                        struct afs_vlserver_list *vllist;
                        time64_t expire_at = cell->last_inactive;

                        read_lock(&cell->vl_servers_lock);
                        vllist = rcu_dereference_protected(
                                cell->vl_servers,
                                lockdep_is_held(&cell->vl_servers_lock));
                        if (vllist->nr_servers > 0)
                                expire_at += afs_cell_gc_delay;
                        read_unlock(&cell->vl_servers_lock);
                        if (purging || expire_at <= now)
                                sched_cell = true;
                        else if (expire_at < next_manage)
                                next_manage = expire_at;
                }

                if (!purging) {
                        if (test_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags))
                                sched_cell = true;
                }

                if (sched_cell)
                        queue_work(afs_wq, &cell->manager);
        }

        read_sequnlock_excl(&net->cells_lock);

        /* Update the timer on the way out.  We have to pass an increment on
         * cells_outstanding in the namespace that we are in to the timer or
         * the work scheduler.
         */
        if (!purging && next_manage < TIME64_MAX) {
                now = ktime_get_real_seconds();

                if (next_manage - now <= 0) {
                        if (queue_work(afs_wq, &net->cells_manager))
                                atomic_inc(&net->cells_outstanding);
                } else {
                        afs_set_cell_timer(net, next_manage - now);
                }
        }

        afs_dec_cells_outstanding(net);
        _leave(" [%d]", atomic_read(&net->cells_outstanding));
}

/*
 * Purge in-memory cell database.
 */
void afs_cell_purge(struct afs_net *net)
{
        struct afs_cell *ws;

        _enter("");

        write_seqlock(&net->cells_lock);
        ws = rcu_access_pointer(net->ws_cell);
        RCU_INIT_POINTER(net->ws_cell, NULL);
        write_sequnlock(&net->cells_lock);
        afs_put_cell(net, ws);

        _debug("del timer");
        if (del_timer_sync(&net->cells_timer))
                atomic_dec(&net->cells_outstanding);

        _debug("kick mgr");
        afs_queue_cell_manager(net);

        _debug("wait");
        wait_var_event(&net->cells_outstanding,
                       !atomic_read(&net->cells_outstanding));
        _leave("");
}