linux/kernel/audit_tree.c
// SPDX-License-Identifier: GPL-2.0
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
        refcount_t count;               /* reference count */
        int goner;                      /* tree is being killed */
        struct audit_chunk *root;       /* chunk of the first tagged inode */
        struct list_head chunks;        /* anchors chunk.owners[].list */
        struct list_head rules;         /* anchors rule.rlist */
        struct list_head list;          /* tree_list / prune_list / killed_trees */
        struct list_head same_root;     /* on root chunk's ->trees */
        struct rcu_head head;
        char pathname[];
};

struct audit_chunk {
        struct list_head hash;          /* chain in chunk_hash_heads[] */
        unsigned long key;              /* inode_to_key() of the watched inode */
        struct fsnotify_mark *mark;
        struct list_head trees;         /* with root here */
        int count;                      /* size of owners[] */
        atomic_long_t refs;
        struct rcu_head head;
        struct node {
                struct list_head list;
                struct audit_tree *owner;
                unsigned index;         /* index; upper bit indicates 'will prune' */
        } owners[];
};

struct audit_tree_mark {
        struct fsnotify_mark mark;
        struct audit_chunk *chunk;
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest through an
 * audit_tree_mark (fsnotify mark). We replace the struct chunk on tagging /
 * untagging; the mark is stable as long as there is a chunk attached. The
 * association between mark and chunk is protected by hash_lock and
 * audit_tree_group->mark_mutex. Thus, as long as we hold
 * audit_tree_group->mark_mutex and verify that the mark is alive via the
 * FSNOTIFY_MARK_FLAG_ATTACHED flag, we can be sure the mark points to
 * the current chunk.
 *
 * Rules have a pointer to struct audit_tree.
 * Rules have a struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list                      hash_lock
 * tree.rules anchors rule.rlist                                audit_filter_mutex
 * chunk.trees anchors tree.same_root                           hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.                                             RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with a pointer to it.
 *
 * chunk is refcounted by embedded .refs. The mark associated with the chunk
 * holds one chunk reference. This reference is dropped either when the mark is
 * going to be freed (the corresponding inode goes away) or when the chunk
 * attached to the mark gets replaced. This reference must be dropped using
 * audit_mark_put_chunk() to make sure it is dropped only after an RCU grace
 * period, as it protects RCU readers of the hash table.
 *
 * node.index lets us get from node.list to the containing chunk.
 * The MSB of that field is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference.  Some.
 */
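
/*
 * A worked example of the node.index encoding (illustrative, not from the
 * original sources): while tag_chunk() adds a tree as owner number 2 of a
 * chunk, it sets owners[2].index = 2 | (1U << 31); once the whole operation
 * has succeeded, audit_add_tree_rule() clears the MSB again, leaving
 * owners[2].index == 2. find_chunk() below only ever looks at the low bits.
 */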

static struct fsnotify_group *audit_tree_group;
static struct kmem_cache *audit_tree_mark_cachep __read_mostly;

static struct audit_tree *alloc_tree(const char *s)
{
        struct audit_tree *tree;

        tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
        if (tree) {
                refcount_set(&tree->count, 1);
                tree->goner = 0;
                INIT_LIST_HEAD(&tree->chunks);
                INIT_LIST_HEAD(&tree->rules);
                INIT_LIST_HEAD(&tree->list);
                INIT_LIST_HEAD(&tree->same_root);
                tree->root = NULL;
                strcpy(tree->pathname, s);
        }
        return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
        refcount_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
        if (refcount_dec_and_test(&tree->count))
                kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
        return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->count; i++) {
                if (chunk->owners[i].owner)
                        put_tree(chunk->owners[i].owner);
        }
        kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
        if (atomic_long_dec_and_test(&chunk->refs))
                free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
        struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
        audit_put_chunk(chunk);
}

/*
 * Drop the reference to the chunk that was held by the mark. This is the
 * reference that gets dropped after we've removed the chunk from the hash
 * table, and we use it to make sure the chunk cannot be freed before the RCU
 * grace period expires.
 */
static void audit_mark_put_chunk(struct audit_chunk *chunk)
{
        call_rcu(&chunk->head, __put_chunk);
}

static inline struct audit_tree_mark *audit_mark(struct fsnotify_mark *mark)
{
        return container_of(mark, struct audit_tree_mark, mark);
}

static struct audit_chunk *mark_chunk(struct fsnotify_mark *mark)
{
        return audit_mark(mark)->chunk;
}

static void audit_tree_destroy_watch(struct fsnotify_mark *mark)
{
        kmem_cache_free(audit_tree_mark_cachep, audit_mark(mark));
}

static struct fsnotify_mark *alloc_mark(void)
{
        struct audit_tree_mark *amark;

        amark = kmem_cache_zalloc(audit_tree_mark_cachep, GFP_KERNEL);
        if (!amark)
                return NULL;
        fsnotify_init_mark(&amark->mark, audit_tree_group);
        amark->mark.mask = FS_IN_IGNORED;
        return &amark->mark;
}

static struct audit_chunk *alloc_chunk(int count)
{
        struct audit_chunk *chunk;
        int i;

        chunk = kzalloc(struct_size(chunk, owners, count), GFP_KERNEL);
        if (!chunk)
                return NULL;

        INIT_LIST_HEAD(&chunk->hash);
        INIT_LIST_HEAD(&chunk->trees);
        chunk->count = count;
        atomic_long_set(&chunk->refs, 1);
        for (i = 0; i < count; i++) {
                INIT_LIST_HEAD(&chunk->owners[i].list);
                chunk->owners[i].index = i;
        }
        return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/* Return the search key in our hash for an inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
        /* Use the address pointed to by connector->obj as the key */
        return (unsigned long)&inode->i_fsnotify_marks;
}

static inline struct list_head *chunk_hash(unsigned long key)
{
        unsigned long n = key / L1_CACHE_BYTES;
        return chunk_hash_heads + n % HASH_SIZE;
}
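
/*
 * Hashing example (illustrative, not from the original sources): for an
 * inode I, the key is the address of I->i_fsnotify_marks, and the bucket is
 * chunk_hash_heads[(key / L1_CACHE_BYTES) % HASH_SIZE]. Dividing by
 * L1_CACHE_BYTES presumably discards the low address bits, which carry
 * little entropy for slab-allocated inodes - matching the "middle bits"
 * hash described in the comment near the top of this file.
 */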

/* hash_lock & mark->group->mark_mutex are held by the caller */
static void insert_hash(struct audit_chunk *chunk)
{
        struct list_head *list;

        /*
         * Make sure chunk is fully initialized before making it visible in the
         * hash. Pairs with a data dependency barrier in READ_ONCE() in
         * audit_tree_lookup().
         */
        smp_wmb();
        WARN_ON_ONCE(!chunk->key);
        list = chunk_hash(chunk->key);
        list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
        unsigned long key = inode_to_key(inode);
        struct list_head *list = chunk_hash(key);
        struct audit_chunk *p;

        list_for_each_entry_rcu(p, list, hash) {
                /*
                 * We use a data dependency barrier in READ_ONCE() to make sure
                 * the chunk we see is fully initialized.
                 */
                if (READ_ONCE(p->key) == key) {
                        atomic_long_inc(&p->refs);
                        return p;
                }
        }
        return NULL;
}

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
        int n;
        for (n = 0; n < chunk->count; n++)
                if (chunk->owners[n].owner == tree)
                        return true;
        return false;
}

/* tagging and untagging inodes with trees */

static struct audit_chunk *find_chunk(struct node *p)
{
        int index = p->index & ~(1U<<31);
        p -= index;
        return container_of(p, struct audit_chunk, owners[0]);
}
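
/*
 * Example of the pointer arithmetic above (illustrative): if p points at
 * chunk->owners[2], then p->index & ~(1U<<31) is 2, p - 2 is
 * &chunk->owners[0], and container_of() steps back to the enclosing chunk.
 */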

static void replace_mark_chunk(struct fsnotify_mark *mark,
                               struct audit_chunk *chunk)
{
        struct audit_chunk *old;

        assert_spin_locked(&hash_lock);
        old = mark_chunk(mark);
        audit_mark(mark)->chunk = chunk;
        if (chunk)
                chunk->mark = mark;
        if (old)
                old->mark = NULL;
}

static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old)
{
        struct audit_tree *owner;
        int i, j;

        new->key = old->key;
        list_splice_init(&old->trees, &new->trees);
        list_for_each_entry(owner, &new->trees, same_root)
                owner->root = new;
        /* j walks the old slots, i the (possibly fewer) new ones */
        for (i = j = 0; j < old->count; i++, j++) {
                if (!old->owners[j].owner) {
                        i--;
                        continue;
                }
                owner = old->owners[j].owner;
                new->owners[i].owner = owner;
                new->owners[i].index = old->owners[j].index - j + i;
                if (!owner) /* result of earlier fallback */
                        continue;
                get_tree(owner);
                list_replace_init(&old->owners[j].list, &new->owners[i].list);
        }
        replace_mark_chunk(old->mark, new);
        /*
         * Make sure chunk is fully initialized before making it visible in the
         * hash. Pairs with a data dependency barrier in READ_ONCE() in
         * audit_tree_lookup().
         */
        smp_wmb();
        list_replace_rcu(&old->hash, &new->hash);
}
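
/*
 * replace_chunk() is called with hash_lock held, both when shrinking a
 * chunk in untag_chunk() and when growing one in tag_chunk(); either way
 * the old chunk stays visible to RCU readers until list_replace_rcu()
 * flips the hash chain over to the new one.
 */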

static void remove_chunk_node(struct audit_chunk *chunk, struct node *p)
{
        struct audit_tree *owner = p->owner;

        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }
        list_del_init(&p->list);
        p->owner = NULL;
        put_tree(owner);
}

static int chunk_count_trees(struct audit_chunk *chunk)
{
        int i;
        int ret = 0;

        for (i = 0; i < chunk->count; i++)
                if (chunk->owners[i].owner)
                        ret++;
        return ret;
}

static void untag_chunk(struct audit_chunk *chunk, struct fsnotify_mark *mark)
{
        struct audit_chunk *new;
        int size;

        mutex_lock(&audit_tree_group->mark_mutex);
        /*
         * mark_mutex stabilizes the chunk attached to the mark so we can check
         * whether it changed while we had dropped hash_lock.
         */
        if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) ||
            mark_chunk(mark) != chunk)
                goto out_mutex;

        size = chunk_count_trees(chunk);
        if (!size) {
                spin_lock(&hash_lock);
                list_del_init(&chunk->trees);
                list_del_rcu(&chunk->hash);
                replace_mark_chunk(mark, NULL);
                spin_unlock(&hash_lock);
                fsnotify_detach_mark(mark);
                mutex_unlock(&audit_tree_group->mark_mutex);
                audit_mark_put_chunk(chunk);
                fsnotify_free_mark(mark);
                return;
        }

        new = alloc_chunk(size);
        if (!new)
                goto out_mutex;

        spin_lock(&hash_lock);
        /*
         * This has to go last when updating the chunk as once replace_chunk()
         * is called, new RCU readers can see the new chunk.
         */
        replace_chunk(new, chunk);
        spin_unlock(&hash_lock);
        mutex_unlock(&audit_tree_group->mark_mutex);
        audit_mark_put_chunk(chunk);
        return;

out_mutex:
        mutex_unlock(&audit_tree_group->mark_mutex);
}

/* Called with group->mark_mutex held; releases it */
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark *mark;
        struct audit_chunk *chunk = alloc_chunk(1);

        if (!chunk) {
                mutex_unlock(&audit_tree_group->mark_mutex);
                return -ENOMEM;
        }

        mark = alloc_mark();
        if (!mark) {
                mutex_unlock(&audit_tree_group->mark_mutex);
                kfree(chunk);
                return -ENOMEM;
        }

        if (fsnotify_add_inode_mark_locked(mark, inode, 0)) {
                mutex_unlock(&audit_tree_group->mark_mutex);
                fsnotify_put_mark(mark);
                kfree(chunk);
                return -ENOSPC;
        }

        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                fsnotify_detach_mark(mark);
                mutex_unlock(&audit_tree_group->mark_mutex);
                fsnotify_free_mark(mark);
                fsnotify_put_mark(mark);
                kfree(chunk);
                return 0;
        }
        replace_mark_chunk(mark, chunk);
        chunk->owners[0].index = (1U << 31);
        chunk->owners[0].owner = tree;
        get_tree(tree);
        list_add(&chunk->owners[0].list, &tree->chunks);
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        chunk->key = inode_to_key(inode);
        /*
         * Inserting into the hash table has to go last as once we do that,
         * RCU readers can see the chunk.
         */
        insert_hash(chunk);
        spin_unlock(&hash_lock);
        mutex_unlock(&audit_tree_group->mark_mutex);
        /*
         * Drop our initial reference. When the mark we point to is being
         * freed, we get a notification through the ->freeing_mark callback
         * and clean up the chunk pointing to this mark.
         */
        fsnotify_put_mark(mark);
        return 0;
}

/* the first tagged inode becomes the root of the tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark *mark;
        struct audit_chunk *chunk, *old;
        struct node *p;
        int n;

        mutex_lock(&audit_tree_group->mark_mutex);
        mark = fsnotify_find_mark(&inode->i_fsnotify_marks, audit_tree_group);
        if (!mark)
                return create_chunk(inode, tree);

        /*
         * The found mark is guaranteed to be attached, and mark_mutex protects
         * it from getting detached; thus a chunk is attached to the mark.
         */
        /* are we already there? */
        spin_lock(&hash_lock);
        old = mark_chunk(mark);
        for (n = 0; n < old->count; n++) {
                if (old->owners[n].owner == tree) {
                        spin_unlock(&hash_lock);
                        mutex_unlock(&audit_tree_group->mark_mutex);
                        fsnotify_put_mark(mark);
                        return 0;
                }
        }
        spin_unlock(&hash_lock);

        chunk = alloc_chunk(old->count + 1);
        if (!chunk) {
                mutex_unlock(&audit_tree_group->mark_mutex);
                fsnotify_put_mark(mark);
                return -ENOMEM;
        }

        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                mutex_unlock(&audit_tree_group->mark_mutex);
                fsnotify_put_mark(mark);
                kfree(chunk);
                return 0;
        }
        p = &chunk->owners[chunk->count - 1];
        p->index = (chunk->count - 1) | (1U<<31);
        p->owner = tree;
        get_tree(tree);
        list_add(&p->list, &tree->chunks);
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        /*
         * This has to go last when updating the chunk as once replace_chunk()
         * is called, new RCU readers can see the new chunk.
         */
        replace_chunk(chunk, old);
        spin_unlock(&hash_lock);
        mutex_unlock(&audit_tree_group->mark_mutex);
        fsnotify_put_mark(mark); /* pairs with fsnotify_find_mark */
        audit_mark_put_chunk(old);

        return 0;
}
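
/*
 * Locking recipe shared by tag_chunk() and untag_chunk(), summarized from
 * the code above: take mark_mutex first, allocate the replacement chunk
 * with GFP_KERNEL while only that mutex is held, then take hash_lock just
 * for the final switch via replace_chunk(). The old chunk is released
 * through audit_mark_put_chunk(), i.e. after an RCU grace period.
 */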

static void audit_tree_log_remove_rule(struct audit_context *context,
                                       struct audit_krule *rule)
{
        struct audit_buffer *ab;

        if (!audit_enabled)
                return;
        ab = audit_log_start(context, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
        if (unlikely(!ab))
                return;
        audit_log_format(ab, "op=remove_rule dir=");
        audit_log_untrustedstring(ab, rule->tree->pathname);
        audit_log_key(ab, rule->filterkey);
        audit_log_format(ab, " list=%d res=1", rule->listnr);
        audit_log_end(ab);
}

static void kill_rules(struct audit_context *context, struct audit_tree *tree)
{
        struct audit_krule *rule, *next;
        struct audit_entry *entry;

        list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
                entry = container_of(rule, struct audit_entry, rule);

                list_del_init(&rule->rlist);
                if (rule->tree) {
                        /* not a half-baked one */
                        audit_tree_log_remove_rule(context, rule);
                        if (entry->rule.exe)
                                audit_remove_mark(entry->rule.exe);
                        rule->tree = NULL;
                        list_del_rcu(&entry->list);
                        list_del(&entry->rule.list);
                        call_rcu(&entry->rcu, audit_free_rule_rcu);
                }
        }
}

/*
 * Remove the tree from chunks. If 'tagged' is set, remove the tree only from
 * tagged chunks. The function expects tagged chunks to be all at the
 * beginning of the chunks list.
 */
static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
{
        spin_lock(&hash_lock);
        while (!list_empty(&victim->chunks)) {
                struct node *p;
                struct audit_chunk *chunk;
                struct fsnotify_mark *mark;

                p = list_first_entry(&victim->chunks, struct node, list);
                /* have we run out of marked? */
                if (tagged && !(p->index & (1U<<31)))
                        break;
                chunk = find_chunk(p);
                mark = chunk->mark;
                remove_chunk_node(chunk, p);
                /* Racing with audit_tree_freeing_mark()? */
                if (!mark)
                        continue;
                fsnotify_get_mark(mark);
                spin_unlock(&hash_lock);

                untag_chunk(chunk, mark);
                fsnotify_put_mark(mark);

                spin_lock(&hash_lock);
        }
        spin_unlock(&hash_lock);
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
        prune_tree_chunks(victim, false);
        put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
        struct list_head *p, *q;
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                return;
        }
        /* reorder: move marked (MSB set) nodes to the front of the list */
        for (p = tree->chunks.next; p != &tree->chunks; p = q) {
                struct node *node = list_entry(p, struct node, list);
                q = p->next;
                if (node->index & (1U<<31)) {
                        list_del_init(p);
                        list_add(p, &tree->chunks);
                }
        }
        spin_unlock(&hash_lock);

        prune_tree_chunks(tree, true);

        spin_lock(&hash_lock);
        if (!tree->root && !tree->goner) {
                tree->goner = 1;
                spin_unlock(&hash_lock);
                mutex_lock(&audit_filter_mutex);
                kill_rules(audit_context(), tree);
                list_del_init(&tree->list);
                mutex_unlock(&audit_filter_mutex);
                prune_one(tree);
        } else {
                spin_unlock(&hash_lock);
        }
}
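
/*
 * The reordering above is what establishes the invariant prune_tree_chunks()
 * relies on when called with tagged == true: every node with the MSB set
 * sits at the head of tree->chunks, so the walk can stop at the first
 * unmarked node.
 */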

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *tree;
        tree = rule->tree;
        if (tree) {
                spin_lock(&hash_lock);
                list_del_init(&rule->rlist);
                if (list_empty(&tree->rules) && !tree->goner) {
                        tree->root = NULL;
                        list_del_init(&tree->same_root);
                        tree->goner = 1;
                        list_move(&tree->list, &prune_list);
                        rule->tree = NULL;
                        spin_unlock(&hash_lock);
                        audit_schedule_prune();
                        return 1;
                }
                rule->tree = NULL;
                spin_unlock(&hash_lock);
                return 1;
        }
        return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
        return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
               (unsigned long)arg;
}

void audit_trim_trees(void)
{
        struct list_head cursor;

        mutex_lock(&audit_filter_mutex);
        list_add(&cursor, &tree_list);
        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                struct path path;
                struct vfsmount *root_mnt;
                struct node *node;
                int err;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_move(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path);
                if (err)
                        goto skip_it;

                root_mnt = collect_mounts(&path);
                path_put(&path);
                if (IS_ERR(root_mnt))
                        goto skip_it;

                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list) {
                        struct audit_chunk *chunk = find_chunk(node);
                        /* this could be NULL if the watch is dying elsewhere... */
                        node->index |= 1U<<31;
                        if (iterate_mounts(compare_root,
                                           (void *)(chunk->key),
                                           root_mnt))
                                node->index &= ~(1U<<31);
                }
                spin_unlock(&hash_lock);
                trim_marked(tree);
                drop_collected_mounts(root_mnt);
skip_it:
                put_tree(tree);
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
        if (pathname[0] != '/' ||
            rule->listnr != AUDIT_FILTER_EXIT ||
            op != Audit_equal ||
            rule->inode_f || rule->watch || rule->tree)
                return -EINVAL;
        rule->tree = alloc_tree(pathname);
        if (!rule->tree)
                return -ENOMEM;
        return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
        put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
        return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
        for (;;) {
                if (list_empty(&prune_list)) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule();
                }

                audit_ctl_lock();
                mutex_lock(&audit_filter_mutex);

                while (!list_empty(&prune_list)) {
                        struct audit_tree *victim;

                        victim = list_entry(prune_list.next,
                                        struct audit_tree, list);
                        list_del_init(&victim->list);

                        mutex_unlock(&audit_filter_mutex);

                        prune_one(victim);

                        mutex_lock(&audit_filter_mutex);
                }

                mutex_unlock(&audit_filter_mutex);
                audit_ctl_unlock();
        }
        return 0;
}

static int audit_launch_prune(void)
{
        if (prune_thread)
                return 0;
        prune_thread = kthread_run(prune_tree_thread, NULL,
                                "audit_prune_tree");
        if (IS_ERR(prune_thread)) {
                pr_err("cannot start thread audit_prune_tree\n");
                prune_thread = NULL;
                return -ENOMEM;
        }
        return 0;
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *seed = rule->tree, *tree;
        struct path path;
        struct vfsmount *mnt;
        int err;

        rule->tree = NULL;
        list_for_each_entry(tree, &tree_list, list) {
                if (!strcmp(seed->pathname, tree->pathname)) {
                        put_tree(seed);
                        rule->tree = tree;
                        list_add(&rule->rlist, &tree->rules);
                        return 0;
                }
        }
        tree = seed;
        list_add(&tree->list, &tree_list);
        list_add(&rule->rlist, &tree->rules);
        /* do not set rule->tree yet */
        mutex_unlock(&audit_filter_mutex);

        if (unlikely(!prune_thread)) {
                err = audit_launch_prune();
                if (err)
                        goto Err;
        }

        err = kern_path(tree->pathname, 0, &path);
        if (err)
                goto Err;
        mnt = collect_mounts(&path);
        path_put(&path);
        if (IS_ERR(mnt)) {
                err = PTR_ERR(mnt);
                goto Err;
        }

        get_tree(tree);
        err = iterate_mounts(tag_mount, tree, mnt);
        drop_collected_mounts(mnt);

        if (!err) {
                struct node *node;
                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list)
                        node->index &= ~(1U<<31);
                spin_unlock(&hash_lock);
        } else {
                trim_marked(tree);
                goto Err;
        }

        mutex_lock(&audit_filter_mutex);
        if (list_empty(&rule->rlist)) {
                put_tree(tree);
                return -ENOENT;
        }
        rule->tree = tree;
        put_tree(tree);

        return 0;
Err:
        mutex_lock(&audit_filter_mutex);
        list_del_init(&tree->list);
        list_del_init(&tree->rules);
        put_tree(tree);
        return err;
}

int audit_tag_tree(char *old, char *new)
{
        struct list_head cursor, barrier;
        int failed = 0;
        struct path path1, path2;
        struct vfsmount *tagged;
        int err;

        err = kern_path(new, 0, &path2);
        if (err)
                return err;
        tagged = collect_mounts(&path2);
        path_put(&path2);
        if (IS_ERR(tagged))
                return PTR_ERR(tagged);

        err = kern_path(old, 0, &path1);
        if (err) {
                drop_collected_mounts(tagged);
                return err;
        }

        mutex_lock(&audit_filter_mutex);
        list_add(&barrier, &tree_list);
        list_add(&cursor, &barrier);

        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                int good_one = 0;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_move(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path2);
                if (!err) {
                        good_one = path_is_under(&path1, &path2);
                        path_put(&path2);
                }

                if (!good_one) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        continue;
                }

                failed = iterate_mounts(tag_mount, tree, tagged);
                if (failed) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        break;
                }

                mutex_lock(&audit_filter_mutex);
                spin_lock(&hash_lock);
                if (!tree->goner) {
                        list_move(&tree->list, &tree_list);
                }
                spin_unlock(&hash_lock);
                put_tree(tree);
        }

        while (barrier.prev != &tree_list) {
                struct audit_tree *tree;

                tree = container_of(barrier.prev, struct audit_tree, list);
                get_tree(tree);
                list_move(&tree->list, &barrier);
                mutex_unlock(&audit_filter_mutex);

                if (!failed) {
                        struct node *node;
                        spin_lock(&hash_lock);
                        list_for_each_entry(node, &tree->chunks, list)
                                node->index &= ~(1U<<31);
                        spin_unlock(&hash_lock);
                } else {
                        trim_marked(tree);
                }

                put_tree(tree);
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&barrier);
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
        path_put(&path1);
        drop_collected_mounts(tagged);
        return failed;
}
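
/*
 * The cursor/barrier dance above, unpacked: 'cursor' remembers how far the
 * first loop has walked tree_list across the windows where it drops
 * audit_filter_mutex, while 'barrier' separates trees that were successfully
 * tagged (moved to the head of tree_list, in front of the barrier) from the
 * rest. The second loop then walks only those trees, either committing the
 * new taggings by clearing the MSB of node->index, or reverting them with
 * trim_marked() if something failed.
 */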

static void audit_schedule_prune(void)
{
        wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of the syscall.  Runs synchronously.
 */
void audit_kill_trees(struct audit_context *context)
{
        struct list_head *list = &context->killed_trees;

        audit_ctl_lock();
        mutex_lock(&audit_filter_mutex);

        while (!list_empty(list)) {
                struct audit_tree *victim;

                victim = list_entry(list->next, struct audit_tree, list);
                kill_rules(context, victim);
                list_del_init(&victim->list);

                mutex_unlock(&audit_filter_mutex);

                prune_one(victim);

                mutex_lock(&audit_filter_mutex);
        }

        mutex_unlock(&audit_filter_mutex);
        audit_ctl_unlock();
}

/*
 *  Here comes the stuff asynchronous to auditctl operations
 */

static void evict_chunk(struct audit_chunk *chunk)
{
        struct audit_tree *owner;
        struct list_head *postponed = audit_killed_trees();
        int need_prune = 0;
        int n;

        mutex_lock(&audit_filter_mutex);
        spin_lock(&hash_lock);
        while (!list_empty(&chunk->trees)) {
                owner = list_entry(chunk->trees.next,
                                   struct audit_tree, same_root);
                owner->goner = 1;
                owner->root = NULL;
                list_del_init(&owner->same_root);
                spin_unlock(&hash_lock);
                if (!postponed) {
                        kill_rules(audit_context(), owner);
                        list_move(&owner->list, &prune_list);
                        need_prune = 1;
                } else {
                        list_move(&owner->list, postponed);
                }
                spin_lock(&hash_lock);
        }
        list_del_rcu(&chunk->hash);
        for (n = 0; n < chunk->count; n++)
                list_del_init(&chunk->owners[n].list);
        spin_unlock(&hash_lock);
        mutex_unlock(&audit_filter_mutex);
        if (need_prune)
                audit_schedule_prune();
}

/*
 * The events themselves are of no interest here (the mark's mask is
 * FS_IN_IGNORED); fsnotify is only used to learn when the mark is being
 * destroyed, via ->freeing_mark below.
 */
static int audit_tree_handle_event(struct fsnotify_mark *mark, u32 mask,
                                   struct inode *inode, struct inode *dir,
                                   const struct qstr *file_name, u32 cookie)
{
        return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *mark,
                                    struct fsnotify_group *group)
{
        struct audit_chunk *chunk;

        mutex_lock(&mark->group->mark_mutex);
        spin_lock(&hash_lock);
        chunk = mark_chunk(mark);
        replace_mark_chunk(mark, NULL);
        spin_unlock(&hash_lock);
        mutex_unlock(&mark->group->mark_mutex);
        if (chunk) {
                evict_chunk(chunk);
                audit_mark_put_chunk(chunk);
        }

        /*
         * We are guaranteed to have at least one reference to the mark from
         * either the inode or the caller of fsnotify_destroy_mark().
         */
        BUG_ON(refcount_read(&mark->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
        .handle_inode_event = audit_tree_handle_event,
        .freeing_mark = audit_tree_freeing_mark,
        .free_mark = audit_tree_destroy_watch,
};

static int __init audit_tree_init(void)
{
        int i;

        audit_tree_mark_cachep = KMEM_CACHE(audit_tree_mark, SLAB_PANIC);

        audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
        if (IS_ERR(audit_tree_group))
                audit_panic("cannot initialize fsnotify group for rectree watches");

        for (i = 0; i < HASH_SIZE; i++)
                INIT_LIST_HEAD(&chunk_hash_heads[i]);

        return 0;
}
__initcall(audit_tree_init);