linux/kernel/audit_tree.c
// SPDX-License-Identifier: GPL-2.0
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
        refcount_t count;
        int goner;
        struct audit_chunk *root;
        struct list_head chunks;
        struct list_head rules;
        struct list_head list;
        struct list_head same_root;
        struct rcu_head head;
        char pathname[];
};

struct audit_chunk {
        struct list_head hash;
        struct fsnotify_mark mark;
        struct list_head trees;         /* with root here */
        int dead;
        int count;
        atomic_long_t refs;
        struct rcu_head head;
        struct node {
                struct list_head list;
                struct audit_tree *owner;
                unsigned index;         /* index; upper bit indicates 'will prune' */
        } owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list                      hash_lock
 * tree.rules anchors rule.rlist                                audit_filter_mutex
 * chunk.trees anchors tree.same_root                           hash_lock
 * chunk.hash is a hash table entry, hashed by the middle
 * bits of the inode address.                                   RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", plus one for each chunk with a pointer to it.
 *
 * chunk is refcounted by the embedded fsnotify_mark + .refs (a non-zero
 * refcount of the mark contributes 1 to .refs).
 *
 * node.index lets us get from node.list to the containing chunk.
 * The MSB of that field is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic, and
 * knowing which taggings are tentative makes a real difference there.
 */
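/*
 * Illustration: in a chunk with three owners, owners[2].index is 2; while
 * that tagging is still tentative the stored value is 2 | (1U << 31).
 * find_chunk() masks the MSB off, subtracts the index to step back to
 * owners[0], and container_of() then recovers the chunk.
 */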

static struct fsnotify_group *audit_tree_group;

static struct audit_tree *alloc_tree(const char *s)
{
        struct audit_tree *tree;

        tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
        if (tree) {
                refcount_set(&tree->count, 1);
                tree->goner = 0;
                INIT_LIST_HEAD(&tree->chunks);
                INIT_LIST_HEAD(&tree->rules);
                INIT_LIST_HEAD(&tree->list);
                INIT_LIST_HEAD(&tree->same_root);
                tree->root = NULL;
                strcpy(tree->pathname, s);
        }
        return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
        refcount_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
        if (refcount_dec_and_test(&tree->count))
                kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
        return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->count; i++) {
                if (chunk->owners[i].owner)
                        put_tree(chunk->owners[i].owner);
        }
        kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
        if (atomic_long_dec_and_test(&chunk->refs))
                free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
        struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
        audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
        struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
        call_rcu(&chunk->head, __put_chunk);
}

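/*
 * Allocate a chunk with room for @count owners.  The owners[] flexible array
 * is carved out of the same allocation, so a single kzalloc() covers the
 * chunk header and all of its nodes; the initial .refs reference is the one
 * the embedded mark contributes (dropped via the free_mark callback above).
 */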
static struct audit_chunk *alloc_chunk(int count)
{
        struct audit_chunk *chunk;
        size_t size;
        int i;

        size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
        chunk = kzalloc(size, GFP_KERNEL);
        if (!chunk)
                return NULL;

        INIT_LIST_HEAD(&chunk->hash);
        INIT_LIST_HEAD(&chunk->trees);
        chunk->count = count;
        atomic_long_set(&chunk->refs, 1);
        for (i = 0; i < count; i++) {
                INIT_LIST_HEAD(&chunk->owners[i].list);
                chunk->owners[i].index = i;
        }
        fsnotify_init_mark(&chunk->mark, audit_tree_group);
        chunk->mark.mask = FS_IN_IGNORED;
        return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
        return (unsigned long)inode;
}

/*
 * Function to return search key in our hash from chunk. Key 0 is special and
 * should never be present in the hash.
 */
static unsigned long chunk_to_key(struct audit_chunk *chunk)
{
        /*
         * We have a reference to the mark so it should be attached to a
         * connector.
         */
        if (WARN_ON_ONCE(!chunk->mark.connector))
                return 0;
        return (unsigned long)chunk->mark.connector->inode;
}

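/*
 * The key is an inode address; its low bits carry little entropy, so the
 * division by L1_CACHE_BYTES discards them before the modulus picks a
 * bucket.
 */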
static inline struct list_head *chunk_hash(unsigned long key)
{
        unsigned long n = key / L1_CACHE_BYTES;
        return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock are held by the caller */
static void insert_hash(struct audit_chunk *chunk)
{
        unsigned long key = chunk_to_key(chunk);
        struct list_head *list;

        if (!(chunk->mark.flags & FSNOTIFY_MARK_FLAG_ATTACHED))
                return;
        list = chunk_hash(key);
        list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
        unsigned long key = inode_to_key(inode);
        struct list_head *list = chunk_hash(key);
        struct audit_chunk *p;

        list_for_each_entry_rcu(p, list, hash) {
                if (chunk_to_key(p) == key) {
                        atomic_long_inc(&p->refs);
                        return p;
                }
        }
        return NULL;
}

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
        int n;
        for (n = 0; n < chunk->count; n++)
                if (chunk->owners[n].owner == tree)
                        return true;
        return false;
}

/* tagging and untagging inodes with trees */

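/*
 * Map a node back to its containing chunk: mask off the "will prune" MSB
 * to recover the slot number, step the pointer back to owners[0], and let
 * container_of() do the rest.
 */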
static struct audit_chunk *find_chunk(struct node *p)
{
        int index = p->index & ~(1U<<31);
        p -= index;
        return container_of(p, struct audit_chunk, owners[0]);
}

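/*
 * Remove the tagging @p from its chunk.  Entered and left with hash_lock
 * held, but the lock is dropped in between: either the whole chunk goes
 * away (last owner), or a replacement chunk one slot smaller is built and
 * swapped in under RCU.  If allocating the replacement fails, fall back to
 * merely clearing the owner slot in place.
 */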
static void untag_chunk(struct node *p)
{
        struct audit_chunk *chunk = find_chunk(p);
        struct fsnotify_mark *entry = &chunk->mark;
        struct audit_chunk *new = NULL;
        struct audit_tree *owner;
        int size = chunk->count - 1;
        int i, j;

        fsnotify_get_mark(entry);

        spin_unlock(&hash_lock);

        if (size)
                new = alloc_chunk(size);

        mutex_lock(&entry->group->mark_mutex);
        spin_lock(&entry->lock);
        /*
         * mark_mutex protects mark from getting detached and thus also from
         * mark->connector->inode getting NULL.
         */
        if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
                spin_unlock(&entry->lock);
                mutex_unlock(&entry->group->mark_mutex);
                if (new)
                        fsnotify_put_mark(&new->mark);
                goto out;
        }

        owner = p->owner;

        if (!size) {
                chunk->dead = 1;
                spin_lock(&hash_lock);
                list_del_init(&chunk->trees);
                if (owner->root == chunk)
                        owner->root = NULL;
                list_del_init(&p->list);
                list_del_rcu(&chunk->hash);
                spin_unlock(&hash_lock);
                spin_unlock(&entry->lock);
                mutex_unlock(&entry->group->mark_mutex);
                fsnotify_destroy_mark(entry, audit_tree_group);
                goto out;
        }

        if (!new)
                goto Fallback;

        if (fsnotify_add_mark_locked(&new->mark, entry->connector->inode,
                                     NULL, 1)) {
                fsnotify_put_mark(&new->mark);
                goto Fallback;
        }

        chunk->dead = 1;
        spin_lock(&hash_lock);
        list_replace_init(&chunk->trees, &new->trees);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }

        for (i = j = 0; j <= size; i++, j++) {
                struct audit_tree *s;
                if (&chunk->owners[j] == p) {
                        list_del_init(&p->list);
                        i--;
                        continue;
                }
                s = chunk->owners[j].owner;
                new->owners[i].owner = s;
                new->owners[i].index = chunk->owners[j].index - j + i;
                if (!s) /* result of earlier fallback */
                        continue;
                get_tree(s);
                list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
        }

        list_replace_rcu(&chunk->hash, &new->hash);
        list_for_each_entry(owner, &new->trees, same_root)
                owner->root = new;
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
        mutex_unlock(&entry->group->mark_mutex);
        fsnotify_destroy_mark(entry, audit_tree_group);
        fsnotify_put_mark(&new->mark);  /* drop initial reference */
        goto out;

Fallback:
        /* do the best we can */
        spin_lock(&hash_lock);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }
        list_del_init(&p->list);
        p->owner = NULL;
        put_tree(owner);
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
        mutex_unlock(&entry->group->mark_mutex);
out:
        fsnotify_put_mark(entry);
        spin_lock(&hash_lock);
}

static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark *entry;
        struct audit_chunk *chunk = alloc_chunk(1);
        if (!chunk)
                return -ENOMEM;

        entry = &chunk->mark;
        if (fsnotify_add_mark(entry, inode, NULL, 0)) {
                fsnotify_put_mark(entry);
                return -ENOSPC;
        }

        spin_lock(&entry->lock);
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                spin_unlock(&entry->lock);
                fsnotify_destroy_mark(entry, audit_tree_group);
                fsnotify_put_mark(entry);
                return 0;
        }
        chunk->owners[0].index = (1U << 31);
        chunk->owners[0].owner = tree;
        get_tree(tree);
        list_add(&chunk->owners[0].list, &tree->chunks);
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        insert_hash(chunk);
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
        fsnotify_put_mark(entry);       /* drop initial reference */
        return 0;
}

/* the first tagged inode becomes root of tree */
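/*
 * Chunks are never resized in place: to add @tree to an already tagged
 * inode we allocate a chunk with one more slot, copy the old owners across,
 * splice the new chunk into the hash and the per-tree lists, and only then
 * kill the old mark.
 */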
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark *old_entry, *chunk_entry;
        struct audit_tree *owner;
        struct audit_chunk *chunk, *old;
        struct node *p;
        int n;

        old_entry = fsnotify_find_mark(&inode->i_fsnotify_marks,
                                       audit_tree_group);
        if (!old_entry)
                return create_chunk(inode, tree);

        old = container_of(old_entry, struct audit_chunk, mark);

        /* are we already there? */
        spin_lock(&hash_lock);
        for (n = 0; n < old->count; n++) {
                if (old->owners[n].owner == tree) {
                        spin_unlock(&hash_lock);
                        fsnotify_put_mark(old_entry);
                        return 0;
                }
        }
        spin_unlock(&hash_lock);

        chunk = alloc_chunk(old->count + 1);
        if (!chunk) {
                fsnotify_put_mark(old_entry);
                return -ENOMEM;
        }

        chunk_entry = &chunk->mark;

        mutex_lock(&old_entry->group->mark_mutex);
        spin_lock(&old_entry->lock);
        /*
         * mark_mutex protects mark from getting detached and thus also from
         * mark->connector->inode getting NULL.
         */
        if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
                /* old_entry is being shot, let's just lie */
                spin_unlock(&old_entry->lock);
                mutex_unlock(&old_entry->group->mark_mutex);
                fsnotify_put_mark(old_entry);
                fsnotify_put_mark(&chunk->mark);
                return -ENOENT;
        }

        if (fsnotify_add_mark_locked(chunk_entry,
                             old_entry->connector->inode, NULL, 1)) {
                spin_unlock(&old_entry->lock);
                mutex_unlock(&old_entry->group->mark_mutex);
                fsnotify_put_mark(chunk_entry);
                fsnotify_put_mark(old_entry);
                return -ENOSPC;
        }

        /*
         * Even though we hold old_entry->lock, this is safe since
         * chunk_entry->lock could NEVER have been grabbed before.
         */
        spin_lock(&chunk_entry->lock);
        spin_lock(&hash_lock);

        /* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                spin_unlock(&chunk_entry->lock);
                spin_unlock(&old_entry->lock);
                mutex_unlock(&old_entry->group->mark_mutex);

                fsnotify_destroy_mark(chunk_entry, audit_tree_group);

                fsnotify_put_mark(chunk_entry);
                fsnotify_put_mark(old_entry);
                return 0;
        }
        list_replace_init(&old->trees, &chunk->trees);
        for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
                struct audit_tree *s = old->owners[n].owner;
                p->owner = s;
                p->index = old->owners[n].index;
                if (!s) /* result of fallback in untag */
                        continue;
                get_tree(s);
                list_replace_init(&old->owners[n].list, &p->list);
        }
        p->index = (chunk->count - 1) | (1U<<31);
        p->owner = tree;
        get_tree(tree);
        list_add(&p->list, &tree->chunks);
        list_replace_rcu(&old->hash, &chunk->hash);
        list_for_each_entry(owner, &chunk->trees, same_root)
                owner->root = chunk;
        old->dead = 1;
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        spin_unlock(&hash_lock);
        spin_unlock(&chunk_entry->lock);
        spin_unlock(&old_entry->lock);
        mutex_unlock(&old_entry->group->mark_mutex);
        fsnotify_destroy_mark(old_entry, audit_tree_group);
        fsnotify_put_mark(chunk_entry); /* drop initial reference */
        fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
        return 0;
}

static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
        struct audit_buffer *ab;

        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
        if (unlikely(!ab))
                return;
        audit_log_format(ab, "op=remove_rule");
        audit_log_format(ab, " dir=");
        audit_log_untrustedstring(ab, rule->tree->pathname);
        audit_log_key(ab, rule->filterkey);
        audit_log_format(ab, " list=%d res=1", rule->listnr);
        audit_log_end(ab);
}

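/*
 * Detach every rule that refers to @tree and queue it for freeing, logging
 * a config change for each fully initialized one.  Called with
 * audit_filter_mutex held.
 */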
static void kill_rules(struct audit_tree *tree)
{
        struct audit_krule *rule, *next;
        struct audit_entry *entry;

        list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
                entry = container_of(rule, struct audit_entry, rule);

                list_del_init(&rule->rlist);
                if (rule->tree) {
                        /* not a half-baked one */
                        audit_tree_log_remove_rule(rule);
                        if (entry->rule.exe)
                                audit_remove_mark(entry->rule.exe);
                        rule->tree = NULL;
                        list_del_rcu(&entry->list);
                        list_del(&entry->rule.list);
                        call_rcu(&entry->rcu, audit_free_rule_rcu);
                }
        }
}

/*
 * finish killing struct audit_tree
 */
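/*
 * untag_chunk() drops and re-takes hash_lock, so list_for_each is not safe
 * here; just keep taking the first node off victim->chunks until the list
 * is empty.
 */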
static void prune_one(struct audit_tree *victim)
{
        spin_lock(&hash_lock);
        while (!list_empty(&victim->chunks)) {
                struct node *p;

                p = list_entry(victim->chunks.next, struct node, list);

                untag_chunk(p);
        }
        spin_unlock(&hash_lock);
        put_tree(victim);
}

/* trim the uncommitted chunks from tree */

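/*
 * The first pass moves every node with the "will prune" bit set to the
 * front of tree->chunks; since untag_chunk() drops hash_lock, the second
 * loop then simply pops marked nodes off the head until it meets an
 * unmarked one.
 */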
static void trim_marked(struct audit_tree *tree)
{
        struct list_head *p, *q;
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                return;
        }
        /* reorder */
        for (p = tree->chunks.next; p != &tree->chunks; p = q) {
                struct node *node = list_entry(p, struct node, list);
                q = p->next;
                if (node->index & (1U<<31)) {
                        list_del_init(p);
                        list_add(p, &tree->chunks);
                }
        }

        while (!list_empty(&tree->chunks)) {
                struct node *node;

                node = list_entry(tree->chunks.next, struct node, list);

                /* have we run out of marked? */
                if (!(node->index & (1U<<31)))
                        break;

                untag_chunk(node);
        }
        if (!tree->root && !tree->goner) {
                tree->goner = 1;
                spin_unlock(&hash_lock);
                mutex_lock(&audit_filter_mutex);
                kill_rules(tree);
                list_del_init(&tree->list);
                mutex_unlock(&audit_filter_mutex);
                prune_one(tree);
        } else {
                spin_unlock(&hash_lock);
        }
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *tree;
        tree = rule->tree;
        if (tree) {
                spin_lock(&hash_lock);
                list_del_init(&rule->rlist);
                if (list_empty(&tree->rules) && !tree->goner) {
                        tree->root = NULL;
                        list_del_init(&tree->same_root);
                        tree->goner = 1;
                        list_move(&tree->list, &prune_list);
                        rule->tree = NULL;
                        spin_unlock(&hash_lock);
                        audit_schedule_prune();
                        return 1;
                }
                rule->tree = NULL;
                spin_unlock(&hash_lock);
                return 1;
        }
        return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
        return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
               (unsigned long)arg;
}

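/*
 * A dummy list_head ("cursor") is threaded through tree_list so that
 * audit_filter_mutex can be dropped while we walk it: the cursor keeps our
 * place even if neighbouring trees are added or removed in the meantime.
 */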
void audit_trim_trees(void)
{
        struct list_head cursor;

        mutex_lock(&audit_filter_mutex);
        list_add(&cursor, &tree_list);
        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                struct path path;
                struct vfsmount *root_mnt;
                struct node *node;
                int err;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path);
                if (err)
                        goto skip_it;

                root_mnt = collect_mounts(&path);
                path_put(&path);
                if (IS_ERR(root_mnt))
                        goto skip_it;

                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list) {
                        struct audit_chunk *chunk = find_chunk(node);
                        /* this could be NULL if the watch is dying elsewhere... */
                        node->index |= 1U<<31;
                        if (iterate_mounts(compare_root,
                                           (void *)chunk_to_key(chunk),
                                           root_mnt))
                                node->index &= ~(1U<<31);
                }
                spin_unlock(&hash_lock);
                trim_marked(tree);
                drop_collected_mounts(root_mnt);
skip_it:
                put_tree(tree);
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{

        if (pathname[0] != '/' ||
            rule->listnr != AUDIT_FILTER_EXIT ||
            op != Audit_equal ||
            rule->inode_f || rule->watch || rule->tree)
                return -EINVAL;
        rule->tree = alloc_tree(pathname);
        if (!rule->tree)
                return -ENOMEM;
        return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
        put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
        return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
        for (;;) {
                if (list_empty(&prune_list)) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule();
                }

                audit_ctl_lock();
                mutex_lock(&audit_filter_mutex);

                while (!list_empty(&prune_list)) {
                        struct audit_tree *victim;

                        victim = list_entry(prune_list.next,
                                        struct audit_tree, list);
                        list_del_init(&victim->list);

                        mutex_unlock(&audit_filter_mutex);

                        prune_one(victim);

                        mutex_lock(&audit_filter_mutex);
                }

                mutex_unlock(&audit_filter_mutex);
                audit_ctl_unlock();
        }
        return 0;
}

static int audit_launch_prune(void)
{
        if (prune_thread)
                return 0;
        prune_thread = kthread_run(prune_tree_thread, NULL,
                                "audit_prune_tree");
        if (IS_ERR(prune_thread)) {
                pr_err("cannot start thread audit_prune_tree\n");
                prune_thread = NULL;
                return -ENOMEM;
        }
        return 0;
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *seed = rule->tree, *tree;
        struct path path;
        struct vfsmount *mnt;
        int err;

        rule->tree = NULL;
        list_for_each_entry(tree, &tree_list, list) {
                if (!strcmp(seed->pathname, tree->pathname)) {
                        put_tree(seed);
                        rule->tree = tree;
                        list_add(&rule->rlist, &tree->rules);
                        return 0;
                }
        }
        tree = seed;
        list_add(&tree->list, &tree_list);
        list_add(&rule->rlist, &tree->rules);
        /* do not set rule->tree yet */
        mutex_unlock(&audit_filter_mutex);

        if (unlikely(!prune_thread)) {
                err = audit_launch_prune();
                if (err)
                        goto Err;
        }

        err = kern_path(tree->pathname, 0, &path);
        if (err)
                goto Err;
        mnt = collect_mounts(&path);
        path_put(&path);
        if (IS_ERR(mnt)) {
                err = PTR_ERR(mnt);
                goto Err;
        }

        get_tree(tree);
        err = iterate_mounts(tag_mount, tree, mnt);
        drop_collected_mounts(mnt);

        if (!err) {
                struct node *node;
                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list)
                        node->index &= ~(1U<<31);
                spin_unlock(&hash_lock);
        } else {
                trim_marked(tree);
                goto Err;
        }

        mutex_lock(&audit_filter_mutex);
        if (list_empty(&rule->rlist)) {
                put_tree(tree);
                return -ENOENT;
        }
        rule->tree = tree;
        put_tree(tree);

        return 0;
Err:
        mutex_lock(&audit_filter_mutex);
        list_del_init(&tree->list);
        list_del_init(&tree->rules);
        put_tree(tree);
        return err;
}

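/*
 * Tag the mounts collected at @new into every tree whose root contains
 * @old.  Uses the same cursor trick as audit_trim_trees(); in addition,
 * trees processed by the first loop are moved in front of "barrier" so
 * that the second loop can revisit exactly those trees and either commit
 * their fresh taggings or trim them if tagging failed.
 */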
int audit_tag_tree(char *old, char *new)
{
        struct list_head cursor, barrier;
        int failed = 0;
        struct path path1, path2;
        struct vfsmount *tagged;
        int err;

        err = kern_path(new, 0, &path2);
        if (err)
                return err;
        tagged = collect_mounts(&path2);
        path_put(&path2);
        if (IS_ERR(tagged))
                return PTR_ERR(tagged);

        err = kern_path(old, 0, &path1);
        if (err) {
                drop_collected_mounts(tagged);
                return err;
        }

        mutex_lock(&audit_filter_mutex);
        list_add(&barrier, &tree_list);
        list_add(&cursor, &barrier);

        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                int good_one = 0;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path2);
                if (!err) {
                        good_one = path_is_under(&path1, &path2);
                        path_put(&path2);
                }

                if (!good_one) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        continue;
                }

                failed = iterate_mounts(tag_mount, tree, tagged);
                if (failed) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        break;
                }

                mutex_lock(&audit_filter_mutex);
                spin_lock(&hash_lock);
                if (!tree->goner) {
                        list_del(&tree->list);
                        list_add(&tree->list, &tree_list);
                }
                spin_unlock(&hash_lock);
                put_tree(tree);
        }

        while (barrier.prev != &tree_list) {
                struct audit_tree *tree;

                tree = container_of(barrier.prev, struct audit_tree, list);
                get_tree(tree);
                list_del(&tree->list);
                list_add(&tree->list, &barrier);
                mutex_unlock(&audit_filter_mutex);

                if (!failed) {
                        struct node *node;
                        spin_lock(&hash_lock);
                        list_for_each_entry(node, &tree->chunks, list)
                                node->index &= ~(1U<<31);
                        spin_unlock(&hash_lock);
                } else {
                        trim_marked(tree);
                }

                put_tree(tree);
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&barrier);
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
        path_put(&path1);
        drop_collected_mounts(tagged);
        return failed;
}


static void audit_schedule_prune(void)
{
        wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall.  Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
        audit_ctl_lock();
        mutex_lock(&audit_filter_mutex);

        while (!list_empty(list)) {
                struct audit_tree *victim;

                victim = list_entry(list->next, struct audit_tree, list);
                kill_rules(victim);
                list_del_init(&victim->list);

                mutex_unlock(&audit_filter_mutex);

                prune_one(victim);

                mutex_lock(&audit_filter_mutex);
        }

        mutex_unlock(&audit_filter_mutex);
        audit_ctl_unlock();
}

/*
 *  Here comes the stuff asynchronous to auditctl operations
 */

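/*
 * The inode backing this chunk is being evicted.  Every tree rooted here is
 * marked goner and queued for pruning - on the global prune_list (with the
 * prune thread woken), or, when audit_killed_trees() provides a postponed
 * list, on that list to be handled by audit_kill_trees() at the end of the
 * syscall (see the comment above audit_kill_trees()).  Other owners merely
 * lose their node.
 */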
static void evict_chunk(struct audit_chunk *chunk)
{
        struct audit_tree *owner;
        struct list_head *postponed = audit_killed_trees();
        int need_prune = 0;
        int n;

        if (chunk->dead)
                return;

        chunk->dead = 1;
        mutex_lock(&audit_filter_mutex);
        spin_lock(&hash_lock);
        while (!list_empty(&chunk->trees)) {
                owner = list_entry(chunk->trees.next,
                                   struct audit_tree, same_root);
                owner->goner = 1;
                owner->root = NULL;
                list_del_init(&owner->same_root);
                spin_unlock(&hash_lock);
                if (!postponed) {
                        kill_rules(owner);
                        list_move(&owner->list, &prune_list);
                        need_prune = 1;
                } else {
                        list_move(&owner->list, postponed);
                }
                spin_lock(&hash_lock);
        }
        list_del_rcu(&chunk->hash);
        for (n = 0; n < chunk->count; n++)
                list_del_init(&chunk->owners[n].list);
        spin_unlock(&hash_lock);
        mutex_unlock(&audit_filter_mutex);
        if (need_prune)
                audit_schedule_prune();
}

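/*
 * The fsnotify group exists chiefly for its mark lifetime callbacks
 * (freeing_mark/free_mark); the marks only listen for FS_IN_IGNORED, so
 * handle_event has nothing to do.
 */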
static int audit_tree_handle_event(struct fsnotify_group *group,
                                   struct inode *to_tell,
                                   struct fsnotify_mark *inode_mark,
                                   struct fsnotify_mark *vfsmount_mark,
                                   u32 mask, const void *data, int data_type,
                                   const unsigned char *file_name, u32 cookie,
                                   struct fsnotify_iter_info *iter_info)
{
        return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
        struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

        evict_chunk(chunk);

        /*
         * We are guaranteed to have at least one reference to the mark from
         * either the inode or the caller of fsnotify_destroy_mark().
         */
        BUG_ON(refcount_read(&entry->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
        .handle_event = audit_tree_handle_event,
        .freeing_mark = audit_tree_freeing_mark,
        .free_mark = audit_tree_destroy_watch,
};

static int __init audit_tree_init(void)
{
        int i;

        audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
        if (IS_ERR(audit_tree_group))
                audit_panic("cannot initialize fsnotify group for rectree watches");

        for (i = 0; i < HASH_SIZE; i++)
                INIT_LIST_HEAD(&chunk_hash_heads[i]);

        return 0;
}
__initcall(audit_tree_init);