linux/kernel/audit_tree.c
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
        atomic_t count;
        int goner;
        struct audit_chunk *root;
        struct list_head chunks;
        struct list_head rules;
        struct list_head list;
        struct list_head same_root;
        struct rcu_head head;
        char pathname[];
};

struct audit_chunk {
        struct list_head hash;
        struct fsnotify_mark mark;
        struct list_head trees;         /* with root here */
        int dead;
        int count;
        atomic_long_t refs;
        struct rcu_head head;
        struct node {
                struct list_head list;
                struct audit_tree *owner;
                unsigned index;         /* index; upper bit indicates 'will prune' */
        } owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list                      hash_lock
 * tree.rules anchors rule.rlist                                audit_filter_mutex
 * chunk.trees anchors tree.same_root                           hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.                                             RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows us to get from node.list to the containing chunk.
 * The MSB of that field is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference.  Some.
 */

static struct fsnotify_group *audit_tree_group;

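/* allocate a tree with refcount 1 and the watched path copied into pathname[] */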
static struct audit_tree *alloc_tree(const char *s)
{
        struct audit_tree *tree;

        tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
        if (tree) {
                atomic_set(&tree->count, 1);
                tree->goner = 0;
                INIT_LIST_HEAD(&tree->chunks);
                INIT_LIST_HEAD(&tree->rules);
                INIT_LIST_HEAD(&tree->list);
                INIT_LIST_HEAD(&tree->same_root);
                tree->root = NULL;
                strcpy(tree->pathname, s);
        }
        return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
        atomic_inc(&tree->count);
}

static void __put_tree(struct rcu_head *rcu)
{
        struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
        kfree(tree);
}

static inline void put_tree(struct audit_tree *tree)
{
        if (atomic_dec_and_test(&tree->count))
                call_rcu(&tree->head, __put_tree);
}

/* accessor, to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
        return tree->pathname;
}

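/* drop the tree references held in the owner slots, then free the chunk */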
static void free_chunk(struct audit_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->count; i++) {
                if (chunk->owners[i].owner)
                        put_tree(chunk->owners[i].owner);
        }
        kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
        if (atomic_long_dec_and_test(&chunk->refs))
                free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
        struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
        audit_put_chunk(chunk);
}

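/* mark destruction callback: defer dropping the chunk reference past an RCU grace period */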
static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
        struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
        call_rcu(&chunk->head, __put_chunk);
}

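/* allocate a chunk with count owner slots; it starts out with a single reference */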
static struct audit_chunk *alloc_chunk(int count)
{
        struct audit_chunk *chunk;
        size_t size;
        int i;

        size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
        chunk = kzalloc(size, GFP_KERNEL);
        if (!chunk)
                return NULL;

        INIT_LIST_HEAD(&chunk->hash);
        INIT_LIST_HEAD(&chunk->trees);
        chunk->count = count;
        atomic_long_set(&chunk->refs, 1);
        for (i = 0; i < count; i++) {
                INIT_LIST_HEAD(&chunk->owners[i].list);
                chunk->owners[i].index = i;
        }
        fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
        return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

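/* pick the hash bucket for an inode, keyed on its address */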
static inline struct list_head *chunk_hash(const struct inode *inode)
{
        unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
        return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock are held by the caller */
static void insert_hash(struct audit_chunk *chunk)
{
        struct fsnotify_mark *entry = &chunk->mark;
        struct list_head *list;

        if (!entry->i.inode)
                return;
        list = chunk_hash(entry->i.inode);
        list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
        struct list_head *list = chunk_hash(inode);
        struct audit_chunk *p;

        list_for_each_entry_rcu(p, list, hash) {
                /* mark.inode may have gone NULL, but who cares? */
                if (p->mark.i.inode == inode) {
                        atomic_long_inc(&p->refs);
                        return p;
                }
        }
        return NULL;
}

int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
        int n;
        for (n = 0; n < chunk->count; n++)
                if (chunk->owners[n].owner == tree)
                        return 1;
        return 0;
}

/* tagging and untagging inodes with trees */

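/* get from an owner slot back to the containing chunk; mask off the 'will prune' bit */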
static struct audit_chunk *find_chunk(struct node *p)
{
        int index = p->index & ~(1U<<31);
        p -= index;
        return container_of(p, struct audit_chunk, owners[0]);
}

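/*
 * Remove p's owner from its chunk.  Called and returns with hash_lock held;
 * the lock is dropped while a one-slot-smaller replacement chunk is set up
 * and swapped in under the fsnotify mark.
 */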
static void untag_chunk(struct node *p)
{
        struct audit_chunk *chunk = find_chunk(p);
        struct fsnotify_mark *entry = &chunk->mark;
        struct audit_chunk *new = NULL;
        struct audit_tree *owner;
        int size = chunk->count - 1;
        int i, j;

        fsnotify_get_mark(entry);

        spin_unlock(&hash_lock);

        if (size)
                new = alloc_chunk(size);

        spin_lock(&entry->lock);
        if (chunk->dead || !entry->i.inode) {
                spin_unlock(&entry->lock);
                if (new)
                        free_chunk(new);
                goto out;
        }

        owner = p->owner;

        if (!size) {
                chunk->dead = 1;
                spin_lock(&hash_lock);
                list_del_init(&chunk->trees);
                if (owner->root == chunk)
                        owner->root = NULL;
                list_del_init(&p->list);
                list_del_rcu(&chunk->hash);
                spin_unlock(&hash_lock);
                spin_unlock(&entry->lock);
                fsnotify_destroy_mark(entry);
                fsnotify_put_mark(entry);
                goto out;
        }

        if (!new)
                goto Fallback;

        fsnotify_duplicate_mark(&new->mark, entry);
        if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
                free_chunk(new);
                goto Fallback;
        }

        chunk->dead = 1;
        spin_lock(&hash_lock);
        list_replace_init(&chunk->trees, &new->trees);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }

        for (i = j = 0; j <= size; i++, j++) {
                struct audit_tree *s;
                if (&chunk->owners[j] == p) {
                        list_del_init(&p->list);
                        i--;
                        continue;
                }
                s = chunk->owners[j].owner;
                new->owners[i].owner = s;
                new->owners[i].index = chunk->owners[j].index - j + i;
                if (!s) /* result of earlier fallback */
                        continue;
                get_tree(s);
                list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
        }

        list_replace_rcu(&chunk->hash, &new->hash);
        list_for_each_entry(owner, &new->trees, same_root)
                owner->root = new;
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
        fsnotify_destroy_mark(entry);
        fsnotify_put_mark(entry);
        goto out;

Fallback:
        /* do the best we can */
        spin_lock(&hash_lock);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }
        list_del_init(&p->list);
        p->owner = NULL;
        put_tree(owner);
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
out:
        fsnotify_put_mark(entry);
        spin_lock(&hash_lock);
}

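/* attach a fresh single-owner chunk to an inode that has no audit mark yet */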
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark *entry;
        struct audit_chunk *chunk = alloc_chunk(1);
        if (!chunk)
                return -ENOMEM;

        entry = &chunk->mark;
        if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
                free_chunk(chunk);
                return -ENOSPC;
        }

        spin_lock(&entry->lock);
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                spin_unlock(&entry->lock);
                fsnotify_destroy_mark(entry);
                fsnotify_put_mark(entry);
                return 0;
        }
        chunk->owners[0].index = (1U << 31);
        chunk->owners[0].owner = tree;
        get_tree(tree);
        list_add(&chunk->owners[0].list, &tree->chunks);
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        insert_hash(chunk);
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
        return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark *old_entry, *chunk_entry;
        struct audit_tree *owner;
        struct audit_chunk *chunk, *old;
        struct node *p;
        int n;

        old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
        if (!old_entry)
                return create_chunk(inode, tree);

        old = container_of(old_entry, struct audit_chunk, mark);

        /* are we already there? */
        spin_lock(&hash_lock);
        for (n = 0; n < old->count; n++) {
                if (old->owners[n].owner == tree) {
                        spin_unlock(&hash_lock);
                        fsnotify_put_mark(old_entry);
                        return 0;
                }
        }
        spin_unlock(&hash_lock);

        chunk = alloc_chunk(old->count + 1);
        if (!chunk) {
                fsnotify_put_mark(old_entry);
                return -ENOMEM;
        }

        chunk_entry = &chunk->mark;

        spin_lock(&old_entry->lock);
        if (!old_entry->i.inode) {
                /* old_entry is being shot, let's just lie */
                spin_unlock(&old_entry->lock);
                fsnotify_put_mark(old_entry);
                free_chunk(chunk);
                return -ENOENT;
        }

        fsnotify_duplicate_mark(chunk_entry, old_entry);
        if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
                spin_unlock(&old_entry->lock);
                free_chunk(chunk);
                fsnotify_put_mark(old_entry);
                return -ENOSPC;
        }

        /*
         * Even though we hold old_entry->lock, this is safe since
         * chunk_entry->lock could NEVER have been grabbed before.
         */
        spin_lock(&chunk_entry->lock);
        spin_lock(&hash_lock);

        /* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                spin_unlock(&chunk_entry->lock);
                spin_unlock(&old_entry->lock);

                fsnotify_destroy_mark(chunk_entry);

                fsnotify_put_mark(chunk_entry);
                fsnotify_put_mark(old_entry);
                return 0;
        }
        list_replace_init(&old->trees, &chunk->trees);
        for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
                struct audit_tree *s = old->owners[n].owner;
                p->owner = s;
                p->index = old->owners[n].index;
                if (!s) /* result of fallback in untag */
                        continue;
                get_tree(s);
                list_replace_init(&old->owners[n].list, &p->list);
        }
        p->index = (chunk->count - 1) | (1U<<31);
        p->owner = tree;
        get_tree(tree);
        list_add(&p->list, &tree->chunks);
        list_replace_rcu(&old->hash, &chunk->hash);
        list_for_each_entry(owner, &chunk->trees, same_root)
                owner->root = chunk;
        old->dead = 1;
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        spin_unlock(&hash_lock);
        spin_unlock(&chunk_entry->lock);
        spin_unlock(&old_entry->lock);
        fsnotify_destroy_mark(old_entry);
        fsnotify_put_mark(old_entry); /* pair to fsnotify_find_inode_mark() */
        fsnotify_put_mark(old_entry); /* and kill it */
        return 0;
}

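/*
 * Detach and delete every rule attached to the tree, emitting a
 * configuration-change record for each one removed.
 */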
static void kill_rules(struct audit_tree *tree)
{
        struct audit_krule *rule, *next;
        struct audit_entry *entry;
        struct audit_buffer *ab;

        list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
                entry = container_of(rule, struct audit_entry, rule);

                list_del_init(&rule->rlist);
                if (rule->tree) {
                        /* not a half-baked one */
                        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
                        audit_log_format(ab, "op=");
                        audit_log_string(ab, "remove rule");
                        audit_log_format(ab, " dir=");
                        audit_log_untrustedstring(ab, rule->tree->pathname);
                        audit_log_key(ab, rule->filterkey);
                        audit_log_format(ab, " list=%d res=1", rule->listnr);
                        audit_log_end(ab);
                        rule->tree = NULL;
                        list_del_rcu(&entry->list);
                        list_del(&entry->rule.list);
                        call_rcu(&entry->rcu, audit_free_rule_rcu);
                }
        }
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
        spin_lock(&hash_lock);
        while (!list_empty(&victim->chunks)) {
                struct node *p;

                p = list_entry(victim->chunks.next, struct node, list);

                untag_chunk(p);
        }
        spin_unlock(&hash_lock);
        put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
        struct list_head *p, *q;
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                return;
        }
        /* reorder */
        for (p = tree->chunks.next; p != &tree->chunks; p = q) {
                struct node *node = list_entry(p, struct node, list);
                q = p->next;
                if (node->index & (1U<<31)) {
                        list_del_init(p);
                        list_add(p, &tree->chunks);
                }
        }

        while (!list_empty(&tree->chunks)) {
                struct node *node;

                node = list_entry(tree->chunks.next, struct node, list);

                /* have we run out of marked? */
                if (!(node->index & (1U<<31)))
                        break;

                untag_chunk(node);
        }
        if (!tree->root && !tree->goner) {
                tree->goner = 1;
                spin_unlock(&hash_lock);
                mutex_lock(&audit_filter_mutex);
                kill_rules(tree);
                list_del_init(&tree->list);
                mutex_unlock(&audit_filter_mutex);
                prune_one(tree);
        } else {
                spin_unlock(&hash_lock);
        }
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex held */
int audit_remove_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *tree;
        tree = rule->tree;
        if (tree) {
                spin_lock(&hash_lock);
                list_del_init(&rule->rlist);
                if (list_empty(&tree->rules) && !tree->goner) {
                        tree->root = NULL;
                        list_del_init(&tree->same_root);
                        tree->goner = 1;
                        list_move(&tree->list, &prune_list);
                        rule->tree = NULL;
                        spin_unlock(&hash_lock);
                        audit_schedule_prune();
                        return 1;
                }
                rule->tree = NULL;
                spin_unlock(&hash_lock);
                return 1;
        }
        return 0;
}

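/* iterate_mounts() callback: is this mount rooted at the inode we are after? */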
static int compare_root(struct vfsmount *mnt, void *arg)
{
        return mnt->mnt_root->d_inode == arg;
}

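/*
 * Re-resolve the path of every tree and untag chunks whose inodes are no
 * longer among the roots of the mounts collected there.
 */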
void audit_trim_trees(void)
{
        struct list_head cursor;

        mutex_lock(&audit_filter_mutex);
        list_add(&cursor, &tree_list);
        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                struct path path;
                struct vfsmount *root_mnt;
                struct node *node;
                int err;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path);
                if (err)
                        goto skip_it;

                root_mnt = collect_mounts(&path);
                path_put(&path);
                if (!root_mnt)
                        goto skip_it;

                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list) {
                        struct audit_chunk *chunk = find_chunk(node);
                        /* this could be NULL if the watch is dying elsewhere... */
                        struct inode *inode = chunk->mark.i.inode;
                        node->index |= 1U<<31;
                        if (iterate_mounts(compare_root, inode, root_mnt))
                                node->index &= ~(1U<<31);
                }
                spin_unlock(&hash_lock);
                trim_marked(tree);
                put_tree(tree);
                drop_collected_mounts(root_mnt);
skip_it:
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
}

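/* validate the rule and attach a freshly allocated audit_tree to it */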
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
        if (pathname[0] != '/' ||
            rule->listnr != AUDIT_FILTER_EXIT ||
            op != Audit_equal ||
            rule->inode_f || rule->watch || rule->tree)
                return -EINVAL;
        rule->tree = alloc_tree(pathname);
        if (!rule->tree)
                return -ENOMEM;
        return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
        put_tree(tree);
}

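/* iterate_mounts() callback: tag the root inode of each mount with the tree */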
static int tag_mount(struct vfsmount *mnt, void *arg)
{
        return tag_chunk(mnt->mnt_root->d_inode, arg);
}

/* called with audit_filter_mutex held */
int audit_add_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *seed = rule->tree, *tree;
        struct path path;
        struct vfsmount *mnt;
        int err;

        list_for_each_entry(tree, &tree_list, list) {
                if (!strcmp(seed->pathname, tree->pathname)) {
                        put_tree(seed);
                        rule->tree = tree;
                        list_add(&rule->rlist, &tree->rules);
                        return 0;
                }
        }
        tree = seed;
        list_add(&tree->list, &tree_list);
        list_add(&rule->rlist, &tree->rules);
        /* do not set rule->tree yet */
        mutex_unlock(&audit_filter_mutex);

        err = kern_path(tree->pathname, 0, &path);
        if (err)
                goto Err;
        mnt = collect_mounts(&path);
        path_put(&path);
        if (!mnt) {
                err = -ENOMEM;
                goto Err;
        }

        get_tree(tree);
        err = iterate_mounts(tag_mount, tree, mnt);
        drop_collected_mounts(mnt);

        if (!err) {
                struct node *node;
                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list)
                        node->index &= ~(1U<<31);
                spin_unlock(&hash_lock);
        } else {
                trim_marked(tree);
                goto Err;
        }

        mutex_lock(&audit_filter_mutex);
        if (list_empty(&rule->rlist)) {
                put_tree(tree);
                return -ENOENT;
        }
        rule->tree = tree;
        put_tree(tree);

        return 0;
Err:
        mutex_lock(&audit_filter_mutex);
        list_del_init(&tree->list);
        list_del_init(&tree->rules);
        put_tree(tree);
        return err;
}

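/*
 * For every tree whose path lies under "old", tag all mounts collected at
 * "new"; a tree that fails to tag completely is trimmed back to its
 * committed chunks.
 */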
int audit_tag_tree(char *old, char *new)
{
        struct list_head cursor, barrier;
        int failed = 0;
        struct path path1, path2;
        struct vfsmount *tagged;
        int err;

        err = kern_path(new, 0, &path2);
        if (err)
                return err;
        tagged = collect_mounts(&path2);
        path_put(&path2);
        if (!tagged)
                return -ENOMEM;

        err = kern_path(old, 0, &path1);
        if (err) {
                drop_collected_mounts(tagged);
                return err;
        }

        mutex_lock(&audit_filter_mutex);
        list_add(&barrier, &tree_list);
        list_add(&cursor, &barrier);

        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                int good_one = 0;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path2);
                if (!err) {
                        good_one = path_is_under(&path1, &path2);
                        path_put(&path2);
                }

                if (!good_one) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        continue;
                }

                failed = iterate_mounts(tag_mount, tree, tagged);
                if (failed) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        break;
                }

                mutex_lock(&audit_filter_mutex);
                spin_lock(&hash_lock);
                if (!tree->goner) {
                        list_del(&tree->list);
                        list_add(&tree->list, &tree_list);
                }
                spin_unlock(&hash_lock);
                put_tree(tree);
        }

        while (barrier.prev != &tree_list) {
                struct audit_tree *tree;

                tree = container_of(barrier.prev, struct audit_tree, list);
                get_tree(tree);
                list_del(&tree->list);
                list_add(&tree->list, &barrier);
                mutex_unlock(&audit_filter_mutex);

                if (!failed) {
                        struct node *node;
                        spin_lock(&hash_lock);
                        list_for_each_entry(node, &tree->chunks, list)
                                node->index &= ~(1U<<31);
                        spin_unlock(&hash_lock);
                } else {
                        trim_marked(tree);
                }

                put_tree(tree);
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&barrier);
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
        path_put(&path1);
        drop_collected_mounts(tagged);
        return failed;
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
        mutex_lock(&audit_cmd_mutex);
        mutex_lock(&audit_filter_mutex);

        while (!list_empty(&prune_list)) {
                struct audit_tree *victim;

                victim = list_entry(prune_list.next, struct audit_tree, list);
                list_del_init(&victim->list);

                mutex_unlock(&audit_filter_mutex);

                prune_one(victim);

                mutex_lock(&audit_filter_mutex);
        }

        mutex_unlock(&audit_filter_mutex);
        mutex_unlock(&audit_cmd_mutex);
        return 0;
}

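/* kick off a kernel thread to empty prune_list */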
static void audit_schedule_prune(void)
{
        kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall.  Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
        mutex_lock(&audit_cmd_mutex);
        mutex_lock(&audit_filter_mutex);

        while (!list_empty(list)) {
                struct audit_tree *victim;

                victim = list_entry(list->next, struct audit_tree, list);
                kill_rules(victim);
                list_del_init(&victim->list);

                mutex_unlock(&audit_filter_mutex);

                prune_one(victim);

                mutex_lock(&audit_filter_mutex);
        }

        mutex_unlock(&audit_filter_mutex);
        mutex_unlock(&audit_cmd_mutex);
}

/*
 *  Here comes the stuff asynchronous to auditctl operations
 */

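/*
 * The watched inode is going away: every tree rooted in this chunk is
 * killed, either by queueing it on prune_list for the prune thread or,
 * when audit_killed_trees() provides a postponed list, by deferring the
 * work to the end of the current syscall.
 */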
static void evict_chunk(struct audit_chunk *chunk)
{
        struct audit_tree *owner;
        struct list_head *postponed = audit_killed_trees();
        int need_prune = 0;
        int n;

        if (chunk->dead)
                return;

        chunk->dead = 1;
        mutex_lock(&audit_filter_mutex);
        spin_lock(&hash_lock);
        while (!list_empty(&chunk->trees)) {
                owner = list_entry(chunk->trees.next,
                                   struct audit_tree, same_root);
                owner->goner = 1;
                owner->root = NULL;
                list_del_init(&owner->same_root);
                spin_unlock(&hash_lock);
                if (!postponed) {
                        kill_rules(owner);
                        list_move(&owner->list, &prune_list);
                        need_prune = 1;
                } else {
                        list_move(&owner->list, postponed);
                }
                spin_lock(&hash_lock);
        }
        list_del_rcu(&chunk->hash);
        for (n = 0; n < chunk->count; n++)
                list_del_init(&chunk->owners[n].list);
        spin_unlock(&hash_lock);
        if (need_prune)
                audit_schedule_prune();
        mutex_unlock(&audit_filter_mutex);
}

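/* never called: should_send_event() below always returns false */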
static int audit_tree_handle_event(struct fsnotify_group *group,
                                   struct fsnotify_mark *inode_mark,
                                   struct fsnotify_mark *vfsmount_mark,
                                   struct fsnotify_event *event)
{
        BUG();
        return -EOPNOTSUPP;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
        struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

        evict_chunk(chunk);
        fsnotify_put_mark(entry);
}

static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
                                  struct fsnotify_mark *inode_mark,
                                  struct fsnotify_mark *vfsmount_mark,
                                  __u32 mask, void *data, int data_type)
{
        return false;
}

static const struct fsnotify_ops audit_tree_ops = {
        .handle_event = audit_tree_handle_event,
        .should_send_event = audit_tree_send_event,
        .free_group_priv = NULL,
        .free_event_priv = NULL,
        .freeing_mark = audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
        int i;

        audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
        if (IS_ERR(audit_tree_group))
                audit_panic("cannot initialize fsnotify group for rectree watches");

        for (i = 0; i < HASH_SIZE; i++)
                INIT_LIST_HEAD(&chunk_hash_heads[i]);

        return 0;
}
__initcall(audit_tree_init);