linux/security/selinux/ss/sidtab.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the SID table type.
 *
 * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
 * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
 *
 * Copyright (C) 2018 Red Hat, Inc.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"

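/*
 * Cached string representation of a context, kept alongside its sidtab
 * entry. Nodes are freed via RCU (rcu_member) and strung on the table's
 * LRU list (lru_member); parent points back to the owning entry so the
 * entry's cache pointer can be cleared when the node is evicted.
 */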
struct sidtab_str_cache {
        struct rcu_head rcu_member;
        struct list_head lru_member;
        struct sidtab_entry *parent;
        u32 len;
        char str[];
};

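/*
 * SID 0 is invalid and SIDs 1..SECINITSID_NUM are reserved for the
 * initial SIDs, so dynamically allocated entries start at
 * SECINITSID_NUM + 1: tree index 0 maps to SID SECINITSID_NUM + 1.
 */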
#define index_to_sid(index) ((index) + SECINITSID_NUM + 1)
#define sid_to_index(sid) ((sid) - (SECINITSID_NUM + 1))

int sidtab_init(struct sidtab *s)
{
        u32 i;

        memset(s->roots, 0, sizeof(s->roots));

        for (i = 0; i < SECINITSID_NUM; i++)
                s->isids[i].set = 0;

        s->frozen = false;
        s->count = 0;
        s->convert = NULL;
        hash_init(s->context_to_sid);

        spin_lock_init(&s->lock);

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
        s->cache_free_slots = CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE;
        INIT_LIST_HEAD(&s->cache_lru_list);
        spin_lock_init(&s->cache_lock);
#endif

        return 0;
}

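/*
 * Look up the SID of @context in the reverse (context -> SID) hashtable.
 * Runs lock-free under the RCU read lock; @hash must be the precomputed
 * context_compute_hash() value. Returns 0 (never a valid SID) when the
 * context is not present.
 */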
static u32 context_to_sid(struct sidtab *s, struct context *context, u32 hash)
{
        struct sidtab_entry *entry;
        u32 sid = 0;

        rcu_read_lock();
        hash_for_each_possible_rcu(s->context_to_sid, entry, list, hash) {
                if (entry->hash != hash)
                        continue;
                if (context_cmp(&entry->context, context)) {
                        sid = entry->sid;
                        break;
                }
        }
        rcu_read_unlock();
        return sid;
}

int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
{
        struct sidtab_isid_entry *isid;
        u32 hash;
        int rc;

        if (sid == 0 || sid > SECINITSID_NUM)
                return -EINVAL;

        isid = &s->isids[sid - 1];

        rc = context_cpy(&isid->entry.context, context);
        if (rc)
                return rc;

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
        isid->entry.cache = NULL;
#endif
        isid->set = 1;

        hash = context_compute_hash(context);

        /*
         * Multiple initial sids may map to the same context. Check that this
         * context is not already represented in the context_to_sid hashtable
         * to avoid duplicate entries and long linked lists upon hash
         * collision.
         */
        if (!context_to_sid(s, context, hash)) {
                isid->entry.sid = sid;
                isid->entry.hash = hash;
                hash_add(s->context_to_sid, &isid->entry.list, hash);
        }

        return 0;
}

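/*
 * Write hashtable occupancy statistics into @page (consumed by a
 * selinuxfs stats file): total entries, buckets in use, and the longest
 * collision chain. The walk runs under the RCU read lock, so the
 * numbers are only a best-effort snapshot, which is fine for debugging.
 */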
int sidtab_hash_stats(struct sidtab *sidtab, char *page)
{
        int i;
        int chain_len = 0;
        int slots_used = 0;
        int entries = 0;
        int max_chain_len = 0;
        int cur_bucket = 0;
        struct sidtab_entry *entry;

        rcu_read_lock();
        hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
                entries++;
                if (i == cur_bucket) {
                        chain_len++;
                        if (chain_len == 1)
                                slots_used++;
                } else {
                        /* first entry of a new bucket - count it too */
                        cur_bucket = i;
                        if (chain_len > max_chain_len)
                                max_chain_len = chain_len;
                        chain_len = 1;
                        slots_used++;
                }
        }
        rcu_read_unlock();

        if (chain_len > max_chain_len)
                max_chain_len = chain_len;

        return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
                         "longest chain: %d\n", entries,
                         slots_used, SIDTAB_HASH_BUCKETS, max_chain_len);
}

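/*
 * The SID table proper is a lazily grown radix tree: leaves hold
 * SIDTAB_LEAF_ENTRIES entries and inner nodes hold SIDTAB_INNER_ENTRIES
 * child pointers (both derived from SIDTAB_NODE_ALLOC_SIZE in sidtab.h),
 * so a level-L subtree covers SIDTAB_LEAF_ENTRIES << (L *
 * SIDTAB_INNER_SHIFT) entries. s->roots[L] is the root of the level-L
 * subtree; each lower root is linked in as entry 0 of the next level's
 * root, so growing by a level never moves existing nodes.
 */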
static u32 sidtab_level_from_count(u32 count)
{
        u32 capacity = SIDTAB_LEAF_ENTRIES;
        u32 level = 0;

        while (count > capacity) {
                capacity <<= SIDTAB_INNER_SHIFT;
                ++level;
        }
        return level;
}

static int sidtab_alloc_roots(struct sidtab *s, u32 level)
{
        u32 l;

        if (!s->roots[0].ptr_leaf) {
                s->roots[0].ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
                                               GFP_ATOMIC);
                if (!s->roots[0].ptr_leaf)
                        return -ENOMEM;
        }
        for (l = 1; l <= level; ++l)
                if (!s->roots[l].ptr_inner) {
                        s->roots[l].ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
                                                        GFP_ATOMIC);
                        if (!s->roots[l].ptr_inner)
                                return -ENOMEM;
                        s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
                }
        return 0;
}

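/*
 * Walk the tree to the entry at @index, descending one
 * SIDTAB_INNER_SHIFT-wide radix step per level. With @alloc set,
 * missing nodes (including higher-level roots) are allocated on the way
 * down with GFP_ATOMIC, since this can be called with s->lock held and
 * interrupts disabled. Returns NULL on allocation failure or when the
 * entry does not exist.
 */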
static struct sidtab_entry *sidtab_do_lookup(struct sidtab *s, u32 index,
                                             int alloc)
{
        union sidtab_entry_inner *entry;
        u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;

        /* find the level of the subtree we need */
        level = sidtab_level_from_count(index + 1);
        capacity_shift = level * SIDTAB_INNER_SHIFT;

        /* allocate roots if needed */
        if (alloc && sidtab_alloc_roots(s, level) != 0)
                return NULL;

        /* lookup inside the subtree */
        entry = &s->roots[level];
        while (level != 0) {
                capacity_shift -= SIDTAB_INNER_SHIFT;
                --level;

                entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
                leaf_index &= ((u32)1 << capacity_shift) - 1;

                if (!entry->ptr_inner) {
                        if (alloc)
                                entry->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
                                                           GFP_ATOMIC);
                        if (!entry->ptr_inner)
                                return NULL;
                }
        }
        if (!entry->ptr_leaf) {
                if (alloc)
                        entry->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
                                                  GFP_ATOMIC);
                if (!entry->ptr_leaf)
                        return NULL;
        }
        return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
}

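/*
 * The acquire here pairs with the smp_store_release() of s->count in
 * sidtab_context_to_sid(): once a reader observes index < count, the
 * entry's contents are guaranteed to be visible, so the lookup itself
 * needs no lock.
 */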
static struct sidtab_entry *sidtab_lookup(struct sidtab *s, u32 index)
{
        /* read entries only after reading count */
        u32 count = smp_load_acquire(&s->count);

        if (index >= count)
                return NULL;

        return sidtab_do_lookup(s, index, 0);
}

static struct sidtab_entry *sidtab_lookup_initial(struct sidtab *s, u32 sid)
{
        return s->isids[sid - 1].set ? &s->isids[sid - 1].entry : NULL;
}

static struct sidtab_entry *sidtab_search_core(struct sidtab *s, u32 sid,
                                               int force)
{
        if (sid != 0) {
                struct sidtab_entry *entry;

                if (sid > SECINITSID_NUM)
                        entry = sidtab_lookup(s, sid_to_index(sid));
                else
                        entry = sidtab_lookup_initial(s, sid);
                if (entry && (!entry->context.len || force))
                        return entry;
        }

        return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
}

struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid)
{
        return sidtab_search_core(s, sid, 0);
}

struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid)
{
        return sidtab_search_core(s, sid, 1);
}

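/*
 * Translate @context to a SID, allocating a new entry if needed. The
 * fast path is a lock-free hashtable search; on a miss we take s->lock,
 * re-check, and append the context at index s->count. If a policy
 * reload is in progress (s->convert != NULL), the new entry is also
 * converted and mirrored into the new sidtab so the live convert cannot
 * miss it; if the table has been frozen, -ESTALE tells the caller to
 * abort and retry against the new table.
 */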
int sidtab_context_to_sid(struct sidtab *s, struct context *context,
                          u32 *sid)
{
        unsigned long flags;
        u32 count, hash = context_compute_hash(context);
        struct sidtab_convert_params *convert;
        struct sidtab_entry *dst, *dst_convert;
        int rc;

        *sid = context_to_sid(s, context, hash);
        if (*sid)
                return 0;

        /* lock-free search failed: lock, re-search, and insert if not found */
        spin_lock_irqsave(&s->lock, flags);

        rc = 0;
        *sid = context_to_sid(s, context, hash);
        if (*sid)
                goto out_unlock;

        if (unlikely(s->frozen)) {
                /*
                 * This sidtab is now frozen - tell the caller to abort and
                 * get the new one.
                 */
                rc = -ESTALE;
                goto out_unlock;
        }

        count = s->count;
        convert = s->convert;

        /* bail out if we already reached max entries */
        rc = -EOVERFLOW;
        if (count >= SIDTAB_MAX)
                goto out_unlock;

        /* insert context into new entry */
        rc = -ENOMEM;
        dst = sidtab_do_lookup(s, count, 1);
        if (!dst)
                goto out_unlock;

        dst->sid = index_to_sid(count);
        dst->hash = hash;

        rc = context_cpy(&dst->context, context);
        if (rc)
                goto out_unlock;

        /*
         * if we are building a new sidtab, we need to convert the context
         * and insert it there as well
         */
        if (convert) {
                rc = -ENOMEM;
                dst_convert = sidtab_do_lookup(convert->target, count, 1);
                if (!dst_convert) {
                        context_destroy(&dst->context);
                        goto out_unlock;
                }

                rc = convert->func(context, &dst_convert->context,
                                   convert->args);
                if (rc) {
                        context_destroy(&dst->context);
                        goto out_unlock;
                }
                dst_convert->sid = index_to_sid(count);
                dst_convert->hash = context_compute_hash(&dst_convert->context);
                convert->target->count = count + 1;

                hash_add_rcu(convert->target->context_to_sid,
                             &dst_convert->list, dst_convert->hash);
        }

        if (context->len)
                pr_info("SELinux:  Context %s is not valid (left unmapped).\n",
                        context->str);

        *sid = index_to_sid(count);

        /* write entries before updating count */
        smp_store_release(&s->count, count + 1);
        hash_add_rcu(s->context_to_sid, &dst->list, dst->hash);

        rc = 0;
out_unlock:
        spin_unlock_irqrestore(&s->lock, flags);
        return rc;
}

static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
{
        struct sidtab_entry *entry;
        u32 i;

        for (i = 0; i < count; i++) {
                entry = sidtab_do_lookup(s, i, 0);
                entry->sid = index_to_sid(i);
                entry->hash = context_compute_hash(&entry->context);

                hash_add_rcu(s->context_to_sid, &entry->list, entry->hash);
        }
}

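/*
 * Recursively copy-and-convert the first @count entries from @esrc to
 * @edst, allocating destination nodes as needed. @pos tracks global
 * progress across the recursion; cond_resched() at each leaf keeps long
 * conversions preemptible.
 */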
static int sidtab_convert_tree(union sidtab_entry_inner *edst,
                               union sidtab_entry_inner *esrc,
                               u32 *pos, u32 count, u32 level,
                               struct sidtab_convert_params *convert)
{
        int rc;
        u32 i;

        if (level != 0) {
                if (!edst->ptr_inner) {
                        edst->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
                                                  GFP_KERNEL);
                        if (!edst->ptr_inner)
                                return -ENOMEM;
                }
                i = 0;
                while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
                        rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
                                                 &esrc->ptr_inner->entries[i],
                                                 pos, count, level - 1,
                                                 convert);
                        if (rc)
                                return rc;
                        i++;
                }
        } else {
                if (!edst->ptr_leaf) {
                        edst->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
                                                 GFP_KERNEL);
                        if (!edst->ptr_leaf)
                                return -ENOMEM;
                }
                i = 0;
                while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
                        rc = convert->func(&esrc->ptr_leaf->entries[i].context,
                                           &edst->ptr_leaf->entries[i].context,
                                           convert->args);
                        if (rc)
                                return rc;
                        (*pos)++;
                        i++;
                }
                cond_resched();
        }
        return 0;
}

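/*
 * Start converting @s into params->target for a policy reload. Under
 * the lock we preallocate the deepest leaf the current count needs (so
 * the live-convert path never races with tree growth) and publish
 * @params as s->convert; the bulk of the tree is then converted outside
 * the lock, and finally the target's hashtable is populated under the
 * lock again, since sidtab_context_to_sid() may have added entries in
 * the meantime.
 */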
int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
{
        unsigned long flags;
        u32 count, level, pos;
        int rc;

        spin_lock_irqsave(&s->lock, flags);

        /* concurrent policy loads are not allowed */
        if (s->convert) {
                spin_unlock_irqrestore(&s->lock, flags);
                return -EBUSY;
        }

        count = s->count;
        level = sidtab_level_from_count(count);

        /* allocate last leaf in the new sidtab (to avoid race with
         * live convert)
         */
        rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
        if (rc) {
                spin_unlock_irqrestore(&s->lock, flags);
                return rc;
        }

        /* set count in case no new entries are added during conversion */
        params->target->count = count;

        /* enable live convert of new entries */
        s->convert = params;

        /* we can safely convert the tree outside the lock */
        spin_unlock_irqrestore(&s->lock, flags);

        pr_info("SELinux:  Converting %u SID table entries...\n", count);

        /* convert all entries not covered by live convert */
        pos = 0;
        rc = sidtab_convert_tree(&params->target->roots[level],
                                 &s->roots[level], &pos, count, level, params);
        if (rc) {
                /* we need to keep the old table - disable live convert */
                spin_lock_irqsave(&s->lock, flags);
                s->convert = NULL;
                spin_unlock_irqrestore(&s->lock, flags);
                return rc;
        }
        /*
         * The hashtable can also be modified in sidtab_context_to_sid()
         * so we must re-acquire the lock here.
         */
        spin_lock_irqsave(&s->lock, flags);
        sidtab_convert_hashtable(params->target, count);
        spin_unlock_irqrestore(&s->lock, flags);

        return 0;
}

void sidtab_cancel_convert(struct sidtab *s)
{
        unsigned long flags;

        /* cancelling policy load - disable live convert of sidtab */
        spin_lock_irqsave(&s->lock, flags);
        s->convert = NULL;
        spin_unlock_irqrestore(&s->lock, flags);
}

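/*
 * Freezing is used when a policy load replaces the sidtab wholesale:
 * the lock is held from freeze_begin() to freeze_end(), so no new
 * entries can slip into the old table, and sidtab_context_to_sid()
 * fails with -ESTALE so callers fall back to the new table.
 */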
void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock)
{
        spin_lock_irqsave(&s->lock, *flags);
        s->frozen = true;
        s->convert = NULL;
}

void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock)
{
        spin_unlock_irqrestore(&s->lock, *flags);
}

static void sidtab_destroy_entry(struct sidtab_entry *entry)
{
        context_destroy(&entry->context);
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
        kfree(rcu_dereference_raw(entry->cache));
#endif
}

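/*
 * Free a (sub)tree rooted at @entry: inner nodes recurse one level
 * down, leaves destroy each contained context (and its cached string,
 * if any) before the node itself is freed.
 */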
static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
{
        u32 i;

        if (level != 0) {
                struct sidtab_node_inner *node = entry.ptr_inner;

                if (!node)
                        return;

                for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
                        sidtab_destroy_tree(node->entries[i], level - 1);
                kfree(node);
        } else {
                struct sidtab_node_leaf *node = entry.ptr_leaf;

                if (!node)
                        return;

                for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
                        sidtab_destroy_entry(&node->entries[i]);
                kfree(node);
        }
}

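/*
 * Tear down the whole table: initial SID entries first, then the
 * highest-level populated subtree, which transitively covers all lower
 * levels since each lower root is linked in as entry 0 of the level
 * above it.
 */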
void sidtab_destroy(struct sidtab *s)
{
        u32 i, level;

        for (i = 0; i < SECINITSID_NUM; i++)
                if (s->isids[i].set)
                        sidtab_destroy_entry(&s->isids[i].entry);

        level = SIDTAB_MAX_LEVEL;
        while (level && !s->roots[level].ptr_inner)
                --level;

        sidtab_destroy_tree(s->roots[level], level);
        /*
         * The context_to_sid hashtable's objects are all shared
         * with the isids array and context tree, and so don't need
         * to be cleaned up here.
         */
}

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0

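/*
 * Insert (or refresh) the cached string representation of @entry's
 * context. On a cache hit the node is just bumped to the head of the
 * LRU list; otherwise a new node is allocated and, if the cache is
 * full, the tail node is evicted. The victim is freed with kfree_rcu()
 * after the lock is dropped, so concurrent sidtab_sid2str_get() readers
 * under rcu_read_lock() remain safe.
 */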
void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
                        const char *str, u32 str_len)
{
        struct sidtab_str_cache *cache, *victim = NULL;
        unsigned long flags;

        /* do not cache invalid contexts */
        if (entry->context.len)
                return;

        spin_lock_irqsave(&s->cache_lock, flags);

        cache = rcu_dereference_protected(entry->cache,
                                          lockdep_is_held(&s->cache_lock));
        if (cache) {
                /* entry in cache - just bump to the head of LRU list */
                list_move(&cache->lru_member, &s->cache_lru_list);
                goto out_unlock;
        }

        cache = kmalloc(sizeof(struct sidtab_str_cache) + str_len, GFP_ATOMIC);
        if (!cache)
                goto out_unlock;

        if (s->cache_free_slots == 0) {
                /* pop a cache entry from the tail and free it */
                victim = container_of(s->cache_lru_list.prev,
                                      struct sidtab_str_cache, lru_member);
                list_del(&victim->lru_member);
                rcu_assign_pointer(victim->parent->cache, NULL);
        } else {
                s->cache_free_slots--;
        }
        cache->parent = entry;
        cache->len = str_len;
        memcpy(cache->str, str, str_len);
        list_add(&cache->lru_member, &s->cache_lru_list);

        rcu_assign_pointer(entry->cache, cache);

out_unlock:
        spin_unlock_irqrestore(&s->cache_lock, flags);
        kfree_rcu(victim, rcu_member);
}

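/*
 * Fetch a copy of the cached context string for @entry, if present.
 * The lookup runs under the RCU read lock and the string is duplicated
 * with kmemdup() before the lock is released; on success the entry is
 * re-put to move it to the front of the LRU.
 */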
int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
                       char **out, u32 *out_len)
{
        struct sidtab_str_cache *cache;
        int rc = 0;

        if (entry->context.len)
                return -ENOENT; /* do not cache invalid contexts */

        rcu_read_lock();

        cache = rcu_dereference(entry->cache);
        if (!cache) {
                rc = -ENOENT;
        } else {
                *out_len = cache->len;
                if (out) {
                        *out = kmemdup(cache->str, cache->len, GFP_ATOMIC);
                        if (!*out)
                                rc = -ENOMEM;
                }
        }

        rcu_read_unlock();

        if (!rc && out)
                sidtab_sid2str_put(s, entry, *out, *out_len);
        return rc;
}

#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */