linux/security/selinux/avc.c
   1/*
   2 * Implementation of the kernel access vector cache (AVC).
   3 *
   4 * Authors:  Stephen Smalley, <sds@tycho.nsa.gov>
   5 *           James Morris <jmorris@redhat.com>
   6 *
   7 * Update:   KaiGai, Kohei <kaigai@ak.jp.nec.com>
   8 *      Replaced the avc_lock spinlock by RCU.
   9 *
  10 * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
  11 *
  12 *      This program is free software; you can redistribute it and/or modify
  13 *      it under the terms of the GNU General Public License version 2,
  14 *      as published by the Free Software Foundation.
  15 */
  16#include <linux/types.h>
  17#include <linux/stddef.h>
  18#include <linux/kernel.h>
  19#include <linux/slab.h>
  20#include <linux/fs.h>
  21#include <linux/dcache.h>
  22#include <linux/init.h>
  23#include <linux/skbuff.h>
  24#include <linux/percpu.h>
  25#include <linux/list.h>
  26#include <net/sock.h>
  27#include <linux/un.h>
  28#include <net/af_unix.h>
  29#include <linux/ip.h>
  30#include <linux/audit.h>
  31#include <linux/ipv6.h>
  32#include <net/ipv6.h>
  33#include "avc.h"
  34#include "avc_ss.h"
  35#include "classmap.h"
  36
  37#define AVC_CACHE_SLOTS                 512
  38#define AVC_DEF_CACHE_THRESHOLD         512
  39#define AVC_CACHE_RECLAIM               16
  40
  41#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
  42#define avc_cache_stats_incr(field)     this_cpu_inc(avc_cache_stats.field)
  43#else
  44#define avc_cache_stats_incr(field)     do {} while (0)
  45#endif
  46
  47struct avc_entry {
  48        u32                     ssid;
  49        u32                     tsid;
  50        u16                     tclass;
  51        struct av_decision      avd;
  52        struct avc_xperms_node  *xp_node;
  53};
  54
  55struct avc_node {
  56        struct avc_entry        ae;
  57        struct hlist_node       list; /* anchored in avc_cache->slots[i] */
  58        struct rcu_head         rhead;
  59};
  60
  61struct avc_xperms_decision_node {
  62        struct extended_perms_decision xpd;
  63        struct list_head xpd_list; /* list of extended_perms_decision */
  64};
  65
  66struct avc_xperms_node {
  67        struct extended_perms xp;
  68        struct list_head xpd_head; /* list head of extended_perms_decision */
  69};
  70
  71struct avc_cache {
  72        struct hlist_head       slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
  73        spinlock_t              slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
  74        atomic_t                lru_hint;       /* LRU hint for reclaim scan */
  75        atomic_t                active_nodes;
  76        u32                     latest_notif;   /* latest revocation notification */
  77};
  78
  79struct avc_callback_node {
  80        int (*callback) (u32 event);
  81        u32 events;
  82        struct avc_callback_node *next;
  83};
  84
  85#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
  86DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats) = { 0 };
  87#endif
  88
  89struct selinux_avc {
  90        unsigned int avc_cache_threshold;
  91        struct avc_cache avc_cache;
  92};
  93
  94static struct selinux_avc selinux_avc;
  95
  96void selinux_avc_init(struct selinux_avc **avc)
  97{
  98        int i;
  99
 100        selinux_avc.avc_cache_threshold = AVC_DEF_CACHE_THRESHOLD;
 101        for (i = 0; i < AVC_CACHE_SLOTS; i++) {
 102                INIT_HLIST_HEAD(&selinux_avc.avc_cache.slots[i]);
 103                spin_lock_init(&selinux_avc.avc_cache.slots_lock[i]);
 104        }
 105        atomic_set(&selinux_avc.avc_cache.active_nodes, 0);
 106        atomic_set(&selinux_avc.avc_cache.lru_hint, 0);
 107        *avc = &selinux_avc;
 108}
 109
 110unsigned int avc_get_cache_threshold(struct selinux_avc *avc)
 111{
 112        return avc->avc_cache_threshold;
 113}
 114
 115void avc_set_cache_threshold(struct selinux_avc *avc,
 116                             unsigned int cache_threshold)
 117{
 118        avc->avc_cache_threshold = cache_threshold;
 119}
 120
 121static struct avc_callback_node *avc_callbacks;
 122static struct kmem_cache *avc_node_cachep;
 123static struct kmem_cache *avc_xperms_data_cachep;
 124static struct kmem_cache *avc_xperms_decision_cachep;
 125static struct kmem_cache *avc_xperms_cachep;
 126
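    /*
     * Fold the SID pair and target class into a cache slot index;
     * AVC_CACHE_SLOTS is a power of two, so masking with
     * (AVC_CACHE_SLOTS - 1) keeps the result within slots[].
     */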
 127static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
 128{
 129        return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1);
 130}
 131
 132/**
 133 * avc_init - Initialize the AVC.
 134 *
 135 * Initialize the access vector cache.
 136 */
 137void __init avc_init(void)
 138{
 139        avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
 140                                        0, SLAB_PANIC, NULL);
 141        avc_xperms_cachep = kmem_cache_create("avc_xperms_node",
 142                                        sizeof(struct avc_xperms_node),
 143                                        0, SLAB_PANIC, NULL);
 144        avc_xperms_decision_cachep = kmem_cache_create(
 145                                        "avc_xperms_decision_node",
 146                                        sizeof(struct avc_xperms_decision_node),
 147                                        0, SLAB_PANIC, NULL);
 148        avc_xperms_data_cachep = kmem_cache_create("avc_xperms_data",
 149                                        sizeof(struct extended_perms_data),
 150                                        0, SLAB_PANIC, NULL);
 151}
 152
 153int avc_get_hash_stats(struct selinux_avc *avc, char *page)
 154{
 155        int i, chain_len, max_chain_len, slots_used;
 156        struct avc_node *node;
 157        struct hlist_head *head;
 158
 159        rcu_read_lock();
 160
 161        slots_used = 0;
 162        max_chain_len = 0;
 163        for (i = 0; i < AVC_CACHE_SLOTS; i++) {
 164                head = &avc->avc_cache.slots[i];
 165                if (!hlist_empty(head)) {
 166                        slots_used++;
 167                        chain_len = 0;
 168                        hlist_for_each_entry_rcu(node, head, list)
 169                                chain_len++;
 170                        if (chain_len > max_chain_len)
 171                                max_chain_len = chain_len;
 172                }
 173        }
 174
 175        rcu_read_unlock();
 176
 177        return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
 178                         "longest chain: %d\n",
 179                         atomic_read(&avc->avc_cache.active_nodes),
 180                         slots_used, AVC_CACHE_SLOTS, max_chain_len);
 181}
 182
 183/*
 184 * using a linked list for the extended_perms_decision lookup because the
 185 * list is always small, i.e. fewer than 5 entries and typically only 1
 186 */
 187static struct extended_perms_decision *avc_xperms_decision_lookup(u8 driver,
 188                                        struct avc_xperms_node *xp_node)
 189{
 190        struct avc_xperms_decision_node *xpd_node;
 191
 192        list_for_each_entry(xpd_node, &xp_node->xpd_head, xpd_list) {
 193                if (xpd_node->xpd.driver == driver)
 194                        return &xpd_node->xpd;
 195        }
 196        return NULL;
 197}
 198
 199static inline unsigned int
 200avc_xperms_has_perm(struct extended_perms_decision *xpd,
 201                                        u8 perm, u8 which)
 202{
 203        unsigned int rc = 0;
 204
 205        if ((which == XPERMS_ALLOWED) &&
 206                        (xpd->used & XPERMS_ALLOWED))
 207                rc = security_xperm_test(xpd->allowed->p, perm);
 208        else if ((which == XPERMS_AUDITALLOW) &&
 209                        (xpd->used & XPERMS_AUDITALLOW))
 210                rc = security_xperm_test(xpd->auditallow->p, perm);
 211        else if ((which == XPERMS_DONTAUDIT) &&
 212                        (xpd->used & XPERMS_DONTAUDIT))
 213                rc = security_xperm_test(xpd->dontaudit->p, perm);
 214        return rc;
 215}
 216
 217static void avc_xperms_allow_perm(struct avc_xperms_node *xp_node,
 218                                u8 driver, u8 perm)
 219{
 220        struct extended_perms_decision *xpd;
 221        security_xperm_set(xp_node->xp.drivers.p, driver);
 222        xpd = avc_xperms_decision_lookup(driver, xp_node);
 223        if (xpd && xpd->allowed)
 224                security_xperm_set(xpd->allowed->p, perm);
 225}
 226
 227static void avc_xperms_decision_free(struct avc_xperms_decision_node *xpd_node)
 228{
 229        struct extended_perms_decision *xpd;
 230
 231        xpd = &xpd_node->xpd;
 232        if (xpd->allowed)
 233                kmem_cache_free(avc_xperms_data_cachep, xpd->allowed);
 234        if (xpd->auditallow)
 235                kmem_cache_free(avc_xperms_data_cachep, xpd->auditallow);
 236        if (xpd->dontaudit)
 237                kmem_cache_free(avc_xperms_data_cachep, xpd->dontaudit);
 238        kmem_cache_free(avc_xperms_decision_cachep, xpd_node);
 239}
 240
 241static void avc_xperms_free(struct avc_xperms_node *xp_node)
 242{
 243        struct avc_xperms_decision_node *xpd_node, *tmp;
 244
 245        if (!xp_node)
 246                return;
 247
 248        list_for_each_entry_safe(xpd_node, tmp, &xp_node->xpd_head, xpd_list) {
 249                list_del(&xpd_node->xpd_list);
 250                avc_xperms_decision_free(xpd_node);
 251        }
 252        kmem_cache_free(avc_xperms_cachep, xp_node);
 253}
 254
 255static void avc_copy_xperms_decision(struct extended_perms_decision *dest,
 256                                        struct extended_perms_decision *src)
 257{
 258        dest->driver = src->driver;
 259        dest->used = src->used;
 260        if (dest->used & XPERMS_ALLOWED)
 261                memcpy(dest->allowed->p, src->allowed->p,
 262                                sizeof(src->allowed->p));
 263        if (dest->used & XPERMS_AUDITALLOW)
 264                memcpy(dest->auditallow->p, src->auditallow->p,
 265                                sizeof(src->auditallow->p));
 266        if (dest->used & XPERMS_DONTAUDIT)
 267                memcpy(dest->dontaudit->p, src->dontaudit->p,
 268                                sizeof(src->dontaudit->p));
 269}
 270
 271/*
 272 * similar to avc_copy_xperms_decision, but only copy decision
 273 * information relevant to this perm
 274 */
 275static inline void avc_quick_copy_xperms_decision(u8 perm,
 276                        struct extended_perms_decision *dest,
 277                        struct extended_perms_decision *src)
 278{
 279        /*
 280         * compute the index of the u32, within the 256-bit set (8 u32s),
 281         * that contains this command permission
 282         */
 283        u8 i = perm >> 5;
 284
 285        dest->used = src->used;
 286        if (dest->used & XPERMS_ALLOWED)
 287                dest->allowed->p[i] = src->allowed->p[i];
 288        if (dest->used & XPERMS_AUDITALLOW)
 289                dest->auditallow->p[i] = src->auditallow->p[i];
 290        if (dest->used & XPERMS_DONTAUDIT)
 291                dest->dontaudit->p[i] = src->dontaudit->p[i];
 292}
 293
 294static struct avc_xperms_decision_node
 295                *avc_xperms_decision_alloc(u8 which)
 296{
 297        struct avc_xperms_decision_node *xpd_node;
 298        struct extended_perms_decision *xpd;
 299
 300        xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, GFP_NOWAIT);
 301        if (!xpd_node)
 302                return NULL;
 303
 304        xpd = &xpd_node->xpd;
 305        if (which & XPERMS_ALLOWED) {
 306                xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep,
 307                                                GFP_NOWAIT);
 308                if (!xpd->allowed)
 309                        goto error;
 310        }
 311        if (which & XPERMS_AUDITALLOW) {
 312                xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep,
 313                                                GFP_NOWAIT);
 314                if (!xpd->auditallow)
 315                        goto error;
 316        }
 317        if (which & XPERMS_DONTAUDIT) {
 318                xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep,
 319                                                GFP_NOWAIT);
 320                if (!xpd->dontaudit)
 321                        goto error;
 322        }
 323        return xpd_node;
 324error:
 325        avc_xperms_decision_free(xpd_node);
 326        return NULL;
 327}
 328
 329static int avc_add_xperms_decision(struct avc_node *node,
 330                        struct extended_perms_decision *src)
 331{
 332        struct avc_xperms_decision_node *dest_xpd;
 333
 334        node->ae.xp_node->xp.len++;
 335        dest_xpd = avc_xperms_decision_alloc(src->used);
 336        if (!dest_xpd)
 337                return -ENOMEM;
 338        avc_copy_xperms_decision(&dest_xpd->xpd, src);
 339        list_add(&dest_xpd->xpd_list, &node->ae.xp_node->xpd_head);
 340        return 0;
 341}
 342
 343static struct avc_xperms_node *avc_xperms_alloc(void)
 344{
 345        struct avc_xperms_node *xp_node;
 346
 347        xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT);
 348        if (!xp_node)
 349                return xp_node;
 350        INIT_LIST_HEAD(&xp_node->xpd_head);
 351        return xp_node;
 352}
 353
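    /*
     * Deep-copy the extended permissions from @src into a freshly allocated
     * avc_xperms_node owned by @node; nothing is copied when @src carries no
     * decisions (xp.len == 0).
     */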
 354static int avc_xperms_populate(struct avc_node *node,
 355                                struct avc_xperms_node *src)
 356{
 357        struct avc_xperms_node *dest;
 358        struct avc_xperms_decision_node *dest_xpd;
 359        struct avc_xperms_decision_node *src_xpd;
 360
 361        if (src->xp.len == 0)
 362                return 0;
 363        dest = avc_xperms_alloc();
 364        if (!dest)
 365                return -ENOMEM;
 366
 367        memcpy(dest->xp.drivers.p, src->xp.drivers.p, sizeof(dest->xp.drivers.p));
 368        dest->xp.len = src->xp.len;
 369
 370        /* for each source xpd allocate a destination xpd and copy */
 371        list_for_each_entry(src_xpd, &src->xpd_head, xpd_list) {
 372                dest_xpd = avc_xperms_decision_alloc(src_xpd->xpd.used);
 373                if (!dest_xpd)
 374                        goto error;
 375                avc_copy_xperms_decision(&dest_xpd->xpd, &src_xpd->xpd);
 376                list_add(&dest_xpd->xpd_list, &dest->xpd_head);
 377        }
 378        node->ae.xp_node = dest;
 379        return 0;
 380error:
 381        avc_xperms_free(dest);
 382        return -ENOMEM;
 383
 384}
 385
 386static inline u32 avc_xperms_audit_required(u32 requested,
 387                                        struct av_decision *avd,
 388                                        struct extended_perms_decision *xpd,
 389                                        u8 perm,
 390                                        int result,
 391                                        u32 *deniedp)
 392{
 393        u32 denied, audited;
 394
 395        denied = requested & ~avd->allowed;
 396        if (unlikely(denied)) {
 397                audited = denied & avd->auditdeny;
 398                if (audited && xpd) {
 399                        if (avc_xperms_has_perm(xpd, perm, XPERMS_DONTAUDIT))
 400                                audited &= ~requested;
 401                }
 402        } else if (result) {
 403                audited = denied = requested;
 404        } else {
 405                audited = requested & avd->auditallow;
 406                if (audited && xpd) {
 407                        if (!avc_xperms_has_perm(xpd, perm, XPERMS_AUDITALLOW))
 408                                audited &= ~requested;
 409                }
 410        }
 411
 412        *deniedp = denied;
 413        return audited;
 414}
 415
 416static inline int avc_xperms_audit(struct selinux_state *state,
 417                                   u32 ssid, u32 tsid, u16 tclass,
 418                                   u32 requested, struct av_decision *avd,
 419                                   struct extended_perms_decision *xpd,
 420                                   u8 perm, int result,
 421                                   struct common_audit_data *ad)
 422{
 423        u32 audited, denied;
 424
 425        audited = avc_xperms_audit_required(
 426                        requested, avd, xpd, perm, result, &denied);
 427        if (likely(!audited))
 428                return 0;
 429        return slow_avc_audit(state, ssid, tsid, tclass, requested,
 430                        audited, denied, result, ad, 0);
 431}
 432
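    /*
     * RCU callback: runs once no reader can still reference the node, so the
     * node and its attached xperms state can be freed safely.
     */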
 433static void avc_node_free(struct rcu_head *rhead)
 434{
 435        struct avc_node *node = container_of(rhead, struct avc_node, rhead);
 436        avc_xperms_free(node->ae.xp_node);
 437        kmem_cache_free(avc_node_cachep, node);
 438        avc_cache_stats_incr(frees);
 439}
 440
 441static void avc_node_delete(struct selinux_avc *avc, struct avc_node *node)
 442{
 443        hlist_del_rcu(&node->list);
 444        call_rcu(&node->rhead, avc_node_free);
 445        atomic_dec(&avc->avc_cache.active_nodes);
 446}
 447
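    /*
     * Free a node that was never inserted into the cache, so no RCU grace
     * period is required before releasing it.
     */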
 448static void avc_node_kill(struct selinux_avc *avc, struct avc_node *node)
 449{
 450        avc_xperms_free(node->ae.xp_node);
 451        kmem_cache_free(avc_node_cachep, node);
 452        avc_cache_stats_incr(frees);
 453        atomic_dec(&avc->avc_cache.active_nodes);
 454}
 455
 456static void avc_node_replace(struct selinux_avc *avc,
 457                             struct avc_node *new, struct avc_node *old)
 458{
 459        hlist_replace_rcu(&old->list, &new->list);
 460        call_rcu(&old->rhead, avc_node_free);
 461        atomic_dec(&avc->avc_cache.active_nodes);
 462}
 463
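    /*
     * Reclaim cache entries once the cache grows past the configured
     * threshold: scan the slots round-robin starting at lru_hint, skip any
     * slot whose lock cannot be taken immediately, and stop after
     * AVC_CACHE_RECLAIM nodes have been removed or all slots were visited.
     */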
 464static inline int avc_reclaim_node(struct selinux_avc *avc)
 465{
 466        struct avc_node *node;
 467        int hvalue, try, ecx;
 468        unsigned long flags;
 469        struct hlist_head *head;
 470        spinlock_t *lock;
 471
 472        for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
 473                hvalue = atomic_inc_return(&avc->avc_cache.lru_hint) &
 474                        (AVC_CACHE_SLOTS - 1);
 475                head = &avc->avc_cache.slots[hvalue];
 476                lock = &avc->avc_cache.slots_lock[hvalue];
 477
 478                if (!spin_trylock_irqsave(lock, flags))
 479                        continue;
 480
 481                rcu_read_lock();
 482                hlist_for_each_entry(node, head, list) {
 483                        avc_node_delete(avc, node);
 484                        avc_cache_stats_incr(reclaims);
 485                        ecx++;
 486                        if (ecx >= AVC_CACHE_RECLAIM) {
 487                                rcu_read_unlock();
 488                                spin_unlock_irqrestore(lock, flags);
 489                                goto out;
 490                        }
 491                }
 492                rcu_read_unlock();
 493                spin_unlock_irqrestore(lock, flags);
 494        }
 495out:
 496        return ecx;
 497}
 498
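    /*
     * Allocate a new cache node with GFP_NOWAIT, since callers may be in
     * contexts that cannot sleep (e.g. under rcu_read_lock()); if the cache
     * has grown past avc_cache_threshold, reclaim some entries first.
     */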
 499static struct avc_node *avc_alloc_node(struct selinux_avc *avc)
 500{
 501        struct avc_node *node;
 502
 503        node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT);
 504        if (!node)
 505                goto out;
 506
 507        INIT_HLIST_NODE(&node->list);
 508        avc_cache_stats_incr(allocations);
 509
 510        if (atomic_inc_return(&avc->avc_cache.active_nodes) >
 511            avc->avc_cache_threshold)
 512                avc_reclaim_node(avc);
 513
 514out:
 515        return node;
 516}
 517
 518static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
 519{
 520        node->ae.ssid = ssid;
 521        node->ae.tsid = tsid;
 522        node->ae.tclass = tclass;
 523        memcpy(&node->ae.avd, avd, sizeof(node->ae.avd));
 524}
 525
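    /*
     * RCU read-side lookup of the (@ssid, @tsid, @tclass) triple in its hash
     * chain; the caller must hold rcu_read_lock().
     */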
 526static inline struct avc_node *avc_search_node(struct selinux_avc *avc,
 527                                               u32 ssid, u32 tsid, u16 tclass)
 528{
 529        struct avc_node *node, *ret = NULL;
 530        int hvalue;
 531        struct hlist_head *head;
 532
 533        hvalue = avc_hash(ssid, tsid, tclass);
 534        head = &avc->avc_cache.slots[hvalue];
 535        hlist_for_each_entry_rcu(node, head, list) {
 536                if (ssid == node->ae.ssid &&
 537                    tclass == node->ae.tclass &&
 538                    tsid == node->ae.tsid) {
 539                        ret = node;
 540                        break;
 541                }
 542        }
 543
 544        return ret;
 545}
 546
 547/**
 548 * avc_lookup - Look up an AVC entry.
 549 * @ssid: source security identifier
 550 * @tsid: target security identifier
 551 * @tclass: target security class
 552 *
 553 * Look up an AVC entry that is valid for the
 554 * (@ssid, @tsid), interpreting the permissions
 555 * based on @tclass.  If a valid AVC entry exists,
 556 * then this function returns the avc_node.
 557 * Otherwise, this function returns NULL.
 558 */
 559static struct avc_node *avc_lookup(struct selinux_avc *avc,
 560                                   u32 ssid, u32 tsid, u16 tclass)
 561{
 562        struct avc_node *node;
 563
 564        avc_cache_stats_incr(lookups);
 565        node = avc_search_node(avc, ssid, tsid, tclass);
 566
 567        if (node)
 568                return node;
 569
 570        avc_cache_stats_incr(misses);
 571        return NULL;
 572}
 573
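    /*
     * Track the latest revocation notification sequence number: on insert,
     * reject (with -EAGAIN) any decision computed before the latest
     * notification so stale decisions are never cached; otherwise advance
     * latest_notif to @seqno if it is newer.
     */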
 574static int avc_latest_notif_update(struct selinux_avc *avc,
 575                                   int seqno, int is_insert)
 576{
 577        int ret = 0;
 578        static DEFINE_SPINLOCK(notif_lock);
 579        unsigned long flag;
 580
 581        spin_lock_irqsave(&notif_lock, flag);
 582        if (is_insert) {
 583                if (seqno < avc->avc_cache.latest_notif) {
 584                        pr_warn("SELinux: avc:  seqno %d < latest_notif %d\n",
 585                               seqno, avc->avc_cache.latest_notif);
 586                        ret = -EAGAIN;
 587                }
 588        } else {
 589                if (seqno > avc->avc_cache.latest_notif)
 590                        avc->avc_cache.latest_notif = seqno;
 591        }
 592        spin_unlock_irqrestore(&notif_lock, flag);
 593
 594        return ret;
 595}
 596
 597/**
 598 * avc_insert - Insert an AVC entry.
 599 * @ssid: source security identifier
 600 * @tsid: target security identifier
 601 * @tclass: target security class
 602 * @avd: resulting av decision
 603 * @xp_node: resulting extended permissions
 604 *
 605 * Insert an AVC entry for the SID pair
 606 * (@ssid, @tsid) and class @tclass.
 607 * The access vectors and the sequence number are
 608 * normally provided by the security server in
 609 * response to a security_compute_av() call.  If the
 610 * sequence number @avd->seqno is not less than the latest
 611 * revocation notification, then the function copies
 612 * the access vectors into a cache entry and returns
 613 * the inserted avc_node. Otherwise, this function returns NULL.
 614 */
 615static struct avc_node *avc_insert(struct selinux_avc *avc,
 616                                   u32 ssid, u32 tsid, u16 tclass,
 617                                   struct av_decision *avd,
 618                                   struct avc_xperms_node *xp_node)
 619{
 620        struct avc_node *pos, *node = NULL;
 621        int hvalue;
 622        unsigned long flag;
 623
 624        if (avc_latest_notif_update(avc, avd->seqno, 1))
 625                goto out;
 626
 627        node = avc_alloc_node(avc);
 628        if (node) {
 629                struct hlist_head *head;
 630                spinlock_t *lock;
 631                int rc = 0;
 632
 633                hvalue = avc_hash(ssid, tsid, tclass);
 634                avc_node_populate(node, ssid, tsid, tclass, avd);
 635                rc = avc_xperms_populate(node, xp_node);
 636                if (rc) {
 637                        kmem_cache_free(avc_node_cachep, node);
 638                        return NULL;
 639                }
 640                head = &avc->avc_cache.slots[hvalue];
 641                lock = &avc->avc_cache.slots_lock[hvalue];
 642
 643                spin_lock_irqsave(lock, flag);
 644                hlist_for_each_entry(pos, head, list) {
 645                        if (pos->ae.ssid == ssid &&
 646                            pos->ae.tsid == tsid &&
 647                            pos->ae.tclass == tclass) {
 648                                avc_node_replace(avc, node, pos);
 649                                goto found;
 650                        }
 651                }
 652                hlist_add_head_rcu(&node->list, head);
 653found:
 654                spin_unlock_irqrestore(lock, flag);
 655        }
 656out:
 657        return node;
 658}
 659
 660/**
 661 * avc_audit_pre_callback - SELinux-specific audit information,
 662 * called by the generic audit code
 663 * @ab: the audit buffer
 664 * @a: audit_data
 665 */
 666static void avc_audit_pre_callback(struct audit_buffer *ab, void *a)
 667{
 668        struct common_audit_data *ad = a;
 669        struct selinux_audit_data *sad = ad->selinux_audit_data;
 670        u32 av = sad->audited;
 671        const char **perms;
 672        int i, perm;
 673
 674        audit_log_format(ab, "avc:  %s ", sad->denied ? "denied" : "granted");
 675
 676        if (av == 0) {
 677                audit_log_format(ab, " null");
 678                return;
 679        }
 680
 681        perms = secclass_map[sad->tclass-1].perms;
 682
 683        audit_log_format(ab, " {");
 684        i = 0;
 685        perm = 1;
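            /*
             * Walk each bit of the access vector and print the name of every
             * audited permission that has one; any remaining unnamed bits are
             * logged in hex below.
             */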
 686        while (i < (sizeof(av) * 8)) {
 687                if ((perm & av) && perms[i]) {
 688                        audit_log_format(ab, " %s", perms[i]);
 689                        av &= ~perm;
 690                }
 691                i++;
 692                perm <<= 1;
 693        }
 694
 695        if (av)
 696                audit_log_format(ab, " 0x%x", av);
 697
 698        audit_log_format(ab, " } for ");
 699}
 700
 701/**
 702 * avc_audit_post_callback - SELinux-specific audit information,
 703 * called by the generic audit code
 704 * @ab: the audit buffer
 705 * @a: audit_data
 706 */
 707static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
 708{
 709        struct common_audit_data *ad = a;
 710        struct selinux_audit_data *sad = ad->selinux_audit_data;
 711        char *scontext;
 712        u32 scontext_len;
 713        int rc;
 714
 715        rc = security_sid_to_context(sad->state, sad->ssid, &scontext,
 716                                     &scontext_len);
 717        if (rc)
 718                audit_log_format(ab, " ssid=%d", sad->ssid);
 719        else {
 720                audit_log_format(ab, " scontext=%s", scontext);
 721                kfree(scontext);
 722        }
 723
 724        rc = security_sid_to_context(sad->state, sad->tsid, &scontext,
 725                                     &scontext_len);
 726        if (rc)
 727                audit_log_format(ab, " tsid=%d", sad->tsid);
 728        else {
 729                audit_log_format(ab, " tcontext=%s", scontext);
 730                kfree(scontext);
 731        }
 732
 733        audit_log_format(ab, " tclass=%s", secclass_map[sad->tclass-1].name);
 734
 735        if (sad->denied)
 736                audit_log_format(ab, " permissive=%u", sad->result ? 0 : 1);
 737
 738        /* in case of an invalid context, also report the actual context string */
 739        rc = security_sid_to_context_inval(sad->state, sad->ssid, &scontext,
 740                                           &scontext_len);
 741        if (!rc && scontext) {
 742                if (scontext_len && scontext[scontext_len - 1] == '\0')
 743                        scontext_len--;
 744                audit_log_format(ab, " srawcon=");
 745                audit_log_n_untrustedstring(ab, scontext, scontext_len);
 746                kfree(scontext);
 747        }
 748
 749        rc = security_sid_to_context_inval(sad->state, sad->tsid, &scontext,
 750                                           &scontext_len);
 751        if (!rc && scontext) {
 752                if (scontext_len && scontext[scontext_len - 1] == '\0')
 753                        scontext_len--;
 754                audit_log_format(ab, " trawcon=");
 755                audit_log_n_untrustedstring(ab, scontext, scontext_len);
 756                kfree(scontext);
 757        }
 758}
 759
 760/* This is the slow part of avc audit with a big stack footprint */
 761noinline int slow_avc_audit(struct selinux_state *state,
 762                            u32 ssid, u32 tsid, u16 tclass,
 763                            u32 requested, u32 audited, u32 denied, int result,
 764                            struct common_audit_data *a,
 765                            unsigned int flags)
 766{
 767        struct common_audit_data stack_data;
 768        struct selinux_audit_data sad;
 769
 770        if (WARN_ON(!tclass || tclass >= ARRAY_SIZE(secclass_map)))
 771                return -EINVAL;
 772
 773        if (!a) {
 774                a = &stack_data;
 775                a->type = LSM_AUDIT_DATA_NONE;
 776        }
 777
 778        /*
 779         * When in an RCU walk, do the audit on the RCU retry.  This is because
 780         * the collection of the dname in an inode audit message is not RCU
 781         * safe.  Note this may drop some audits when the situation changes
 782         * during retry. However this is logically just as if the operation
 783         * happened a little later.
 784         */
 785        if ((a->type == LSM_AUDIT_DATA_INODE) &&
 786            (flags & MAY_NOT_BLOCK))
 787                return -ECHILD;
 788
 789        sad.tclass = tclass;
 790        sad.requested = requested;
 791        sad.ssid = ssid;
 792        sad.tsid = tsid;
 793        sad.audited = audited;
 794        sad.denied = denied;
 795        sad.result = result;
 796        sad.state = state;
 797
 798        a->selinux_audit_data = &sad;
 799
 800        common_lsm_audit(a, avc_audit_pre_callback, avc_audit_post_callback);
 801        return 0;
 802}
 803
 804/**
 805 * avc_add_callback - Register a callback for security events.
 806 * @callback: callback function
 807 * @events: security events
 808 *
 809 * Register a callback function for events in the set @events.
 810 * Returns %0 on success or -%ENOMEM if insufficient memory
 811 * exists to add the callback.
 812 */
 813int __init avc_add_callback(int (*callback)(u32 event), u32 events)
 814{
 815        struct avc_callback_node *c;
 816        int rc = 0;
 817
 818        c = kmalloc(sizeof(*c), GFP_KERNEL);
 819        if (!c) {
 820                rc = -ENOMEM;
 821                goto out;
 822        }
 823
 824        c->callback = callback;
 825        c->events = events;
 826        c->next = avc_callbacks;
 827        avc_callbacks = c;
 828out:
 829        return rc;
 830}
 831
 832/**
 833 * avc_update_node - Update an AVC entry
 834 * @event: updating event
 835 * @perms: permission mask bits
 836 * @ssid, @tsid, @tclass: identifier of an AVC entry
 837 * @seqno: sequence number when the decision was made
 838 * @xpd: extended_perms_decision to be added to the node
 839 * @flags: the AVC_* flags, e.g. AVC_NONBLOCKING, AVC_EXTENDED_PERMS, or 0.
 840 *
 841 * If a valid AVC entry doesn't exist, this function returns -ENOENT.
 842 * If the internal memory allocation fails, this function returns -ENOMEM.
 843 * Otherwise, this function updates the AVC entry. The original AVC entry
 844 * is released later by RCU.
 845 */
 846static int avc_update_node(struct selinux_avc *avc,
 847                           u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid,
 848                           u32 tsid, u16 tclass, u32 seqno,
 849                           struct extended_perms_decision *xpd,
 850                           u32 flags)
 851{
 852        int hvalue, rc = 0;
 853        unsigned long flag;
 854        struct avc_node *pos, *node, *orig = NULL;
 855        struct hlist_head *head;
 856        spinlock_t *lock;
 857
 858        /*
 859         * If we are in a non-blocking code path, e.g. VFS RCU walk,
 860         * then we must not add permissions to a cache entry
 861         * because we cannot safely audit the denial.  Otherwise,
 862         * during the subsequent blocking retry (e.g. VFS ref walk), we
 863         * will find the permissions already granted in the cache entry
 864         * and won't audit anything at all, leading to silent denials in
 865         * permissive mode that only appear when in enforcing mode.
 866         *
 867         * See the corresponding handling in slow_avc_audit(), and the
 868         * logic in selinux_inode_permission for the MAY_NOT_BLOCK flag,
 869         * which is transliterated into AVC_NONBLOCKING.
 870         */
 871        if (flags & AVC_NONBLOCKING)
 872                return 0;
 873
 874        node = avc_alloc_node(avc);
 875        if (!node) {
 876                rc = -ENOMEM;
 877                goto out;
 878        }
 879
 880        /* Lock the target slot */
 881        hvalue = avc_hash(ssid, tsid, tclass);
 882
 883        head = &avc->avc_cache.slots[hvalue];
 884        lock = &avc->avc_cache.slots_lock[hvalue];
 885
 886        spin_lock_irqsave(lock, flag);
 887
 888        hlist_for_each_entry(pos, head, list) {
 889                if (ssid == pos->ae.ssid &&
 890                    tsid == pos->ae.tsid &&
 891                    tclass == pos->ae.tclass &&
 892                    seqno == pos->ae.avd.seqno){
 893                        orig = pos;
 894                        break;
 895                }
 896        }
 897
 898        if (!orig) {
 899                rc = -ENOENT;
 900                avc_node_kill(avc, node);
 901                goto out_unlock;
 902        }
 903
 904        /*
 905         * Copy and replace original node.
 906         */
 907
 908        avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd);
 909
 910        if (orig->ae.xp_node) {
 911                rc = avc_xperms_populate(node, orig->ae.xp_node);
 912                if (rc) {
 913                        kmem_cache_free(avc_node_cachep, node);
 914                        goto out_unlock;
 915                }
 916        }
 917
 918        switch (event) {
 919        case AVC_CALLBACK_GRANT:
 920                node->ae.avd.allowed |= perms;
 921                if (node->ae.xp_node && (flags & AVC_EXTENDED_PERMS))
 922                        avc_xperms_allow_perm(node->ae.xp_node, driver, xperm);
 923                break;
 924        case AVC_CALLBACK_TRY_REVOKE:
 925        case AVC_CALLBACK_REVOKE:
 926                node->ae.avd.allowed &= ~perms;
 927                break;
 928        case AVC_CALLBACK_AUDITALLOW_ENABLE:
 929                node->ae.avd.auditallow |= perms;
 930                break;
 931        case AVC_CALLBACK_AUDITALLOW_DISABLE:
 932                node->ae.avd.auditallow &= ~perms;
 933                break;
 934        case AVC_CALLBACK_AUDITDENY_ENABLE:
 935                node->ae.avd.auditdeny |= perms;
 936                break;
 937        case AVC_CALLBACK_AUDITDENY_DISABLE:
 938                node->ae.avd.auditdeny &= ~perms;
 939                break;
 940        case AVC_CALLBACK_ADD_XPERMS:
 941                avc_add_xperms_decision(node, xpd);
 942                break;
 943        }
 944        avc_node_replace(avc, node, orig);
 945out_unlock:
 946        spin_unlock_irqrestore(lock, flag);
 947out:
 948        return rc;
 949}
 950
 951/**
 952 * avc_flush - Flush the cache
 953 */
 954static void avc_flush(struct selinux_avc *avc)
 955{
 956        struct hlist_head *head;
 957        struct avc_node *node;
 958        spinlock_t *lock;
 959        unsigned long flag;
 960        int i;
 961
 962        for (i = 0; i < AVC_CACHE_SLOTS; i++) {
 963                head = &avc->avc_cache.slots[i];
 964                lock = &avc->avc_cache.slots_lock[i];
 965
 966                spin_lock_irqsave(lock, flag);
 967                /*
 968                 * With preemptable RCU, the outer spinlock does not
 969                 * prevent RCU grace periods from ending.
 970                 */
 971                rcu_read_lock();
 972                hlist_for_each_entry(node, head, list)
 973                        avc_node_delete(avc, node);
 974                rcu_read_unlock();
 975                spin_unlock_irqrestore(lock, flag);
 976        }
 977}
 978
 979/**
 980 * avc_ss_reset - Flush the cache and revalidate migrated permissions.
 981 * @seqno: policy sequence number
 982 */
 983int avc_ss_reset(struct selinux_avc *avc, u32 seqno)
 984{
 985        struct avc_callback_node *c;
 986        int rc = 0, tmprc;
 987
 988        avc_flush(avc);
 989
 990        for (c = avc_callbacks; c; c = c->next) {
 991                if (c->events & AVC_CALLBACK_RESET) {
 992                        tmprc = c->callback(AVC_CALLBACK_RESET);
 993                        /* save the first error encountered for the return
 994                           value and continue processing the callbacks */
 995                        if (!rc)
 996                                rc = tmprc;
 997                }
 998        }
 999
1000        avc_latest_notif_update(avc, seqno, 0);
1001        return rc;
1002}
1003
1004/*
1005 * Slow-path helper function for avc_has_perm_noaudit,
1006 * when the avc_node lookup fails. We get called with
1007 * the RCU read lock held, and need to return with it
1008 * still held, but drop it for the security compute.
1009 *
1010 * Don't inline this, since it's the slow-path and just
1011 * results in a bigger stack frame.
1012 */
1013static noinline
1014struct avc_node *avc_compute_av(struct selinux_state *state,
1015                                u32 ssid, u32 tsid,
1016                                u16 tclass, struct av_decision *avd,
1017                                struct avc_xperms_node *xp_node)
1018{
1019        rcu_read_unlock();
1020        INIT_LIST_HEAD(&xp_node->xpd_head);
1021        security_compute_av(state, ssid, tsid, tclass, avd, &xp_node->xp);
1022        rcu_read_lock();
1023        return avc_insert(state->avc, ssid, tsid, tclass, avd, xp_node);
1024}
1025
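    /*
     * Called when some requested permissions were denied: fail with -EACCES
     * for AVC_STRICT checks or when SELinux is enforcing and the domain is
     * not permissive; otherwise grant the access and add the permissions to
     * the cached entry via avc_update_node().
     */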
1026static noinline int avc_denied(struct selinux_state *state,
1027                               u32 ssid, u32 tsid,
1028                               u16 tclass, u32 requested,
1029                               u8 driver, u8 xperm, unsigned int flags,
1030                               struct av_decision *avd)
1031{
1032        if (flags & AVC_STRICT)
1033                return -EACCES;
1034
1035        if (enforcing_enabled(state) &&
1036            !(avd->flags & AVD_FLAGS_PERMISSIVE))
1037                return -EACCES;
1038
1039        avc_update_node(state->avc, AVC_CALLBACK_GRANT, requested, driver,
1040                        xperm, ssid, tsid, tclass, avd->seqno, NULL, flags);
1041        return 0;
1042}
1043
1044/*
1045 * The avc extended permissions logic adds an additional 256 bits of
1046 * permissions to an avc node when extended permissions for that node are
1047 * specified in the avtab. If the additional 256 permissions are not adequate,
1048 * as is the case with ioctls, then multiple sets may be chained together and the
1049 * driver field is used to specify which set contains the permission.
1050 */
1051int avc_has_extended_perms(struct selinux_state *state,
1052                           u32 ssid, u32 tsid, u16 tclass, u32 requested,
1053                           u8 driver, u8 xperm, struct common_audit_data *ad)
1054{
1055        struct avc_node *node;
1056        struct av_decision avd;
1057        u32 denied;
1058        struct extended_perms_decision local_xpd;
1059        struct extended_perms_decision *xpd = NULL;
1060        struct extended_perms_data allowed;
1061        struct extended_perms_data auditallow;
1062        struct extended_perms_data dontaudit;
1063        struct avc_xperms_node local_xp_node;
1064        struct avc_xperms_node *xp_node;
1065        int rc = 0, rc2;
1066
1067        xp_node = &local_xp_node;
1068        if (WARN_ON(!requested))
1069                return -EACCES;
1070
1071        rcu_read_lock();
1072
1073        node = avc_lookup(state->avc, ssid, tsid, tclass);
1074        if (unlikely(!node)) {
1075                node = avc_compute_av(state, ssid, tsid, tclass, &avd, xp_node);
1076        } else {
1077                memcpy(&avd, &node->ae.avd, sizeof(avd));
1078                xp_node = node->ae.xp_node;
1079        }
1080        /* if extended permissions are not defined, only consider av_decision */
1081        if (!xp_node || !xp_node->xp.len)
1082                goto decision;
1083
1084        local_xpd.allowed = &allowed;
1085        local_xpd.auditallow = &auditallow;
1086        local_xpd.dontaudit = &dontaudit;
1087
1088        xpd = avc_xperms_decision_lookup(driver, xp_node);
1089        if (unlikely(!xpd)) {
1090                /*
1091                 * Compute the extended_perms_decision only if the driver
1092                 * is flagged
1093                 */
1094                if (!security_xperm_test(xp_node->xp.drivers.p, driver)) {
1095                        avd.allowed &= ~requested;
1096                        goto decision;
1097                }
1098                rcu_read_unlock();
1099                security_compute_xperms_decision(state, ssid, tsid, tclass,
1100                                                 driver, &local_xpd);
1101                rcu_read_lock();
1102                avc_update_node(state->avc, AVC_CALLBACK_ADD_XPERMS, requested,
1103                                driver, xperm, ssid, tsid, tclass, avd.seqno,
1104                                &local_xpd, 0);
1105        } else {
1106                avc_quick_copy_xperms_decision(xperm, &local_xpd, xpd);
1107        }
1108        xpd = &local_xpd;
1109
1110        if (!avc_xperms_has_perm(xpd, xperm, XPERMS_ALLOWED))
1111                avd.allowed &= ~requested;
1112
1113decision:
1114        denied = requested & ~(avd.allowed);
1115        if (unlikely(denied))
1116                rc = avc_denied(state, ssid, tsid, tclass, requested,
1117                                driver, xperm, AVC_EXTENDED_PERMS, &avd);
1118
1119        rcu_read_unlock();
1120
1121        rc2 = avc_xperms_audit(state, ssid, tsid, tclass, requested,
1122                        &avd, xpd, xperm, rc, ad);
1123        if (rc2)
1124                return rc2;
1125        return rc;
1126}
1127
1128/**
1129 * avc_has_perm_noaudit - Check permissions but perform no auditing.
1130 * @ssid: source security identifier
1131 * @tsid: target security identifier
1132 * @tclass: target security class
1133 * @requested: requested permissions, interpreted based on @tclass
1134 * @flags:  AVC_STRICT, AVC_NONBLOCKING, or 0
1135 * @avd: access vector decisions
1136 *
1137 * Check the AVC to determine whether the @requested permissions are granted
1138 * for the SID pair (@ssid, @tsid), interpreting the permissions
1139 * based on @tclass, and call the security server on a cache miss to obtain
1140 * a new decision and add it to the cache.  Return a copy of the decisions
1141 * in @avd.  Return %0 if all @requested permissions are granted,
1142 * -%EACCES if any permissions are denied, or another -errno upon
1143 * other errors.  This function is typically called by avc_has_perm(),
1144 * but may also be called directly to separate permission checking from
1145 * auditing, e.g. in cases where a lock must be held for the check but
1146 * should be released for the auditing.
1147 */
1148inline int avc_has_perm_noaudit(struct selinux_state *state,
1149                                u32 ssid, u32 tsid,
1150                                u16 tclass, u32 requested,
1151                                unsigned int flags,
1152                                struct av_decision *avd)
1153{
1154        struct avc_node *node;
1155        struct avc_xperms_node xp_node;
1156        int rc = 0;
1157        u32 denied;
1158
1159        if (WARN_ON(!requested))
1160                return -EACCES;
1161
1162        rcu_read_lock();
1163
1164        node = avc_lookup(state->avc, ssid, tsid, tclass);
1165        if (unlikely(!node))
1166                node = avc_compute_av(state, ssid, tsid, tclass, avd, &xp_node);
1167        else
1168                memcpy(avd, &node->ae.avd, sizeof(*avd));
1169
1170        denied = requested & ~(avd->allowed);
1171        if (unlikely(denied))
1172                rc = avc_denied(state, ssid, tsid, tclass, requested, 0, 0,
1173                                flags, avd);
1174
1175        rcu_read_unlock();
1176        return rc;
1177}
1178
1179/**
1180 * avc_has_perm - Check permissions and perform any appropriate auditing.
1181 * @ssid: source security identifier
1182 * @tsid: target security identifier
1183 * @tclass: target security class
1184 * @requested: requested permissions, interpreted based on @tclass
1185 * @auditdata: auxiliary audit data
1186 *
1187 * Check the AVC to determine whether the @requested permissions are granted
1188 * for the SID pair (@ssid, @tsid), interpreting the permissions
1189 * based on @tclass, and call the security server on a cache miss to obtain
1190 * a new decision and add it to the cache.  Audit the granting or denial of
1191 * permissions in accordance with the policy.  Return %0 if all @requested
1192 * permissions are granted, -%EACCES if any permissions are denied, or
1193 * another -errno upon other errors.
1194 */
1195int avc_has_perm(struct selinux_state *state, u32 ssid, u32 tsid, u16 tclass,
1196                 u32 requested, struct common_audit_data *auditdata)
1197{
1198        struct av_decision avd;
1199        int rc, rc2;
1200
1201        rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested, 0,
1202                                  &avd);
1203
1204        rc2 = avc_audit(state, ssid, tsid, tclass, requested, &avd, rc,
1205                        auditdata, 0);
1206        if (rc2)
1207                return rc2;
1208        return rc;
1209}
1210
1211u32 avc_policy_seqno(struct selinux_state *state)
1212{
1213        return state->avc->avc_cache.latest_notif;
1214}
1215
1216void avc_disable(void)
1217{
1218        /*
1219         * If you are looking at this because you have realized that we are
1220         * not destroying the avc_node_cachep it might be easy to fix, but
1221         * I don't know the memory barrier semantics well enough to know.  It's
1222         * possible that some other task dereferenced security_ops when
1223         * it still pointed to selinux operations.  If that is the case it's
1224         * possible that it is about to use the avc and is about to need the
1225         * avc_node_cachep.  I know I could wrap the security.c security_ops call
1226         * in an rcu_lock, but seriously, it's not worth it.  Instead I just flush
1227         * the cache and get that memory back.
1228         */
1229        if (avc_node_cachep) {
1230                avc_flush(selinux_state.avc);
1231                /* kmem_cache_destroy(avc_node_cachep); */
1232        }
1233}
1234