linux/kernel/scs.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Shadow Call Stack support.
 *
 * Copyright (C) 2019 Google LLC
 */

#include <linux/cpuhotplug.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/scs.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>

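/*
 * The helpers used throughout this file come from include/linux/scs.h.
 * In abridged form (illustrative only; the exact definitions are version
 * and config dependent):
 *
 *	SCS_SIZE	size in bytes of one shadow call stack
 *	GFP_SCS		GFP flags used for the backing allocation
 *	SCS_END_MAGIC	an illegal pointer value marking the end of the stack
 *	__scs_magic(s)	pointer to the last unsigned long slot of the stack,
 *			roughly (unsigned long *)(s + SCS_SIZE) - 1
 *	task_scs(tsk)	the task's shadow stack base
 *	task_scs_sp(tsk)	the task's current shadow stack pointer
 *	task_scs_end_corrupted(tsk)	true if the magic slot was clobbered
 *
 * __scs_account() below charges (account = 1) or uncharges (account = -1)
 * one shadow stack against the per-node NR_KERNEL_SCS_KB vmstat counter,
 * in kilobytes.
 */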
static void __scs_account(void *s, int account)
{
        struct page *scs_page = vmalloc_to_page(s);

        mod_node_page_state(page_pgdat(scs_page), NR_KERNEL_SCS_KB,
                            account * (SCS_SIZE / SZ_1K));
}

/* Matches NR_CACHED_STACKS for VMAP_STACK */
#define NR_CACHED_SCS 2
static DEFINE_PER_CPU(void *, scs_cache[NR_CACHED_SCS]);

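/*
 * Fast path: claim a recently freed stack from this CPU's cache, if any.
 * this_cpu_xchg() atomically takes the entry and leaves NULL behind; a hit
 * is unpoisoned and cleared before reuse. Slow path: a fresh vmalloc
 * allocation from [VMALLOC_START, VMALLOC_END) on the requested node.
 */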
static void *__scs_alloc(int node)
{
        int i;
        void *s;

        for (i = 0; i < NR_CACHED_SCS; i++) {
                s = this_cpu_xchg(scs_cache[i], NULL);
                if (s) {
                        kasan_unpoison_vmalloc(s, SCS_SIZE);
                        memset(s, 0, SCS_SIZE);
                        return s;
                }
        }

        return __vmalloc_node_range(SCS_SIZE, 1, VMALLOC_START, VMALLOC_END,
                                    GFP_SCS, PAGE_KERNEL, 0, node,
                                    __builtin_return_address(0));
}

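/*
 * scs_alloc() returns a stack ready for use: the last slot is set to
 * SCS_END_MAGIC so that scs_release() can detect an overflow, the whole
 * region is poisoned so KASAN flags any instrumented access to it, and the
 * allocation is added to the NR_KERNEL_SCS_KB accounting.
 */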
void *scs_alloc(int node)
{
        void *s;

        s = __scs_alloc(node);
        if (!s)
                return NULL;

        *__scs_magic(s) = SCS_END_MAGIC;

        /*
         * Poison the allocation to catch unintentional accesses to
         * the shadow stack when KASAN is enabled.
         */
        kasan_poison_vmalloc(s, SCS_SIZE);
        __scs_account(s, 1);
        return s;
}

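/*
 * Layout of an allocation returned by scs_alloc(); the shadow stack grows
 * upwards from the base, one unsigned long per saved return address:
 *
 *	s                                                  s + SCS_SIZE
 *	| saved return addresses ->   |  0 ... 0  | SCS_END_MAGIC |
 *	^                                          ^
 *	task_scs(tsk), task_scs_sp(tsk) at start   __scs_magic(s)
 *
 * scs_free() undoes scs_alloc(): it drops the accounting and tries to park
 * the stack in an empty (NULL) slot of this CPU's cache; only when all
 * slots are taken is the allocation handed back with vfree_atomic().
 */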
void scs_free(void *s)
{
        int i;

        __scs_account(s, -1);

        /*
         * We cannot sleep as this can be called in interrupt context,
         * so use this_cpu_cmpxchg to update the cache, and vfree_atomic
         * to free the stack.
         */

        for (i = 0; i < NR_CACHED_SCS; i++)
                if (this_cpu_cmpxchg(scs_cache[i], 0, s) == NULL)
                        return;

        vfree_atomic(s);
}

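/*
 * scs_cleanup() frees whatever is left in a CPU's cache when that CPU goes
 * offline; plain vfree() is fine here because the hotplug callback runs in
 * a sleepable context. scs_init() merely registers it as the teardown
 * callback of a dynamic CPUHP_BP_PREPARE_DYN state (no startup callback).
 */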
static int scs_cleanup(unsigned int cpu)
{
        int i;
        void **cache = per_cpu_ptr(scs_cache, cpu);

        for (i = 0; i < NR_CACHED_SCS; i++) {
                vfree(cache[i]);
                cache[i] = NULL;
        }

        return 0;
}

void __init scs_init(void)
{
        cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "scs:scs_cache", NULL,
                          scs_cleanup);
}

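/*
 * scs_prepare() sets up the shadow stack for a new task: both the base and
 * the current shadow stack pointer point at the freshly allocated region.
 * Returns -ENOMEM if no stack could be allocated, so the caller can fail
 * task creation.
 */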
int scs_prepare(struct task_struct *tsk, int node)
{
        void *s = scs_alloc(node);

        if (!s)
                return -ENOMEM;

        task_scs(tsk) = task_scs_sp(tsk) = s;
        return 0;
}

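/*
 * Illustrative sketch (not part of the kernel sources): how scs_prepare()
 * and scs_release() are expected to pair up over a task's lifetime. The
 * function name below is hypothetical; the real call sites live in the
 * task creation and teardown paths.
 */
static int __maybe_unused scs_lifetime_sketch(struct task_struct *tsk,
                                              int node)
{
        int ret;

        /* On task creation: allocate and install the shadow stack. */
        ret = scs_prepare(tsk, node);
        if (ret)
                return ret;     /* -ENOMEM: task creation must bail out */

        /*
         * While the task runs, compiler-generated prologues/epilogues save
         * and restore return addresses through task_scs_sp().
         */

        /* On task teardown: warn on corruption, log usage, then free. */
        scs_release(tsk);
        return 0;
}

/*
 * scs_check_usage() is a debugging aid (CONFIG_DEBUG_STACK_USAGE): it walks
 * the exiting task's shadow stack from the base until the first empty slot
 * to estimate how much was used, and publishes a new record via a lock-free
 * cmpxchg_relaxed() loop on the static 'highest' watermark.
 */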
static void scs_check_usage(struct task_struct *tsk)
{
        static unsigned long highest;

        unsigned long *p, prev, curr = highest, used = 0;

        if (!IS_ENABLED(CONFIG_DEBUG_STACK_USAGE))
                return;

        for (p = task_scs(tsk); p < __scs_magic(tsk); ++p) {
                if (!READ_ONCE_NOCHECK(*p))
                        break;
                used += sizeof(*p);
        }

        while (used > curr) {
                prev = cmpxchg_relaxed(&highest, curr, used);

                if (prev == curr) {
                        pr_info("%s (%d): highest shadow stack usage: %lu bytes\n",
                                tsk->comm, task_pid_nr(tsk), used);
                        break;
                }

                curr = prev;
        }
}
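/*
 * The 'highest' update above is the usual lock-free maximum pattern; an
 * isolated sketch (illustrative only, any naturally aligned word works the
 * same way):
 */
static void __maybe_unused scs_update_max_sketch(unsigned long *max,
                                                 unsigned long val)
{
        unsigned long cur = READ_ONCE(*max);
        unsigned long old;

        while (val > cur) {
                /* Try to install val; learn the current value if we lost. */
                old = cmpxchg_relaxed(max, cur, val);
                if (old == cur)
                        break;          /* we won the race, *max == val */
                cur = old;              /* somebody raced us, re-check */
        }
}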

void scs_release(struct task_struct *tsk)
{
        void *s = task_scs(tsk);

        if (!s)
                return;

        WARN(task_scs_end_corrupted(tsk),
             "corrupted shadow stack detected when freeing task\n");
        scs_check_usage(tsk);
        scs_free(s);
}