/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_RANDOMIZE_KSTACK_H
#define _LINUX_RANDOMIZE_KSTACK_H

#include <linux/bitops.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/percpu-defs.h>

   9DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
  10                         randomize_kstack_offset);
  11DECLARE_PER_CPU(u32, kstack_offset);
  12
  13/*
  14 * Do not use this anywhere else in the kernel. This is used here because
  15 * it provides an arch-agnostic way to grow the stack with correct
  16 * alignment. Also, since this use is being explicitly masked to a max of
  17 * 10 bits, stack-clash style attacks are unlikely. For more details see
  18 * "VLAs" in Documentation/process/deprecated.rst
  19 */
  20void *__builtin_alloca(size_t size);
  21/*
  22 * Use, at most, 10 bits of entropy. We explicitly cap this to keep the
  23 * "VLA" from being unbounded (see above). 10 bits leaves enough room for
  24 * per-arch offset masks to reduce entropy (by removing higher bits, since
  25 * high entropy may overly constrain usable stack space), and for
  26 * compiler/arch-specific stack alignment to remove the lower bits.
  27 */
  28#define KSTACK_OFFSET_MAX(x)    ((x) & 0x3FF)
  29
/*
 * These macros must be used during syscall entry when interrupts and
 * preempt are disabled, and after user registers have been stored to
 * the stack.
 */
  35#define add_random_kstack_offset() do {                                 \
  36        if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
  37                                &randomize_kstack_offset)) {            \
  38                u32 offset = raw_cpu_read(kstack_offset);               \
  39                u8 *ptr = __builtin_alloca(KSTACK_OFFSET_MAX(offset));  \
  40                /* Keep allocation even after "ptr" loses scope. */     \
  41                asm volatile("" :: "r"(ptr) : "memory");                \
  42        }                                                               \
  43} while (0)
  44
  45#define choose_random_kstack_offset(rand) do {                          \
  46        if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
  47                                &randomize_kstack_offset)) {            \
  48                u32 offset = raw_cpu_read(kstack_offset);               \
  49                offset ^= (rand);                                       \
  50                raw_cpu_write(kstack_offset, offset);                   \
  51        }                                                               \
  52} while (0)

#endif