linux/arch/x86/include/asm/stackprotector.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * GCC stack protector support.
 *
 * Stack protector works by putting a predefined pattern at the start of
 * the stack frame and verifying that it hasn't been overwritten when
 * returning from the function.  The pattern is called the stack canary
 * and unfortunately gcc requires it to be at a fixed offset from %gs.
 * On x86_64, the offset is 40 bytes and on x86_32 it is 20 bytes.  x86_64
 * and x86_32 use segment registers differently and thus handle this
 * requirement differently.
 *
 * On x86_64, %gs is shared by the percpu area and the stack canary.  All
 * percpu symbols are zero based and %gs points to the base of the percpu
 * area.  The first occupant of the percpu area is always
 * fixed_percpu_data, which contains the stack_canary at offset 40.  Userland
 * %gs is always saved and restored on kernel entry and exit using
 * swapgs, so stack protector doesn't add any complexity there.
 *
 * On x86_32, it's slightly more complicated.  As on x86_64, %gs is
 * used for userland TLS.  Unfortunately, some processors are much
 * slower at loading segment registers with a different value when
 * entering and leaving the kernel, so the kernel uses %fs for the percpu
 * area and manages %gs lazily so that %gs is switched only when
 * necessary, usually during task switch.
 *
 * As gcc requires the stack canary at %gs:20, %gs can't be managed
 * lazily if stack protector is enabled, so the kernel saves and
 * restores userland %gs on kernel entry and exit.  This behavior is
 * controlled by CONFIG_X86_32_LAZY_GS and accessors are defined in
 * system.h to hide the details.
 */
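
/*
 * Illustration (not from the original header): with the canary at a
 * fixed %gs offset, the compiler-generated prologue/epilogue of a
 * protected function on the x86_64 kernel looks roughly like the
 * sketch below; the exact sequence varies by compiler version.
 *
 *	movq	%gs:40, %rax		# prologue: copy the canary
 *	movq	%rax, -8(%rbp)		# into this frame's slot
 *	...				# function body
 *	movq	-8(%rbp), %rax		# epilogue: reload the slot
 *	xorq	%gs:40, %rax		# compare with the live canary
 *	jne	.Lstack_chk_fail	# mismatch -> __stack_chk_fail()
 */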

#ifndef _ASM_STACKPROTECTOR_H
#define _ASM_STACKPROTECTOR_H 1

#ifdef CONFIG_STACKPROTECTOR

#include <asm/tsc.h>
#include <asm/processor.h>
#include <asm/percpu.h>
#include <asm/desc.h>

#include <linux/random.h>
#include <linux/sched.h>

/*
 * 24 byte read-only segment initializer for the stack canary.  The
 * linker can't handle the address bit shifting.  The address will be
 * set in head_32 for the boot CPU and in setup_per_cpu_areas() for
 * the others.
 */
#define GDT_STACK_CANARY_INIT                                           \
        [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),

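/*
 * Decoding the initializer above, for illustration: flags 0x4090
 * describe a present, DPL-0, read-only 32-bit data segment, and the
 * limit of 0x18 covers the 24-byte struct stack_canary (20 bytes of
 * padding followed by the canary word), which puts the canary itself
 * at %gs:20 where gcc expects it.
 */
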
/*
 * Initialize the stackprotector canary value.
 *
 * NOTE: this must only be called from functions that never return
 * and it must always be inlined.
 *
 * In addition, it should be called from a compilation unit for which
 * stack protector is disabled. Alternatively, the caller should not end
 * with a function call which gets tail-call optimized as that would
 * lead to checking a modified canary value.
 */
static __always_inline void boot_init_stack_canary(void)
{
        u64 canary;
        u64 tsc;

#ifdef CONFIG_X86_64
        BUILD_BUG_ON(offsetof(struct fixed_percpu_data, stack_canary) != 40);
#endif
        /*
         * We use both the random pool and the current TSC as sources
         * of randomness. The TSC only matters for very early init;
         * there it already has some randomness on most systems. Later
         * on during boot the random pool has true entropy too.
         */
        get_random_bytes(&canary, sizeof(canary));
        tsc = rdtsc();
        canary += tsc + (tsc << 32UL);
        canary &= CANARY_MASK;

        current->stack_canary = canary;
#ifdef CONFIG_X86_64
        this_cpu_write(fixed_percpu_data.stack_canary, canary);
#else
        this_cpu_write(stack_canary.canary, canary);
#endif
}
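
/*
 * Usage note (an illustration, not part of the original header): the
 * boot CPU calls this early in start_kernel(), which never returns.
 * The caller's own canary slot was written with the old value, so a
 * tail-call-optimized call at the end of the caller would run the
 * epilogue check against the new value and trip __stack_chk_fail().
 */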

static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
{
#ifdef CONFIG_X86_64
        per_cpu(fixed_percpu_data.stack_canary, cpu) = idle->stack_canary;
#else
        per_cpu(stack_canary.canary, cpu) = idle->stack_canary;
#endif
}
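
/*
 * The helper above pre-seeds a CPU's percpu canary with the canary of
 * the idle task it is about to run, so the CPU can execute
 * stack-protected C code before it has had a chance to run
 * boot_init_stack_canary() itself.
 */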

static inline void setup_stack_canary_segment(int cpu)
{
#ifdef CONFIG_X86_32
        unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu);
        struct desc_struct *gdt_table = get_cpu_gdt_rw(cpu);
        struct desc_struct desc;

        desc = gdt_table[GDT_ENTRY_STACK_CANARY];
        set_desc_base(&desc, canary);
        write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S);
#endif
}
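
/*
 * Note on the helper above: rewriting the GDT entry alone is not
 * enough, because the CPU caches the descriptor in the hidden part of
 * the segment register when it is loaded.  load_stack_canary_segment()
 * below must therefore reload %gs before the new base address takes
 * effect.
 */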

static inline void load_stack_canary_segment(void)
{
#ifdef CONFIG_X86_32
        asm("mov %0, %%gs" : : "r" (__KERNEL_STACK_CANARY) : "memory");
#endif
}
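
/*
 * A minimal sketch of how the two helpers combine when bringing up a
 * CPU on x86_32 (hypothetical call site; the real ones live in the
 * per-CPU setup code):
 *
 *	setup_stack_canary_segment(cpu);	// point the GDT entry at
 *						// this CPU's canary area
 *	load_stack_canary_segment();		// reload %gs so the new
 *						// base is actually used
 */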

#else   /* STACKPROTECTOR */

#define GDT_STACK_CANARY_INIT

/* dummy boot_init_stack_canary() is defined in linux/stackprotector.h */

static inline void setup_stack_canary_segment(int cpu)
{ }

static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
{ }

static inline void load_stack_canary_segment(void)
{
#ifdef CONFIG_X86_32
        asm volatile ("mov %0, %%gs" : : "r" (0));
#endif
}

#endif  /* STACKPROTECTOR */
#endif  /* _ASM_STACKPROTECTOR_H */