linux/arch/x86/kernel/irq_32.c
// SPDX-License-Identifier: GPL-2.0
/*
 *      Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
#include <linux/mm.h>

#include <asm/apic.h>
#include <asm/nospec-branch.h>

#ifdef CONFIG_DEBUG_STACKOVERFLOW

int sysctl_panic_on_stackoverflow __read_mostly;

/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
        long sp;

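        /*
         * Mask the stack pointer with THREAD_SIZE - 1: the thread
         * stack is THREAD_SIZE-aligned, so this yields the byte
         * offset of %esp from the bottom of the stack. The stack
         * grows down, so that offset is the space still free.
         */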
        __asm__ __volatile__("andl %%esp,%0" :
                             "=r" (sp) : "0" (THREAD_SIZE - 1));

        return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
        printk(KERN_WARNING "low stack detected by irq handler\n");
        dump_stack();
        if (sysctl_panic_on_stackoverflow)
                panic("low stack detected by irq handler - check messages\n");
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
DEFINE_PER_CPU(struct irq_stack *, softirq_stack_ptr);

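/*
 * Call func on the stack whose top is passed in. The xchgl swaps %esp
 * with the new stack pointer, parking the old %esp in %ebx so the final
 * movl can restore it; CALL_NOSPEC is the retpoline-safe indirect call
 * through the [thunk_target] operand.
 */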
static void call_on_stack(void *func, void *stack)
{
        asm volatile("xchgl     %%ebx,%%esp     \n"
                     CALL_NOSPEC
                     "movl      %%ebx,%%esp     \n"
                     : "=b" (stack)
                     : "0" (stack),
                       [thunk_target] "D"(func)
                     : "memory", "cc", "edx", "ecx", "eax");
}

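/*
 * Mask off the low bits of %esp to find the start of the
 * THREAD_SIZE-aligned stack we are currently running on.
 */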
static inline void *current_stack(void)
{
        return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
}

static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
{
        struct irq_stack *curstk, *irqstk;
        u32 *isp, *prev_esp, arg1;

        curstk = (struct irq_stack *) current_stack();
        irqstk = __this_cpu_read(hardirq_stack_ptr);

        /*
         * this is where we switch to the IRQ stack. However, if we are
         * already using the IRQ stack (because we interrupted a hardirq
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all)
         */
        if (unlikely(curstk == irqstk))
                return 0;

        isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

        /* Save the next esp at the bottom of the stack */
        prev_esp = (u32 *)irqstk;
        *prev_esp = current_stack_pointer;

        if (unlikely(overflow))
                call_on_stack(print_stack_overflow, isp);

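        /*
         * Switch to the irq stack and call desc->handle_irq(desc).
         * With -mregparm=3 the first argument travels in %eax, hence
         * desc is passed via the "0"/"=a" operand pair; CALL_NOSPEC
         * again makes the indirect call retpoline-safe.
         */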
        asm volatile("xchgl     %%ebx,%%esp     \n"
                     CALL_NOSPEC
                     "movl      %%ebx,%%esp     \n"
                     : "=a" (arg1), "=b" (isp)
                     :  "0" (desc),   "1" (isp),
                        [thunk_target] "D" (desc->handle_irq)
                     : "memory", "cc", "ecx");
        return 1;
}

/*
 * Allocate per-cpu stacks for hardirq and softirq processing
 */
int irq_init_percpu_irqstack(unsigned int cpu)
{
        int node = cpu_to_node(cpu);
        struct page *ph, *ps;

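        /* Nothing to do if this CPU's stacks were already allocated */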
        if (per_cpu(hardirq_stack_ptr, cpu))
                return 0;

        ph = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
        if (!ph)
                return -ENOMEM;
        ps = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
        if (!ps) {
                __free_pages(ph, THREAD_SIZE_ORDER);
                return -ENOMEM;
        }

        per_cpu(hardirq_stack_ptr, cpu) = page_address(ph);
        per_cpu(softirq_stack_ptr, cpu) = page_address(ps);
        return 0;
}

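/*
 * Run __do_softirq() on the dedicated per-CPU softirq stack instead of
 * whatever task stack we happen to be on, keeping softirq processing
 * from eating into the small 32-bit task stack.
 */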
void do_softirq_own_stack(void)
{
        struct irq_stack *irqstk;
        u32 *isp, *prev_esp;

        irqstk = __this_cpu_read(softirq_stack_ptr);

        /* build the stack frame on the softirq stack */
        isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

        /* Push the previous esp onto the stack */
        prev_esp = (u32 *)irqstk;
        *prev_esp = current_stack_pointer;

        call_on_stack(__do_softirq, isp);
}

bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
        int overflow = check_stack_overflow();

        if (IS_ERR_OR_NULL(desc))
                return false;

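        /*
         * If the interrupt arrived from user mode we are already at the
         * top of an otherwise empty kernel stack, so handling it there
         * is fine; execute_on_irq_stack() likewise declines to switch
         * when we are already running on the irq stack.
         */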
        if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) {
                if (unlikely(overflow))
                        print_stack_overflow();
                generic_handle_irq_desc(desc);
        }

        return true;
}