/* linux/arch/x86/include/asm/ipi.h */
   1#ifndef _ASM_X86_IPI_H
   2#define _ASM_X86_IPI_H
   3
   4#ifdef CONFIG_X86_LOCAL_APIC
   5
   6/*
   7 * Copyright 2004 James Cleverdon, IBM.
   8 * Subject to the GNU Public License, v.2
   9 *
  10 * Generic APIC InterProcessor Interrupt code.
  11 *
  12 * Moved to include file by James Cleverdon from
  13 * arch/x86-64/kernel/smp.c
  14 *
  15 * Copyrights from kernel/smp.c:
  16 *
  17 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
  18 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
  19 * (c) 2002,2003 Andi Kleen, SuSE Labs.
  20 * Subject to the GNU Public License, v.2
  21 */
  22
  23#include <asm/hw_irq.h>
  24#include <asm/apic.h>
  25#include <asm/smp.h>
  26
  27/*
  28 * the following functions deal with sending IPIs between CPUs.
  29 *
  30 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
  31 */
  32
  33static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector,
  34                                         unsigned int dest)
  35{
  36        unsigned int icr = shortcut | dest;
  37
  38        switch (vector) {
  39        default:
  40                icr |= APIC_DM_FIXED | vector;
  41                break;
  42        case NMI_VECTOR:
  43                icr |= APIC_DM_NMI;
  44                break;
  45        }
  46        return icr;
  47}
  48
  49static inline int __prepare_ICR2(unsigned int mask)
  50{
  51        return SET_APIC_DEST_FIELD(mask);
  52}
  53
  54static inline void __xapic_wait_icr_idle(void)
  55{
  56        while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
  57                cpu_relax();
  58}
  59
  60static inline void
  61__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
  62{
  63        /*
  64         * Subtle. In the case of the 'never do double writes' workaround
  65         * we have to lock out interrupts to be safe.  As we don't care
  66         * of the value read we use an atomic rmw access to avoid costly
  67         * cli/sti.  Otherwise we use an even cheaper single atomic write
  68         * to the APIC.
  69         */
  70        unsigned int cfg;
  71
  72        /*
  73         * Wait for idle.
  74         */
  75        __xapic_wait_icr_idle();
  76
  77        /*
  78         * No need to touch the target chip field
  79         */
  80        cfg = __prepare_ICR(shortcut, vector, dest);
  81
  82        /*
  83         * Send the IPI. The write to APIC_ICR fires this off.
  84         */
  85        native_apic_mem_write(APIC_ICR, cfg);
  86}
  87
  88/*
  89 * This is used to send an IPI with no shorthand notation (the destination is
  90 * specified in bits 56 to 63 of the ICR).
  91 */
  92static inline void
  93 __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
  94{
  95        unsigned long cfg;
  96
  97        /*
  98         * Wait for idle.
  99         */
 100        if (unlikely(vector == NMI_VECTOR))
 101                safe_apic_wait_icr_idle();
 102        else
 103                __xapic_wait_icr_idle();
 104
 105        /*
 106         * prepare target chip field
 107         */
 108        cfg = __prepare_ICR2(mask);
 109        native_apic_mem_write(APIC_ICR2, cfg);
 110
 111        /*
 112         * program the ICR
 113         */
 114        cfg = __prepare_ICR(0, vector, dest);
 115
 116        /*
 117         * Send the IPI. The write to APIC_ICR fires this off.
 118         */
 119        native_apic_mem_write(APIC_ICR, cfg);
 120}
 121
 122extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
 123                                                 int vector);
 124extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
 125                                                         int vector);
 126
 127/* Avoid include hell */
 128#define NMI_VECTOR 0x02
 129
 130extern int no_broadcast;
 131
 132static inline void __default_local_send_IPI_allbutself(int vector)
 133{
 134        if (no_broadcast || vector == NMI_VECTOR)
 135                apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
 136        else
 137                __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical);
 138}
 139
 140static inline void __default_local_send_IPI_all(int vector)
 141{
 142        if (no_broadcast || vector == NMI_VECTOR)
 143                apic->send_IPI_mask(cpu_online_mask, vector);
 144        else
 145                __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical);
 146}
 147
 148#ifdef CONFIG_X86_32
 149extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
 150                                                         int vector);
 151extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
 152                                                         int vector);
 153extern void default_send_IPI_mask_logical(const struct cpumask *mask,
 154                                                 int vector);
 155extern void default_send_IPI_allbutself(int vector);
 156extern void default_send_IPI_all(int vector);
 157extern void default_send_IPI_self(int vector);
 158#endif
 159
 160#endif
 161
 162#endif /* _ASM_X86_IPI_H */
 163