linux/arch/x86/kernel/apic/x2apic_phys.c
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/dmar.h>

#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>

int x2apic_phys;

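/*
 * Early parameter: "x2apic_phys" on the kernel command line requests the
 * physical destination mode x2APIC driver.
 */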
static int set_x2apic_phys_mode(char *arg)
{
        x2apic_phys = 1;
        return 0;
}
early_param("x2apic_phys", set_x2apic_phys_mode);

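/*
 * ACPI/MADT probe hook: select this driver only when physical mode was
 * requested on the command line and the CPU is running with x2APIC enabled.
 */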
static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
        if (x2apic_phys)
                return x2apic_enabled();
        else
                return 0;
}

/*
 * We need to use more than CPU 0 because we need more vectors
 * when MSI-X is used.
 */
static const struct cpumask *x2apic_target_cpus(void)
{
        return cpu_online_mask;
}

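/*
 * Vectors are allocated per CPU in physical mode, so the allocation
 * domain for a CPU contains just that CPU.
 */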
static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
        cpumask_clear(retmask);
        cpumask_set_cpu(cpu, retmask);
}

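/*
 * Build the ICR value for a fixed IPI and issue it with a single MSR
 * write; in x2APIC mode the destination APIC ID occupies the upper
 * 32 bits of the 64-bit ICR.
 */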
static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
                                   unsigned int dest)
{
        unsigned long cfg;

        cfg = __prepare_ICR(0, vector, dest);

        /*
         * send the IPI.
         */
        native_x2apic_icr_write(cfg, apicid);
}

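/*
 * Send one directed IPI to each CPU in the mask.  The fence orders
 * earlier memory operations ahead of the ICR MSR writes, and interrupts
 * stay disabled for the whole loop.
 */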
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
        unsigned long query_cpu;
        unsigned long flags;

        x2apic_wrmsr_fence();

        local_irq_save(flags);
        for_each_cpu(query_cpu, mask) {
                __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
                                       vector, APIC_DEST_PHYSICAL);
        }
        local_irq_restore(flags);
}

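/*
 * Same as x2apic_send_IPI_mask(), but skip the sending CPU if it is in
 * the mask.
 */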
static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
        unsigned long this_cpu = smp_processor_id();
        unsigned long query_cpu;
        unsigned long flags;

        x2apic_wrmsr_fence();

        local_irq_save(flags);
        for_each_cpu(query_cpu, mask) {
                if (query_cpu != this_cpu)
                        __x2apic_send_IPI_dest(
                                per_cpu(x86_cpu_to_apicid, query_cpu),
                                vector, APIC_DEST_PHYSICAL);
        }
        local_irq_restore(flags);
}

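/* IPI every online CPU except the one we are currently running on. */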
static void x2apic_send_IPI_allbutself(int vector)
{
        unsigned long this_cpu = smp_processor_id();
        unsigned long query_cpu;
        unsigned long flags;

        x2apic_wrmsr_fence();

        local_irq_save(flags);
        for_each_online_cpu(query_cpu) {
                if (query_cpu == this_cpu)
                        continue;
                __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
                                       vector, APIC_DEST_PHYSICAL);
        }
        local_irq_restore(flags);
}

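/* IPI every online CPU, including the sender. */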
static void x2apic_send_IPI_all(int vector)
{
        x2apic_send_IPI_mask(cpu_online_mask, vector);
}

static int x2apic_apic_id_registered(void)
{
        return 1;
}

static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
        int cpu = cpumask_first(cpumask);

        if ((unsigned)cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_apicid, cpu);
        else
                return BAD_APICID;
}

static unsigned int
x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                              const struct cpumask *andmask)
{
        int cpu;

        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
        for_each_cpu_and(cpu, cpumask, andmask) {
                if (cpumask_test_cpu(cpu, cpu_online_mask))
                        break;
        }

        if (cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_apicid, cpu);

        return BAD_APICID;
}

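/*
 * x2APIC IDs are full 32-bit values; no masking or shifting is needed
 * to convert between register contents and APIC IDs.
 */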
static unsigned int x2apic_phys_get_apic_id(unsigned long x)
{
        return x;
}

static unsigned long set_apic_id(unsigned int id)
{
        return id;
}

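/*
 * The physical package ID is the initial APIC ID with the low
 * (core/thread) bits shifted out.
 */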
static int x2apic_phys_pkg_id(int initial_apicid, int index_msb)
{
        return initial_apicid >> index_msb;
}

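/*
 * x2APIC has a dedicated SELF IPI register, so interrupting the local
 * CPU does not require an ICR write.
 */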
static void x2apic_send_IPI_self(int vector)
{
        apic_write(APIC_SELF_IPI, vector);
}

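/*
 * The logical destination register is not used in physical mode, so
 * there is nothing to initialize here.
 */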
static void init_x2apic_ldr(void)
{
}

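/*
 * APIC driver operations for physical-mode x2APIC.  Callbacks that are
 * not needed in this mode are left NULL.
 */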
struct apic apic_x2apic_phys = {

        .name                           = "physical x2apic",
        .probe                          = NULL,
        .acpi_madt_oem_check            = x2apic_acpi_madt_oem_check,
        .apic_id_registered             = x2apic_apic_id_registered,

        .irq_delivery_mode              = dest_Fixed,
        .irq_dest_mode                  = 0, /* physical */

        .target_cpus                    = x2apic_target_cpus,
        .disable_esr                    = 0,
        .dest_logical                   = 0,
        .check_apicid_used              = NULL,
        .check_apicid_present           = NULL,

        .vector_allocation_domain       = x2apic_vector_allocation_domain,
        .init_apic_ldr                  = init_x2apic_ldr,

        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .multi_timer_check              = NULL,
        .apicid_to_node                 = NULL,
        .cpu_to_logical_apicid          = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .setup_portio_remap             = NULL,
        .check_phys_apicid_present      = default_check_phys_apicid_present,
        .enable_apic_mode               = NULL,
        .phys_pkg_id                    = x2apic_phys_pkg_id,
        .mps_oem_check                  = NULL,

        .get_apic_id                    = x2apic_phys_get_apic_id,
        .set_apic_id                    = set_apic_id,
        .apic_id_mask                   = 0xFFFFFFFFu,

        .cpu_mask_to_apicid             = x2apic_cpu_mask_to_apicid,
        .cpu_mask_to_apicid_and         = x2apic_cpu_mask_to_apicid_and,

        .send_IPI_mask                  = x2apic_send_IPI_mask,
        .send_IPI_mask_allbutself       = x2apic_send_IPI_mask_allbutself,
        .send_IPI_allbutself            = x2apic_send_IPI_allbutself,
        .send_IPI_all                   = x2apic_send_IPI_all,
        .send_IPI_self                  = x2apic_send_IPI_self,

        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
        .wait_for_init_deassert         = NULL,
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = NULL,

        .read                           = native_apic_msr_read,
        .write                          = native_apic_msr_write,
        .icr_read                       = native_x2apic_icr_read,
        .icr_write                      = native_x2apic_icr_write,
        .wait_icr_idle                  = native_x2apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_x2apic_wait_icr_idle,
};