linux/arch/x86/kernel/apic/x2apic_cluster.c
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/dmar.h>

#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>

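/* Per-CPU cache of the logical APIC ID (LDR), set up by init_x2apic_ldr(). */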
static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);

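/* This driver is usable only when the CPU is already running in x2APIC mode. */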
static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
        return x2apic_enabled();
}

/*
 * We need to use more than CPU 0 because we need more vectors
 * when MSI-X is used.
 */
static const struct cpumask *x2apic_target_cpus(void)
{
        return cpu_online_mask;
}

/*
 * For now, each logical CPU is in its own vector allocation domain.
 */
static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
        cpumask_clear(retmask);
        cpumask_set_cpu(cpu, retmask);
}

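/* Compose an ICR value for one destination and write it to the x2APIC ICR. */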
static void
__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
{
        unsigned long cfg;

        cfg = __prepare_ICR(0, vector, dest);

        /* Send the IPI. */
        native_x2apic_icr_write(cfg, apicid);
}

/*
 * For now, we send the IPIs one by one to each CPU in the cpumask.
 * TBD: based on the cpumask, we could send the IPIs to a whole cluster
 * group at once. There are 16 CPUs in a cluster, so this would minimize
 * IPI register writes.
 */
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
        unsigned long query_cpu;
        unsigned long flags;

        x2apic_wrmsr_fence();

        local_irq_save(flags);
        for_each_cpu(query_cpu, mask) {
                __x2apic_send_IPI_dest(
                        per_cpu(x86_cpu_to_logical_apicid, query_cpu),
                        vector, apic->dest_logical);
        }
        local_irq_restore(flags);
}

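/* Same as x2apic_send_IPI_mask(), but skip the sending CPU. */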
static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
        unsigned long this_cpu = smp_processor_id();
        unsigned long query_cpu;
        unsigned long flags;

        x2apic_wrmsr_fence();

        local_irq_save(flags);
        for_each_cpu(query_cpu, mask) {
                if (query_cpu == this_cpu)
                        continue;
                __x2apic_send_IPI_dest(
                                per_cpu(x86_cpu_to_logical_apicid, query_cpu),
                                vector, apic->dest_logical);
        }
        local_irq_restore(flags);
}

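/* Send an IPI to every online CPU except the current one. */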
static void x2apic_send_IPI_allbutself(int vector)
{
        unsigned long this_cpu = smp_processor_id();
        unsigned long query_cpu;
        unsigned long flags;

        x2apic_wrmsr_fence();

        local_irq_save(flags);
        for_each_online_cpu(query_cpu) {
                if (query_cpu == this_cpu)
                        continue;
                __x2apic_send_IPI_dest(
                                per_cpu(x86_cpu_to_logical_apicid, query_cpu),
                                vector, apic->dest_logical);
        }
        local_irq_restore(flags);
}

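/* Send an IPI to all online CPUs, including the current one. */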
static void x2apic_send_IPI_all(int vector)
{
        x2apic_send_IPI_mask(cpu_online_mask, vector);
}

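/* The local APIC ID is always considered registered in x2APIC mode. */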
static int x2apic_apic_id_registered(void)
{
        return 1;
}

static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
        /*
         * We're using fixed IRQ delivery, so we can only return one logical
         * APIC ID. It may as well be the first one in the mask.
         */
        int cpu = cpumask_first(cpumask);

        if ((unsigned)cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_logical_apicid, cpu);
        else
                return BAD_APICID;
}

static unsigned int
x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                              const struct cpumask *andmask)
{
        int cpu;

        /*
         * We're using fixed IRQ delivery, so we can only return one logical
         * APIC ID. It may as well be the first online one in both masks.
         */
        for_each_cpu_and(cpu, cpumask, andmask) {
                if (cpumask_test_cpu(cpu, cpu_online_mask))
                        break;
        }

        if (cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_logical_apicid, cpu);

        return BAD_APICID;
}

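/* x2APIC IDs are full 32-bit values, so no masking or shifting is needed. */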
static unsigned int x2apic_cluster_phys_get_apic_id(unsigned long x)
{
        unsigned int id;

        id = x;
        return id;
}

static unsigned long set_apic_id(unsigned int id)
{
        unsigned long x;

        x = id;
        return x;
}

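/* Derive the physical package ID by shifting out the lower APIC ID bits. */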
static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb)
{
        return initial_apicid >> index_msb;
}

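/* Self IPIs use the dedicated SELF IPI register instead of the ICR. */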
static void x2apic_send_IPI_self(int vector)
{
        apic_write(APIC_SELF_IPI, vector);
}

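/* Cache this CPU's logical APIC ID (LDR) for later IPI destination lookups. */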
static void init_x2apic_ldr(void)
{
        int cpu = smp_processor_id();

        per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR);
}

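/* APIC driver operations for clustered x2APIC mode (logical destinations). */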
struct apic apic_x2apic_cluster = {

        .name                           = "cluster x2apic",
        .probe                          = NULL,
        .acpi_madt_oem_check            = x2apic_acpi_madt_oem_check,
        .apic_id_registered             = x2apic_apic_id_registered,

        .irq_delivery_mode              = dest_LowestPrio,
        .irq_dest_mode                  = 1, /* logical */

        .target_cpus                    = x2apic_target_cpus,
        .disable_esr                    = 0,
        .dest_logical                   = APIC_DEST_LOGICAL,
        .check_apicid_used              = NULL,
        .check_apicid_present           = NULL,

        .vector_allocation_domain       = x2apic_vector_allocation_domain,
        .init_apic_ldr                  = init_x2apic_ldr,

        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .multi_timer_check              = NULL,
        .apicid_to_node                 = NULL,
        .cpu_to_logical_apicid          = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .setup_portio_remap             = NULL,
        .check_phys_apicid_present      = default_check_phys_apicid_present,
        .enable_apic_mode               = NULL,
        .phys_pkg_id                    = x2apic_cluster_phys_pkg_id,
        .mps_oem_check                  = NULL,

        .get_apic_id                    = x2apic_cluster_phys_get_apic_id,
        .set_apic_id                    = set_apic_id,
        .apic_id_mask                   = 0xFFFFFFFFu,

        .cpu_mask_to_apicid             = x2apic_cpu_mask_to_apicid,
        .cpu_mask_to_apicid_and         = x2apic_cpu_mask_to_apicid_and,

        .send_IPI_mask                  = x2apic_send_IPI_mask,
        .send_IPI_mask_allbutself       = x2apic_send_IPI_mask_allbutself,
        .send_IPI_allbutself            = x2apic_send_IPI_allbutself,
        .send_IPI_all                   = x2apic_send_IPI_all,
        .send_IPI_self                  = x2apic_send_IPI_self,

        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
        .wait_for_init_deassert         = NULL,
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = NULL,

        .read                           = native_apic_msr_read,
        .write                          = native_apic_msr_write,
        .icr_read                       = native_x2apic_icr_read,
        .icr_write                      = native_x2apic_icr_write,
        .wait_icr_idle                  = native_x2apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_x2apic_wait_icr_idle,
};