linux/arch/x86/kernel/apic/apic_flat_64.c
/*
 * Copyright 2004 James Cleverdon, IBM.
 * Subject to the GNU Public License, v.2
 *
 * Flat APIC subarch code.
 *
 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
 * James Cleverdon.
 */
#include <linux/errno.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/hardirq.h>
#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>

#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif

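/*
 * Returning 1 means this driver matches regardless of the MADT OEM
 * identifiers; flat mode is the default APIC driver on x86-64.
 */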
static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
        return 1;
}

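/* Any online CPU may be targeted by interrupts in flat mode. */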
static const struct cpumask *flat_target_cpus(void)
{
        return cpu_online_mask;
}

static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
        /* Careful. Some cpus do not strictly honor the set of cpus
         * specified in the interrupt destination when using lowest
         * priority interrupt delivery mode.
         *
         * In particular there was a hyperthreading cpu observed to
         * deliver interrupts to the wrong hyperthread when only one
         * hyperthread was specified in the interrupt destination.
         */
        cpumask_clear(retmask);
        cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}

/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
static void flat_init_apic_ldr(void)
{
        unsigned long val;
        unsigned long num, id;

        num = smp_processor_id();
        id = 1UL << num;
        apic_write(APIC_DFR, APIC_DFR_FLAT);
        val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
        val |= SET_APIC_LOGICAL_ID(id);
        apic_write(APIC_LDR, val);
}

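/*
 * Write the destination bitmask and vector into the ICR.  Interrupts
 * are disabled so the ICR write sequence cannot be interleaved with
 * another IPI sent from interrupt context.
 */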
static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
{
        unsigned long flags;

        local_irq_save(flags);
        __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
        local_irq_restore(flags);
}

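/*
 * In flat logical mode each CPU owns one bit of the 8-bit logical
 * destination, so the low word of the cpumask can be used directly
 * as the destination field.
 */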
static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
{
        unsigned long mask = cpumask_bits(cpumask)[0];

        _flat_send_IPI_mask(mask, vector);
}

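/* Same as flat_send_IPI_mask(), minus the sending CPU's own bit. */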
static void
flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
{
        unsigned long mask = cpumask_bits(cpumask)[0];
        int cpu = smp_processor_id();

        if (cpu < BITS_PER_LONG)
                clear_bit(cpu, &mask);

        _flat_send_IPI_mask(mask, vector);
}

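/*
 * With CPU hotplug enabled, or for an NMI, the ALLBUT hardware shortcut
 * is avoided: it could also reach CPUs that are offline or on their way
 * down.  An explicit mask of the online CPUs (minus ourselves) is used
 * instead.
 */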
static void flat_send_IPI_allbutself(int vector)
{
        int cpu = smp_processor_id();
#ifdef  CONFIG_HOTPLUG_CPU
        int hotplug = 1;
#else
        int hotplug = 0;
#endif
        if (hotplug || vector == NMI_VECTOR) {
                if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) {
                        unsigned long mask = cpumask_bits(cpu_online_mask)[0];

                        if (cpu < BITS_PER_LONG)
                                clear_bit(cpu, &mask);

                        _flat_send_IPI_mask(mask, vector);
                }
        } else if (num_online_cpus() > 1) {
                __default_send_IPI_shortcut(APIC_DEST_ALLBUT,
                                            vector, apic->dest_logical);
        }
}

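/* NMIs must reach only online CPUs, so skip the ALLINC shortcut for them. */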
static void flat_send_IPI_all(int vector)
{
        if (vector == NMI_VECTOR) {
                flat_send_IPI_mask(cpu_online_mask, vector);
        } else {
                __default_send_IPI_shortcut(APIC_DEST_ALLINC,
                                            vector, apic->dest_logical);
        }
}

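/* In xAPIC mode the APIC ID lives in bits 24-31 of the APIC_ID register. */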
static unsigned int flat_get_apic_id(unsigned long x)
{
        unsigned int id;

        id = (((x)>>24) & 0xFFu);

        return id;
}

static unsigned long set_apic_id(unsigned int id)
{
        unsigned long x;

        x = ((id & 0xFFu)<<24);
        return x;
}

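/* Read the local APIC ID register and extract the hardware APIC ID. */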
static unsigned int read_xapic_id(void)
{
        unsigned int id;

        id = flat_get_apic_id(apic_read(APIC_ID));
        return id;
}

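/* Check that our APIC ID was reported present at boot (MADT/MP table). */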
static int flat_apic_id_registered(void)
{
        return physid_isset(read_xapic_id(), phys_cpu_present_map);
}

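/*
 * index_msb is the number of low APIC ID bits used to number threads
 * and cores; shifting them out leaves the physical package (socket) ID.
 */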
static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
{
        return initial_apic_id >> index_msb;
}

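/*
 * The "flat" driver: logical destination mode with lowest-priority
 * delivery.  The 8-bit logical destination limits it to systems with
 * at most 8 CPUs.
 */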
struct apic apic_flat =  {
        .name                           = "flat",
        .probe                          = NULL,
        .acpi_madt_oem_check            = flat_acpi_madt_oem_check,
        .apic_id_registered             = flat_apic_id_registered,

        .irq_delivery_mode              = dest_LowestPrio,
        .irq_dest_mode                  = 1, /* logical */

        .target_cpus                    = flat_target_cpus,
        .disable_esr                    = 0,
        .dest_logical                   = APIC_DEST_LOGICAL,
        .check_apicid_used              = NULL,
        .check_apicid_present           = NULL,

        .vector_allocation_domain       = flat_vector_allocation_domain,
        .init_apic_ldr                  = flat_init_apic_ldr,

        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .multi_timer_check              = NULL,
        .apicid_to_node                 = NULL,
        .cpu_to_logical_apicid          = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .setup_portio_remap             = NULL,
        .check_phys_apicid_present      = default_check_phys_apicid_present,
        .enable_apic_mode               = NULL,
        .phys_pkg_id                    = flat_phys_pkg_id,
        .mps_oem_check                  = NULL,

        .get_apic_id                    = flat_get_apic_id,
        .set_apic_id                    = set_apic_id,
        .apic_id_mask                   = 0xFFu << 24,

        .cpu_mask_to_apicid             = default_cpu_mask_to_apicid,
        .cpu_mask_to_apicid_and         = default_cpu_mask_to_apicid_and,

        .send_IPI_mask                  = flat_send_IPI_mask,
        .send_IPI_mask_allbutself       = flat_send_IPI_mask_allbutself,
        .send_IPI_allbutself            = flat_send_IPI_allbutself,
        .send_IPI_all                   = flat_send_IPI_all,
        .send_IPI_self                  = apic_send_IPI_self,

        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
        .wait_for_init_deassert         = NULL,
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = default_inquire_remote_apic,

        .read                           = native_apic_mem_read,
        .write                          = native_apic_mem_write,
        .icr_read                       = native_apic_icr_read,
        .icr_write                      = native_apic_icr_write,
        .wait_icr_idle                  = native_apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,
};

/*
 * Physflat mode is used when there are more than 8 CPUs on an AMD system.
 * We cannot use logical delivery in this case because the mask
 * overflows, so use physical mode.
 */
static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
#ifdef CONFIG_ACPI
        /*
         * Quirk: some x86_64 machines can only use physical APIC mode
         * regardless of how many processors are present (x86_64 ES7000
         * is an example).
         */
        if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
                (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
                printk(KERN_DEBUG "system APIC can only use physical flat mode\n");
                return 1;
        }
#endif

        return 0;
}

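/* As in flat mode, any online CPU is a valid interrupt target. */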
static const struct cpumask *physflat_target_cpus(void)
{
        return cpu_online_mask;
}

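/*
 * With fixed delivery each vector targets exactly one CPU, so the
 * allocation domain is just that CPU.
 */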
static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
        cpumask_clear(retmask);
        cpumask_set_cpu(cpu, retmask);
}

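/*
 * Physical destination mode can only address one APIC ID per IPI (apart
 * from broadcast), so the helpers below send one IPI per CPU in the mask.
 */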
static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
{
        default_send_IPI_mask_sequence_phys(cpumask, vector);
}

static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
                                              int vector)
{
        default_send_IPI_mask_allbutself_phys(cpumask, vector);
}

static void physflat_send_IPI_allbutself(int vector)
{
        default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}

static void physflat_send_IPI_all(int vector)
{
        physflat_send_IPI_mask(cpu_online_mask, vector);
}

static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
        int cpu;

        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
        cpu = cpumask_first(cpumask);
        if ((unsigned)cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_apicid, cpu);
        else
                return BAD_APICID;
}

static unsigned int
physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                                const struct cpumask *andmask)
{
        int cpu;

        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
        for_each_cpu_and(cpu, cpumask, andmask) {
                if (cpumask_test_cpu(cpu, cpu_online_mask))
                        break;
        }
        if (cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_apicid, cpu);

        return BAD_APICID;
}

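/*
 * The "physical flat" driver: fixed delivery to physical APIC IDs.
 * Used instead of apic_flat when the system has more than 8 CPUs, or
 * when the FADT mandates physical mode.
 */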
struct apic apic_physflat =  {

        .name                           = "physical flat",
        .probe                          = NULL,
        .acpi_madt_oem_check            = physflat_acpi_madt_oem_check,
        .apic_id_registered             = flat_apic_id_registered,

        .irq_delivery_mode              = dest_Fixed,
        .irq_dest_mode                  = 0, /* physical */

        .target_cpus                    = physflat_target_cpus,
        .disable_esr                    = 0,
        .dest_logical                   = 0,
        .check_apicid_used              = NULL,
        .check_apicid_present           = NULL,

        .vector_allocation_domain       = physflat_vector_allocation_domain,
        /* not needed, but shouldn't hurt: */
        .init_apic_ldr                  = flat_init_apic_ldr,

        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .multi_timer_check              = NULL,
        .apicid_to_node                 = NULL,
        .cpu_to_logical_apicid          = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .setup_portio_remap             = NULL,
        .check_phys_apicid_present      = default_check_phys_apicid_present,
        .enable_apic_mode               = NULL,
        .phys_pkg_id                    = flat_phys_pkg_id,
        .mps_oem_check                  = NULL,

        .get_apic_id                    = flat_get_apic_id,
        .set_apic_id                    = set_apic_id,
        .apic_id_mask                   = 0xFFu << 24,

        .cpu_mask_to_apicid             = physflat_cpu_mask_to_apicid,
        .cpu_mask_to_apicid_and         = physflat_cpu_mask_to_apicid_and,

        .send_IPI_mask                  = physflat_send_IPI_mask,
        .send_IPI_mask_allbutself       = physflat_send_IPI_mask_allbutself,
        .send_IPI_allbutself            = physflat_send_IPI_allbutself,
        .send_IPI_all                   = physflat_send_IPI_all,
        .send_IPI_self                  = apic_send_IPI_self,

        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
        .wait_for_init_deassert         = NULL,
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = default_inquire_remote_apic,

        .read                           = native_apic_mem_read,
        .write                          = native_apic_mem_write,
        .icr_read                       = native_apic_icr_read,
        .icr_write                      = native_apic_icr_write,
        .wait_icr_idle                  = native_apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,
};