linux/arch/x86/kernel/apic/ipi.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/cpumask.h>
#include <linux/smp.h>

#include "local.h"

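/*
 * Gates the use of ICR shorthands (self/all/all-but-self) for IPI
 * broadcasts; flipped by apic_smt_update() once broadcasting is safe.
 */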
DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand);

#ifdef CONFIG_SMP
static int apic_ipi_shorthand_off __ro_after_init;

static __init int apic_ipi_shorthand(char *str)
{
        get_option(&str, &apic_ipi_shorthand_off);
        return 1;
}
__setup("no_ipi_broadcast=", apic_ipi_shorthand);
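/*
 * Example: booting with "no_ipi_broadcast=1" on the kernel command line
 * keeps shorthand broadcasts disabled; print_ipi_mode() below reports
 * the resulting mode at late init.
 */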

static int __init print_ipi_mode(void)
{
        pr_info("IPI shorthand broadcast: %s\n",
                apic_ipi_shorthand_off ? "disabled" : "enabled");
        return 0;
}
late_initcall(print_ipi_mode);

void apic_smt_update(void)
{
        /*
         * Do not switch to broadcast mode if:
         * - Disabled on the command line
         * - Only a single CPU is online
         * - Not all present CPUs have been at least booted once
         *
         * The latter is important as the local APIC might be in some
         * random state and a broadcast might cause havoc. That's
         * especially true for NMI broadcasting.
         */
        if (apic_ipi_shorthand_off || num_online_cpus() == 1 ||
            !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask)) {
                static_branch_disable(&apic_use_ipi_shorthand);
        } else {
                static_branch_enable(&apic_use_ipi_shorthand);
        }
}

void apic_send_IPI_allbutself(unsigned int vector)
{
        if (num_online_cpus() < 2)
                return;

        if (static_branch_likely(&apic_use_ipi_shorthand))
                apic->send_IPI_allbutself(vector);
        else
                apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
}

/*
 * Send a 'reschedule' IPI to another CPU. It goes straight through and
 * wastes no time serializing anything. Worst case is that we lose a
 * reschedule ...
 */
void native_smp_send_reschedule(int cpu)
{
        if (unlikely(cpu_is_offline(cpu))) {
                WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
                return;
        }
        apic->send_IPI(cpu, RESCHEDULE_VECTOR);
}

void native_send_call_func_single_ipi(int cpu)
{
        apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
}

void native_send_call_func_ipi(const struct cpumask *mask)
{
        if (static_branch_likely(&apic_use_ipi_shorthand)) {
                unsigned int cpu = smp_processor_id();

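                /*
                 * Shorthands are only usable when the IPI reaches every
                 * online CPU: unless the mask plus the sending CPU covers
                 * cpu_online_mask, fall back to the mask based send.
                 */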
                if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask))
                        goto sendmask;

                if (cpumask_test_cpu(cpu, mask))
                        apic->send_IPI_all(CALL_FUNCTION_VECTOR);
                else if (num_online_cpus() > 1)
                        apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
                return;
        }

sendmask:
        apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
}

#endif /* CONFIG_SMP */

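/*
 * SET_APIC_DEST_FIELD() shifts the destination APIC ID into bits 24-31
 * of ICR2, i.e. bits 56-63 of the full 64-bit ICR.
 */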
static inline int __prepare_ICR2(unsigned int mask)
{
        return SET_APIC_DEST_FIELD(mask);
}

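/*
 * The Delivery Status bit (APIC_ICR_BUSY) stays set while the local
 * APIC is still dispatching a previous IPI; poll until it clears
 * before writing the ICR again.
 */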
static inline void __xapic_wait_icr_idle(void)
{
        while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
                cpu_relax();
}

void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
{
        /*
         * Subtle. In the case of the 'never do double writes' workaround
         * we have to lock out interrupts to be safe. As we don't care
         * about the value read we use an atomic rmw access to avoid
         * costly cli/sti. Otherwise we use an even cheaper single atomic
         * write to the APIC.
         */
        unsigned int cfg;

        /*
         * Wait for the ICR to become idle.
         */
        if (unlikely(vector == NMI_VECTOR))
                safe_apic_wait_icr_idle();
        else
                __xapic_wait_icr_idle();

        /*
         * No need to touch the target chip field. Also the destination
         * mode is ignored when a shorthand is used.
         */
        cfg = __prepare_ICR(shortcut, vector, 0);

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        native_apic_mem_write(APIC_ICR, cfg);
}

/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
        unsigned long cfg;

        /*
         * Wait for the ICR to become idle.
         */
        if (unlikely(vector == NMI_VECTOR))
                safe_apic_wait_icr_idle();
        else
                __xapic_wait_icr_idle();

        /*
         * Prepare the target chip field.
         */
        cfg = __prepare_ICR2(mask);
        native_apic_mem_write(APIC_ICR2, cfg);

        /*
         * Program the ICR.
         */
        cfg = __prepare_ICR(0, vector, dest);

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        native_apic_mem_write(APIC_ICR, cfg);
}

void default_send_IPI_single_phys(int cpu, int vector)
{
        unsigned long flags;

        local_irq_save(flags);
        __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
                                      vector, APIC_DEST_PHYSICAL);
        local_irq_restore(flags);
}

void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
        unsigned long query_cpu;
        unsigned long flags;

        /*
         * Hack. The clustered APIC addressing mode doesn't allow us to send
         * to an arbitrary mask, so I do a unicast to each CPU instead.
         * - mbligh
         */
        local_irq_save(flags);
        for_each_cpu(query_cpu, mask) {
                __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
                                query_cpu), vector, APIC_DEST_PHYSICAL);
        }
        local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
                                                 int vector)
{
        unsigned int this_cpu = smp_processor_id();
        unsigned int query_cpu;
        unsigned long flags;

        /* See Hack comment above */

        local_irq_save(flags);
        for_each_cpu(query_cpu, mask) {
                if (query_cpu == this_cpu)
                        continue;
                __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
                                 query_cpu), vector, APIC_DEST_PHYSICAL);
        }
        local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
        apic->send_IPI_mask(cpumask_of(cpu), vector);
}

void default_send_IPI_allbutself(int vector)
{
        __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}

void default_send_IPI_all(int vector)
{
        __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}

void default_send_IPI_self(int vector)
{
        __default_send_IPI_shortcut(APIC_DEST_SELF, vector);
}
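/*
 * The shortcut based helpers above are typically wired up as the
 * send_IPI_* callbacks of APIC drivers which can safely use ICR
 * shorthands.
 */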

#ifdef CONFIG_X86_32

void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
                                                 int vector)
{
        unsigned long flags;
        unsigned int query_cpu;

        /*
         * Hack. The clustered APIC addressing mode doesn't allow us to send
         * to an arbitrary mask, so I do a unicast to each CPU instead. This
         * should be modified to do 1 message per cluster ID - mbligh
         */

        local_irq_save(flags);
        for_each_cpu(query_cpu, mask)
                __default_send_IPI_dest_field(
                        early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
                        vector, apic->dest_logical);
        local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
                                                 int vector)
{
        unsigned long flags;
        unsigned int query_cpu;
        unsigned int this_cpu = smp_processor_id();

        /* See Hack comment above */

        local_irq_save(flags);
        for_each_cpu(query_cpu, mask) {
                if (query_cpu == this_cpu)
                        continue;
                __default_send_IPI_dest_field(
                        early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
                        vector, apic->dest_logical);
        }
        local_irq_restore(flags);
}

/*
 * This is only used on smaller machines: in flat logical destination
 * mode each bit of the destination field addresses one CPU, so only
 * the first word of the cpumask is ever relevant.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
        unsigned long mask = cpumask_bits(cpumask)[0];
        unsigned long flags;

        if (!mask)
                return;

        local_irq_save(flags);
        WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
        __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
        local_irq_restore(flags);
}

/* must come after the send_IPI functions above for inlining */
static int convert_apicid_to_cpu(int apic_id)
{
        int i;

        for_each_possible_cpu(i) {
                if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
                        return i;
        }
        return -1;
}

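/*
 * Resolve the current CPU number from the hardware APIC ID, for
 * callers which cannot trust the per-CPU based smp_processor_id()
 * (e.g. crash/reboot paths). Falls back to CPU 0 when the APIC is
 * unusable or the ID cannot be resolved.
 */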
int safe_smp_processor_id(void)
{
        int apicid, cpuid;

        if (!boot_cpu_has(X86_FEATURE_APIC))
                return 0;

        apicid = hard_smp_processor_id();
        if (apicid == BAD_APICID)
                return 0;

        cpuid = convert_apicid_to_cpu(apicid);

        return cpuid >= 0 ? cpuid : 0;
}
#endif