linux/arch/x86/kernel/vsmp_64.c
/*
 * vSMPowered(tm) systems specific initialization
 * Copyright (C) 2005 ScaleMP Inc.
 *
 * Use of this code is subject to the terms and conditions of the
 * GNU general public license version 2. See "COPYING" or
 * http://www.gnu.org/licenses/gpl.html
 *
 * Ravikiran Thirumalai <kiran@scalemp.com>,
 * Shai Fultheim <shai@scalemp.com>
 * Paravirt ops integration: Glauber de Oliveira Costa <gcosta@redhat.com>,
 *                           Ravikiran Thirumalai <kiran@scalemp.com>
 */

#include <linux/init.h>
#include <linux/pci_ids.h>
#include <linux/pci_regs.h>
#include <linux/smp.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/pci-direct.h>
#include <asm/io.h>
#include <asm/paravirt.h>
#include <asm/setup.h>

#define TOPOLOGY_REGISTER_OFFSET 0x10

/*
 * Initialized once during vSMP PCI initialization: cleared when the
 * vSMP foundation routes interrupts itself, in which case every CPU
 * must be able to handle any interrupt (see vsmp_apic_post_init()).
 */
static int irq_routing_comply = 1;

#if defined CONFIG_PCI && defined CONFIG_PARAVIRT
/*
 * Interrupt control on vSMPowered systems:
 * ~AC is a shadow of IF.  If IF is 'on' AC should be 'off'
 * and vice versa.
 */

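/*
 * Return the flags with IF reflecting the logical interrupt state:
 * since ~AC shadows IF, interrupts are reported as disabled (IF
 * cleared) if either IF is already clear or AC is set.
 */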
asmlinkage __visible unsigned long vsmp_save_fl(void)
{
        unsigned long flags = native_save_fl();

        if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC))
                flags &= ~X86_EFLAGS_IF;
        return flags;
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);

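/*
 * Restore the flags while keeping AC consistent with IF: enabling
 * interrupts clears AC, disabling them sets AC.
 */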
__visible void vsmp_restore_fl(unsigned long flags)
{
        if (flags & X86_EFLAGS_IF)
                flags &= ~X86_EFLAGS_AC;
        else
                flags |= X86_EFLAGS_AC;
        native_restore_fl(flags);
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);

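/* Disable interrupts: clear IF and set its shadow, AC. */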
asmlinkage __visible void vsmp_irq_disable(void)
{
        unsigned long flags = native_save_fl();

        native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);

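/* Enable interrupts: set IF and clear its shadow, AC. */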
asmlinkage __visible void vsmp_irq_enable(void)
{
        unsigned long flags = native_save_fl();

        native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);

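/*
 * Paravirt patching hook: the four irq flag ops above must keep their
 * AC handling, so they are routed through paravirt_patch_default()
 * (a call to the vsmp_* versions) instead of having native_patch()
 * inline the native flag instructions.
 */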
static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf,
                                  unsigned long addr, unsigned len)
{
        switch (type) {
        case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
        case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
        case PARAVIRT_PATCH(pv_irq_ops.save_fl):
        case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
                return paravirt_patch_default(type, clobbers, ibuf, addr, len);
        default:
                return native_patch(type, clobbers, ibuf, addr, len);
        }
}

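/*
 * Read the vSMP CTL device's capability and control registers (offsets
 * 0 and 4 of BAR0 of PCI device 0:0x1f.0) and enable what the
 * foundation advertises: optimized interrupt routing (bit 8) and the
 * IRQ fastpath via the paravirt irq ops above (bit 4).  Each feature
 * is enabled by clearing its control bit and writing control back.
 */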
static void __init set_vsmp_pv_ops(void)
{
        void __iomem *address;
        unsigned int cap, ctl, cfg;

        /* Set the vSMP magic bits to indicate a vSMP-capable kernel */
        cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0);
        address = early_ioremap(cfg, 8);
        cap = readl(address);
        ctl = readl(address + 4);
        pr_info("vSMP CTL: capabilities:0x%08x  control:0x%08x\n", cap, ctl);

        /* If possible, let the vSMP foundation route interrupts optimally */
#ifdef CONFIG_SMP
        if (cap & ctl & BIT(8)) {
                ctl &= ~BIT(8);

                /* OS interrupt routing is ignored; the foundation routes */
                irq_routing_comply = 0;

#ifdef CONFIG_PROC_FS
                /* Don't let users change irq affinity via procfs */
                no_irq_affinity = 1;
#endif
        }
#endif

        if (cap & ctl & (1 << 4)) {
                /* Set up the irq ops and turn on vSMP IRQ fastpath handling */
                pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
                pv_irq_ops.irq_enable  = PV_CALLEE_SAVE(vsmp_irq_enable);
                pv_irq_ops.save_fl  = PV_CALLEE_SAVE(vsmp_save_fl);
                pv_irq_ops.restore_fl  = PV_CALLEE_SAVE(vsmp_restore_fl);
                pv_init_ops.patch = vsmp_patch;
                ctl &= ~(1 << 4);
        }
        writel(ctl, address + 4);
        ctl = readl(address + 4);
        pr_info("vSMP CTL: control set to:0x%08x\n", ctl);

        early_iounmap(address, 8);
}
#else
static void __init set_vsmp_pv_ops(void)
{
}
#endif

#ifdef CONFIG_PCI
static int is_vsmp = -1;

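/*
 * Look for the ScaleMP vSMP CTL device at PCI 0:0x1f.0.  The 32-bit
 * config read returns the vendor ID in the low word and the device ID
 * in the high word, so both are matched in one comparison.
 */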
static void __init detect_vsmp_box(void)
{
        is_vsmp = 0;

        if (!early_pci_allowed())
                return;

        /* Check if we are running on a ScaleMP vSMPowered box */
        if (read_pci_config(0, 0x1f, 0, PCI_VENDOR_ID) ==
             (PCI_VENDOR_ID_SCALEMP | (PCI_DEVICE_ID_SCALEMP_VSMP_CTL << 16)))
                is_vsmp = 1;
}

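/*
 * Return the cached detection result; warn if this is queried before
 * detect_vsmp_box() has run.
 */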
static int is_vsmp_box(void)
{
        if (is_vsmp != -1)
                return is_vsmp;

        WARN_ON_ONCE(1);
        return 0;
}

#else
static void __init detect_vsmp_box(void)
{
}
static int is_vsmp_box(void)
{
        return 0;
}
#endif

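/*
 * Topology register layout, as decoded below: bits 18:16 hold the node
 * shift (a value of 0 decodes as 8), and the low node_shift bits hold
 * one less than the number of CPUs on the first board.
 */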
static void __init vsmp_cap_cpus(void)
{
#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP)
        void __iomem *address;
        unsigned int cfg, topology, node_shift, maxcpus;

        /*
         * CONFIG_X86_VSMP is not configured, so limit the number of CPUs to
         * the ones present in the first board, unless explicitly overridden
         * by setup_max_cpus
         */
        if (setup_max_cpus != NR_CPUS)
                return;

        /* Read the vSMP Foundation topology register */
        cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0);
        address = early_ioremap(cfg + TOPOLOGY_REGISTER_OFFSET, 4);
        if (WARN_ON(!address))
                return;

        topology = readl(address);
        node_shift = (topology >> 16) & 0x7;
        if (!node_shift)
                /* The value 0 should be decoded as 8 */
                node_shift = 8;
        maxcpus = (topology & ((1 << node_shift) - 1)) + 1;

        pr_info("vSMP CTL: Capping CPUs to %d (CONFIG_X86_VSMP is unset)\n",
                maxcpus);
        setup_max_cpus = maxcpus;
        early_iounmap(address, 4);
#endif
}

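/*
 * Derive the physical package id from the running CPU's hardware APIC
 * id rather than from the initial APIC id passed in.
 */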
static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
{
        return hard_smp_processor_id() >> index_msb;
}

/*
 * In vSMP, all cpus should be capable of handling interrupts, regardless of
 * the APIC used.
 */
static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask,
                                          const struct cpumask *mask)
{
        cpumask_setall(retmask);
}

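/*
 * Hooked as x86_platform.apic_post_init from vsmp_init(): override the
 * APIC callbacks that vSMP needs to handle differently.
 */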
static void vsmp_apic_post_init(void)
{
        /* need to update phys_pkg_id */
        apic->phys_pkg_id = apicid_phys_pkg_id;

        if (!irq_routing_comply)
                apic->vector_allocation_domain = fill_vector_allocation_domain;
}

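/*
 * Entry point: detect the platform and, on a vSMPowered box, hook the
 * APIC post-init callback, cap the CPU count and install the paravirt
 * irq ops.
 */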
void __init vsmp_init(void)
{
        detect_vsmp_box();
        if (!is_vsmp_box())
                return;

        x86_platform.apic_post_init = vsmp_apic_post_init;

        vsmp_cap_cpus();

        set_vsmp_pv_ops();
}