linux/arch/x86/platform/uv/uv_irq.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV IRQ functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>

/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode {
        struct rb_node          list;
        unsigned long           offset;
        int                     pnode;
        int                     irq;
};

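/*
 * uv_irq_lock protects uv_irq_root, the rbtree that maps each UV irq to the
 * MMR offset and pnode recorded by uv_set_irq_2_mmr_info().
 */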
static DEFINE_SPINLOCK(uv_irq_lock);
static struct rb_root           uv_irq_root;

static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool);

static void uv_noop(struct irq_data *data) { }

static void uv_ack_apic(struct irq_data *data)
{
        ack_APIC_irq();
}

static struct irq_chip uv_irq_chip = {
        .name                   = "UV-CORE",
        .irq_mask               = uv_noop,
        .irq_unmask             = uv_noop,
        .irq_eoi                = uv_ack_apic,
        .irq_set_affinity       = uv_set_irq_affinity,
};

/*
 * Add offset and pnode information of the hub sourcing interrupts to the
 * rb tree for a specific irq.
 */
static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
{
        struct rb_node **link = &uv_irq_root.rb_node;
        struct rb_node *parent = NULL;
        struct uv_irq_2_mmr_pnode *n;
        struct uv_irq_2_mmr_pnode *e;
        unsigned long irqflags;

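        /* Allocate the new entry from memory local to the blade: */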
        n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
                                uv_blade_to_memory_nid(blade));
        if (!n)
                return -ENOMEM;

        n->irq = irq;
        n->offset = offset;
        n->pnode = uv_blade_to_pnode(blade);
        spin_lock_irqsave(&uv_irq_lock, irqflags);
        /* Find the right place in the rbtree: */
        while (*link) {
                parent = *link;
                e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);

                if (unlikely(irq == e->irq)) {
                        /* irq entry exists */
                        e->pnode = uv_blade_to_pnode(blade);
                        e->offset = offset;
                        spin_unlock_irqrestore(&uv_irq_lock, irqflags);
                        kfree(n);
                        return 0;
                }

                if (irq < e->irq)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }

        /* Insert the node into the rbtree. */
        rb_link_node(&n->list, parent, link);
        rb_insert_color(&n->list, &uv_irq_root);

        spin_unlock_irqrestore(&uv_irq_lock, irqflags);
        return 0;
}

/* Retrieve offset and pnode information from the rb tree for a specific irq */
int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
{
        struct uv_irq_2_mmr_pnode *e;
        struct rb_node *n;
        unsigned long irqflags;

        spin_lock_irqsave(&uv_irq_lock, irqflags);
        n = uv_irq_root.rb_node;
        while (n) {
                e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);

                if (e->irq == irq) {
                        *offset = e->offset;
                        *pnode = e->pnode;
                        spin_unlock_irqrestore(&uv_irq_lock, irqflags);
                        return 0;
                }

                if (irq < e->irq)
                        n = n->rb_left;
                else
                        n = n->rb_right;
        }
        spin_unlock_irqrestore(&uv_irq_lock, irqflags);
        return -1;
}

/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
static int
arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
                       unsigned long mmr_offset, int limit)
{
        const struct cpumask *eligible_cpu = cpumask_of(cpu);
        struct irq_cfg *cfg = get_irq_chip_data(irq);
        unsigned long mmr_value;
        struct uv_IO_APIC_route_entry *entry;
        int mmr_pnode, err;

        BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
                        sizeof(unsigned long));

        err = assign_irq_vector(irq, cfg, eligible_cpu);
        if (err != 0)
                return err;

        if (limit == UV_AFFINITY_CPU)
                irq_set_status_flags(irq, IRQ_NO_BALANCING);
        else
                irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);

        set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
                                      irq_name);

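        /*
         * Build an IO-APIC style routing entry (vector, delivery mode,
         * destination APIC id) and write it to the hub MMR:
         */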
        mmr_value = 0;
        entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
        entry->vector           = cfg->vector;
        entry->delivery_mode    = apic->irq_delivery_mode;
        entry->dest_mode        = apic->irq_dest_mode;
        entry->polarity         = 0;
        entry->trigger          = 0;
        entry->mask             = 0;
        entry->dest             = apic->cpu_mask_to_apicid(eligible_cpu);

        mmr_pnode = uv_blade_to_pnode(mmr_blade);
        uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

        if (cfg->move_in_progress)
                send_cleanup_vector(cfg);

        return irq;
}

/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
{
        unsigned long mmr_value;
        struct uv_IO_APIC_route_entry *entry;

        BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
                        sizeof(unsigned long));

        mmr_value = 0;
        entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
        entry->mask = 1;

        uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}

static int
uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
                    bool force)
{
        struct irq_cfg *cfg = data->chip_data;
        unsigned int dest;
        unsigned long mmr_value, mmr_offset;
        struct uv_IO_APIC_route_entry *entry;
        int mmr_pnode;

        if (__ioapic_set_affinity(data, mask, &dest))
                return -1;

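        /* Rebuild the routing entry with the newly selected destination: */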
        mmr_value = 0;
        entry = (struct uv_IO_APIC_route_entry *)&mmr_value;

        entry->vector           = cfg->vector;
        entry->delivery_mode    = apic->irq_delivery_mode;
        entry->dest_mode        = apic->irq_dest_mode;
        entry->polarity         = 0;
        entry->trigger          = 0;
        entry->mask             = 0;
        entry->dest             = dest;

        /* Get previously stored MMR and pnode of hub sourcing interrupts */
        if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode))
                return -1;

        uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

        if (cfg->move_in_progress)
                send_cleanup_vector(cfg);

        return 0;
}

/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
                 unsigned long mmr_offset, int limit)
{
        int irq, ret;

        irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));

        if (irq <= 0)
                return -EBUSY;

        ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
                limit);
        if (ret == irq)
                uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
        else
                destroy_irq(irq);

        return ret;
}
EXPORT_SYMBOL_GPL(uv_setup_irq);

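/*
 * Illustrative usage sketch (not part of this file): a driver on UV hardware
 * would typically pair uv_setup_irq() with request_irq(), and undo both on
 * teardown.  The handler, device name and cookie below are hypothetical:
 *
 *      irq = uv_setup_irq("my-uv-device", cpu, mmr_blade, mmr_offset,
 *                         UV_AFFINITY_CPU);
 *      if (irq < 0)
 *              return irq;
 *      ret = request_irq(irq, my_uv_intr_handler, 0, "my-uv-device", my_dev);
 *      ...
 *      free_irq(irq, my_dev);
 *      uv_teardown_irq(irq);
 */
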
/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 *
 * The MMR offset and pnode recorded by uv_setup_irq() are looked up in the
 * rb tree, so only the irq itself needs to be passed in.
 */
void uv_teardown_irq(unsigned int irq)
{
        struct uv_irq_2_mmr_pnode *e;
        struct rb_node *n;
        unsigned long irqflags;

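        /* Find the irq's entry, disable its MMR and drop it from the tree: */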
        spin_lock_irqsave(&uv_irq_lock, irqflags);
        n = uv_irq_root.rb_node;
        while (n) {
                e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
                if (e->irq == irq) {
                        arch_disable_uv_irq(e->pnode, e->offset);
                        rb_erase(n, &uv_irq_root);
                        kfree(e);
                        break;
                }
                if (irq < e->irq)
                        n = n->rb_left;
                else
                        n = n->rb_right;
        }
        spin_unlock_irqrestore(&uv_irq_lock, irqflags);
        destroy_irq(irq);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);