// SPDX-License-Identifier: GPL-2.0
/*
 *      linux/arch/alpha/kernel/sys_titan.c
 *
 *      Copyright (C) 1995 David A Rusling
 *      Copyright (C) 1996, 1999 Jay A Estabrook
 *      Copyright (C) 1998, 1999 Richard Henderson
 *      Copyright (C) 1999, 2000 Jeff Wiedemeier
 *
 * Code supporting TITAN systems (EV6+TITAN), currently:
 *      Privateer
 *      Falcon
 *      Granite
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>

#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/core_titan.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
#include "err_impl.h"

/*
 * Titan generic
 */

/*
 * Titan supports up to 4 CPUs
 */
static unsigned long titan_cpu_irq_affinity[4] = { ~0UL, ~0UL, ~0UL, ~0UL };
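
/*
 * titan_cpu_irq_affinity[cpu] is a per-CPU enable bitmask: bit N set
 * means Titan interrupt line N may be delivered to that CPU.  All
 * lines default to all four CPUs until titan_set_irq_affinity()
 * narrows them; titan_update_irq_hw() ANDs each mask with the global
 * enable mask before writing the per-CPU DIM CSRs.
 */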

/*
 * A mask bit is set (1) if the corresponding interrupt is enabled
 */
static unsigned long titan_cached_irq_mask;

/*
 * Need SMP-safe access to interrupt CSRs
 */
DEFINE_SPINLOCK(titan_irq_lock);

static void
titan_update_irq_hw(unsigned long mask)
{
        register titan_cchip *cchip = TITAN_cchip;
        unsigned long isa_enable = 1UL << 55;
        register int bcpu = boot_cpuid;

#ifdef CONFIG_SMP
        cpumask_t cpm;
        volatile unsigned long *dim0, *dim1, *dim2, *dim3;
        unsigned long mask0, mask1, mask2, mask3, dummy;

        cpumask_copy(&cpm, cpu_present_mask);
        mask &= ~isa_enable;
        mask0 = mask & titan_cpu_irq_affinity[0];
        mask1 = mask & titan_cpu_irq_affinity[1];
        mask2 = mask & titan_cpu_irq_affinity[2];
        mask3 = mask & titan_cpu_irq_affinity[3];

        if (bcpu == 0) mask0 |= isa_enable;
        else if (bcpu == 1) mask1 |= isa_enable;
        else if (bcpu == 2) mask2 |= isa_enable;
        else mask3 |= isa_enable;

        dim0 = &cchip->dim0.csr;
        dim1 = &cchip->dim1.csr;
        dim2 = &cchip->dim2.csr;
        dim3 = &cchip->dim3.csr;
        if (!cpumask_test_cpu(0, &cpm)) dim0 = &dummy;
        if (!cpumask_test_cpu(1, &cpm)) dim1 = &dummy;
        if (!cpumask_test_cpu(2, &cpm)) dim2 = &dummy;
        if (!cpumask_test_cpu(3, &cpm)) dim3 = &dummy;

        *dim0 = mask0;
        *dim1 = mask1;
        *dim2 = mask2;
        *dim3 = mask3;
        mb();
        *dim0;
        *dim1;
        *dim2;
        *dim3;
#else
        volatile unsigned long *dimB;
        dimB = &cchip->dim0.csr;
        if (bcpu == 1) dimB = &cchip->dim1.csr;
        else if (bcpu == 2) dimB = &cchip->dim2.csr;
        else if (bcpu == 3) dimB = &cchip->dim3.csr;

        *dimB = mask | isa_enable;
        mb();
        *dimB;
#endif
}
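
/*
 * Note the access pattern above: each DIM (device interrupt mask)
 * CSR store is followed by mb() and a dummy load from the same
 * register.  This is the usual Alpha idiom for forcing a write out
 * to the chipset before continuing, so the bare "*dim0;" statements
 * are not dead code.  Bit 55 (isa_enable) routes the cascaded ISA
 * interrupts and is only ever set in the boot CPU's mask; CPUs not
 * in cpu_present_mask get their DIM pointer redirected to a local
 * dummy so the straight-line stores stay harmless.
 */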

static inline void
titan_enable_irq(struct irq_data *d)
{
        unsigned int irq = d->irq;
        spin_lock(&titan_irq_lock);
        titan_cached_irq_mask |= 1UL << (irq - 16);
        titan_update_irq_hw(titan_cached_irq_mask);
        spin_unlock(&titan_irq_lock);
}

static inline void
titan_disable_irq(struct irq_data *d)
{
        unsigned int irq = d->irq;
        spin_lock(&titan_irq_lock);
        titan_cached_irq_mask &= ~(1UL << (irq - 16));
        titan_update_irq_hw(titan_cached_irq_mask);
        spin_unlock(&titan_irq_lock);
}
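
/*
 * The "irq - 16" above converts a Linux IRQ number into a Titan
 * interrupt line: Cchip lines 0..63 appear as Linux IRQs 16..79,
 * leaving 0..15 for the legacy i8259 ISA interrupts.  For example,
 * unmasking Linux IRQ 18 sets bit 2 of titan_cached_irq_mask.
 */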

static void
titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
{
        int cpu;

        for (cpu = 0; cpu < 4; cpu++) {
                if (cpumask_test_cpu(cpu, &affinity))
                        titan_cpu_irq_affinity[cpu] |= 1UL << irq;
                else
                        titan_cpu_irq_affinity[cpu] &= ~(1UL << irq);
        }
}

static int
titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
                       bool force)
{
        unsigned int irq = d->irq;
        spin_lock(&titan_irq_lock);
        titan_cpu_set_irq_affinity(irq - 16, *affinity);
        titan_update_irq_hw(titan_cached_irq_mask);
        spin_unlock(&titan_irq_lock);

        return 0;
}
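
/*
 * Note that changing affinity only rewrites the per-CPU bookkeeping;
 * re-calling titan_update_irq_hw() with the unchanged global mask is
 * what pushes the new (enabled AND affine) value of each per-CPU
 * mask out to the DIM CSRs, so the retarget takes effect
 * immediately.
 */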

static void
titan_device_interrupt(unsigned long vector)
{
        printk("titan_device_interrupt: NOT IMPLEMENTED YET!!\n");
}

static void
titan_srm_device_interrupt(unsigned long vector)
{
        int irq;

        irq = (vector - 0x800) >> 4;
        handle_irq(irq);
}
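
/*
 * SRM device vectors start at 0x800 and are spaced 0x10 apart, with
 * the 16 ISA interrupts first: vector 0x800 is ISA IRQ 0, and vector
 * 0x900 = 0x800 + 16 * 0x10 is Linux IRQ 16, the first Titan line.
 * This matches the 0x900 base used by titan_dispatch_irqs() below.
 */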


static void __init
init_titan_irqs(struct irq_chip * ops, int imin, int imax)
{
        long i;
        for (i = imin; i <= imax; ++i) {
                irq_set_chip_and_handler(i, ops, handle_level_irq);
                irq_set_status_flags(i, IRQ_LEVEL);
        }
}

static struct irq_chip titan_irq_type = {
        .name                   = "TITAN",
        .irq_unmask             = titan_enable_irq,
        .irq_mask               = titan_disable_irq,
        .irq_mask_ack           = titan_disable_irq,
        .irq_set_affinity       = titan_set_irq_affinity,
};

static irqreturn_t
titan_intr_nop(int irq, void *dev_id)
{
        /*
         * This is a NOP interrupt handler for the purposes of
         * event counting -- just return.
         */
        return IRQ_HANDLED;
}

static void __init
titan_init_irq(void)
{
        if (alpha_using_srm && !alpha_mv.device_interrupt)
                alpha_mv.device_interrupt = titan_srm_device_interrupt;
        if (!alpha_mv.device_interrupt)
                alpha_mv.device_interrupt = titan_device_interrupt;

        titan_update_irq_hw(0);

        init_titan_irqs(&titan_irq_type, 16, 63 + 16);
}
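
/*
 * From here on, Linux IRQs 16..79 (Titan lines 0..63) are live with
 * level-triggered handling, which is why both machine vectors below
 * set nr_irqs to 80.
 */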

static void __init
titan_legacy_init_irq(void)
{
        /* init the legacy dma controller */
        outb(0, DMA1_RESET_REG);
        outb(0, DMA2_RESET_REG);
        outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
        outb(0, DMA2_MASK_REG);

        /* init the legacy irq controller */
        init_i8259a_irqs();

        /* init the titan irqs */
        titan_init_irq();
}
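
/*
 * The outb() sequence above is the standard PC-style legacy i8237
 * DMA setup: reset both controllers, put channel 4 (the cascade
 * channel on the second controller) into cascade mode, and unmask
 * it, so the first controller is cascaded through the second.
 * Nothing in this file drives ISA DMA directly; this just leaves the
 * cascade in the state legacy ISA devices presumably expect.
 */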

void
titan_dispatch_irqs(u64 mask)
{
        unsigned long vector;

        /*
         * Mask down to those interrupts which are enabled on this processor.
         */
        mask &= titan_cpu_irq_affinity[smp_processor_id()];

        /*
         * Dispatch all requested interrupts.
         */
        while (mask) {
                /* convert to SRM vector... priority is <63> -> <0> */
                vector = 63 - __kernel_ctlz(mask);
                mask &= ~(1UL << vector);       /* clear it out          */
                vector = 0x900 + (vector << 4); /* convert to SRM vector */

                /* dispatch it */
                alpha_mv.device_interrupt(vector);
        }
}
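
/*
 * Worked example: with bits 2 and 40 pending in the mask,
 * __kernel_ctlz (count leading zeros) finds bit 40 first, so the
 * higher-numbered line wins.  Bit 40 becomes SRM vector
 * 0x900 + (40 << 4) = 0xb80, which titan_srm_device_interrupt()
 * turns back into Linux IRQ 56; bit 2 (IRQ 18) is dispatched on the
 * next iteration.
 */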


/*
 * Titan Family
 */
static void __init
titan_request_irq(unsigned int irq, irq_handler_t handler,
                  unsigned long irqflags, const char *devname,
                  void *dev_id)
{
        int err;
        err = request_irq(irq, handler, irqflags, devname, dev_id);
        if (err) {
                printk("titan_request_irq for IRQ %d returned %d; ignoring\n",
                       irq, err);
        }
}

static void __init
titan_late_init(void)
{
        /*
         * Enable the system error interrupts. These interrupts are
         * all reported to the kernel as machine checks, so the handler
         * is a nop; it exists only so the individual events can be
         * counted.
         */
        titan_request_irq(63+16, titan_intr_nop, 0,
                    "CChip Error", NULL);
        titan_request_irq(62+16, titan_intr_nop, 0,
                    "PChip 0 H_Error", NULL);
        titan_request_irq(61+16, titan_intr_nop, 0,
                    "PChip 1 H_Error", NULL);
        titan_request_irq(60+16, titan_intr_nop, 0,
                    "PChip 0 C_Error", NULL);
        titan_request_irq(59+16, titan_intr_nop, 0,
                    "PChip 1 C_Error", NULL);

        /*
         * Register our error handlers.
         */
        titan_register_error_handlers();

        /*
         * Check if the console left us any error logs.
         */
        cdl_check_console_data_log();
}

static int
titan_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
        u8 intline;
        int irq;

        /* Get the current intline.  */
        pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
        irq = intline;

        /* Is it explicitly routed through ISA?  */
        if ((irq & 0xF0) == 0xE0)
                return irq;

        /* Offset by 16 to make room for ISA interrupts 0 - 15.  */
        return irq + 16;
}
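
/*
 * The console has already programmed each device's
 * PCI_INTERRUPT_LINE register, so mapping is just a translation of
 * that value: intlines of the form 0xEx flag an interrupt routed
 * through ISA and are returned unchanged, while any other value is a
 * Titan line, e.g. intline 5 becomes Linux IRQ 5 + 16 = 21.
 */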

static void __init
titan_init_pci(void)
{
        /*
         * This isn't really the right place, but there's some init
         * that needs to be done after everything is basically up.
         */
        titan_late_init();

        /* Indicate that we trust the console to configure things properly */
        pci_set_flags(PCI_PROBE_ONLY);
        common_init_pci();
        SMC669_Init(0);
        locate_and_init_vga(NULL);
}
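
/*
 * PCI_PROBE_ONLY tells the generic PCI code to accept the bus and
 * resource assignments the SRM console already made rather than
 * reallocating them.  SMC669_Init() probes the SMC 37C669 superio
 * chip and locate_and_init_vga() selects a VGA console device, both
 * helpers from the shared Alpha platform code.
 */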


/*
 * Privateer
 */
static void __init
privateer_init_pci(void)
{
        /*
         * Hook a couple of extra error interrupts that the
         * common titan code won't.
         */
        titan_request_irq(53+16, titan_intr_nop, 0,
                    "NMI", NULL);
        titan_request_irq(50+16, titan_intr_nop, 0,
                    "Temperature Warning", NULL);

        /*
         * Finish with the common version.
         */
        return titan_init_pci();
}


/*
 * The System Vectors.
 */
struct alpha_machine_vector titan_mv __initmv = {
        .vector_name            = "TITAN",
        DO_EV6_MMU,
        DO_DEFAULT_RTC,
        DO_TITAN_IO,
        .machine_check          = titan_machine_check,
        .max_isa_dma_address    = ALPHA_MAX_ISA_DMA_ADDRESS,
        .min_io_address         = DEFAULT_IO_BASE,
        .min_mem_address        = DEFAULT_MEM_BASE,
        .pci_dac_offset         = TITAN_DAC_OFFSET,

        .nr_irqs                = 80,   /* 64 + 16 */
        /* device_interrupt will be filled in by titan_init_irq */

        .agp_info               = titan_agp_info,

        .init_arch              = titan_init_arch,
        .init_irq               = titan_legacy_init_irq,
        .init_rtc               = common_init_rtc,
        .init_pci               = titan_init_pci,

        .kill_arch              = titan_kill_arch,
        .pci_map_irq            = titan_map_irq,
        .pci_swizzle            = common_swizzle,
};
ALIAS_MV(titan)

struct alpha_machine_vector privateer_mv __initmv = {
        .vector_name            = "PRIVATEER",
        DO_EV6_MMU,
        DO_DEFAULT_RTC,
        DO_TITAN_IO,
        .machine_check          = privateer_machine_check,
        .max_isa_dma_address    = ALPHA_MAX_ISA_DMA_ADDRESS,
        .min_io_address         = DEFAULT_IO_BASE,
        .min_mem_address        = DEFAULT_MEM_BASE,
        .pci_dac_offset         = TITAN_DAC_OFFSET,

        .nr_irqs                = 80,   /* 64 + 16 */
        /* device_interrupt will be filled in by titan_init_irq */

        .agp_info               = titan_agp_info,

        .init_arch              = titan_init_arch,
        .init_irq               = titan_legacy_init_irq,
        .init_rtc               = common_init_rtc,
        .init_pci               = privateer_init_pci,

        .kill_arch              = titan_kill_arch,
        .pci_map_irq            = titan_map_irq,
        .pci_swizzle            = common_swizzle,
};
/* No alpha_mv alias for privateer since we compile it
   in unconditionally with titan; setup_arch knows how to cope. */