linux/arch/powerpc/platforms/cell/pervasive.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CBE Pervasive Monitor and Debug
 *
 * (C) Copyright IBM Corporation 2005
 *
 * Authors: Maximino Aguilar (maguilar@us.ibm.com)
 *          Michael N. Day (mnday@us.ibm.com)
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
#include <linux/pgtable.h>

#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/reg.h>
#include <asm/cell-regs.h>
#include <asm/cpu_has_feature.h>

#include "pervasive.h"

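/*
 * Idle hook, installed as ppc_md.power_save below.  It enables the
 * decrementer and external-interrupt wake-up conditions for the current
 * hardware thread in the thread switch control register, drops to low
 * thread priority and then clears CTRL[TE] and the runlatch, so the
 * thread stops executing until one of the enabled wake-up events fires.
 */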
static void cbe_power_save(void)
{
	unsigned long ctrl, thread_switch_control;

	/* Ensure our interrupt state is properly tracked */
	if (!prep_irq_for_idle())
		return;

	ctrl = mfspr(SPRN_CTRLF);

	/* Enable DEC and EE interrupt requests */
	thread_switch_control  = mfspr(SPRN_TSC_CELL);
	thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;

	switch (ctrl & CTRL_CT) {
	case CTRL_CT0:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
		break;
	case CTRL_CT1:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_1;
		break;
	default:
		printk(KERN_WARNING "%s: unknown configuration\n",
			__func__);
		break;
	}
	mtspr(SPRN_TSC_CELL, thread_switch_control);

	/*
	 * Go into low thread priority; medium priority will be
	 * restored for us after wake-up.
	 */
	HMT_low();

	/*
	 * Atomically disable thread execution and the runlatch.
	 * External and decrementer exceptions are still delivered while the
	 * thread is disabled, but now come in through
	 * cbe_system_reset_exception().
	 */
	ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
	mtspr(SPRN_CTRLT, ctrl);

	/* Re-enable interrupts in the MSR */
	__hard_irq_enable();
}

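/*
 * Installed as ppc_md.system_reset_exception below.  A wake-up from the
 * paused state arrives as a system reset; decode the wake reason from
 * SRR1 and either defer it (decrementer/external wake-ups are handled as
 * regular exceptions once interrupts are re-enabled), hand a thread-switch
 * wake-up to cbe_sysreset_hack(), or dispatch it to the RAS handlers.
 * Returning 0 lets the generic system reset handling run.
 */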
static int cbe_system_reset_exception(struct pt_regs *regs)
{
	switch (regs->msr & SRR1_WAKEMASK) {
	case SRR1_WAKEDEC:
		set_dec(1);
		fallthrough;
	case SRR1_WAKEEE:
		/*
		 * Handle these when interrupts get re-enabled and we take
		 * them as regular exceptions. We are in an NMI context
		 * and can't handle these here.
		 */
		break;
	case SRR1_WAKEMT:
		return cbe_sysreset_hack();
#ifdef CONFIG_CBE_RAS
	case SRR1_WAKESYSERR:
		cbe_system_error_exception(regs);
		break;
	case SRR1_WAKETHERM:
		cbe_thermal_exception(regs);
		break;
#endif /* CONFIG_CBE_RAS */
	default:
		/* do system reset */
		return 0;
	}
	/* everything handled */
	return 1;
}

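/*
 * Called at boot on CBE.  If the CPU supports Pause(0), turn on the
 * Pause(0) control bit in each CPU's PMD register block and hook up the
 * power-save and system-reset handlers above.
 */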
void __init cbe_pervasive_init(void)
{
	int cpu;

	if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
		return;

	for_each_possible_cpu(cpu) {
		struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu);
		if (!regs)
			continue;

		/* Enable Pause(0) control bit */
		out_be64(&regs->pmcr, in_be64(&regs->pmcr) |
					    CBE_PMD_PAUSE_ZERO_CONTROL);
	}

	ppc_md.power_save = cbe_power_save;
	ppc_md.system_reset_exception = cbe_system_reset_exception;
}