/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>

/* Virtual base of the CPC register block; NULL until mips_cpc_probe() maps it */
void __iomem *mips_cpc_base;

/* Per-core lock serialising writes to the CPC core-other register region */
static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);

/*
 * IRQ flags saved by spin_lock_irqsave() in mips_cpc_lock_other(). Kept
 * per-cpu (rather than on the stack) because the matching restore happens
 * in a different function, mips_cpc_unlock_other().
 */
static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
  23
/**
 * mips_cpc_default_phys_base - retrieve the default physical base address of
 *                              the CPC
 *
 * Weak default returning 0 (no address available). Platforms may override
 * this to supply the physical address at which the CPC should be mapped
 * when it has not already been enabled (see mips_cpc_phys_base()).
 */
phys_addr_t __weak mips_cpc_default_phys_base(void)
{
	return 0;
}
  28
/**
 * mips_cpc_phys_base - retrieve the physical base address of the CPC
 *
 * This function returns the physical base address of the Cluster Power
 * Controller memory mapped registers, or 0 if no Cluster Power Controller
 * is present (no CM, or the CM reports no CPC) and no platform default
 * address is provided.
 */
static phys_addr_t mips_cpc_phys_base(void)
{
	/*
	 * NOTE(review): read_gcr_cpc_base() is held in an unsigned long while
	 * the return type is phys_addr_t; on 32-bit kernels with extended
	 * physical addressing these widths may differ — confirm against the
	 * GCR accessor definitions.
	 */
	unsigned long cpc_base;

	/* No Coherence Manager means no CPC registers to find */
	if (!mips_cm_present())
		return 0;

	/* The CM's CPC status register indicates whether a CPC exists */
	if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX_MSK))
		return 0;

	/* If the CPC is already enabled, leave it so */
	cpc_base = read_gcr_cpc_base();
	if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK)
		return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK;

	/* Otherwise, use the default address */
	cpc_base = mips_cpc_default_phys_base();
	if (!cpc_base)
		return cpc_base;

	/* Enable the CPC, mapped at the default address */
	write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK);
	return cpc_base;
}
  60
  61int mips_cpc_probe(void)
  62{
  63        phys_addr_t addr;
  64        unsigned int cpu;
  65
  66        for_each_possible_cpu(cpu)
  67                spin_lock_init(&per_cpu(cpc_core_lock, cpu));
  68
  69        addr = mips_cpc_phys_base();
  70        if (!addr)
  71                return -ENODEV;
  72
  73        mips_cpc_base = ioremap_nocache(addr, 0x8000);
  74        if (!mips_cpc_base)
  75                return -ENXIO;
  76
  77        return 0;
  78}
  79
/*
 * mips_cpc_lock_other - lock access to another core's CPC registers
 * @core: the core whose register region should be mapped via core-other
 *
 * Takes the current core's CPC lock (IRQs disabled) and points the CPC
 * core-other region at @core. Must be balanced by mips_cpc_unlock_other();
 * preemption stays disabled in between so we restore the same core's lock.
 */
void mips_cpc_lock_other(unsigned int core)
{
	unsigned int curr_core;

	if (mips_cm_revision() >= CM_REV_CM3)
		/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
		return;

	/* Disabled until mips_cpc_unlock_other(); keeps curr_core stable */
	preempt_disable();
	curr_core = current_cpu_data.core;
	/* Saved IRQ flags live in a per-cpu variable so unlock can restore them */
	spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
			  per_cpu(cpc_core_lock_flags, curr_core));
	write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF);

	/*
	 * Ensure the core-other region reflects the appropriate core &
	 * VP before any accesses to it occur.
	 */
	mb();
}
 100
/*
 * mips_cpc_unlock_other - unlock access to another core's CPC registers
 *
 * Releases the lock taken by mips_cpc_lock_other(), restoring the IRQ
 * flags saved there, and re-enables preemption. No-op on CM >= 3 systems,
 * matching the early return in mips_cpc_lock_other().
 */
void mips_cpc_unlock_other(void)
{
	unsigned int curr_core;

	if (mips_cm_revision() >= CM_REV_CM3)
		/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
		return;

	/* Safe to read: preemption has been disabled since lock_other */
	curr_core = current_cpu_data.core;
	spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
			       per_cpu(cpc_core_lock_flags, curr_core));
	preempt_enable();
}
 114