linux/arch/mips/kernel/mips-cpc.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/spinlock.h>

#include <asm/mips-cps.h>

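/*
 * The base address of the Cluster Power Controller (CPC) register region,
 * or NULL if no CPC is present. Set up by mips_cpc_probe().
 */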
void __iomem *mips_cpc_base;

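/*
 * Per-core lock (and saved IRQ flags) serialising use of the CPC core-other
 * register region, which mips_cpc_lock_other() redirects at a target core.
 * Only needed with CM < 3; newer systems lock the region via
 * mips_cm_lock_other() instead.
 */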
static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);

static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);

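/**
 * mips_cpc_default_phys_base - retrieve the default physical base address
 * of the CPC
 *
 * Returns the default physical base address of the Cluster Power Controller
 * memory mapped registers, read from the "mti,mips-cpc" device tree node, or
 * 0 if none is found. Declared weak so that platform code may override it.
 */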
phys_addr_t __weak mips_cpc_default_phys_base(void)
{
	struct device_node *cpc_node;
	struct resource res;
	int err;

	cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
	if (cpc_node) {
		err = of_address_to_resource(cpc_node, 0, &res);
		/* Drop the reference taken by of_find_compatible_node() */
		of_node_put(cpc_node);
		if (!err)
			return res.start;
	}

	return 0;
}

/**
 * mips_cpc_phys_base - retrieve the physical base address of the CPC
 *
 * This function returns the physical base address of the Cluster Power
 * Controller memory mapped registers, or 0 if no Cluster Power Controller
 * is present.
 */
static phys_addr_t mips_cpc_phys_base(void)
{
	unsigned long cpc_base;

	if (!mips_cm_present())
		return 0;

	if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX))
		return 0;

	/* If the CPC is already enabled, leave it so */
	cpc_base = read_gcr_cpc_base();
	if (cpc_base & CM_GCR_CPC_BASE_CPCEN)
		return cpc_base & CM_GCR_CPC_BASE_CPCBASE;

	/* Otherwise, use the default address */
	cpc_base = mips_cpc_default_phys_base();
	if (!cpc_base)
		return cpc_base;

	/* Enable the CPC, mapped at the default address */
	write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN);
	return cpc_base;
}

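/**
 * mips_cpc_probe - probe for and map the Cluster Power Controller
 *
 * Initialises the per-core locks, then discovers the physical base address
 * of the CPC and maps its register region. Returns 0 on success, -ENODEV if
 * no CPC is present, or -ENXIO if its registers could not be mapped.
 */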
int mips_cpc_probe(void)
{
	phys_addr_t addr;
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu(cpc_core_lock, cpu));

	addr = mips_cpc_phys_base();
	if (!addr)
		return -ENODEV;

	/* Map the 32KiB CPC register region */
	mips_cpc_base = ioremap(addr, 0x8000);
	if (!mips_cpc_base)
		return -ENXIO;

	return 0;
}

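/**
 * mips_cpc_lock_other - lock access to another core's CPC registers
 * @core: the other core whose register region is to be accessed
 *
 * Call before accessing a core via the CPC core-other register region, to
 * prevent the region being redirected during the access; each call must be
 * balanced by a call to mips_cpc_unlock_other(). On systems with CM >= 3
 * this is a no-op, since the region is locked via mips_cm_lock_other()
 * instead. An illustrative sketch of the expected pattern (the register
 * read is an arbitrary example, not something this file defines):
 *
 *	mips_cpc_lock_other(core);
 *	stat = read_cpc_co_stat_conf();
 *	mips_cpc_unlock_other();
 */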
void mips_cpc_lock_other(unsigned int core)
{
	unsigned int curr_core;

	if (mips_cm_revision() >= CM_REV_CM3)
		/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
		return;

	preempt_disable();
	curr_core = cpu_core(&current_cpu_data);
	spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
			  per_cpu(cpc_core_lock_flags, curr_core));
	write_cpc_cl_other(core << __ffs(CPC_Cx_OTHER_CORENUM));

	/*
	 * Ensure the core-other region reflects the appropriate core &
	 * VP before any accesses to it occur.
	 */
	mb();
}

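/**
 * mips_cpc_unlock_other - unlock access to another core's CPC registers
 *
 * Releases the lock taken by a previous call to mips_cpc_lock_other(). On
 * systems with CM >= 3 this is a no-op, matching mips_cpc_lock_other().
 */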
void mips_cpc_unlock_other(void)
{
	unsigned int curr_core;

	if (mips_cm_revision() >= CM_REV_CM3)
		/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
		return;

	curr_core = cpu_core(&current_cpu_data);
	spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
			       per_cpu(cpc_core_lock_flags, curr_core));
	preempt_enable();
}