1
2
3
4
5
6
7
8
9
10
11#include <linux/errno.h>
12#include <linux/percpu.h>
13#include <linux/spinlock.h>
14
15#include <asm/mips-cm.h>
16#include <asm/mips-cpc.h>
17
18void __iomem *mips_cpc_base;
19
20static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
21
22static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
23
24
25
26
27
28
29
30
31static phys_addr_t mips_cpc_phys_base(void)
32{
33 unsigned long cpc_base;
34
35 if (!mips_cm_present())
36 return 0;
37
38 if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX_MSK))
39 return 0;
40
41
42 cpc_base = read_gcr_cpc_base();
43 if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK)
44 return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK;
45
46
47 cpc_base = mips_cpc_default_phys_base();
48 write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK);
49 return cpc_base;
50}
51
52int mips_cpc_probe(void)
53{
54 phys_addr_t addr;
55 unsigned cpu;
56
57 for_each_possible_cpu(cpu)
58 spin_lock_init(&per_cpu(cpc_core_lock, cpu));
59
60 addr = mips_cpc_phys_base();
61 if (!addr)
62 return -ENODEV;
63
64 mips_cpc_base = ioremap_nocache(addr, 0x8000);
65 if (!mips_cpc_base)
66 return -ENXIO;
67
68 return 0;
69}
70
/*
 * mips_cpc_lock_other - lock access to another core's CPC registers
 * @core: the core number to redirect the CPC "other" region at
 *
 * Points the CPC core-other register region at @core so that subsequent
 * CPC_Cx_OTHER accesses target that core. Must be balanced by a call to
 * mips_cpc_unlock_other() from the same CPU. Preemption is disabled so
 * the caller cannot migrate off this core while the redirect is held;
 * the lock is taken with IRQs disabled, and the saved IRQ flags are
 * stashed in a per-CPU variable for the matching unlock.
 */
void mips_cpc_lock_other(unsigned int core)
{
	unsigned curr_core;
	preempt_disable();
	curr_core = current_cpu_data.core;
	/*
	 * Index the per-CPU lock & flags by core number rather than CPU
	 * number: all VPs on one core share a single core-other redirect.
	 * NOTE(review): this assumes core numbers stay within the per-CPU
	 * index range — confirm against the platform's CPU numbering.
	 */
	spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
			  per_cpu(cpc_core_lock_flags, curr_core));
	write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF);

	/*
	 * Barrier to ensure the redirect register write above completes
	 * before any following accesses to the core-other region occur.
	 */
	mb();
}
86
/*
 * mips_cpc_unlock_other - unlock access to another core's CPC registers
 *
 * Releases the lock taken by mips_cpc_lock_other(), restoring the IRQ
 * flags that were stashed in the per-CPU variable at lock time, then
 * re-enables preemption. Must be called on the same CPU (and thus the
 * same core) as the matching lock call.
 */
void mips_cpc_unlock_other(void)
{
	unsigned curr_core = current_cpu_data.core;
	spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
			       per_cpu(cpc_core_lock_flags, curr_core));
	preempt_enable();
}
94