1
2
3
4
5
6
7
8
9
10
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/cpufreq.h>
16#include <linux/acpi.h>
17#include <acpi/processor.h>
18#include <linux/uaccess.h>
19
20#define PREFIX "ACPI: "
21
22#define ACPI_PROCESSOR_CLASS "processor"
23#define _COMPONENT ACPI_PROCESSOR_COMPONENT
24ACPI_MODULE_NAME("processor_thermal");
25
26#ifdef CONFIG_CPU_FREQ
27
28
29
30
31
32
33
34#define CPUFREQ_THERMAL_MIN_STEP 0
35#define CPUFREQ_THERMAL_MAX_STEP 3
36
37static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
38static unsigned int acpi_thermal_cpufreq_is_init = 0;
39
40#define reduction_pctg(cpu) \
41 per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
42
43
44
45
46
47
48
49
50static int phys_package_first_cpu(int cpu)
51{
52 int i;
53 int id = topology_physical_package_id(cpu);
54
55 for_each_online_cpu(i)
56 if (topology_physical_package_id(i) == id)
57 return i;
58 return 0;
59}
60
61static int cpu_has_cpufreq(unsigned int cpu)
62{
63 struct cpufreq_policy policy;
64 if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu))
65 return 0;
66 return 1;
67}
68
69static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
70 unsigned long event, void *data)
71{
72 struct cpufreq_policy *policy = data;
73 unsigned long max_freq = 0;
74
75 if (event != CPUFREQ_ADJUST)
76 goto out;
77
78 max_freq = (
79 policy->cpuinfo.max_freq *
80 (100 - reduction_pctg(policy->cpu) * 20)
81 ) / 100;
82
83 cpufreq_verify_within_limits(policy, 0, max_freq);
84
85 out:
86 return 0;
87}
88
/* Registered with the cpufreq core as a policy notifier in
 * acpi_thermal_cpufreq_init(). */
static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
	.notifier_call = acpi_thermal_cpufreq_notifier,
};
92
93static int cpufreq_get_max_state(unsigned int cpu)
94{
95 if (!cpu_has_cpufreq(cpu))
96 return 0;
97
98 return CPUFREQ_THERMAL_MAX_STEP;
99}
100
/*
 * Current cpufreq-based cooling step recorded for @cpu's physical
 * package; 0 when cpufreq is not usable on that CPU.
 */
static int cpufreq_get_cur_state(unsigned int cpu)
{
	return cpu_has_cpufreq(cpu) ? reduction_pctg(cpu) : 0;
}
108
109static int cpufreq_set_cur_state(unsigned int cpu, int state)
110{
111 int i;
112
113 if (!cpu_has_cpufreq(cpu))
114 return 0;
115
116 reduction_pctg(cpu) = state;
117
118
119
120
121
122
123 for_each_online_cpu(i) {
124 if (topology_physical_package_id(i) ==
125 topology_physical_package_id(cpu))
126 cpufreq_update_policy(i);
127 }
128 return 0;
129}
130
131void acpi_thermal_cpufreq_init(void)
132{
133 int i;
134
135 i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
136 CPUFREQ_POLICY_NOTIFIER);
137 if (!i)
138 acpi_thermal_cpufreq_is_init = 1;
139}
140
141void acpi_thermal_cpufreq_exit(void)
142{
143 if (acpi_thermal_cpufreq_is_init)
144 cpufreq_unregister_notifier
145 (&acpi_thermal_cpufreq_notifier_block,
146 CPUFREQ_POLICY_NOTIFIER);
147
148 acpi_thermal_cpufreq_is_init = 0;
149}
150
151#else
/* !CONFIG_CPU_FREQ stub: cpufreq contributes no cooling states. */
static int cpufreq_get_max_state(unsigned int cpu)
{
	return 0;
}
156
/* !CONFIG_CPU_FREQ stub: the cpufreq-based cooling state is always 0. */
static int cpufreq_get_cur_state(unsigned int cpu)
{
	return 0;
}
161
/* !CONFIG_CPU_FREQ stub: nothing to set; silently succeed. */
static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	return 0;
}
166
167#endif
168
169
170static int acpi_processor_max_state(struct acpi_processor *pr)
171{
172 int max_state = 0;
173
174
175
176
177
178 max_state += cpufreq_get_max_state(pr->id);
179 if (pr->flags.throttling)
180 max_state += (pr->throttling.state_count -1);
181
182 return max_state;
183}
184static int
185processor_get_max_state(struct thermal_cooling_device *cdev,
186 unsigned long *state)
187{
188 struct acpi_device *device = cdev->devdata;
189 struct acpi_processor *pr;
190
191 if (!device)
192 return -EINVAL;
193
194 pr = acpi_driver_data(device);
195 if (!pr)
196 return -EINVAL;
197
198 *state = acpi_processor_max_state(pr);
199 return 0;
200}
201
202static int
203processor_get_cur_state(struct thermal_cooling_device *cdev,
204 unsigned long *cur_state)
205{
206 struct acpi_device *device = cdev->devdata;
207 struct acpi_processor *pr;
208
209 if (!device)
210 return -EINVAL;
211
212 pr = acpi_driver_data(device);
213 if (!pr)
214 return -EINVAL;
215
216 *cur_state = cpufreq_get_cur_state(pr->id);
217 if (pr->flags.throttling)
218 *cur_state += pr->throttling.state;
219 return 0;
220}
221
222static int
223processor_set_cur_state(struct thermal_cooling_device *cdev,
224 unsigned long state)
225{
226 struct acpi_device *device = cdev->devdata;
227 struct acpi_processor *pr;
228 int result = 0;
229 int max_pstate;
230
231 if (!device)
232 return -EINVAL;
233
234 pr = acpi_driver_data(device);
235 if (!pr)
236 return -EINVAL;
237
238 max_pstate = cpufreq_get_max_state(pr->id);
239
240 if (state > acpi_processor_max_state(pr))
241 return -EINVAL;
242
243 if (state <= max_pstate) {
244 if (pr->flags.throttling && pr->throttling.state)
245 result = acpi_processor_set_throttling(pr, 0, false);
246 cpufreq_set_cur_state(pr->id, state);
247 } else {
248 cpufreq_set_cur_state(pr->id, max_pstate);
249 result = acpi_processor_set_throttling(pr,
250 state - max_pstate, false);
251 }
252 return result;
253}
254
/* Cooling device callbacks handed to the generic thermal framework when
 * the processor is registered as a cooling device. */
const struct thermal_cooling_device_ops processor_cooling_ops = {
	.get_max_state = processor_get_max_state,
	.get_cur_state = processor_get_cur_state,
	.set_cur_state = processor_set_cur_state,
};
260