/*
 * menu.c - the menu idle governor
 *
 * This code is licensed under the GPL version 2.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/module.h>

#define BUCKETS 12
#define INTERVALS 8
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
#define STDDEV_THRESH 400

/* 60 * 60 > STDDEV_THRESH * INTERVALS = 400 * 8 */
#define MAX_DEVIATION 60

static DEFINE_PER_CPU(struct hrtimer, menu_hrtimer);
static DEFINE_PER_CPU(int, hrtimer_status);
/* menu hrtimer mode */
enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};
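
/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time
 * in the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that
 * we need is a good prediction of how long we'll be idle. We start with
 * the known "next timer event" time and apply a correction factor based on
 * historic behavior, since other wakeup sources (interrupts, for example)
 * usually cut the sleep short. The correction factor depends strongly on
 * the order of magnitude of the expected duration and on whether (disk)
 * IO is outstanding, so we keep an array of 12 independent factors,
 * indexed by the magnitude of the expected duration and the "is IO
 * outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are cases where the "next timer" is a completely unusable
 * predictor: those where the wakeup interval is fixed, for example due to
 * hardware interrupt mitigation or fixed transfer rate devices such as
 * mice. For this we use a different predictor: we track the duration of
 * the last 8 intervals, and if the standard deviation of these intervals
 * is below a threshold, we use their average as the prediction.
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads. As a general rule of thumb, menu assumes
 * that the busier the system, the less impact of C states is acceptable.
 * This is implemented as a performance multiplier: if the exit latency
 * times the multiplier is longer than the predicted duration, the C state
 * is not considered a candidate for selection. The multiplier is scaled up
 * with the per-cpu load average and with the number of tasks waiting for
 * IO on this CPU.
 */

/*
 * The C-state residency is so long that it is worthwhile to exit
 * from the shallow C-state and re-enter into a deeper C-state.
 */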
static unsigned int perfect_cstate_ms __read_mostly = 30;
module_param(perfect_cstate_ms, uint, 0000);

struct menu_device {
	int		last_state_idx;
	int		needs_update;

	unsigned int	expected_us;
	u64		predicted_us;
	unsigned int	exit_us;
	unsigned int	bucket;
	u64		correction_factor[BUCKETS];
	u32		intervals[INTERVALS];
	int		interval_ptr;
};

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
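
/* Return the current cpu load in tenths; a load average of 1.00 reads as 10 */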
static int get_loadavg(void)
{
	unsigned long this = this_cpu_load();

	return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10;
}

static inline int which_bucket(unsigned int duration)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with no
	 * IO pending, one with.
	 * This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowait_cpu(smp_processor_id()))
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}
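
/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */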
static inline int performance_multiplier(void)
{
	int mult = 1;

	/* for higher loadavg, we are more reluctant */
	mult += 2 * get_loadavg();

	/* for IO wait tasks (per cpu!) we add 10x each */
	mult += 10 * nr_iowait_cpu(smp_processor_id());

	return mult;
}

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
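
/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */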
static u64 div_round64(u64 dividend, u32 divisor)
{
	return div_u64(dividend + (divisor / 2), divisor);
}
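
/* Cancel the hrtimer if it is not triggered yet */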
void menu_hrtimer_cancel(void)
{
	int cpu = smp_processor_id();
	struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);

	/* The timer is still not triggered */
	if (per_cpu(hrtimer_status, cpu)) {
		hrtimer_cancel(hrtmr);
		per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
	}
}
EXPORT_SYMBOL_GPL(menu_hrtimer_cancel);
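
/* Call back for hrtimer is triggered */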
static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer)
{
	int cpu = smp_processor_id();
	struct menu_device *data = &per_cpu(menu_devices, cpu);

	/*
	 * In the general case the expected residency is much larger than
	 * the deepest C-state target residency, but the prediction logic
	 * still predicted a small residency. If this timer fires, that
	 * prediction history is totally broken, so reset the correction
	 * factor for this bucket back to its original (unity) value.
	 */
	if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;

	return HRTIMER_NORESTART;
}
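
/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */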
static u32 get_typical_interval(struct menu_device *data)
{
	int i = 0, divisor = 0;
	uint64_t max = 0, avg = 0, stddev = 0;
	int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */
	unsigned int ret = 0;

again:

	/* first calculate average and standard deviation of the past */
	max = avg = divisor = stddev = 0;
	for (i = 0; i < INTERVALS; i++) {
		int64_t value = data->intervals[i];
		if (value <= thresh) {
			avg += value;
			divisor++;
			if (value > max)
				max = value;
		}
	}
	do_div(avg, divisor);

	for (i = 0; i < INTERVALS; i++) {
		int64_t value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = value - avg;
			stddev += diff * diff;
		}
	}
	do_div(stddev, divisor);
	stddev = int_sqrt(stddev);

	/*
	 * The typical interval is obtained when the standard deviation is
	 * small (stddev <= 20 us) or small compared to the average
	 * interval (avg > 6 * stddev).
	 *
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 */
	if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
							|| stddev <= 20) {
		data->predicted_us = avg;
		ret = 1;
		return ret;

	} else if ((divisor * 4) > INTERVALS * 3) {
		/* Exclude the max interval */
		thresh = max - 1;
		goto again;
	}

	return ret;
}
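
/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */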
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	int multiplier;
	struct timespec t;
	int repeat = 0, low_predicted = 0;
	int cpu = smp_processor_id();
	struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	data->last_state_idx = 0;
	data->exit_us = 0;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	t = ktime_to_timespec(tick_nohz_get_sleep_length());
	data->expected_us =
		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;

	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * if the correction factor is 0 (eg first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/* Make sure to round up for half microseconds */
	data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	repeat = get_typical_interval(data);

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->expected_us > 5 &&
	    !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
		dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (s->target_residency > data->predicted_us) {
			low_predicted = 1;
			continue;
		}
		if (s->exit_latency > latency_req)
			continue;
		if (s->exit_latency * multiplier > data->predicted_us)
			continue;

		data->last_state_idx = i;
		data->exit_us = s->exit_latency;
	}

	/* not deepest C-state chosen for low predicted residency */
	if (low_predicted) {
		unsigned int timer_us = 0;
		unsigned int perfect_us = 0;

		/*
		 * Set a timer to detect whether this sleep is much
		 * longer than repeat mode predicted.  If the timer
		 * triggers, the code will evaluate whether to put
		 * the CPU into a deeper C-state.
		 * The timer is cancelled on CPU wakeup.
		 */
		timer_us = 2 * (data->predicted_us + MAX_DEVIATION);

		perfect_us = perfect_cstate_ms * 1000;

		if (repeat && (4 * timer_us < data->expected_us)) {
			RCU_NONIDLE(hrtimer_start(hrtmr,
				ns_to_ktime(1000 * timer_us),
				HRTIMER_MODE_REL_PINNED));
			/* In repeat case, menu hrtimer is started */
			per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT;
		} else if (perfect_us < data->expected_us) {
			/*
			 * The next timer is long. This could be because
			 * we did not make a useful prediction.
			 * In that case, it makes sense to re-enter
			 * into a deeper C-state after some time.
			 */
			RCU_NONIDLE(hrtimer_start(hrtmr,
				ns_to_ktime(1000 * timer_us),
				HRTIMER_MODE_REL_PINNED));
			/* In general case, menu hrtimer is started */
			per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL;
		}

	}

	return data->last_state_idx;
}
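
/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 *       the overall exit latency.
 */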
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	data->last_state_idx = index;
	if (index >= 0)
		data->needs_update = 1;
}
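
/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */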
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int last_idx = data->last_state_idx;
	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
	struct cpuidle_state *target = &drv->states[last_idx];
	unsigned int measured_us;
	u64 new_factor;

	/*
	 * Ugh, this idle state doesn't support residency measurements, so we
	 * are basically lost in the dark.  As a compromise, assume we slept
	 * for the whole expected time.
	 */
	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
		last_idle_us = data->expected_us;

	measured_us = last_idle_us;

	/*
	 * We correct for the exit latency; we are assuming here that the
	 * exit latency happens after the event that we're interested in.
	 */
	if (measured_us > data->exit_us)
		measured_us -= data->exit_us;

	/* update our correction ratio with a decaying running average */
	new_factor = data->correction_factor[data->bucket]
			* (DECAY - 1) / DECAY;

	if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->expected_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time.
	 */
	if (new_factor == 0)
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = last_idle_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}
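
/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */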
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	struct hrtimer *t = &per_cpu(menu_hrtimer, dev->cpu);
	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t->function = menu_hrtimer_notify;

	memset(data, 0, sizeof(struct menu_device));

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
	.owner =	THIS_MODULE,
};
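
/**
 * init_menu - initializes the governor
 */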
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}
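
/**
 * exit_menu - exits the governor
 */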
static void __exit exit_menu(void)
{
	cpuidle_unregister_governor(&menu_governor);
}

MODULE_LICENSE("GPL");
module_init(init_menu);
module_exit(exit_menu);