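/*
 * sched_clock() for unstable CPU clocks.
 *
 * What:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 *
 * ######################### BIG FAT WARNING ##########################
 * # When comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards!!                                                   #
 * ####################################################################
 *
 * There is no strict promise about the base, although it tends to start
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i)  -- can be used from any context, including NMI.
 * local_clock() -- is cpu_clock() on the current CPU.
 *
 * How:
 *
 * The implementation either uses sched_clock() directly when
 * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, in which case sched_clock() is assumed
 * to provide these properties (mostly it means the architecture provides a
 * globally synchronized highres time source).
 *
 * Otherwise it tries to create a semi stable clock from a mixture of other
 * clocks, including:
 *
 *  - GTOD (clock monotonic)
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
 * deltas are filtered to provide monotonicity and to keep them within an
 * expected window.
 *
 * Furthermore, explicit sleep and wakeup interrupts need to be accounted for,
 * that is, when a CPU is idle we don't account for that sleep time.
 *
 * This does not promise monotonicity across CPUs.
 */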
#include "sched.h"
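/*
 * Scheduler clock - returns current time in nanoseconds, based on jiffies by
 * default. This is a weak default implementation; architectures and
 * sub-architectures can override it with something faster / higher resolution.
 */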
unsigned long long __weak sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);

__read_mostly int sched_clock_running;

void sched_clock_init(void)
{
	sched_clock_running = 1;
}

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
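/*
 * We must start with the static key !enabled (unstable) because the
 * unstable -> stable transition can be made continuous (see
 * __set_sched_clock_stable()), while the stable -> unstable transition
 * cannot.
 *
 * Similarly we start with __sched_clock_stable_early = 1, thereby assuming
 * we will become stable, so that no stability is lost before
 * sched_clock_init_late() makes the final decision.
 */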
static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
static int __sched_clock_stable_early = 1;

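/*
 * The two offsets maintain the invariant:
 *
 *	ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset
 */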
__read_mostly u64 __sched_clock_offset;
static __read_mostly u64 __gtod_offset;

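/*
 * Per-CPU state: raw sched_clock() and GTOD stamps taken at the last tick,
 * plus the last clock value returned on this CPU.
 */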
struct sched_clock_data {
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return this_cpu_ptr(&sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

int sched_clock_stable(void)
{
	return static_branch_likely(&__sched_clock_stable);
}

static void __scd_stamp(struct sched_clock_data *scd)
{
	scd->tick_gtod = ktime_get_ns();
	scd->tick_raw = sched_clock();
}

static void __set_sched_clock_stable(void)
{
	struct sched_clock_data *scd;

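	/*
	 * Since we're still unstable and the tick is already running, we have
	 * to disable IRQs in order to get a consistent scd->tick* reading.
	 */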
	local_irq_disable();
	scd = this_scd();
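	/*
	 * Attempt to make the (initial) unstable->stable transition
	 * continuous, i.e. pick the offset such that, right now,
	 * sched_clock() + __sched_clock_offset == ktime_get_ns() + __gtod_offset.
	 */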
	__sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);
	local_irq_enable();

	printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
			scd->tick_gtod, __gtod_offset,
			scd->tick_raw, __sched_clock_offset);

	static_branch_enable(&__sched_clock_stable);
	tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
}

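/*
 * If we ever get here, we're screwed, because we found out -- typically after
 * the fact -- that TSC wasn't good. This means all our clocksources (including
 * ktime) started reporting funny values.
 *
 * What we do here is an attempt to fix things up and continue sort of where
 * we left off in a coherent manner.
 *
 * The only way to fully avoid random clock jumps is to boot with
 * "tsc=unstable".
 */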
static void __sched_clock_work(struct work_struct *work)
{
	struct sched_clock_data *scd;
	int cpu;

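	/* take a current timestamp and set 'now' */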
	preempt_disable();
	scd = this_scd();
	__scd_stamp(scd);
	scd->clock = scd->tick_gtod + __gtod_offset;
	preempt_enable();

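	/* clone to all CPUs */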
	for_each_possible_cpu(cpu)
		per_cpu(sched_clock_data, cpu) = *scd;

	printk(KERN_WARNING "TSC found unstable after boot, most likely due to broken BIOS. Use 'tsc=unstable'.\n");
	printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
			scd->tick_gtod, __gtod_offset,
			scd->tick_raw, __sched_clock_offset);

	static_branch_disable(&__sched_clock_stable);
}

static DECLARE_WORK(sched_clock_work, __sched_clock_work);

static void __clear_sched_clock_stable(void)
{
	if (!sched_clock_stable())
		return;

	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
	schedule_work(&sched_clock_work);
}

void clear_sched_clock_stable(void)
{
	__sched_clock_stable_early = 0;

	smp_mb(); /* matches sched_clock_init_late() */

	if (sched_clock_running == 2)
		__clear_sched_clock_stable();
}

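/*
 * We run this as a late_initcall() such that it runs after all built-in
 * drivers, notably acpi_processor and intel_idle, which can mark the TSC
 * as unstable during boot.
 */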
static int __init sched_clock_init_late(void)
{
	sched_clock_running = 2;
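	/*
	 * Ensure that it is impossible to not do a static_key update:
	 * either {set,clear}_sched_clock_stable() must see sched_clock_running
	 * and do the update, or we must see their __sched_clock_stable_early
	 * and do the update, or both.
	 */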
	smp_mb(); /* matches {set,clear}_sched_clock_stable() */

	if (__sched_clock_stable_early)
		__set_sched_clock_stable();

	return 0;
}
late_initcall(sched_clock_init_late);

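/*
 * min()/max() variants that remain correct in the face of u64 wrap-around.
 */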
static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}

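/*
 * Update the per-CPU scd from the current sched_clock() value:
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */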
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock, gtod;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;

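	/*
	 * scd->clock = clamp(gtod + delta,
	 *		      max(gtod, old_clock),
	 *		      max(old_clock, gtod + TICK_NSEC));
	 */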
	gtod = scd->tick_gtod + __gtod_offset;
	clock = gtod + delta;
	min_clock = wrap_max(gtod, old_clock);
	max_clock = wrap_max(old_clock, gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}

static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

#if BITS_PER_LONG != 64
again:
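	/*
	 * Careful here: the local and the remote clock values need to be read
	 * out atomically, as we need to compare the values and then update
	 * either the local or the remote side. So the cmpxchg64() below only
	 * protects one readout.
	 *
	 * We must reread via sched_clock_local() in the retry case on 32-bit,
	 * as an NMI could use sched_clock_local() via the tracer and hit
	 * between the readout of the low and the high 32-bit portion.
	 */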
	this_clock = sched_clock_local(my_scd);
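	/*
	 * We must enforce atomic readout on 32-bit, otherwise the update on
	 * the remote CPU can hit in between the readout of the low and the
	 * high 32-bit portion.
	 */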
	remote_clock = cmpxchg64(&scd->clock, 0, 0);
#else
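	/*
	 * On 64-bit kernels the read of [my]scd->clock is atomic versus the
	 * update, so we can avoid the above 32-bit dance.
	 */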
	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;
#endif

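	/*
	 * Couple the two clocks: take the larger time as the latest time for
	 * both CPUs; this creates strictly forward (monotonic) movement.
	 */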
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
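		/*
		 * Should be rare, but possible:
		 */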
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}

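/*
 * Return the current (nanosecond) clock for @cpu:
 *
 *  - stable clock: sched_clock() plus the constant stable offset;
 *  - before sched_clock_init(): 0;
 *  - otherwise: the filtered per-CPU clock, via the local or remote path.
 */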
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	if (sched_clock_stable())
		return sched_clock() + __sched_clock_offset;

	if (unlikely(!sched_clock_running))
		return 0ull;

	preempt_disable_notrace();
	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(sched_clock_cpu);

void sched_clock_tick(void)
{
	struct sched_clock_data *scd;

	if (sched_clock_stable())
		return;

	if (unlikely(!sched_clock_running))
		return;

	lockdep_assert_irqs_disabled();

	scd = this_scd();
	__scd_stamp(scd);
	sched_clock_local(scd);
}

void sched_clock_tick_stable(void)
{
	u64 gtod, clock;

	if (!sched_clock_stable())
		return;

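	/*
	 * The clocksource watchdog just found this TSC to (still) be stable,
	 * so now is a good moment to update our __gtod_offset, such that we
	 * do not accumulate any drift between the TSC and GTOD while stable.
	 */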
	local_irq_disable();
	gtod = ktime_get_ns();
	clock = sched_clock();
	__gtod_offset = (clock + __sched_clock_offset) - gtod;
	local_irq_enable();
}

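/*
 * We are going deep-idle (IRQs are disabled):
 */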
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

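/*
 * We just idled; resync with ktime.
 */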
void sched_clock_idle_wakeup_event(void)
{
	unsigned long flags;

	if (sched_clock_stable())
		return;

	if (unlikely(timekeeping_suspended))
		return;

	local_irq_save(flags);
	sched_clock_tick();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

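/*
 * Running clock - returns the time that has elapsed while a guest has been
 * running.
 *
 * On a guest this value should be local_clock minus the time the guest was
 * suspended by the hypervisor (for any reason). On bare metal this function
 * should return the same as local_clock.
 *
 * Architectures and sub-architectures can override this.
 */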
u64 __weak running_clock(void)
{
	return local_clock();
}