// SPDX-License-Identifier: GPL-2.0
/*
 * Generic sched_clock() support, to extend low level hardware time
 * counters to full 64-bit ns values.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

#include "timekeeping.h"

/**
 * struct clock_read_data - data required to read from sched_clock()
 *
 * @epoch_ns:		sched_clock() value at last update
 * @epoch_cyc:		Clock cycle value at last update.
 * @sched_clock_mask:	Bitmask for two's complement subtraction of non 64bit
 *			clocks.
 * @read_sched_clock:	Current clock source (or dummy source when suspended).
 * @mult:		Multiplier for scaled math conversion.
 * @shift:		Shift value for scaled math conversion.
 *
 * Care must be taken when updating this structure; it is read by
 * some very hot code paths. It occupies <=40 bytes and, when combined
 * with the seqcount used to synchronize access, comfortably fits into
 * a 64 byte cache line.
 */
struct clock_read_data {
	u64 epoch_ns;
	u64 epoch_cyc;
	u64 sched_clock_mask;
	u64 (*read_sched_clock)(void);
	u32 mult;
	u32 shift;
};
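
/*
 * A sketch of the invariant this structure encodes (restating the math
 * used by sched_clock() below, not adding behaviour): at any instant,
 *
 *	sched_clock() == epoch_ns +
 *		cyc_to_ns((read_sched_clock() - epoch_cyc) &
 *			  sched_clock_mask, mult, shift)
 *
 * Updates only slide the (epoch_cyc, epoch_ns) pair forward along this
 * line, which is why readers racing with an update still observe a
 * continuous clock.
 */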

/**
 * struct clock_data - all data needed for sched_clock() (including
 *                     registration of a new clock source)
 *
 * @seq:		Sequence counter for protecting updates. The lowest
 *			bit is the index for @read_data.
 * @read_data:		Data required to read from sched_clock.
 * @wrap_kt:		Duration for which clock can run before wrapping.
 * @rate:		Tick rate of the registered clock.
 * @actual_read_sched_clock: Registered hardware level clock read function.
 *
 * The ordering of this structure has been chosen to optimize cache
 * performance. In particular 'seq' and 'read_data[0]' (combined) should
 * fit into a single 64-byte cache line.
 */
struct clock_data {
	seqcount_t seq;
	struct clock_read_data read_data[2];
	ktime_t wrap_kt;
	unsigned long rate;

	u64 (*actual_read_sched_clock)(void);
};

static struct hrtimer sched_clock_timer;
static int irqtime = -1;

/* irqtime: -1 = auto (on if the clock rate is >= 1MHz), 0 = off, 1 = on */
core_param(irqtime, irqtime, int, 0400);

static u64 notrace jiffy_sched_clock_read(void)
{
	/*
	 * We don't need to use get_jiffies_64 on 32-bit arches here
	 * because we register with BITS_PER_LONG.
	 */
	return (u64)(jiffies - INITIAL_JIFFIES);
}

static struct clock_data cd ____cacheline_aligned = {
	.read_data[0] = { .mult = NSEC_PER_SEC / HZ,
			  .read_sched_clock = jiffy_sched_clock_read, },
	.actual_read_sched_clock = jiffy_sched_clock_read,
};
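
/*
 * Until a real clock source is registered, sched_clock() runs off the
 * jiffy-based state above. As a worked example (assuming HZ == 100
 * purely for illustration): mult == NSEC_PER_SEC / HZ == 10000000 and
 * shift == 0, so each jiffy advances sched_clock() by 10ms, i.e.
 * cyc_to_ns(1, 10000000, 0) == 10000000ns.
 */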

static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}
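
/*
 * Worked example of the scaled math above, with illustrative values
 * rather than any particular platform's: for a 10MHz counter (100ns
 * per tick), mult == 100 with shift == 0 gives
 * cyc_to_ns(25, 100, 0) == 2500ns. In practice
 * clocks_calc_mult_shift() picks a non-zero shift and scales mult up
 * accordingly to keep precision, e.g. mult == 100 << 24 with
 * shift == 24 encodes the same 100ns-per-tick ratio.
 */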

unsigned long long notrace sched_clock(void)
{
	u64 cyc, res;
	unsigned int seq;
	struct clock_read_data *rd;

	do {
		seq = raw_read_seqcount(&cd.seq);
		rd = cd.read_data + (seq & 1);

		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
		      rd->sched_clock_mask;
		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
	} while (read_seqcount_retry(&cd.seq, seq));

	return res;
}
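
/*
 * The masked subtraction in sched_clock() is what keeps the result
 * correct across counter wrap. For a hypothetical 32-bit counter whose
 * epoch_cyc was captured at 0xfffffff0 and which now reads 0x00000010:
 * (0x00000010 - 0xfffffff0) & CLOCKSOURCE_MASK(32) == 0x20, the right
 * 32-tick delta, provided update_sched_clock() has run at least once
 * per wrap period (which is what sched_clock_timer arranges).
 */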

/*
 * Updating the data required to read the clock.
 *
 * sched_clock() will never observe mis-matched data even if called from
 * an NMI. We do this by maintaining an odd/even copy of the data and
 * steering sched_clock() to one or the other using a sequence counter.
 * In order to preserve the data cache profile of sched_clock() as much
 * as possible the system reverts back to the even copy when the update
 * completes; the odd copy is used *only* during an update.
 */
static void update_clock_read_data(struct clock_read_data *rd)
{
	/* update the backup (odd) copy with the new data */
	cd.read_data[1] = *rd;

	/* steer readers towards the odd copy */
	raw_write_seqcount_latch(&cd.seq);

	/* now it's safe for us to update the normal (even) copy */
	cd.read_data[0] = *rd;

	/* switch readers back to the even copy */
	raw_write_seqcount_latch(&cd.seq);
}
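
/*
 * A worked trace of the latch above: with cd.seq initially even,
 * readers index copy 0. The first raw_write_seqcount_latch() makes the
 * count odd, steering readers to the freshly written copy 1 while copy
 * 0 is updated; the second makes it even again, steering readers back
 * to copy 0. A reader that straddles either transition fails
 * read_seqcount_retry() and simply loops.
 */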

/*
 * Atomically update the sched_clock() epoch.
 */
static void update_sched_clock(void)
{
	u64 cyc;
	u64 ns;
	struct clock_read_data rd;

	rd = cd.read_data[0];

	cyc = cd.actual_read_sched_clock();
	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);

	rd.epoch_ns = ns;
	rd.epoch_cyc = cyc;

	update_clock_read_data(&rd);
}

static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
	update_sched_clock();
	hrtimer_forward_now(hrt, cd.wrap_kt);

	return HRTIMER_RESTART;
}

void __init
sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
{
	u64 res, wrap, new_mask, new_epoch, cyc, ns;
	u32 new_mult, new_shift;
	unsigned long r, flags;
	char r_unit;
	struct clock_read_data rd;

	if (cd.rate > rate)
		return;

	/* Cannot register a sched_clock with interrupts on */
	local_irq_save(flags);

	/* Calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

	new_mask = CLOCKSOURCE_MASK(bits);
	cd.rate = rate;

	/* Calculate how many nanosecs until we risk wrapping */
	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
	cd.wrap_kt = ns_to_ktime(wrap);

	rd = cd.read_data[0];

	/* Update epoch for new counter and update 'epoch_ns' from old counter */
	new_epoch = read();
	cyc = cd.actual_read_sched_clock();
	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
	cd.actual_read_sched_clock = read;

	rd.read_sched_clock = read;
	rd.sched_clock_mask = new_mask;
	rd.mult = new_mult;
	rd.shift = new_shift;
	rd.epoch_cyc = new_epoch;
	rd.epoch_ns = ns;

	update_clock_read_data(&rd);

	if (sched_clock_timer.function != NULL) {
		/* Update the timeout for clock wrap */
		hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
	}

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else {
		r_unit = ' ';
	}

	/* Calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, new_mult, new_shift);

	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
		bits, r, r_unit, res, wrap);

	/* Enable IRQ time accounting if we have a fast enough sched_clock() */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	local_irq_restore(flags);

	pr_debug("Registered %pS as sched_clock source\n", read);
}
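
/*
 * Typical use, as a sketch only ('my_counter_read' and 'counter_base'
 * are made-up names for illustration): an early clocksource driver
 * registers its free-running counter like so:
 *
 *	static u64 notrace my_counter_read(void)
 *	{
 *		return readl_relaxed(counter_base);
 *	}
 *
 *	sched_clock_register(my_counter_read, 32, 24000000);
 *
 * for a 32-bit counter ticking at 24MHz. The read callback must be
 * notrace and callable from any context, including NMI.
 */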

void __init generic_sched_clock_init(void)
{
	/*
	 * If no sched_clock() function has been provided at this point,
	 * register the jiffy counter as the final one.
	 */
	if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

	update_sched_clock();

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sched_clock_timer.function = sched_clock_poll;
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}

/*
 * Clock read function for use when the clock is suspended.
 *
 * This function makes it appear to sched_clock() as if the clock
 * stopped counting at its last update.
 *
 * This function must only be called from the critical section in
 * sched_clock(). It relies on the read_seqcount_retry() at the end
 * of the critical section to be sure we observe the correct copy of
 * 'epoch_cyc'.
 */
static u64 notrace suspended_sched_clock_read(void)
{
	unsigned int seq = raw_read_seqcount(&cd.seq);

	return cd.read_data[seq & 1].epoch_cyc;
}
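
/*
 * With the suspended read function installed, the delta computed by
 * sched_clock() is (epoch_cyc - epoch_cyc) & sched_clock_mask == 0,
 * so every call returns exactly epoch_ns until sched_clock_resume()
 * restores the real read function.
 */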

int sched_clock_suspend(void)
{
	struct clock_read_data *rd = &cd.read_data[0];

	update_sched_clock();
	hrtimer_cancel(&sched_clock_timer);
	rd->read_sched_clock = suspended_sched_clock_read;

	return 0;
}

void sched_clock_resume(void)
{
	struct clock_read_data *rd = &cd.read_data[0];

	rd->epoch_cyc = cd.actual_read_sched_clock();
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
	rd->read_sched_clock = cd.actual_read_sched_clock;
}
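
/*
 * Note that the two callbacks above write cd.read_data[0] in place
 * rather than going through update_clock_read_data(); syscore
 * suspend/resume callbacks run on one CPU with interrupts disabled,
 * so no ordinary reader can race with these updates.
 */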

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);

	return 0;
}
device_initcall(sched_clock_syscore_init);