1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73#include <linux/proportions.h>
74#include <linux/rcupdate.h>
75
76int prop_descriptor_init(struct prop_descriptor *pd, int shift)
77{
78 int err;
79
80 if (shift > PROP_MAX_SHIFT)
81 shift = PROP_MAX_SHIFT;
82
83 pd->index = 0;
84 pd->pg[0].shift = shift;
85 mutex_init(&pd->mutex);
86 err = percpu_counter_init(&pd->pg[0].events, 0, GFP_KERNEL);
87 if (err)
88 goto out;
89
90 err = percpu_counter_init(&pd->pg[1].events, 0, GFP_KERNEL);
91 if (err)
92 percpu_counter_destroy(&pd->pg[0].events);
93
94out:
95 return err;
96}
97
98
99
100
101
102
103
104
/*
 * prop_change_shift - change the period length (2^shift events) of @pd
 *
 * Switches the descriptor over to its inactive prop_global page with the
 * new shift, rescaling the accumulated global event count so proportion
 * values remain continuous across the change.
 *
 * Concurrent shift changes are serialized by pd->mutex.
 */
void prop_change_shift(struct prop_descriptor *pd, int shift)
{
	int index;
	int offset;
	u64 events;
	unsigned long flags;

	if (shift > PROP_MAX_SHIFT)
		shift = PROP_MAX_SHIFT;

	mutex_lock(&pd->mutex);

	/* The inactive page; pd->pg[] is a two-entry flip buffer. */
	index = pd->index ^ 1;
	offset = pd->pg[pd->index].shift - shift;
	if (!offset)
		goto out;

	pd->pg[index].shift = shift;

	local_irq_save(flags);
	events = percpu_counter_sum(&pd->pg[pd->index].events);
	/* Rescale the event count to the new period length. */
	if (offset < 0)
		events <<= -offset;
	else
		events >>= offset;
	percpu_counter_set(&pd->pg[index].events, events);

	/*
	 * Make sure the new page is fully set up before publishing it;
	 * pairs with the smp_rmb() in prop_get_global().
	 */
	smp_wmb();
	pd->index = index;
	local_irq_restore(flags);

	/* Wait until no reader can still hold the old page. */
	synchronize_rcu();

out:
	mutex_unlock(&pd->mutex);
}
144
145
146
147
148
/*
 * Pick up the currently active prop_global page under the RCU read
 * lock.  Must be paired with prop_put_global().
 */
static struct prop_global *prop_get_global(struct prop_descriptor *pd)
__acquires(RCU)
{
	int index;

	rcu_read_lock();
	index = pd->index;
	/*
	 * Order the index load before any reads of the page contents;
	 * pairs with the smp_wmb() in prop_change_shift().
	 */
	smp_rmb();
	return &pd->pg[index];
}
162
/* Drop the RCU reference taken by prop_get_global(). */
static void prop_put_global(struct prop_descriptor *pd, struct prop_global *pg)
__releases(RCU)
{
	rcu_read_unlock();
}
168
/*
 * Rescale a local period stamp to a new global shift so that it keeps
 * describing the same point in time: growing the shift multiplies the
 * period value, shrinking it divides.
 */
static void
prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
{
	int delta = *pl_shift - new_shift;

	if (delta == 0)
		return;

	if (delta > 0)
		*pl_period >>= delta;
	else
		*pl_period <<= -delta;

	*pl_shift = new_shift;
}
184
185
186
187
188
/*
 * Per-cpu counter batch size: grows logarithmically with the number of
 * possible CPUs so the total per-cpu error stays bounded on big machines.
 */
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
190
191int prop_local_init_percpu(struct prop_local_percpu *pl)
192{
193 raw_spin_lock_init(&pl->lock);
194 pl->shift = 0;
195 pl->period = 0;
196 return percpu_counter_init(&pl->events, 0, GFP_KERNEL);
197}
198
/* Release the per-cpu counter allocated by prop_local_init_percpu(). */
void prop_local_destroy_percpu(struct prop_local_percpu *pl)
{
	percpu_counter_destroy(&pl->events);
}
203
204
205
206
207
208
209
210
/*
 * prop_norm_percpu - catch the local counter up with the global period
 *
 * Every whole global period that elapsed since the last normalization
 * halves the local event count (exponential decay): @period missed
 * periods divide it by 2^period.  Serialized per-local by pl->lock.
 */
static
void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
{
	unsigned long period = 1UL << (pg->shift - 1);
	unsigned long period_mask = ~(period - 1);
	unsigned long global_period;
	unsigned long flags;

	global_period = percpu_counter_read(&pg->events);
	global_period &= period_mask;

	/*
	 * Fast path: already normalized against the current global
	 * period, nothing to do.
	 */
	if (pl->period == global_period)
		return;

	raw_spin_lock_irqsave(&pl->lock, flags);
	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);

	/*
	 * Number of whole periods missed; shifting by >= BITS_PER_LONG
	 * would be undefined, and the count would decay to zero anyway.
	 */
	period = (global_period - pl->period) >> (pg->shift - 1);
	if (period < BITS_PER_LONG) {
		s64 val = percpu_counter_read(&pl->events);

		/*
		 * The cheap approximate read can be off by up to the
		 * per-cpu batch error; take the exact sum when the value
		 * is small enough for that error to matter.
		 */
		if (val < (nr_cpu_ids * PROP_BATCH))
			val = percpu_counter_sum(&pl->events);

		/* Replace val with val >> period in one batched add. */
		__percpu_counter_add(&pl->events, -val + (val >> period),
					PROP_BATCH);
	} else
		percpu_counter_set(&pl->events, 0);

	pl->period = global_period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}
252
253
254
255
/*
 * __prop_inc_percpu - count one event against @pl and the global total,
 * normalizing the local counter to the current period first.
 */
void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
{
	struct prop_global *pg = prop_get_global(pd);

	prop_norm_percpu(pg, pl);
	__percpu_counter_add(&pl->events, 1, PROP_BATCH);
	percpu_counter_add(&pg->events, 1);
	prop_put_global(pd, pg);
}
265
266
267
268
269
270void __prop_inc_percpu_max(struct prop_descriptor *pd,
271 struct prop_local_percpu *pl, long frac)
272{
273 struct prop_global *pg = prop_get_global(pd);
274
275 prop_norm_percpu(pg, pl);
276
277 if (unlikely(frac != PROP_FRAC_BASE)) {
278 unsigned long period_2 = 1UL << (pg->shift - 1);
279 unsigned long counter_mask = period_2 - 1;
280 unsigned long global_count;
281 long numerator, denominator;
282
283 numerator = percpu_counter_read_positive(&pl->events);
284 global_count = percpu_counter_read(&pg->events);
285 denominator = period_2 + (global_count & counter_mask);
286
287 if (numerator > ((denominator * frac) >> PROP_FRAC_SHIFT))
288 goto out_put;
289 }
290
291 percpu_counter_add(&pl->events, 1);
292 percpu_counter_add(&pg->events, 1);
293
294out_put:
295 prop_put_global(pd, pg);
296}
297
298
299
300
301
302
303void prop_fraction_percpu(struct prop_descriptor *pd,
304 struct prop_local_percpu *pl,
305 long *numerator, long *denominator)
306{
307 struct prop_global *pg = prop_get_global(pd);
308 unsigned long period_2 = 1UL << (pg->shift - 1);
309 unsigned long counter_mask = period_2 - 1;
310 unsigned long global_count;
311
312 prop_norm_percpu(pg, pl);
313 *numerator = percpu_counter_read_positive(&pl->events);
314
315 global_count = percpu_counter_read(&pg->events);
316 *denominator = period_2 + (global_count & counter_mask);
317
318 prop_put_global(pd, pg);
319}
320
321
322
323
324
325int prop_local_init_single(struct prop_local_single *pl)
326{
327 raw_spin_lock_init(&pl->lock);
328 pl->shift = 0;
329 pl->period = 0;
330 pl->events = 0;
331 return 0;
332}
333
/* Nothing to tear down; provided for symmetry with the percpu variant. */
void prop_local_destroy_single(struct prop_local_single *pl)
{
}
337
338
339
340
/*
 * prop_norm_single - catch the local counter up with the global period
 *
 * Each whole global period elapsed since the last normalization halves
 * the local event count.  Serialized per-local by pl->lock.
 */
static
void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
{
	unsigned long period = 1UL << (pg->shift - 1);
	unsigned long period_mask = ~(period - 1);
	unsigned long global_period;
	unsigned long flags;

	global_period = percpu_counter_read(&pg->events);
	global_period &= period_mask;

	/*
	 * Fast path: already normalized against the current global
	 * period, nothing to do.
	 */
	if (pl->period == global_period)
		return;

	raw_spin_lock_irqsave(&pl->lock, flags);
	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
	/*
	 * Halve the count once per missed period; >= BITS_PER_LONG
	 * halvings would shift everything out (and be undefined), so
	 * just clear the count in that case.
	 */
	period = (global_period - pl->period) >> (pg->shift - 1);
	if (likely(period < BITS_PER_LONG))
		pl->events >>= period;
	else
		pl->events = 0;
	pl->period = global_period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}
372
373
374
375
/*
 * __prop_inc_single - count one event against @pl and the global total,
 * normalizing the local counter to the current period first.
 */
void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
{
	struct prop_global *pg = prop_get_global(pd);

	prop_norm_single(pg, pl);
	pl->events++;
	percpu_counter_add(&pg->events, 1);
	prop_put_global(pd, pg);
}
385
386
387
388
389
390
391void prop_fraction_single(struct prop_descriptor *pd,
392 struct prop_local_single *pl,
393 long *numerator, long *denominator)
394{
395 struct prop_global *pg = prop_get_global(pd);
396 unsigned long period_2 = 1UL << (pg->shift - 1);
397 unsigned long counter_mask = period_2 - 1;
398 unsigned long global_count;
399
400 prop_norm_single(pg, pl);
401 *numerator = pl->events;
402
403 global_count = percpu_counter_read(&pg->events);
404 *denominator = period_2 + (global_count & counter_mask);
405
406 prop_put_global(pd, pg);
407}
408