#ifndef __RES_COUNTER_H__
#define __RES_COUNTER_H__

/*
 * Resource counters
 *
 * Common data types and routines for resource accounting, used by
 * cgroup controllers to track and limit the consumption of a resource
 * across a hierarchy of groups.
 *
 * See Documentation/cgroups/resource_counter.txt for more information
 * about what this counter is and how to use it.
 */

#include <linux/kernel.h>	/* for ULLONG_MAX */
#include <linux/spinlock.h>
#include <linux/errno.h>

/*
 * The core object. A controller that wishes to account for a resource
 * may embed this counter in its own structures and use the helpers
 * declared below.
 */
struct res_counter {
	/* the current resource consumption level */
	unsigned long long usage;
	/* the maximal resource consumption observed since counter creation */
	unsigned long long max_usage;
	/* the hard limit that usage cannot exceed */
	unsigned long long limit;
	/* the soft limit: usage may exceed it, but the excess is tracked */
	unsigned long long soft_limit;
	/* the number of unsuccessful attempts to consume the resource */
	unsigned long long failcnt;
	/*
	 * the lock protecting all of the above; the helpers below
	 * take it in IRQ-safe fashion
	 */
	spinlock_t lock;
	/* the parent counter, used for hierarchical resource accounting */
	struct res_counter *parent;
};

#define RES_COUNTER_MAX ULLONG_MAX
/*
 * Helpers for interacting with userspace:
 *
 * res_counter_read_u64() returns the current value of the given member
 * (selected with the RES_* descriptors below).
 *
 * res_counter_read() copies the given member, formatted by the
 * optional @read_strategy callback, into the user buffer @buf of size
 * @nbytes at offset @pos.
 *
 * res_counter_memparse_write_strategy() parses a user-supplied value
 * (a number with an optional memory-size suffix) into *@res.
 */

u64 res_counter_read_u64(struct res_counter *counter, int member);

ssize_t res_counter_read(struct res_counter *counter, int member,
		const char __user *buf, size_t nbytes, loff_t *pos,
		int (*read_strategy)(unsigned long long val, char *s));

int res_counter_memparse_write_strategy(const char *buf,
					unsigned long long *res);

/*
 * The field descriptors: one for each member of struct res_counter
 * that is visible to userspace.
 */
enum {
	RES_USAGE,
	RES_MAX_USAGE,
	RES_LIMIT,
	RES_FAILCNT,
	RES_SOFT_LIMIT,
};
/*
 * Helpers for accounting.
 */

void res_counter_init(struct res_counter *counter, struct res_counter *parent);
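/*
 * A minimal usage sketch (illustrative only, not part of this API): a
 * hypothetical controller embeds a res_counter and initializes it,
 * passing the parent group's counter to build the accounting
 * hierarchy. The foo_* names are assumptions for illustration.
 */
#if 0	/* example sketch */
struct foo_cgroup {
	struct res_counter res;
	/* ... other per-group state ... */
};

static void foo_cgroup_init(struct foo_cgroup *foo, struct foo_cgroup *parent)
{
	/* a NULL parent counter makes this the root of the hierarchy */
	res_counter_init(&foo->res, parent ? &parent->res : NULL);
}
#endif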
/*
 * charge - try to consume more resource.
 *
 * @counter: the counter
 * @val: the amount of the resource. Each controller defines its own
 *       units, e.g. numbers, bytes, Kbytes, etc.
 *
 * Returns 0 on success and < 0 if counter->usage would exceed
 * counter->limit.
 *
 * The _locked variant expects counter->lock to be held; with @force
 * set it charges the counter even beyond its limit.
 *
 * charge_nofail() works like charge(), except that it charges the
 * counter unconditionally and returns < 0 if, after the current
 * charge, the counter is over its limit.
 *
 * On failure, *@limit_fail_at is set to the counter in the hierarchy
 * whose limit was hit.
 */

int __must_check res_counter_charge_locked(struct res_counter *counter,
					   unsigned long val, bool force);
int __must_check res_counter_charge(struct res_counter *counter,
		unsigned long val, struct res_counter **limit_fail_at);
int res_counter_charge_nofail(struct res_counter *counter,
		unsigned long val, struct res_counter **limit_fail_at);
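/*
 * A hedged sketch of the charge path (illustrative only): try to
 * charge @nr_bytes against a group's counter; on failure, @fail_at
 * points at the ancestor counter whose limit was hit, which a caller
 * could use to pick a reclaim target. Names are assumptions, not part
 * of this header.
 */
#if 0	/* example sketch */
static int foo_try_charge(struct foo_cgroup *foo, unsigned long nr_bytes)
{
	struct res_counter *fail_at;
	int ret;

	ret = res_counter_charge(&foo->res, nr_bytes, &fail_at);
	if (ret < 0) {
		/* over limit at 'fail_at': reclaim or fail the allocation */
		return ret;
	}
	return 0;
}
#endif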
/*
 * uncharge - tell that some portion of the resource is released.
 *
 * @counter: the counter
 * @val: the amount of the resource
 *
 * These calls check for usage underflow and warn on the console if it
 * occurs. The _locked variant expects counter->lock to be held.
 *
 * res_counter_uncharge() propagates the uncharge up through all
 * parents; res_counter_uncharge_until() stops at (and does not
 * uncharge) @top. Both return the charges still present in @counter.
 */

u64 res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
u64 res_counter_uncharge(struct res_counter *counter, unsigned long val);

u64 res_counter_uncharge_until(struct res_counter *counter,
			       struct res_counter *top,
			       unsigned long val);
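/*
 * A matching uncharge sketch (illustrative only): every successful
 * charge must eventually be undone with the same amount, otherwise
 * usage leaks and the group is eventually throttled for no reason.
 */
#if 0	/* example sketch */
static void foo_release(struct foo_cgroup *foo, unsigned long nr_bytes)
{
	res_counter_uncharge(&foo->res, nr_bytes);
}
#endif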
/**
 * res_counter_margin - calculate chargeable space of a counter
 * @cnt: the counter
 *
 * Returns the difference between the hard limit and the current usage
 * of resource counter @cnt.
 */
static inline unsigned long long res_counter_margin(struct res_counter *cnt)
{
	unsigned long long margin;
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->limit > cnt->usage)
		margin = cnt->limit - cnt->usage;
	else
		margin = 0;
	spin_unlock_irqrestore(&cnt->lock, flags);
	return margin;
}
/**
 * res_counter_soft_limit_excess - get the difference between usage
 * and the soft limit
 * @cnt: the counter
 *
 * Returns 0 if usage is less than or equal to the soft limit, and the
 * difference between usage and the soft limit otherwise.
 */
static inline unsigned long long
res_counter_soft_limit_excess(struct res_counter *cnt)
{
	unsigned long long excess;
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->usage <= cnt->soft_limit)
		excess = 0;
	else
		excess = cnt->usage - cnt->soft_limit;
	spin_unlock_irqrestore(&cnt->lock, flags);
	return excess;
}
/* reset the high-water mark to the current usage */
static inline void res_counter_reset_max(struct res_counter *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->max_usage = cnt->usage;
	spin_unlock_irqrestore(&cnt->lock, flags);
}

/* reset the failure counter */
static inline void res_counter_reset_failcnt(struct res_counter *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->failcnt = 0;
	spin_unlock_irqrestore(&cnt->lock, flags);
}
/*
 * Set a new hard limit. Fails with -EBUSY if the current usage
 * already exceeds the requested limit.
 */
static inline int res_counter_set_limit(struct res_counter *cnt,
		unsigned long long limit)
{
	unsigned long flags;
	int ret = -EBUSY;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->usage <= limit) {
		cnt->limit = limit;
		ret = 0;
	}
	spin_unlock_irqrestore(&cnt->lock, flags);
	return ret;
}
/*
 * Set a new soft limit. Unlike the hard limit, this always succeeds:
 * usage is allowed to exceed the soft limit.
 */
static inline int
res_counter_set_soft_limit(struct res_counter *cnt,
				unsigned long long soft_limit)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->soft_limit = soft_limit;
	spin_unlock_irqrestore(&cnt->lock, flags);
	return 0;
}
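/*
 * A sketch of a limit update (illustrative only): a write handler for
 * a "limit" control file could parse the user string and apply it.
 * Because res_counter_set_limit() refuses with -EBUSY while usage
 * already exceeds the requested limit, callers typically reclaim and
 * retry on that error. The foo_* names are assumptions.
 */
#if 0	/* example sketch */
static int foo_write_limit(struct foo_cgroup *foo, const char *buf)
{
	unsigned long long limit;
	int ret;

	ret = res_counter_memparse_write_strategy(buf, &limit);
	if (ret < 0)
		return ret;
	return res_counter_set_limit(&foo->res, limit);
}
#endif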
#endif /* __RES_COUNTER_H__ */