#ifndef __RES_COUNTER_H__
#define __RES_COUNTER_H__

/*
 * Resource counters
 *
 * Common data types and methods for resource accounting. A controller
 * that wishes to account for some resource embeds a struct res_counter
 * into its own structures and uses the helpers declared below.
 */

#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/kernel.h>	/* ULLONG_MAX */

/*
 * The core object: a counter with a current usage level, a hard limit,
 * a soft limit, and an optional parent for hierarchical accounting.
 */
struct res_counter {
	/*
	 * the current resource consumption level
	 */
	unsigned long long usage;
	/*
	 * the maximal value of the usage from the counter creation
	 */
	unsigned long long max_usage;
	/*
	 * the limit that usage cannot exceed
	 */
	unsigned long long limit;
	/*
	 * the limit that usage may exceed, subject to reclaim pressure
	 */
	unsigned long long soft_limit;
	/*
	 * the number of unsuccessful attempts to consume the resource
	 */
	unsigned long long failcnt;
	/*
	 * the lock to protect all of the above; the routines below
	 * consider this lock to be IRQ-safe
	 */
	spinlock_t lock;
	/*
	 * the parent counter, used for hierarchical resource accounting
	 */
	struct res_counter *parent;
};
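
/*
 * A minimal embedding sketch (my_controller is hypothetical, not part
 * of this header): the counter lives inside the controller's per-group
 * state, next to its cgroup bookkeeping.
 *
 *	struct my_controller {
 *		struct cgroup_subsys_state css;
 *		struct res_counter res;
 *	};
 */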

#define RES_COUNTER_MAX ULLONG_MAX

/*
 * Helpers to interact with userspace:
 *
 * res_counter_read_u64() returns the value of the specified member
 * as a u64.
 *
 * res_counter_read() formats the specified member through the given
 * read_strategy callback (or as a plain "%llu\n" if none is given)
 * and copies the result into the userspace buffer.
 *
 * res_counter_memparse_write_strategy() parses a value written from
 * userspace, accepting memparse() suffixes, into *res.
 */
u64 res_counter_read_u64(struct res_counter *counter, int member);

ssize_t res_counter_read(struct res_counter *counter, int member,
		const char __user *buf, size_t nbytes, loff_t *pos,
		int (*read_strategy)(unsigned long long val, char *s));

int res_counter_memparse_write_strategy(const char *buf,
		unsigned long long *res);
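
/*
 * A read_strategy callback formats @val into the buffer @s and, by the
 * convention assumed here, returns the number of characters written,
 * as sprintf() does. A hypothetical strategy:
 *
 *	static int my_read_strategy(unsigned long long val, char *s)
 *	{
 *		return sprintf(s, "%llu\n", val);
 *	}
 */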

/*
 * The field descriptors: one identifier per struct res_counter member
 * that the helpers above can address.
 */
enum {
	RES_USAGE,
	RES_MAX_USAGE,
	RES_LIMIT,
	RES_FAILCNT,
	RES_SOFT_LIMIT,
};
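
/*
 * For example, the current consumption of the (hypothetical) counter
 * from the sketch above can be read with:
 *
 *	u64 usage = res_counter_read_u64(&cg->res, RES_USAGE);
 */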

/*
 * Helpers for accounting. res_counter_init() initializes the counter;
 * @parent may be NULL for a root counter, or point to the parent
 * group's counter for hierarchical accounting.
 */
void res_counter_init(struct res_counter *counter, struct res_counter *parent);
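
/*
 * Continuing the hypothetical sketch above: chaining the new counter
 * to the parent group's counter makes charges propagate up the
 * hierarchy.
 *
 *	res_counter_init(&cg->res, parent ? &parent->res : NULL);
 */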

/*
 * res_counter_charge() - try to consume more of the resource.
 *
 * @counter: the counter
 * @val: the amount of the resource
 * @limit_fail_at: on failure, set to the counter in the hierarchy
 *	whose limit was hit by the attempted charge
 *
 * Charges propagate from @counter up through its parents. Returns 0
 * on success and a negative errno if usage would exceed the limit
 * anywhere in the hierarchy.
 *
 * res_counter_charge_nofail() works the same, except that it applies
 * the charge even past the limit, so usage may end up greater than
 * limit; useful to avoid racing reclaims too much.
 */
int __must_check res_counter_charge(struct res_counter *counter,
		unsigned long val, struct res_counter **limit_fail_at);
int res_counter_charge_nofail(struct res_counter *counter,
		unsigned long val, struct res_counter **limit_fail_at);
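
/*
 * A hedged usage sketch (cg and nr_bytes are hypothetical). On
 * failure, fail_at points at the counter in the hierarchy whose
 * limit was hit, which the caller can use to target reclaim:
 *
 *	struct res_counter *fail_at;
 *	int ret;
 *
 *	ret = res_counter_charge(&cg->res, nr_bytes, &fail_at);
 *	if (ret)
 *		return ret;
 */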

/*
 * res_counter_uncharge() - tell that some portion of the resource is
 * released.
 *
 * @counter: the counter
 * @val: the amount of the resource
 *
 * These calls check for usage underflow and warn on the console when
 * it occurs. Uncharges propagate up the hierarchy like charges do;
 * res_counter_uncharge_until() stops the propagation at @top.
 *
 * Returns the total charges still present in @counter.
 */
u64 res_counter_uncharge(struct res_counter *counter, unsigned long val);

u64 res_counter_uncharge_until(struct res_counter *counter,
		struct res_counter *top,
		unsigned long val);
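
/*
 * Every successful charge is paired with an uncharge when the
 * resource is released (cg and nr_bytes as in the sketch above):
 *
 *	res_counter_uncharge(&cg->res, nr_bytes);
 */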

/**
 * res_counter_margin - calculate chargeable space of a counter
 * @cnt: the counter
 *
 * Returns the difference between the hard limit and the current usage
 * of resource counter @cnt.
 */
static inline unsigned long long res_counter_margin(struct res_counter *cnt)
{
	unsigned long long margin;
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->limit > cnt->usage)
		margin = cnt->limit - cnt->usage;
	else
		margin = 0;
	spin_unlock_irqrestore(&cnt->lock, flags);
	return margin;
}

/**
 * res_counter_soft_limit_excess - get the amount by which usage
 * exceeds the soft limit
 * @cnt: the counter
 *
 * Returns 0 if usage is less than or equal to the soft limit,
 * otherwise the difference between usage and the soft limit.
 */
static inline unsigned long long
res_counter_soft_limit_excess(struct res_counter *cnt)
{
	unsigned long long excess;
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->usage <= cnt->soft_limit)
		excess = 0;
	else
		excess = cnt->usage - cnt->soft_limit;
	spin_unlock_irqrestore(&cnt->lock, flags);
	return excess;
}
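
/*
 * A sketch of how a reclaim path might use these helpers (reclaim()
 * is hypothetical): reclaim when no chargeable space is left, sizing
 * the effort by how far usage sits above the soft limit:
 *
 *	if (!res_counter_margin(&cg->res))
 *		reclaim(cg, res_counter_soft_limit_excess(&cg->res));
 */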

/*
 * Reset the high-watermark to the current usage.
 */
static inline void res_counter_reset_max(struct res_counter *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->max_usage = cnt->usage;
	spin_unlock_irqrestore(&cnt->lock, flags);
}

/*
 * Reset the failure counter.
 */
static inline void res_counter_reset_failcnt(struct res_counter *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->failcnt = 0;
	spin_unlock_irqrestore(&cnt->lock, flags);
}

/*
 * Set a new hard limit; fails with -EBUSY if the current usage
 * already exceeds the requested limit.
 */
static inline int res_counter_set_limit(struct res_counter *cnt,
		unsigned long long limit)
{
	unsigned long flags;
	int ret = -EBUSY;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->usage <= limit) {
		cnt->limit = limit;
		ret = 0;
	}
	spin_unlock_irqrestore(&cnt->lock, flags);
	return ret;
}

/*
 * Set a new soft limit; always succeeds, since usage is allowed to
 * exceed the soft limit.
 */
static inline int
res_counter_set_soft_limit(struct res_counter *cnt,
		unsigned long long soft_limit)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->soft_limit = soft_limit;
	spin_unlock_irqrestore(&cnt->lock, flags);
	return 0;
}
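
/*
 * Shrinking the hard limit below the current usage fails, so a write
 * handler typically reclaims and retries (hypothetical sketch;
 * try_to_reclaim() is not part of this header):
 *
 *	while (res_counter_set_limit(&cg->res, new_limit) == -EBUSY)
 *		if (!try_to_reclaim(cg))
 *			return -EBUSY;
 */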

#endif /* __RES_COUNTER_H__ */