1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
#include "ubifs.h"

/*
 * List of all mounted UBIFS file-system instances.  Walked by the shrinker
 * (shrink_tnc_trees(), kick_a_thread()) and protected by 'ubifs_infos_lock'.
 */
LIST_HEAD(ubifs_infos);

/*
 * Shrinker run number.  Each shrink_tnc_trees() pass takes a fresh non-zero
 * run number and stamps it into 'c->shrinker_run_no' so a file-system is not
 * scanned twice within one pass (scanned FSes are rotated to the list tail).
 */
static unsigned int shrinker_run_no;

/* Protects the 'ubifs_infos' list */
DEFINE_SPINLOCK(ubifs_infos_lock);

/* Global clean znode counter, summed over all mounted UBIFS instances */
atomic_long_t ubifs_clean_zn_cnt;
47
48
49
50
51
52
53
54
55
56
57
/**
 * shrink_tnc - shrink the TNC (index cache) of one file-system.
 * @c: UBIFS file-system description object
 * @nr: maximum number of znodes to free
 * @age: only free clean znodes which have not been used for at least this
 *       many seconds
 * @contention: set to %1 if znodes could not be freed because of contention
 *              (here: znodes busy with an ongoing commit)
 *
 * This function walks the TNC in level order and frees clean znode sub-trees
 * older than @age, detaching each from its parent before destroying it.
 * Returns the number of freed znodes.  Must be called with both
 * 'c->umount_mutex' and 'c->tnc_mutex' held (asserted below).
 */
static int shrink_tnc(struct ubifs_info *c, int nr, int age, int *contention)
{
	int total_freed = 0;
	struct ubifs_znode *znode, *zprev;
	time64_t time = ktime_get_seconds();

	ubifs_assert(c, mutex_is_locked(&c->umount_mutex));
	ubifs_assert(c, mutex_is_locked(&c->tnc_mutex));

	/* Nothing cached, or nothing clean - nothing to reap */
	if (!c->zroot.znode || atomic_long_read(&c->clean_zn_cnt) == 0)
		return 0;

	/*
	 * 'zprev' trails one step behind 'znode' so that after a sub-tree
	 * rooted at 'znode' is destroyed we can resume the level-order walk
	 * from the last still-valid node.
	 */
	zprev = NULL;
	znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, NULL);
	while (znode && total_freed < nr &&
	       atomic_long_read(&c->clean_zn_cnt) > 0) {
		int freed;

		if (znode->cnext) {
			/*
			 * The znode is on the commit list ('cnext' chain), so
			 * it is being written out by an ongoing commit and we
			 * must not touch it - skip it and report contention.
			 */
			*contention = 1;
		} else if (!ubifs_zn_dirty(znode) &&
			   abs(time - znode->time) >= age) {
			/* Detach the sub-tree from its parent (or the root) */
			if (znode->parent)
				znode->parent->zbranch[znode->iip].znode = NULL;
			else
				c->zroot.znode = NULL;

			freed = ubifs_destroy_tnc_subtree(c, znode);
			/* Keep both the global and per-FS clean counters in sync */
			atomic_long_sub(freed, &ubifs_clean_zn_cnt);
			atomic_long_sub(freed, &c->clean_zn_cnt);
			total_freed += freed;
			/* Step back to the last surviving node */
			znode = zprev;
		}

		/* The whole tree may have been freed above */
		if (unlikely(!c->zroot.znode))
			break;

		zprev = znode;
		znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, znode);
		cond_resched();
	}

	return total_freed;
}
133
134
135
136
137
138
139
140
141
142
143
/**
 * shrink_tnc_trees - shrink the TNC of all mounted UBIFS file-systems.
 * @nr: maximum number of znodes to free
 * @age: only free clean znodes older than this many seconds
 * @contention: set to %1 if freeing was limited by lock contention
 *
 * Walks the 'ubifs_infos' list and calls shrink_tnc() on each file-system
 * whose mutexes can be acquired without blocking.  Scanned file-systems are
 * stamped with the current run number and rotated to the tail of the list
 * for fairness.  Returns the total number of freed znodes.
 */
static int shrink_tnc_trees(int nr, int age, int *contention)
{
	struct ubifs_info *c;
	struct list_head *p;
	unsigned int run_no;
	int freed = 0;

	spin_lock(&ubifs_infos_lock);
	/* Pick a fresh, non-zero run number for this pass */
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);

	p = ubifs_infos.next;
	while (p != &ubifs_infos) {
		c = list_entry(p, struct ubifs_info, infos_list);

		/*
		 * Scanned file-systems are moved to the tail of the list, so
		 * meeting one stamped with our run number means we have gone
		 * all the way around - stop.
		 */
		if (c->shrinker_run_no == run_no)
			break;
		if (!mutex_trylock(&c->umount_mutex)) {
			/* Some un-mount is in progress, try the next FS */
			*contention = 1;
			p = p->next;
			continue;
		}
		/*
		 * We hold 'c->umount_mutex', so this file-system cannot go
		 * away underneath us while we work on it.
		 */
		if (!mutex_trylock(&c->tnc_mutex)) {
			mutex_unlock(&c->umount_mutex);
			*contention = 1;
			p = p->next;
			continue;
		}
		/*
		 * Drop the list spinlock while reaping - shrink_tnc() may
		 * take a while, and the mutexes keep 'c' pinned.
		 */
		spin_unlock(&ubifs_infos_lock);

		c->shrinker_run_no = run_no;
		freed += shrink_tnc(c, nr, age, contention);
		mutex_unlock(&c->tnc_mutex);
		spin_lock(&ubifs_infos_lock);

		/* Grab the next element before we move this one */
		p = p->next;

		/*
		 * Rotate the scanned file-system to the end of the list so
		 * that subsequent passes start with a different FS (fairness).
		 */
		list_move_tail(&c->infos_list, &ubifs_infos);
		mutex_unlock(&c->umount_mutex);
		if (freed >= nr)
			break;
	}
	spin_unlock(&ubifs_infos_lock);
	return freed;
}
204
205
206
207
208
209
210
211
212
/**
 * kick_a_thread - kick a background thread to start a commit.
 *
 * Scans all mounted UBIFS file-systems and, if one has dirty znodes and is
 * resting, requests a background commit for it (which should eventually
 * produce clean, freeable znodes).  Returns %-1 when a thread was kicked, or
 * when there is reason to believe memory will soon become freeable (a commit
 * is already running, or an un-mount is in progress); returns %0 when there
 * is nothing to commit.
 */
static int kick_a_thread(void)
{
	int i;
	struct ubifs_info *c;

	/*
	 * Two passes over the list: the first pass only checks whether some
	 * commit is already in progress (in which case we bail out and let it
	 * finish); the second pass actually kicks the first suitable FS.
	 */
	spin_lock(&ubifs_infos_lock);
	for (i = 0; i < 2; i++) {
		list_for_each_entry(c, &ubifs_infos, infos_list) {
			long dirty_zn_cnt;

			if (!mutex_trylock(&c->umount_mutex)) {
				/*
				 * Some un-mount is in progress - safer to
				 * leave this FS alone and retry later.
				 */
				spin_unlock(&ubifs_infos_lock);
				return -1;
			}

			dirty_zn_cnt = atomic_long_read(&c->dirty_zn_cnt);

			/* Nothing to commit, or committing is not possible */
			if (!dirty_zn_cnt || c->cmt_state == COMMIT_BROKEN ||
			    c->ro_mount || c->ro_error) {
				mutex_unlock(&c->umount_mutex);
				continue;
			}

			/* A commit is already running - let it finish */
			if (c->cmt_state != COMMIT_RESTING) {
				spin_unlock(&ubifs_infos_lock);
				mutex_unlock(&c->umount_mutex);
				return -1;
			}

			if (i == 1) {
				/* Rotate for fairness, then kick this FS */
				list_move_tail(&c->infos_list, &ubifs_infos);
				spin_unlock(&ubifs_infos_lock);

				ubifs_request_bg_commit(c);
				mutex_unlock(&c->umount_mutex);
				return -1;
			}
			mutex_unlock(&c->umount_mutex);
		}
	}
	spin_unlock(&ubifs_infos_lock);

	return 0;
}
266
267unsigned long ubifs_shrink_count(struct shrinker *shrink,
268 struct shrink_control *sc)
269{
270 long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);
271
272
273
274
275
276 return clean_zn_cnt >= 0 ? clean_zn_cnt : 1;
277}
278
279unsigned long ubifs_shrink_scan(struct shrinker *shrink,
280 struct shrink_control *sc)
281{
282 unsigned long nr = sc->nr_to_scan;
283 int contention = 0;
284 unsigned long freed;
285 long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);
286
287 if (!clean_zn_cnt) {
288
289
290
291
292
293
294
295 dbg_tnc("no clean znodes, kick a thread");
296 return kick_a_thread();
297 }
298
299 freed = shrink_tnc_trees(nr, OLD_ZNODE_AGE, &contention);
300 if (freed >= nr)
301 goto out;
302
303 dbg_tnc("not enough old znodes, try to free young ones");
304 freed += shrink_tnc_trees(nr - freed, YOUNG_ZNODE_AGE, &contention);
305 if (freed >= nr)
306 goto out;
307
308 dbg_tnc("not enough young znodes, free all");
309 freed += shrink_tnc_trees(nr - freed, 0, &contention);
310
311 if (!freed && contention) {
312 dbg_tnc("freed nothing, but contention");
313 return SHRINK_STOP;
314 }
315
316out:
317 dbg_tnc("%lu znodes were freed, requested %lu", freed, nr);
318 return freed;
319}
320