// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 */

/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

	ubi_update_fastmap(ubi);
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}

/**
 * find_anchor_wl_entry - find the wear-leveling entry to be used as the
 * fastmap anchor PEB.
 * @root: the RB-tree to look in
 *
 * Returns the entry with the lowest erase counter among the PEBs that can
 * hold a fastmap anchor (pnum < UBI_FM_MAX_START), or NULL if none exists.
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}

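/**
 * return_unused_peb - put an unused PEB back onto the free tree.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to return
 */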
static inline void return_unused_peb(struct ubi_device *ubi,
				     struct ubi_wl_entry *e)
{
	wl_tree_add(e, &ubi->free);
	ubi->free_count++;
}

/**
 * return_unused_pool_pebs - return all unused PEBs of a pool to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		return_unused_peb(ubi, e);
	}
}

/**
 * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
 * @ubi: UBI device description object
 * @anchor: This PEB will be used as anchor PEB by fastmap
 *
 * The function returns a physical erase block with a given maximal number
 * and removes it from the wl subsystem.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/* Remove it from the free list; from now on the wl subsystem
	 * no longer knows about this erase block. */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}

/*
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

	if (ubi->fm_anchor) {
		wl_tree_add(ubi->fm_anchor, &ubi->free);
		ubi->free_count++;
	}
	if (ubi->fm_next_anchor) {
		wl_tree_add(ubi->fm_next_anchor, &ubi->free);
		ubi->free_count++;
	}

	/* All available PEBs are in ubi->free, now is the time to get
	 * the best anchor PEBs.
	 */
	ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
	ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);

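	/* Fill both pools in lock step: take at most one PEB for the user
	 * pool and one for the WL pool per iteration, and stop once both
	 * pools are full or the free tree cannot safely supply more PEBs. */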
	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!ubi->free.rb_node)
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
			if (!ubi->free.rb_node ||
			    (ubi->free_count - ubi->beb_rsvd_pebs < 5))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		if (err)
			return err;
	}

	return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, attempts = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/* We check here also for the WL pool because at this point we can
	 * refill the WL pool synchronously. */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

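	/* If the user pool is still empty after the fastmap update, try to
	 * produce free PEBs by executing pending work synchronously; give
	 * up with -ENOSPC after a bounded number of attempts. */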
	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		attempts++;
		if (attempts == 10) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		up_read(&ubi->fm_eba_sem);
		ret = produce_free_peb(ubi);
		if (ret < 0) {
			down_read(&ubi->fm_eba_sem);
			goto out;
		}
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}

/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

	if (pool->used == pool->size) {
		/* We cannot update the fastmap here because this
		 * function is called in atomic context.
		 * Let's fail here and refill/update it as soon as possible. */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];
}

/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);

	/* Do we already have a next anchor? */
	if (!ubi->fm_next_anchor) {
		ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
		if (!ubi->fm_next_anchor)
			/* Tell wear leveling to produce a new anchor PEB */
			ubi->fm_do_produce_anchor = 1;
	}

	/* Do wear leveling to get a new anchor PEB or check the
	 * existing next anchor candidate.
	 */
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->func = &wear_leveling_worker;
	__schedule_ubi_work(ubi, wrk);
	return 0;
}

/**
 * ubi_wl_put_fm_peb - returns a PEB used by fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/* This can happen if we recovered from a fastmap for the very
	 * first time and are now writing a new one. In this case the wl
	 * subsystem has never seen the PEBs used by the original fastmap.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	}

	spin_unlock(&ubi->wl_lock);

	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}

/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}

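/**
 * ubi_fastmap_close - release all PEBs and memory held by fastmap.
 * @ubi: UBI device description object
 *
 * Returns the pool PEBs and the anchor PEBs to the free tree and frees
 * the in-memory fastmap structure.
 */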
static void ubi_fastmap_close(struct ubi_device *ubi)
{
	int i;

	return_unused_pool_pebs(ubi, &ubi->fm_pool);
	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

	if (ubi->fm_anchor) {
		return_unused_peb(ubi, ubi->fm_anchor);
		ubi->fm_anchor = NULL;
	}

	if (ubi->fm_next_anchor) {
		return_unused_peb(ubi, ubi->fm_next_anchor);
		ubi->fm_next_anchor = NULL;
	}

	if (ubi->fm) {
		for (i = 0; i < ubi->fm->used_blocks; i++)
			kfree(ubi->fm->e[i]);
	}
	kfree(ubi->fm);
}

/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * See find_mean_wl_entry()
 *
 * @ubi: UBI device description object
 * @e: physical eraseblock to return
 * @root: RB tree to test against.
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
					       struct ubi_wl_entry *e,
					       struct rb_root *root)
{
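	/* If fastmap is enabled but not yet attached, avoid handing out
	 * PEBs below UBI_FM_MAX_START so they stay available for the
	 * fastmap; pick a different entry from the tree instead. */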
	if (e && !ubi->fm_disabled && !ubi->fm &&
	    e->pnum < UBI_FM_MAX_START)
		e = rb_entry(rb_next(root->rb_node),
			     struct ubi_wl_entry, u.rb);

	return e;
}