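/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */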
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

	ubi_update_fastmap(ubi);
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}
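/**
 * find_anchor_wl_entry - find a wear-leveling entry suitable as anchor PEB.
 * @root: the RB-tree of free entries to search
 *
 * Returns the entry with the lowest erase counter among all PEBs with a
 * number below UBI_FM_MAX_START, or NULL if no such PEB exists.
 */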
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}
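/**
 * return_unused_pool_pebs - hands unused pool PEBs back to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */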
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
	}
}
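/**
 * anchor_pebs_available - checks whether an anchor PEB candidate exists.
 * @root: the RB-tree of free entries to search
 *
 * Returns 1 if at least one free PEB below UBI_FM_MAX_START is present,
 * 0 otherwise.
 */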
static int anchor_pebs_available(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	ubi_rb_for_each_entry(p, e, root, u.rb)
		if (e->pnum < UBI_FM_MAX_START)
			return 1;

	return 0;
}
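/**
 * ubi_wl_get_fm_peb - grabs a free PEB on behalf of fastmap.
 * @ubi: UBI device description object
 * @anchor: if non-zero, an anchor PEB (pnum < UBI_FM_MAX_START) is
 *          requested, otherwise a medium-worn entry is picked
 *
 * Removes the selected entry from the free tree and returns it, or NULL if
 * no suitable PEB is available. Must be called with ubi->wl_lock held.
 */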
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/* Remove it from the free list; the wl sub-system no longer
	 * knows about this erase block. */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}
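/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */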
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!ubi->free.rb_node)
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
			if (!ubi->free.rb_node ||
			    (ubi->free_count - ubi->beb_rsvd_pebs < 5))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}
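/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution
 * of pending works. Returns zero in case of success and a negative error
 * code in case of failure.
 */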
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		if (err)
			return err;
	}

	return 0;
}
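/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * Returns the PEB number in case of success and a negative error code in
 * case of failure. Returns with ubi->fm_eba_sem held in read mode!
 */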
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, retried = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/* We check for the WL pool as well, since at this point we can
	 * refill the WL pool synchronously. */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		if (retried) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		retried = 1;
		up_read(&ubi->fm_eba_sem);
		ret = produce_free_peb(ubi);
		if (ret < 0) {
			down_read(&ubi->fm_eba_sem);
			goto out;
		}
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}
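/**
 * get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 * @ubi: UBI device description object
 *
 * Returns NULL if the WL pool is exhausted; in that case a fastmap update
 * is scheduled to refill the pools.
 */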
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

	if (pool->used == pool->size) {
		/* We cannot update the fastmap here, because this function
		 * is called in atomic context.
		 * Let's fail here and refill/update it as soon as possible. */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];
}
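/**
 * ubi_ensure_anchor_pebs - schedule anchor PEB creation.
 * @ubi: UBI device description object
 */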
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->anchor = 1;
	wrk->func = &wear_leveling_worker;
	__schedule_ubi_work(ubi, wrk);
	return 0;
}
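/**
 * ubi_wl_put_fm_peb - returns a PEB used by fastmap to the wear-leveling
 * sub-system.
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 *
 * see: ubi_wl_put_peb()
 */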
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/* This can happen if we recovered from a fastmap the very first
	 * time and now write a new one. In this case the wl sub-system
	 * has never seen any PEB used by the original fastmap.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	}

	spin_unlock(&ubi->wl_lock);

	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}
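/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: the work object to be checked
 */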
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}
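/**
 * ubi_fastmap_close - flushes pending fastmap work and frees fastmap data.
 * @ubi: UBI device description object
 */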
static void ubi_fastmap_close(struct ubi_device *ubi)
{
	int i;

	flush_work(&ubi->fm_work);
	return_unused_pool_pebs(ubi, &ubi->fm_pool);
	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

	if (ubi->fm) {
		for (i = 0; i < ubi->fm->used_blocks; i++)
			kfree(ubi->fm->e[i]);
	}
	kfree(ubi->fm);
}
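/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * @ubi: UBI device description object
 * @e: physical eraseblock to return
 * @root: RB tree to test against
 *
 * See find_mean_wl_entry().
 */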
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
					       struct ubi_wl_entry *e,
					       struct rb_root *root)
{
	if (e && !ubi->fm_disabled && !ubi->fm &&
	    e->pnum < UBI_FM_MAX_START)
		e = rb_entry(rb_next(root->rb_node),
			     struct ubi_wl_entry, u.rb);

	return e;
}