/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */
#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h>
#include "nodelist.h"
#include "debug.h"
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);
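/*
 * jffs2_reserve_space - request space for writing a node to flash.
 * @c: superblock info
 * @minsize: minimum acceptable allocation size
 * @len: returned allocation length
 * @prio: allocation type - ALLOC_NORMAL or ALLOC_DELETION
 * @sumsize: summary node space to reserve, or JFFS2_SUMMARY_NOSUM_SIZE
 *
 * Returns zero and fills in *len on success, or a negative error code
 * (-ENOSPC, -EINTR, ...) on failure. On success, c->alloc_sem is left
 * held; jffs2_complete_reservation() releases it once the write is done.
 */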
int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;

	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
	mutex_lock(&c->alloc_sem);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

	spin_lock(&c->erase_completion_lock);

	while (ret == -EAGAIN) {
		while (c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			uint32_t dirty, avail;
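			/* Calculate the real dirty size: blocks on the
			 * erase_pending_list are still counted in dirty_size
			 * but also in nr_erasing_blocks, and blocks being
			 * erased are counted in both erasing_size and
			 * nr_erasing_blocks, so subtract
			 * nr_erasing_blocks * sector_size to avoid counting
			 * that space twice. unchecked_size is added because
			 * checking may yet prove some of it reusable. */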
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
					break;
				}
				D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size, c->nospc_dirty_size));

				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}
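			/* Calculate the space that could possibly become
			 * available: unchecked space might still turn out to
			 * hold obsolete nodes. If even this optimistic maximum
			 * is no more than the reserved blocks, return -ENOSPC
			 * instead of letting GC loop endlessly on a
			 * nearly-full filesystem. */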
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
					break;
				}

				D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size));
				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}
			mutex_unlock(&c->alloc_sem);

			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);

			if (ret == -EAGAIN) {
				spin_lock(&c->erase_completion_lock);
				if (c->nr_erasing_blocks &&
				    list_empty(&c->erase_pending_list) &&
				    list_empty(&c->erase_complete_list)) {
					/* All erases are in progress; wait for one to complete */
					DECLARE_WAITQUEUE(wait, current);
					set_current_state(TASK_UNINTERRUPTIBLE);
					add_wait_queue(&c->erase_wait, &wait);
					D1(printk(KERN_DEBUG "%s waiting for erase to complete\n", __func__));
					spin_unlock(&c->erase_completion_lock);

					schedule();
					remove_wait_queue(&c->erase_wait, &wait);
				} else
					spin_unlock(&c->erase_completion_lock);
			} else if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			mutex_lock(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		mutex_unlock(&c->alloc_sem);
	return ret;
}
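/* Variant of jffs2_reserve_space() for use from the garbage collector,
 * which already holds c->alloc_sem: just loop on jffs2_do_reserve_space()
 * under the erase_completion_lock until it stops returning -EAGAIN.
 */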
int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret = -EAGAIN;

	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

	spin_lock(&c->erase_completion_lock);
	while (ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}
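/* Classify c->nextblock as clean, dirty or very dirty, file it on the
 * matching list, and force selection of a new nextblock.
 */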
static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	if (c->nextblock == NULL) {
		D1(printk(KERN_DEBUG "jffs2_close_nextblock: Erase block at 0x%08x has already been placed in a list\n",
			  jeb->offset));
		return;
	}
	/* Check whether we have a dirty block now, or whether it was dirty already */
	if (ISDIRTY(jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;
}
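/* Pick a new c->nextblock: take the first block off the free_list, or
 * arrange for more erases and return -EAGAIN (or -ENOSPC if there is
 * nothing left to erase at all).
 */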
static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
				  ejeb->offset));
		}

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_pending_wbuf_list)) {
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
			/* c->nextblock is NULL; no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go: the flush will have moved blocks from
			   erasable_pending_wbuf_list onto the erasable_list */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
			       c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list) ? "yes" : "no",
			       list_empty(&c->erasing_list) ? "yes" : "no", list_empty(&c->erase_pending_list) ? "yes" : "no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for the erase thread; erase one block right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the amount of free
		   space available. So we must retest the allocation. */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	/* adjust write buffer offset, else we get a non-contiguous write bug */
	if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
		c->wbuf_ofs = 0xffffffff;
#endif

	D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));

	return 0;
}
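/* Called with c->alloc_sem and c->erase_completion_lock held */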
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;		/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;
	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
		/* NOSUM_SIZE means not to generate a summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d , jeb->free=%d ,"
				    "summary->size=%d , sumsize=%d\n",
				    minsize, jeb->free_size,
				    c->summary->sum_size, sumsize);
		}

		/* Is there enough space for writing out the current node, or do we
		   have to write out summary information now, close this jeb and
		   select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Write out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the
				   summary information; summary is now disabled for
				   this jeb, so retry without it. */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* keep a valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
			if (ret)
				return ret;
			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. */
			spin_lock(&c->erase_completion_lock);

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* That made the remainder count as dirty; convert it to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}
	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero, but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n",
		  *len, jeb->offset + (c->sector_size - jeb->free_size)));
	return 0;
}
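/**
 * jffs2_add_physical_node_ref - add a physical node reference to the list
 * @c: superblock info
 * @ofs: offset (with REF_ flag bits) of the new node
 * @len: length of this physical node
 * @ic: inode cache to link the new reference into, or NULL
 *
 * Should only be used to report nodes for which space has been allocated
 * by jffs2_reserve_space(); must be called with c->alloc_sem held.
 */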
struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n",
		  ofs & ~3, ofs & 3, len));
#if 1
	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		printk(KERN_WARNING "argh. node added in wrong place at 0x%08x(%d)\n", ofs & ~3, ofs & 3);
		if (c->nextblock)
			printk(KERN_WARNING "nextblock 0x%08x", c->nextblock->offset);
		else
			printk(KERN_WARNING "No nextblock");
		printk(", expected at %08x\n", jeb->offset + (c->sector_size - jeb->free_size));
		return ERR_PTR(-EINVAL);
	}
#endif
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}
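/* Release the space reservation taken by jffs2_reserve_space(): poke the
 * GC thread in case it now has work to do, and drop c->alloc_sem.
 */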
void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	spin_lock(&c->erase_completion_lock);
	jffs2_garbage_collect_trigger(c);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->alloc_sem);
}
static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			D1(printk("%p is on list at %p\n", obj, head));
			return 1;
		}
	}
	return 0;
}
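/* Mark a node obsolete: update the space accounting for its eraseblock,
 * refile the block if it has become entirely dirty, and, where the medium
 * allows it, clear the ACCURATE bit of the node header on flash.
 */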
void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if (unlikely(!ref)) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];
	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   reference to the obliterated nodes until the erase is done. */
		mutex_lock(&c->erase_free_sem);
	}
	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}
	/* Take care that the wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		D1(printk("Dirtying\n"));
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset));
				addedsize = 0; /* To fool the refiling code later */
			} else {
				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset));
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		D1(printk("Wasting\n"));
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}
	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_garbage_collect_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}
	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
	    (c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}
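	/* The erase_free_sem is locked, and has been since before we marked the
	   node obsolete and potentially put its eraseblock onto the
	   erase_pending_list. Thus, the block cannot actually be erased under
	   us, and it is safe to go and clear the obsoleted node's ACCURATE bit
	   on the medium now. */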
	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		goto out_erase_sem;
	}
	/* Clear the ACCURATE bit in the node header on flash */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
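	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment, because we need
	   obsolete dirent nodes to stay on the lists for
	   jffs2_garbage_collect_deletion_dirent(), and because we delete
	   the inocache, which on NAND needs to stay around until all the
	   nodes are actually erased, to stop us from giving the same inode
	   number to another newly created inode. */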
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
		case RAWNODE_CLASS_XATTR_DATUM:
			jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
			break;
		case RAWNODE_CLASS_XATTR_REF:
			jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
			break;
#endif
		default:
			if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
				jffs2_del_ino_cache(c, ic);
			break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	mutex_unlock(&c->erase_free_sem);
}
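/* Decide whether the garbage-collector thread has anything useful to do:
 * erases pending or complete, unchecked nodes left to check, or enough
 * dirty space to make a GC pass worthwhile.
 */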
int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;
	int nr_very_dirty = 0;
	struct jffs2_eraseblock *jeb;

	if (!list_empty(&c->erase_complete_list) ||
	    !list_empty(&c->erase_pending_list))
		return 1;

	if (c->unchecked_size) {
		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino));
		return 1;
	}
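	/* dirty_size still counts blocks on the erase_pending_list, which are
	 * also counted in nr_erasing_blocks, as are blocks currently being
	 * erased (counted in erasing_size too); subtract
	 * nr_erasing_blocks * sector_size so that space is not counted twice.
	 * Blocks on the erasable_list are counted in dirty_size but not in
	 * nr_erasing_blocks, which helps force GC to eventually pick a clean
	 * block and spread the load. */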
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
	    (dirty > c->nospc_dirty_size))
		ret = 1;

	list_for_each_entry(jeb, &c->very_dirty_list, list) {
		nr_very_dirty++;
		if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
			ret = 1;
			/* In debug mode, actually go through and count them all */
			D1(continue);
			break;
		}
	}

	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, nr_very_dirty, ret ? "yes" : "no"));

	return ret;
}