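/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Eraseblock erase handling: issue and complete flash erases, verify that
 * erased blocks read back as all 0xFF, write clean markers, and move
 * eraseblocks between the erasing, erase_pending, erase_complete, free
 * and bad lists.
 */
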
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/crc32.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include "nodelist.h"

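/* Per-erase private data stashed after the MTD erase_info so that the
 * completion callback can find its way back to the JFFS2 superblock and
 * the eraseblock being erased. */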
struct erase_priv_struct {
        struct jffs2_eraseblock *jeb;
        struct jffs2_sb_info *c;
};

#ifndef __ECOS
static void jffs2_erase_callback(struct erase_info *);
#endif
static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset);
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);

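/* Start the erase of a single eraseblock.  On eCos the erase is
 * synchronous; on Linux an erase_info is allocated and handed to
 * mtd_erase(), and jffs2_erase_callback() reports the result.  Allocation
 * failures and transient (-ENOMEM/-EAGAIN) erase failures refile the block
 * onto erase_pending_list for a later retry; anything else is treated as a
 * real erase failure. */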
static void jffs2_erase_block(struct jffs2_sb_info *c,
                              struct jffs2_eraseblock *jeb)
{
        int ret;
        uint32_t bad_offset;
#ifdef __ECOS
        ret = jffs2_flash_erase(c, jeb);
        if (!ret) {
                jffs2_erase_succeeded(c, jeb);
                return;
        }
        bad_offset = jeb->offset;
#else /* Linux */
        struct erase_info *instr;

        jffs2_dbg(1, "%s(): erase block %#08x (range %#08x-%#08x)\n",
                  __func__,
                  jeb->offset, jeb->offset, jeb->offset + c->sector_size);
        instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
        if (!instr) {
                pr_warn("kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
                mutex_lock(&c->erase_free_sem);
                spin_lock(&c->erase_completion_lock);
                list_move(&jeb->list, &c->erase_pending_list);
                c->erasing_size -= c->sector_size;
                c->dirty_size += c->sector_size;
                jeb->dirty_size = c->sector_size;
                spin_unlock(&c->erase_completion_lock);
                mutex_unlock(&c->erase_free_sem);
                return;
        }

        memset(instr, 0, sizeof(*instr));

        instr->mtd = c->mtd;
        instr->addr = jeb->offset;
        instr->len = c->sector_size;
        instr->callback = jffs2_erase_callback;
        instr->priv = (unsigned long)(&instr[1]);

        ((struct erase_priv_struct *)instr->priv)->jeb = jeb;
        ((struct erase_priv_struct *)instr->priv)->c = c;

        ret = mtd_erase(c->mtd, instr);
        if (!ret)
                return;

        bad_offset = instr->fail_addr;
        kfree(instr);
#endif /* __ECOS */

        if (ret == -ENOMEM || ret == -EAGAIN) {
                /* Erase failed immediately. Refile it on the list */
                jffs2_dbg(1, "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n",
                          jeb->offset, ret);
                mutex_lock(&c->erase_free_sem);
                spin_lock(&c->erase_completion_lock);
                list_move(&jeb->list, &c->erase_pending_list);
                c->erasing_size -= c->sector_size;
                c->dirty_size += c->sector_size;
                jeb->dirty_size = c->sector_size;
                spin_unlock(&c->erase_completion_lock);
                mutex_unlock(&c->erase_free_sem);
                return;
        }

        if (ret == -EROFS)
                pr_warn("Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n",
                        jeb->offset);
        else
                pr_warn("Erase at 0x%08x failed immediately: errno %d\n",
                        jeb->offset, ret);

        jffs2_erase_failed(c, jeb, bad_offset);
}

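/* Walk the erase_complete and erase_pending lists: verify and mark up to
 * 'count' freshly-erased blocks as usable, and kick off the actual erase
 * for any blocks queued on erase_pending_list.  Returns the number of
 * completed blocks that were verified and marked. */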
int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
{
        struct jffs2_eraseblock *jeb;
        int work_done = 0;

        mutex_lock(&c->erase_free_sem);

        spin_lock(&c->erase_completion_lock);

        while (!list_empty(&c->erase_complete_list) ||
               !list_empty(&c->erase_pending_list)) {

                if (!list_empty(&c->erase_complete_list)) {
                        jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list);
                        list_move(&jeb->list, &c->erase_checking_list);
                        spin_unlock(&c->erase_completion_lock);
                        mutex_unlock(&c->erase_free_sem);
                        jffs2_mark_erased_block(c, jeb);

                        work_done++;
                        if (!--count) {
                                jffs2_dbg(1, "Count reached. jffs2_erase_pending_blocks leaving\n");
                                goto done;
                        }

                } else if (!list_empty(&c->erase_pending_list)) {
                        jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list);
                        jffs2_dbg(1, "Starting erase of pending block 0x%08x\n",
                                  jeb->offset);
                        list_del(&jeb->list);
                        c->erasing_size += c->sector_size;
                        c->wasted_size -= jeb->wasted_size;
                        c->free_size -= jeb->free_size;
                        c->used_size -= jeb->used_size;
                        c->dirty_size -= jeb->dirty_size;
                        jeb->wasted_size = jeb->used_size = jeb->dirty_size = jeb->free_size = 0;
                        jffs2_free_jeb_node_refs(c, jeb);
                        list_add(&jeb->list, &c->erasing_list);
                        spin_unlock(&c->erase_completion_lock);
                        mutex_unlock(&c->erase_free_sem);

                        jffs2_erase_block(c, jeb);

                } else {
                        BUG();
                }

                /* Be nice */
                cond_resched();
                mutex_lock(&c->erase_free_sem);
                spin_lock(&c->erase_completion_lock);
        }

        spin_unlock(&c->erase_completion_lock);
        mutex_unlock(&c->erase_free_sem);
 done:
        jffs2_dbg(1, "jffs2_erase_pending_blocks completed\n");
        return work_done;
}

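/* An erase finished cleanly: queue the block on erase_complete_list for
 * verification, poke the garbage collector and wake anyone sleeping on
 * c->erase_wait. */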
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
        jffs2_dbg(1, "Erase completed successfully at 0x%08x\n", jeb->offset);
        mutex_lock(&c->erase_free_sem);
        spin_lock(&c->erase_completion_lock);
        list_move_tail(&jeb->list, &c->erase_complete_list);

        jffs2_garbage_collect_trigger(c);
        spin_unlock(&c->erase_completion_lock);
        mutex_unlock(&c->erase_free_sem);
        wake_up(&c->erase_wait);
}

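/* Handle a failed erase.  For NAND (cleanmarker in OOB) with a known
 * failure address, jffs2_write_nand_badblock() gets to decide: if it
 * declines to mark the block bad yet (returns zero), the block is refiled
 * onto erase_pending_list for another attempt.  Otherwise the block is
 * retired onto c->bad_list and accounted as bad space. */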
static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
{
        /* For NAND, only bother with the bad block table if the failure
           occurred at a known device-level offset. */
        if (jffs2_cleanmarker_oob(c) && (bad_offset != (uint32_t)MTD_FAIL_ADDR_UNKNOWN)) {
                /* Device-level failure to erase.  See whether this block has
                   failed too many times to be worth another try. */
                if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
                        /* Give this block another try. */
                        mutex_lock(&c->erase_free_sem);
                        spin_lock(&c->erase_completion_lock);
                        list_move(&jeb->list, &c->erase_pending_list);
                        c->erasing_size -= c->sector_size;
                        c->dirty_size += c->sector_size;
                        jeb->dirty_size = c->sector_size;
                        spin_unlock(&c->erase_completion_lock);
                        mutex_unlock(&c->erase_free_sem);
                        return;
                }
        }

        /* Not worth retrying: retire the block as bad. */
        mutex_lock(&c->erase_free_sem);
        spin_lock(&c->erase_completion_lock);
        c->erasing_size -= c->sector_size;
        c->bad_size += c->sector_size;
        list_move(&jeb->list, &c->bad_list);
        c->nr_erasing_blocks--;
        spin_unlock(&c->erase_completion_lock);
        mutex_unlock(&c->erase_free_sem);
        wake_up(&c->erase_wait);
}

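/* Completion callback for the asynchronous erase started in
 * jffs2_erase_block(): report success or failure and free the erase_info
 * (which also carries our erase_priv_struct in its tail). */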
#ifndef __ECOS
static void jffs2_erase_callback(struct erase_info *instr)
{
        struct erase_priv_struct *priv = (void *)instr->priv;

        if (instr->state != MTD_ERASE_DONE) {
                pr_warn("Erase at 0x%08llx finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n",
                        (unsigned long long)instr->addr, instr->state);
                jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr);
        } else {
                jffs2_erase_succeeded(priv->c, priv->jeb);
        }
        kfree(instr);
}
#endif

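/* Strip all raw node refs that live in the eraseblock 'jeb' out of the
 * next_in_ino chain that 'ref' belongs to.  The chain terminates at the
 * owning jffs2_inode_cache (or xattr datum/ref); once the walk is done,
 * xattr objects are released, and a plain inode cache is deleted if it has
 * no nodes and no remaining links. */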
static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
                        struct jffs2_raw_node_ref *ref, struct jffs2_eraseblock *jeb)
{
        struct jffs2_inode_cache *ic = NULL;
        struct jffs2_raw_node_ref **prev;

        prev = &ref->next_in_ino;

        /* Walk the inode's list once, removing any nodes from this eraseblock */
        while (1) {
                if (!(*prev)->next_in_ino) {
                        /* We're looking at the jffs2_inode_cache, which is
                           at the end of the linked list. Stash it and continue
                           from the beginning of the list */
                        ic = (struct jffs2_inode_cache *)(*prev);
                        prev = &ic->nodes;
                        continue;
                }

                if (SECTOR_ADDR((*prev)->flash_offset) == jeb->offset) {
                        /* It's in the block we're erasing */
                        struct jffs2_raw_node_ref *this;

                        this = *prev;
                        *prev = this->next_in_ino;
                        this->next_in_ino = NULL;

                        if (this == ref)
                                break;

                        continue;
                }
                /* Not to be deleted. Skip */
                prev = &((*prev)->next_in_ino);
        }

        /* PARANOIA: the walk must have found the owning cache object. */
        if (!ic) {
                JFFS2_WARNING("inode_cache/xattr_datum/xattr_ref"
                              " not found in remove_node_refs()!!\n");
                return;
        }

        jffs2_dbg(1, "Removed nodes in range 0x%08x-0x%08x from ino #%u\n",
                  jeb->offset, jeb->offset + c->sector_size, ic->ino);

        D2({
                int i = 0;
                struct jffs2_raw_node_ref *this;
                printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n");

                this = ic->nodes;

                printk(KERN_DEBUG);
                while (this) {
                        pr_cont("0x%08x(%d)->",
                                ref_offset(this), ref_flags(this));
                        if (++i == 5) {
                                printk(KERN_DEBUG);
                                i = 0;
                        }
                        this = this->next_in_ino;
                }
                pr_cont("\n");
        });

        switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
        case RAWNODE_CLASS_XATTR_DATUM:
                jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
                break;
        case RAWNODE_CLASS_XATTR_REF:
                jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
                break;
#endif
        default:
                if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
                        jffs2_del_ino_cache(c, ic);
        }
}

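/* Free every raw node ref block belonging to an eraseblock that is about
 * to be erased, detaching each ref from its inode/xattr chain first. */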
void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
        struct jffs2_raw_node_ref *block, *ref;

        jffs2_dbg(1, "Freeing all node refs for eraseblock offset 0x%08x\n",
                  jeb->offset);

        block = ref = jeb->first_node;

        while (ref) {
                if (ref->flash_offset == REF_LINK_NODE) {
                        /* End of one refblock: free it and follow the link
                           to the next one. */
                        ref = ref->next_in_ino;
                        jffs2_free_refblock(block);
                        block = ref;
                        continue;
                }
                if (ref->flash_offset != REF_EMPTY_NODE && ref->next_in_ino)
                        jffs2_remove_node_refs_from_ino_list(c, ref, jeb);
                /* else it was a non-inode node or already removed, so don't bother */

                ref++;
        }
        jeb->first_node = jeb->last_node = NULL;
}

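/* Verify that a freshly-erased block reads back as all 0xFF.  Uses
 * mtd_point() to scan the block in place when the driver supports it,
 * otherwise falls back to reading it page by page.  Returns 0 if clean,
 * -EIO if a non-FF word is found, or -EAGAIN if the verify buffer could
 * not be allocated and the block should be refiled and checked later. */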
static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset)
{
        void *ebuf;
        uint32_t ofs;
        size_t retlen;
        int ret;
        unsigned long *wordebuf;

        ret = mtd_point(c->mtd, jeb->offset, c->sector_size, &retlen,
                        &ebuf, NULL);
        if (ret != -EOPNOTSUPP) {
                if (ret) {
                        jffs2_dbg(1, "MTD point failed %d\n", ret);
                        goto do_flash_read;
                }
                if (retlen < c->sector_size) {
                        /* Don't muck about if it won't let us point to the whole erase sector */
                        jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n",
                                  retlen);
                        mtd_unpoint(c->mtd, jeb->offset, retlen);
                        goto do_flash_read;
                }
                wordebuf = ebuf - sizeof(*wordebuf);
                retlen /= sizeof(*wordebuf);
                do {
                        if (*++wordebuf != ~0)
                                break;
                } while (--retlen);
                mtd_unpoint(c->mtd, jeb->offset, c->sector_size);
                if (retlen) {
                        pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08tx\n",
                                *wordebuf,
                                jeb->offset +
                                c->sector_size - retlen * sizeof(*wordebuf));
                        return -EIO;
                }
                return 0;
        }
 do_flash_read:
        ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!ebuf) {
                pr_warn("Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n",
                        jeb->offset);
                return -EAGAIN;
        }

        jffs2_dbg(1, "Verifying erase at 0x%08x\n", jeb->offset);

        for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) {
                uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
                int i;

                *bad_offset = ofs;

                ret = mtd_read(c->mtd, ofs, readlen, &retlen, ebuf);
                if (ret) {
                        pr_warn("Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n",
                                ofs, ret);
                        ret = -EIO;
                        goto fail;
                }
                if (retlen != readlen) {
                        pr_warn("Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n",
                                ofs, readlen, retlen);
                        ret = -EIO;
                        goto fail;
                }
                for (i = 0; i < readlen; i += sizeof(unsigned long)) {
                        /* It's OK. We know it's properly aligned */
                        unsigned long *datum = ebuf + i;
                        if (*datum + 1) {
                                /* *datum + 1 is non-zero unless the word is all 0xFF */
                                *bad_offset += i;
                                pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08x\n",
                                        *datum, *bad_offset);
                                ret = -EIO;
                                goto fail;
                        }
                }
                ofs += readlen;
                cond_resched();
        }
        ret = 0;
fail:
        kfree(ebuf);
        return ret;
}

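/* Final step after an erase completes: verify the block really is blank,
 * write a clean marker (inline, or in OOB for NAND), update the size
 * accounting and move the block onto c->free_list.  Verification failures
 * send the block to jffs2_erase_failed(); transient failures refile it on
 * erase_complete_list to be checked again later. */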
static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
        size_t retlen;
        int ret;
        uint32_t uninitialized_var(bad_offset);

        switch (jffs2_block_check_erase(c, jeb, &bad_offset)) {
        case -EAGAIN:   goto refile;
        case -EIO:      goto filebad;
        }

        /* Write the erase complete marker */
        jffs2_dbg(1, "Writing erased marker to block at 0x%08x\n", jeb->offset);
        bad_offset = jeb->offset;

        /* Cleanmarker in oob area or no cleanmarker at all ? */
        if (jffs2_cleanmarker_oob(c) || c->cleanmarker_size == 0) {

                if (jffs2_cleanmarker_oob(c)) {
                        if (jffs2_write_nand_cleanmarker(c, jeb))
                                goto filebad;
                }
        } else {

                struct kvec vecs[1];
                struct jffs2_unknown_node marker = {
                        .magic =    cpu_to_je16(JFFS2_MAGIC_BITMASK),
                        .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
                        .totlen =   cpu_to_je32(c->cleanmarker_size)
                };

                jffs2_prealloc_raw_node_refs(c, jeb, 1);

                marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node) - 4));

                vecs[0].iov_base = (unsigned char *) &marker;
                vecs[0].iov_len = sizeof(marker);
                ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen);

                if (ret || retlen != sizeof(marker)) {
                        if (ret)
                                pr_warn("Write clean marker to block at 0x%08x failed: %d\n",
                                        jeb->offset, ret);
                        else
                                pr_warn("Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
                                        jeb->offset, sizeof(marker), retlen);

                        goto filebad;
                }
        }
        /* The block is now entirely free space */
        jeb->free_size = c->sector_size;

        mutex_lock(&c->erase_free_sem);
        spin_lock(&c->erase_completion_lock);

        c->erasing_size -= c->sector_size;
        c->free_size += c->sector_size;

        /* Account for the in-band cleanmarker, if we wrote one */
        if (c->cleanmarker_size && !jffs2_cleanmarker_oob(c))
                jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL);

        list_move_tail(&jeb->list, &c->free_list);
        c->nr_erasing_blocks--;
        c->nr_free_blocks++;

        jffs2_dbg_acct_sanity_check_nolock(c, jeb);
        jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

        spin_unlock(&c->erase_completion_lock);
        mutex_unlock(&c->erase_free_sem);
        wake_up(&c->erase_wait);
        return;

filebad:
        jffs2_erase_failed(c, jeb, bad_offset);
        return;

refile:
        /* Stick it back on the list from whence it came and come back later */
        mutex_lock(&c->erase_free_sem);
        spin_lock(&c->erase_completion_lock);
        jffs2_garbage_collect_trigger(c);
        list_move(&jeb->list, &c->erase_complete_list);
        spin_unlock(&c->erase_completion_lock);
        mutex_unlock(&c->erase_free_sem);
        return;
}