/*
 * bmap.c - NILFS block mapping.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 */
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "bmap.h"
#include "btree.h"
#include "direct.h"
#include "btnode.h"
#include "mdt.h"
#include "dat.h"
#include "alloc.h"

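/* Return the DAT inode of the filesystem that owns @bmap. */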
struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *bmap)
{
	struct the_nilfs *nilfs = bmap->b_inode->i_sb->s_fs_info;

	return nilfs->ns_dat;
}

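/*
 * Map internal bmap errors for callers: -EINVAL indicates a corrupted
 * bmap, so the filesystem error handler is invoked and -EIO is returned
 * instead.
 */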
static int nilfs_bmap_convert_error(struct nilfs_bmap *bmap,
				    const char *fname, int err)
{
	struct inode *inode = bmap->b_inode;

	if (err == -EINVAL) {
		nilfs_error(inode->i_sb, fname,
			    "broken bmap (inode number=%lu)\n", inode->i_ino);
		err = -EIO;
	}
	return err;
}
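
/**
 * nilfs_bmap_lookup_at_level - find a record in a bmap at a given level
 * @bmap: bmap
 * @key: key to look up
 * @level: level of the block to look up
 * @ptrp: place to store the pointer found for @key
 *
 * Looks up the record whose key matches @key at @level of @bmap.  If the
 * bmap uses virtual block numbers, the result is translated to a disk
 * block number through the DAT before being stored in @ptrp.
 *
 * Return Value: 0 on success, or a negative error code such as %-ENOENT
 * if no matching record exists.
 */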
int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level,
			       __u64 *ptrp)
{
	sector_t blocknr;
	int ret;

	down_read(&bmap->b_sem);
	ret = bmap->b_ops->bop_lookup(bmap, key, level, ptrp);
	if (ret < 0) {
		ret = nilfs_bmap_convert_error(bmap, __func__, ret);
		goto out;
	}
	if (NILFS_BMAP_USE_VBN(bmap)) {
		ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), *ptrp,
					  &blocknr);
		if (!ret)
			*ptrp = blocknr;
	}

 out:
	up_read(&bmap->b_sem);
	return ret;
}

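/*
 * Look up a run of consecutive blocks: stores the pointer for @key in
 * @ptrp and returns the number of contiguous blocks found, limited to
 * @maxblocks, or a negative error code.
 */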
int nilfs_bmap_lookup_contig(struct nilfs_bmap *bmap, __u64 key, __u64 *ptrp,
			     unsigned maxblocks)
{
	int ret;

	down_read(&bmap->b_sem);
	ret = bmap->b_ops->bop_lookup_contig(bmap, key, ptrp, maxblocks);
	up_read(&bmap->b_sem);

	return nilfs_bmap_convert_error(bmap, __func__, ret);
}

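/*
 * Insert @key/@ptr into the bmap.  If the current (direct) representation
 * cannot hold the new entry, gather the existing entries and convert the
 * bmap to a B-tree before inserting.
 */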
static int nilfs_bmap_do_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
{
	__u64 keys[NILFS_BMAP_SMALL_HIGH + 1];
	__u64 ptrs[NILFS_BMAP_SMALL_HIGH + 1];
	int ret, n;

	if (bmap->b_ops->bop_check_insert != NULL) {
		ret = bmap->b_ops->bop_check_insert(bmap, key);
		if (ret > 0) {
			n = bmap->b_ops->bop_gather_data(
				bmap, keys, ptrs, NILFS_BMAP_SMALL_HIGH + 1);
			if (n < 0)
				return n;
			ret = nilfs_btree_convert_and_insert(
				bmap, key, ptr, keys, ptrs, n);
			if (ret == 0)
				bmap->b_u.u_flags |= NILFS_BMAP_LARGE;

			return ret;
		} else if (ret < 0)
			return ret;
	}

	return bmap->b_ops->bop_insert(bmap, key, ptr);
}

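/**
 * nilfs_bmap_insert - insert a new key-record pair into a bmap
 * @bmap: bmap
 * @key: key
 * @rec: record
 *
 * Inserts the record @rec associated with @key, converting the bmap from
 * the direct representation to a B-tree when it grows too large.
 *
 * Return Value: 0 on success, or a negative error code such as %-EEXIST
 * if a record for @key already exists.
 */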
int nilfs_bmap_insert(struct nilfs_bmap *bmap, __u64 key, unsigned long rec)
{
	int ret;

	down_write(&bmap->b_sem);
	ret = nilfs_bmap_do_insert(bmap, key, rec);
	up_write(&bmap->b_sem);

	return nilfs_bmap_convert_error(bmap, __func__, ret);
}

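/*
 * Delete @key from the bmap.  If deleting from a B-tree leaves few enough
 * entries, gather the remaining entries and convert the bmap back to the
 * direct representation.
 */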
static int nilfs_bmap_do_delete(struct nilfs_bmap *bmap, __u64 key)
{
	__u64 keys[NILFS_BMAP_LARGE_LOW + 1];
	__u64 ptrs[NILFS_BMAP_LARGE_LOW + 1];
	int ret, n;

	if (bmap->b_ops->bop_check_delete != NULL) {
		ret = bmap->b_ops->bop_check_delete(bmap, key);
		if (ret > 0) {
			n = bmap->b_ops->bop_gather_data(
				bmap, keys, ptrs, NILFS_BMAP_LARGE_LOW + 1);
			if (n < 0)
				return n;
			ret = nilfs_direct_delete_and_convert(
				bmap, key, keys, ptrs, n);
			if (ret == 0)
				bmap->b_u.u_flags &= ~NILFS_BMAP_LARGE;

			return ret;
		} else if (ret < 0)
			return ret;
	}

	return bmap->b_ops->bop_delete(bmap, key);
}

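/**
 * nilfs_bmap_seek_key - seek a valid entry and return its key
 * @bmap: bmap
 * @start: start key number
 * @keyp: place to store the valid key found
 *
 * Finds a valid key greater than or equal to @start and stores it in @keyp.
 *
 * Return Value: 0 on success, or a negative error code
 * (%-ENOENT if no valid key was found).
 */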
int nilfs_bmap_seek_key(struct nilfs_bmap *bmap, __u64 start, __u64 *keyp)
{
	int ret;

	down_read(&bmap->b_sem);
	ret = bmap->b_ops->bop_seek_key(bmap, start, keyp);
	up_read(&bmap->b_sem);

	if (ret < 0)
		ret = nilfs_bmap_convert_error(bmap, __func__, ret);
	return ret;
}

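/*
 * Find the last (largest) key in the bmap and store it in @keyp;
 * returns %-ENOENT if the bmap has no entries.
 */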
int nilfs_bmap_last_key(struct nilfs_bmap *bmap, __u64 *keyp)
{
	int ret;

	down_read(&bmap->b_sem);
	ret = bmap->b_ops->bop_last_key(bmap, keyp);
	up_read(&bmap->b_sem);

	if (ret < 0)
		ret = nilfs_bmap_convert_error(bmap, __func__, ret);
	return ret;
}

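/**
 * nilfs_bmap_delete - delete a key-record pair from a bmap
 * @bmap: bmap
 * @key: key
 *
 * Deletes the record associated with @key, converting the bmap from a
 * B-tree back to the direct representation when it becomes small enough.
 *
 * Return Value: 0 on success, or a negative error code such as %-ENOENT
 * if no record for @key exists.
 */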
int nilfs_bmap_delete(struct nilfs_bmap *bmap, __u64 key)
{
	int ret;

	down_write(&bmap->b_sem);
	ret = nilfs_bmap_do_delete(bmap, key);
	up_write(&bmap->b_sem);

	return nilfs_bmap_convert_error(bmap, __func__, ret);
}

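/*
 * Remove all entries whose keys are greater than or equal to @key by
 * repeatedly deleting the last key.  An empty bmap (-ENOENT from
 * bop_last_key) is not treated as an error.
 */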
static int nilfs_bmap_do_truncate(struct nilfs_bmap *bmap, __u64 key)
{
	__u64 lastkey;
	int ret;

	ret = bmap->b_ops->bop_last_key(bmap, &lastkey);
	if (ret < 0) {
		if (ret == -ENOENT)
			ret = 0;
		return ret;
	}

	while (key <= lastkey) {
		ret = nilfs_bmap_do_delete(bmap, lastkey);
		if (ret < 0)
			return ret;
		ret = bmap->b_ops->bop_last_key(bmap, &lastkey);
		if (ret < 0) {
			if (ret == -ENOENT)
				ret = 0;
			return ret;
		}
	}
	return 0;
}
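
/**
 * nilfs_bmap_truncate - truncate a bmap starting at a given key
 * @bmap: bmap
 * @key: key
 *
 * Removes all records whose keys are greater than or equal to @key.
 *
 * Return Value: 0 on success, or a negative error code.
 */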
int nilfs_bmap_truncate(struct nilfs_bmap *bmap, __u64 key)
{
	int ret;

	down_write(&bmap->b_sem);
	ret = nilfs_bmap_do_truncate(bmap, key);
	up_write(&bmap->b_sem);

	return nilfs_bmap_convert_error(bmap, __func__, ret);
}

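/**
 * nilfs_bmap_clear - free in-memory resources held by a bmap
 * @bmap: bmap
 */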
void nilfs_bmap_clear(struct nilfs_bmap *bmap)
{
	down_write(&bmap->b_sem);
	if (bmap->b_ops->bop_clear != NULL)
		bmap->b_ops->bop_clear(bmap);
	up_write(&bmap->b_sem);
}

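/**
 * nilfs_bmap_propagate - propagate the dirty state of a buffer up the bmap
 * @bmap: bmap
 * @bh: buffer head of the dirty block
 *
 * Marks the blocks on the path from the block pointed to by @bh up to the
 * bmap root as dirty.
 *
 * Return Value: 0 on success, or a negative error code.
 */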
int nilfs_bmap_propagate(struct nilfs_bmap *bmap, struct buffer_head *bh)
{
	int ret;

	down_write(&bmap->b_sem);
	ret = bmap->b_ops->bop_propagate(bmap, bh);
	up_write(&bmap->b_sem);

	return nilfs_bmap_convert_error(bmap, __func__, ret);
}

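/*
 * Collect the dirty buffers managed by the bmap (for example dirty B-tree
 * node buffers) onto @listp.
 */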
void nilfs_bmap_lookup_dirty_buffers(struct nilfs_bmap *bmap,
				     struct list_head *listp)
{
	if (bmap->b_ops->bop_lookup_dirty_buffers != NULL)
		bmap->b_ops->bop_lookup_dirty_buffers(bmap, listp);
}

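/**
 * nilfs_bmap_assign - assign a new disk block number to a block
 * @bmap: bmap
 * @bh: pointer to the buffer head of the target block
 * @blocknr: disk block number to assign
 * @binfo: place to store block information for the log writer
 *
 * Return Value: 0 on success, or a negative error code.
 */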
int nilfs_bmap_assign(struct nilfs_bmap *bmap,
		      struct buffer_head **bh,
		      unsigned long blocknr,
		      union nilfs_binfo *binfo)
{
	int ret;

	down_write(&bmap->b_sem);
	ret = bmap->b_ops->bop_assign(bmap, bh, blocknr, binfo);
	up_write(&bmap->b_sem);

	return nilfs_bmap_convert_error(bmap, __func__, ret);
}

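/**
 * nilfs_bmap_mark - mark the block specified by a key and level as dirty
 * @bmap: bmap
 * @key: key
 * @level: level of the block within the bmap
 *
 * A bmap without a bop_mark operation succeeds without doing anything.
 *
 * Return Value: 0 on success, or a negative error code.
 */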
int nilfs_bmap_mark(struct nilfs_bmap *bmap, __u64 key, int level)
{
	int ret;

	if (bmap->b_ops->bop_mark == NULL)
		return 0;

	down_write(&bmap->b_sem);
	ret = bmap->b_ops->bop_mark(bmap, key, level);
	up_write(&bmap->b_sem);

	return nilfs_bmap_convert_error(bmap, __func__, ret);
}

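/**
 * nilfs_bmap_test_and_clear_dirty - test and clear the dirty state of a bmap
 * @bmap: bmap
 *
 * Return Value: nonzero if the bmap was dirty, 0 otherwise; the dirty
 * flag is cleared in either case.
 */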
int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *bmap)
{
	int ret;

	down_write(&bmap->b_sem);
	ret = nilfs_bmap_dirty(bmap);
	nilfs_bmap_clear_dirty(bmap);
	up_write(&bmap->b_sem);
	return ret;
}

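/*
 * Compute the file block index (bmap key) of the block held by @bh from
 * its page index and its position within the page.
 */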
__u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap,
			      const struct buffer_head *bh)
{
	struct buffer_head *pbh;
	__u64 key;

	key = page_index(bh->b_page) << (PAGE_CACHE_SHIFT -
					 bmap->b_inode->i_blkbits);
	for (pbh = page_buffers(bh->b_page); pbh != bh; pbh = pbh->b_this_page)
		key++;

	return key;
}

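/*
 * Suggest a block address for sequential allocation: if @key is close to
 * the last allocated key, extrapolate from the last allocated pointer;
 * otherwise return NILFS_BMAP_INVALID_PTR.
 */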
__u64 nilfs_bmap_find_target_seq(const struct nilfs_bmap *bmap, __u64 key)
{
	__s64 diff;

	diff = key - bmap->b_last_allocated_key;
	if ((nilfs_bmap_keydiff_abs(diff) < NILFS_INODE_BMAP_SIZE) &&
	    (bmap->b_last_allocated_ptr != NILFS_BMAP_INVALID_PTR) &&
	    (bmap->b_last_allocated_ptr + diff > 0))
		return bmap->b_last_allocated_ptr + diff;
	else
		return NILFS_BMAP_INVALID_PTR;
}

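/*
 * Suggest a block address within the DAT allocation group derived from
 * the inode number, so that blocks of the same inode tend to be grouped.
 */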
#define NILFS_BMAP_GROUP_DIV	8
__u64 nilfs_bmap_find_target_in_group(const struct nilfs_bmap *bmap)
{
	struct inode *dat = nilfs_bmap_get_dat(bmap);
	unsigned long entries_per_group = nilfs_palloc_entries_per_group(dat);
	unsigned long group = bmap->b_inode->i_ino / entries_per_group;

	return group * entries_per_group +
	       (bmap->b_inode->i_ino % NILFS_BMAP_GROUP_DIV) *
	       (entries_per_group / NILFS_BMAP_GROUP_DIV);
}

static struct lock_class_key nilfs_bmap_dat_lock_key;
static struct lock_class_key nilfs_bmap_mdt_lock_key;

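/**
 * nilfs_bmap_read - read a bmap from an on-disk inode
 * @bmap: bmap
 * @raw_inode: on-disk inode (or NULL to initialize an empty bmap)
 *
 * Initializes @bmap from the i_bmap array of @raw_inode and sets up the
 * pointer type and lock class according to the owning inode.
 *
 * Return Value: 0 on success, or a negative error code.
 */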
int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode)
{
	if (raw_inode == NULL)
		memset(bmap->b_u.u_data, 0, NILFS_BMAP_SIZE);
	else
		memcpy(bmap->b_u.u_data, raw_inode->i_bmap, NILFS_BMAP_SIZE);

	init_rwsem(&bmap->b_sem);
	bmap->b_state = 0;
	bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode;
	switch (bmap->b_inode->i_ino) {
	case NILFS_DAT_INO:
		bmap->b_ptr_type = NILFS_BMAP_PTR_P;
		bmap->b_last_allocated_key = 0;
		bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT;
		lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key);
		break;
	case NILFS_CPFILE_INO:
	case NILFS_SUFILE_INO:
		bmap->b_ptr_type = NILFS_BMAP_PTR_VS;
		bmap->b_last_allocated_key = 0;
		bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR;
		lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key);
		break;
	case NILFS_IFILE_INO:
		lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key);
		/* Fall through */
	default:
		bmap->b_ptr_type = NILFS_BMAP_PTR_VM;
		bmap->b_last_allocated_key = 0;
		bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR;
		break;
	}

	return (bmap->b_u.u_flags & NILFS_BMAP_LARGE) ?
	       nilfs_btree_init(bmap) : nilfs_direct_init(bmap);
}

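/**
 * nilfs_bmap_write - write back bmap data to an on-disk inode
 * @bmap: bmap
 * @raw_inode: on-disk inode
 *
 * Copies the bmap data into the i_bmap array of @raw_inode and, for the
 * DAT, resets the last-allocated-pointer hint.
 */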
void nilfs_bmap_write(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode)
{
	down_write(&bmap->b_sem);
	memcpy(raw_inode->i_bmap, bmap->b_u.u_data,
	       NILFS_INODE_BMAP_SIZE * sizeof(__le64));
	if (bmap->b_inode->i_ino == NILFS_DAT_INO)
		bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT;

	up_write(&bmap->b_sem);
}

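/*
 * Initialize a bmap for a GC (garbage collection) inode: the mapping data
 * is cleared and the bmap is set up as a B-tree.
 */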
void nilfs_bmap_init_gc(struct nilfs_bmap *bmap)
{
	memset(&bmap->b_u, 0, NILFS_BMAP_SIZE);
	init_rwsem(&bmap->b_sem);
	bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode;
	bmap->b_ptr_type = NILFS_BMAP_PTR_U;
	bmap->b_last_allocated_key = 0;
	bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR;
	bmap->b_state = 0;
	nilfs_btree_init_gc(bmap);
}

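/*
 * nilfs_bmap_save() and nilfs_bmap_restore() snapshot and restore the
 * volatile bmap state (mapping data, allocation hints and dirty state)
 * so a caller can roll the bmap back to a previous state.
 */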
void nilfs_bmap_save(const struct nilfs_bmap *bmap,
		     struct nilfs_bmap_store *store)
{
	memcpy(store->data, bmap->b_u.u_data, sizeof(store->data));
	store->last_allocated_key = bmap->b_last_allocated_key;
	store->last_allocated_ptr = bmap->b_last_allocated_ptr;
	store->state = bmap->b_state;
}

void nilfs_bmap_restore(struct nilfs_bmap *bmap,
			const struct nilfs_bmap_store *store)
{
	memcpy(bmap->b_u.u_data, store->data, sizeof(store->data));
	bmap->b_last_allocated_key = store->last_allocated_key;
	bmap->b_last_allocated_ptr = store->last_allocated_ptr;
	bmap->b_state = store->state;
}