/*
 * NILFS block mapping (bmap) routines.
 */
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "bmap.h"
#include "btree.h"
#include "direct.h"
#include "btnode.h"
#include "mdt.h"
#include "dat.h"
#include "alloc.h"

struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *bmap)
{
	struct the_nilfs *nilfs = bmap->b_inode->i_sb->s_fs_info;

	return nilfs->ns_dat;
}

static int nilfs_bmap_convert_error(struct nilfs_bmap *bmap,
				    const char *fname, int err)
{
	struct inode *inode = bmap->b_inode;

	if (err == -EINVAL) {
		__nilfs_error(inode->i_sb, fname,
			      "broken bmap (inode number=%lu)", inode->i_ino);
		err = -EIO;
	}
	return err;
}
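
/**
 * nilfs_bmap_lookup_at_level - find the block address for a key at a level
 * @bmap: bmap
 * @key: key of the block
 * @level: level of the lookup
 * @ptrp: place to store the resulting block address
 *
 * Looks up the pointer recorded for @key at @level.  If the bmap uses
 * virtual block numbers, the pointer is translated through the DAT into
 * an on-disk block number before being stored in @ptrp.
 *
 * Return: 0 on success, or a negative error code on failure.
 */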
int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level,
			       __u64 *ptrp)
{
	sector_t blocknr;
	int ret;

	down_read(&bmap->b_sem);
	ret = bmap->b_ops->bop_lookup(bmap, key, level, ptrp);
	if (ret < 0) {
		ret = nilfs_bmap_convert_error(bmap, __func__, ret);
		goto out;
	}
	if (NILFS_BMAP_USE_VBN(bmap)) {
		ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), *ptrp,
					  &blocknr);
		if (!ret)
			*ptrp = blocknr;
	}

 out:
	up_read(&bmap->b_sem);
	return ret;
}

int nilfs_bmap_lookup_contig(struct nilfs_bmap *bmap, __u64 key, __u64 *ptrp,
			     unsigned int maxblocks)
{
	int ret;

	down_read(&bmap->b_sem);
	ret = bmap->b_ops->bop_lookup_contig(bmap, key, ptrp, maxblocks);
	up_read(&bmap->b_sem);

	return nilfs_bmap_convert_error(bmap, __func__, ret);
}
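
/*
 * nilfs_bmap_do_insert - insert a key/pointer pair, converting the bmap
 * from the small (direct) form to the large (B-tree) form when the direct
 * map can no longer hold the new entry.
 */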
static int nilfs_bmap_do_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
{
	__u64 keys[NILFS_BMAP_SMALL_HIGH + 1];
	__u64 ptrs[NILFS_BMAP_SMALL_HIGH + 1];
	int ret, n;

	if (bmap->b_ops->bop_check_insert != NULL) {
		ret = bmap->b_ops->bop_check_insert(bmap, key);
		if (ret > 0) {
			n = bmap->b_ops->bop_gather_data(
				bmap, keys, ptrs, NILFS_BMAP_SMALL_HIGH + 1);
			if (n < 0)
				return n;
			ret = nilfs_btree_convert_and_insert(
				bmap, key, ptr, keys, ptrs, n);
			if (ret == 0)
				bmap->b_u.u_flags |= NILFS_BMAP_LARGE;

			return ret;
		} else if (ret < 0)
			return ret;
	}

	return bmap->b_ops->bop_insert(bmap, key, ptr);
}
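
/**
 * nilfs_bmap_insert - insert a new key-record pair into a bmap
 * @bmap: bmap
 * @key: key of the new record
 * @rec: record to be associated with @key
 *
 * Return: 0 on success, or a negative error code on failure (for example,
 * if a record for @key already exists).
 */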
int nilfs_bmap_insert(struct nilfs_bmap *bmap, __u64 key, unsigned long rec)
{
	int ret;

	down_write(&bmap->b_sem);
	ret = nilfs_bmap_do_insert(bmap, key, rec);
	up_write(&bmap->b_sem);

	return nilfs_bmap_convert_error(bmap, __func__, ret);
}
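
/*
 * nilfs_bmap_do_delete - delete the record for a key, converting the bmap
 * back from the large (B-tree) form to the small (direct) form once it
 * shrinks below the conversion threshold.
 */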
static int nilfs_bmap_do_delete(struct nilfs_bmap *bmap, __u64 key)
{
	__u64 keys[NILFS_BMAP_LARGE_LOW + 1];
	__u64 ptrs[NILFS_BMAP_LARGE_LOW + 1];
	int ret, n;

	if (bmap->b_ops->bop_check_delete != NULL) {
		ret = bmap->b_ops->bop_check_delete(bmap, key);
		if (ret > 0) {
			n = bmap->b_ops->bop_gather_data(
				bmap, keys, ptrs, NILFS_BMAP_LARGE_LOW + 1);
			if (n < 0)
				return n;
			ret = nilfs_direct_delete_and_convert(
				bmap, key, keys, ptrs, n);
			if (ret == 0)
				bmap->b_u.u_flags &= ~NILFS_BMAP_LARGE;

			return ret;
		} else if (ret < 0)
			return ret;
	}

	return bmap->b_ops->bop_delete(bmap, key);
}
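
/**
 * nilfs_bmap_seek_key - find the first existing key at or after a start key
 * @bmap: bmap
 * @start: key to start the search from
 * @keyp: place to store the key found
 *
 * Return: 0 on success, %-ENOENT if no such key exists, or another
 * negative error code on failure.
 */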
int nilfs_bmap_seek_key(struct nilfs_bmap *bmap, __u64 start, __u64 *keyp)
{
	int ret;

	down_read(&bmap->b_sem);
	ret = bmap->b_ops->bop_seek_key(bmap, start, keyp);
	up_read(&bmap->b_sem);

	if (ret < 0)
		ret = nilfs_bmap_convert_error(bmap, __func__, ret);
	return ret;
}

int nilfs_bmap_last_key(struct nilfs_bmap *bmap, __u64 *keyp)
{
	int ret;

	down_read(&bmap->b_sem);
	ret = bmap->b_ops->bop_last_key(bmap, keyp);
	up_read(&bmap->b_sem);

	if (ret < 0)
		ret = nilfs_bmap_convert_error(bmap, __func__, ret);
	return ret;
}
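
/**
 * nilfs_bmap_delete - delete the record associated with a key
 * @bmap: bmap
 * @key: key of the record to be deleted
 *
 * Return: 0 on success, %-ENOENT if no record exists for @key, or another
 * negative error code on failure.
 */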
int nilfs_bmap_delete(struct nilfs_bmap *bmap, __u64 key)
{
	int ret;

	down_write(&bmap->b_sem);
	ret = nilfs_bmap_do_delete(bmap, key);
	up_write(&bmap->b_sem);

	return nilfs_bmap_convert_error(bmap, __func__, ret);
}
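
/*
 * nilfs_bmap_do_truncate - delete all records whose keys are greater than
 * or equal to @key, working backwards from the last key in the bmap.
 */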
static int nilfs_bmap_do_truncate(struct nilfs_bmap *bmap, __u64 key)
{
	__u64 lastkey;
	int ret;

	ret = bmap->b_ops->bop_last_key(bmap, &lastkey);
	if (ret < 0) {
		if (ret == -ENOENT)
			ret = 0;
		return ret;
	}

	while (key <= lastkey) {
		ret = nilfs_bmap_do_delete(bmap, lastkey);
		if (ret < 0)
			return ret;
		ret = bmap->b_ops->bop_last_key(bmap, &lastkey);
		if (ret < 0) {
			if (ret == -ENOENT)
				ret = 0;
			return ret;
		}
	}
	return 0;
}
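
/**
 * nilfs_bmap_truncate - delete the records whose keys are not less than @key
 * @bmap: bmap
 * @key: lower bound of the keys to be removed
 *
 * Return: 0 on success, or a negative error code on failure.
 */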
int nilfs_bmap_truncate(struct nilfs_bmap *bmap, __u64 key)
{
	int ret;

	down_write(&bmap->b_sem);
	ret = nilfs_bmap_do_truncate(bmap, key);
	up_write(&bmap->b_sem);

	return nilfs_bmap_convert_error(bmap, __func__, ret);
}
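
/**
 * nilfs_bmap_clear - free the resources held by a bmap
 * @bmap: bmap
 */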
void nilfs_bmap_clear(struct nilfs_bmap *bmap)
{
	down_write(&bmap->b_sem);
	if (bmap->b_ops->bop_clear != NULL)
		bmap->b_ops->bop_clear(bmap);
	up_write(&bmap->b_sem);
}
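
/**
 * nilfs_bmap_propagate - propagate the dirty state of a buffer
 * @bmap: bmap
 * @bh: buffer head of the dirty block
 *
 * Marks the blocks that directly or indirectly refer to the block pointed
 * to by @bh dirty, so that they are written out together.
 *
 * Return: 0 on success, or a negative error code on failure.
 */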
int nilfs_bmap_propagate(struct nilfs_bmap *bmap, struct buffer_head *bh)
{
	int ret;

	down_write(&bmap->b_sem);
	ret = bmap->b_ops->bop_propagate(bmap, bh);
	up_write(&bmap->b_sem);

	return nilfs_bmap_convert_error(bmap, __func__, ret);
}
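
/**
 * nilfs_bmap_lookup_dirty_buffers - collect the dirty block buffers of a bmap
 * @bmap: bmap
 * @listp: list head onto which the dirty buffers are gathered
 */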
void nilfs_bmap_lookup_dirty_buffers(struct nilfs_bmap *bmap,
				     struct list_head *listp)
{
	if (bmap->b_ops->bop_lookup_dirty_buffers != NULL)
		bmap->b_ops->bop_lookup_dirty_buffers(bmap, listp);
}
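
/**
 * nilfs_bmap_assign - assign a new block address to a buffer
 * @bmap: bmap
 * @bh: place holding the buffer head of the block
 * @blocknr: block number to be assigned
 * @binfo: place to store the block information
 *
 * Return: 0 on success, or a negative error code on failure.
 */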
int nilfs_bmap_assign(struct nilfs_bmap *bmap,
		      struct buffer_head **bh,
		      unsigned long blocknr,
		      union nilfs_binfo *binfo)
{
	int ret;

	down_write(&bmap->b_sem);
	ret = bmap->b_ops->bop_assign(bmap, bh, blocknr, binfo);
	up_write(&bmap->b_sem);

	return nilfs_bmap_convert_error(bmap, __func__, ret);
}
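
/**
 * nilfs_bmap_mark - mark the block specified by a key and level as dirty
 * @bmap: bmap
 * @key: key of the block
 * @level: level of the block
 *
 * Return: 0 on success, or a negative error code on failure.
 */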
int nilfs_bmap_mark(struct nilfs_bmap *bmap, __u64 key, int level)
{
	int ret;

	if (bmap->b_ops->bop_mark == NULL)
		return 0;

	down_write(&bmap->b_sem);
	ret = bmap->b_ops->bop_mark(bmap, key, level);
	up_write(&bmap->b_sem);

	return nilfs_bmap_convert_error(bmap, __func__, ret);
}
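
/**
 * nilfs_bmap_test_and_clear_dirty - test and clear the dirty state of a bmap
 * @bmap: bmap
 *
 * Return: 1 if @bmap was dirty, or 0 if it was clean.
 */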
int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *bmap)
{
	int ret;

	down_write(&bmap->b_sem);
	ret = nilfs_bmap_dirty(bmap);
	nilfs_bmap_clear_dirty(bmap);
	up_write(&bmap->b_sem);
	return ret;
}
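
/*
 * Internal use only: nilfs_bmap_data_get_key() computes the file block
 * number (key) of the data block that buffer head @bh belongs to.
 */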
__u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap,
			      const struct buffer_head *bh)
{
	struct buffer_head *pbh;
	__u64 key;

	key = page_index(bh->b_page) << (PAGE_SHIFT -
					 bmap->b_inode->i_blkbits);
	for (pbh = page_buffers(bh->b_page); pbh != bh; pbh = pbh->b_this_page)
		key++;

	return key;
}
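
/*
 * Block allocation hint helpers: nilfs_bmap_find_target_seq() suggests a
 * block address that keeps nearby keys roughly contiguous, while
 * nilfs_bmap_find_target_in_group() picks a default location within the
 * DAT allocation group derived from the owner inode number.
 */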
__u64 nilfs_bmap_find_target_seq(const struct nilfs_bmap *bmap, __u64 key)
{
	__s64 diff;

	diff = key - bmap->b_last_allocated_key;
	if ((nilfs_bmap_keydiff_abs(diff) < NILFS_INODE_BMAP_SIZE) &&
	    (bmap->b_last_allocated_ptr != NILFS_BMAP_INVALID_PTR) &&
	    (bmap->b_last_allocated_ptr + diff > 0))
		return bmap->b_last_allocated_ptr + diff;
	else
		return NILFS_BMAP_INVALID_PTR;
}

#define NILFS_BMAP_GROUP_DIV	8
__u64 nilfs_bmap_find_target_in_group(const struct nilfs_bmap *bmap)
{
	struct inode *dat = nilfs_bmap_get_dat(bmap);
	unsigned long entries_per_group = nilfs_palloc_entries_per_group(dat);
	unsigned long group = bmap->b_inode->i_ino / entries_per_group;

	return group * entries_per_group +
		(bmap->b_inode->i_ino % NILFS_BMAP_GROUP_DIV) *
		(entries_per_group / NILFS_BMAP_GROUP_DIV);
}

static struct lock_class_key nilfs_bmap_dat_lock_key;
static struct lock_class_key nilfs_bmap_mdt_lock_key;
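
/**
 * nilfs_bmap_read - initialize a bmap from an on-disk inode
 * @bmap: bmap
 * @raw_inode: on-disk inode to read the bmap data from, or NULL to start
 *             with an empty bmap
 *
 * Sets up the pointer type and lock class according to the owner inode,
 * then initializes the bmap in either B-tree or direct form depending on
 * the %NILFS_BMAP_LARGE flag.
 *
 * Return: 0 on success, or a negative error code on failure.
 */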
int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode)
{
	if (raw_inode == NULL)
		memset(bmap->b_u.u_data, 0, NILFS_BMAP_SIZE);
	else
		memcpy(bmap->b_u.u_data, raw_inode->i_bmap, NILFS_BMAP_SIZE);

	init_rwsem(&bmap->b_sem);
	bmap->b_state = 0;
	bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode;
	switch (bmap->b_inode->i_ino) {
	case NILFS_DAT_INO:
		bmap->b_ptr_type = NILFS_BMAP_PTR_P;
		bmap->b_last_allocated_key = 0;
		bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT;
		lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key);
		break;
	case NILFS_CPFILE_INO:
	case NILFS_SUFILE_INO:
		bmap->b_ptr_type = NILFS_BMAP_PTR_VS;
		bmap->b_last_allocated_key = 0;
		bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR;
		lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key);
		break;
	case NILFS_IFILE_INO:
		lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key);
		fallthrough;
	default:
		bmap->b_ptr_type = NILFS_BMAP_PTR_VM;
		bmap->b_last_allocated_key = 0;
		bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR;
		break;
	}

	return (bmap->b_u.u_flags & NILFS_BMAP_LARGE) ?
		nilfs_btree_init(bmap) : nilfs_direct_init(bmap);
}
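
/**
 * nilfs_bmap_write - write back a bmap to an on-disk inode
 * @bmap: bmap
 * @raw_inode: on-disk inode to store the bmap data in
 */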
void nilfs_bmap_write(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode)
{
	down_write(&bmap->b_sem);
	memcpy(raw_inode->i_bmap, bmap->b_u.u_data,
	       NILFS_INODE_BMAP_SIZE * sizeof(__le64));
	if (bmap->b_inode->i_ino == NILFS_DAT_INO)
		bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT;

	up_write(&bmap->b_sem);
}

void nilfs_bmap_init_gc(struct nilfs_bmap *bmap)
{
	memset(&bmap->b_u, 0, NILFS_BMAP_SIZE);
	init_rwsem(&bmap->b_sem);
	bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode;
	bmap->b_ptr_type = NILFS_BMAP_PTR_U;
	bmap->b_last_allocated_key = 0;
	bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR;
	bmap->b_state = 0;
	nilfs_btree_init_gc(bmap);
}

void nilfs_bmap_save(const struct nilfs_bmap *bmap,
		     struct nilfs_bmap_store *store)
{
	memcpy(store->data, bmap->b_u.u_data, sizeof(store->data));
	store->last_allocated_key = bmap->b_last_allocated_key;
	store->last_allocated_ptr = bmap->b_last_allocated_ptr;
	store->state = bmap->b_state;
}

void nilfs_bmap_restore(struct nilfs_bmap *bmap,
			const struct nilfs_bmap_store *store)
{
	memcpy(bmap->b_u.u_data, store->data, sizeof(store->data));
	bmap->b_last_allocated_key = store->last_allocated_key;
	bmap->b_last_allocated_ptr = store->last_allocated_ptr;
	bmap->b_state = store->state;
}