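/*
 * alloc.c - NILFS2 persistent object allocator
 *
 * Entries (for example DAT entries and inodes) are managed in block
 * groups; each group has a bitmap block followed by entry blocks, and
 * groups are summarized by group descriptor blocks that record the
 * per-group free entry counts.
 */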
#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "mdt.h"
#include "alloc.h"
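/**
 * nilfs_palloc_groups_per_desc_block - number of groups whose descriptors
 *      fit in one group descriptor block
 * @inode: inode of the metadata file using this allocator
 */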
static inline unsigned long
nilfs_palloc_groups_per_desc_block(const struct inode *inode)
{
        return (1UL << inode->i_blkbits) /
                sizeof(struct nilfs_palloc_group_desc);
}
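/**
 * nilfs_palloc_groups_count - maximum number of groups this allocator
 *      can manage
 * @inode: inode of the metadata file using this allocator
 */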
static inline unsigned long
nilfs_palloc_groups_count(const struct inode *inode)
{
        return 1UL << (BITS_PER_LONG - (inode->i_blkbits + 3 /* log2(8) */));
}
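/**
 * nilfs_palloc_init_blockgroup - initialize private variables for allocator
 * @inode: inode of the metadata file using this allocator
 * @entry_size: size in bytes of each persistent object entry
 */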
int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned entry_size)
{
        struct nilfs_mdt_info *mi = NILFS_MDT(inode);

        mi->mi_bgl = kmalloc(sizeof(*mi->mi_bgl), GFP_NOFS);
        if (!mi->mi_bgl)
                return -ENOMEM;

        bgl_lock_init(mi->mi_bgl);

        nilfs_mdt_set_entry_size(inode, entry_size, 0);

        mi->mi_blocks_per_group =
                DIV_ROUND_UP(nilfs_palloc_entries_per_group(inode),
                             mi->mi_entries_per_block) + 1;
                /*
                 * Number of blocks in a group including entry blocks
                 * and a bitmap block
                 */
        mi->mi_blocks_per_desc_block =
                nilfs_palloc_groups_per_desc_block(inode) *
                mi->mi_blocks_per_group + 1;
                /*
                 * Number of blocks per descriptor area including the
                 * descriptor block itself
                 */
        return 0;
}
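/**
 * nilfs_palloc_group - get group number and in-group offset of an entry
 * @inode: inode of the metadata file using this allocator
 * @nr: serial number of the entry (e.g. inode number)
 * @offset: place to store the offset of @nr within its group
 */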
static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr,
                                        unsigned long *offset)
{
        __u64 group = nr;

        *offset = do_div(group, nilfs_palloc_entries_per_group(inode));
        return group;
}
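/**
 * nilfs_palloc_desc_blkoff - block offset of the descriptor block that
 *      covers @group
 * @inode: inode of the metadata file using this allocator
 * @group: group number
 */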
static unsigned long
nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group)
{
        unsigned long desc_block =
                group / nilfs_palloc_groups_per_desc_block(inode);
        return desc_block * NILFS_MDT(inode)->mi_blocks_per_desc_block;
}
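/**
 * nilfs_palloc_bitmap_blkoff - block offset of the bitmap block of @group
 * @inode: inode of the metadata file using this allocator
 * @group: group number
 */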
static unsigned long
nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group)
{
        unsigned long desc_offset =
                group % nilfs_palloc_groups_per_desc_block(inode);
        return nilfs_palloc_desc_blkoff(inode, group) + 1 +
                desc_offset * NILFS_MDT(inode)->mi_blocks_per_group;
}
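/**
 * nilfs_palloc_group_desc_nfrees - get the number of free entries recorded
 *      in a group descriptor
 * @inode: inode of the metadata file using this allocator
 * @group: group number
 * @desc: pointer to the group descriptor
 */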
static unsigned long
nilfs_palloc_group_desc_nfrees(struct inode *inode, unsigned long group,
                               const struct nilfs_palloc_group_desc *desc)
{
        unsigned long nfree;

        spin_lock(nilfs_mdt_bgl_lock(inode, group));
        nfree = le32_to_cpu(desc->pg_nfrees);
        spin_unlock(nilfs_mdt_bgl_lock(inode, group));
        return nfree;
}
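/**
 * nilfs_palloc_group_desc_add_entries - adjust the free entry count of a
 *      group descriptor
 * @inode: inode of the metadata file using this allocator
 * @group: group number
 * @desc: pointer to the group descriptor
 * @n: delta to add to the pg_nfrees counter
 */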
static void
nilfs_palloc_group_desc_add_entries(struct inode *inode,
                                    unsigned long group,
                                    struct nilfs_palloc_group_desc *desc,
                                    u32 n)
{
        spin_lock(nilfs_mdt_bgl_lock(inode, group));
        le32_add_cpu(&desc->pg_nfrees, n);
        spin_unlock(nilfs_mdt_bgl_lock(inode, group));
}
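/**
 * nilfs_palloc_entry_blkoff - block offset of the block containing entry @nr
 * @inode: inode of the metadata file using this allocator
 * @nr: serial number of the entry
 */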
static unsigned long
nilfs_palloc_entry_blkoff(const struct inode *inode, __u64 nr)
{
        unsigned long group, group_offset;

        group = nilfs_palloc_group(inode, nr, &group_offset);

        return nilfs_palloc_bitmap_blkoff(inode, group) + 1 +
                group_offset / NILFS_MDT(inode)->mi_entries_per_block;
}
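/**
 * nilfs_palloc_desc_block_init - initialize a newly allocated group
 *      descriptor block
 * @inode: inode of the metadata file using this allocator
 * @bh: buffer head of the descriptor block
 * @kaddr: kernel address mapped for the page including the block
 */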
static void nilfs_palloc_desc_block_init(struct inode *inode,
                                         struct buffer_head *bh, void *kaddr)
{
        struct nilfs_palloc_group_desc *desc = kaddr + bh_offset(bh);
        unsigned long n = nilfs_palloc_groups_per_desc_block(inode);
        __le32 nfrees;

        nfrees = cpu_to_le32(nilfs_palloc_entries_per_group(inode));
        while (n-- > 0) {
                desc->pg_nfrees = nfrees;
                desc++;
        }
}
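/*
 * nilfs_palloc_get_block - get a metadata block, reusing the most recently
 * returned buffer head when the requested block offset matches the one
 * remembered in the per-file single-entry cache.
 */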
static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff,
                                  int create,
                                  void (*init_block)(struct inode *,
                                                     struct buffer_head *,
                                                     void *),
                                  struct buffer_head **bhp,
                                  struct nilfs_bh_assoc *prev,
                                  spinlock_t *lock)
{
        int ret;

        spin_lock(lock);
        if (prev->bh && blkoff == prev->blkoff) {
                get_bh(prev->bh);
                *bhp = prev->bh;
                spin_unlock(lock);
                return 0;
        }
        spin_unlock(lock);

        ret = nilfs_mdt_get_block(inode, blkoff, create, init_block, bhp);
        if (!ret) {
                spin_lock(lock);
                /*
                 * The following code must be safe for change of the
                 * cache contents during the get block call.
                 */
                brelse(prev->bh);
                get_bh(*bhp);
                prev->bh = *bhp;
                prev->blkoff = blkoff;
                spin_unlock(lock);
        }
        return ret;
}
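/**
 * nilfs_palloc_get_desc_block - get the buffer head of the descriptor block
 *      covering @group
 * @inode: inode of the metadata file using this allocator
 * @group: group number
 * @create: create the block if it does not exist
 * @bhp: place to store the buffer head
 */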
static int nilfs_palloc_get_desc_block(struct inode *inode,
                                       unsigned long group,
                                       int create, struct buffer_head **bhp)
{
        struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;

        return nilfs_palloc_get_block(inode,
                                      nilfs_palloc_desc_blkoff(inode, group),
                                      create, nilfs_palloc_desc_block_init,
                                      bhp, &cache->prev_desc, &cache->lock);
}
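/**
 * nilfs_palloc_get_bitmap_block - get the buffer head of the bitmap block
 *      of @group
 * @inode: inode of the metadata file using this allocator
 * @group: group number
 * @create: create the block if it does not exist
 * @bhp: place to store the buffer head
 */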
static int nilfs_palloc_get_bitmap_block(struct inode *inode,
                                         unsigned long group,
                                         int create, struct buffer_head **bhp)
{
        struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;

        return nilfs_palloc_get_block(inode,
                                      nilfs_palloc_bitmap_blkoff(inode, group),
                                      create, NULL, bhp,
                                      &cache->prev_bitmap, &cache->lock);
}
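/**
 * nilfs_palloc_get_entry_block - get the buffer head of the block containing
 *      entry @nr
 * @inode: inode of the metadata file using this allocator
 * @nr: serial number of the entry
 * @create: create the block if it does not exist
 * @bhp: place to store the buffer head
 */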
int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr,
                                 int create, struct buffer_head **bhp)
{
        struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;

        return nilfs_palloc_get_block(inode,
                                      nilfs_palloc_entry_blkoff(inode, nr),
                                      create, NULL, bhp,
                                      &cache->prev_entry, &cache->lock);
}
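/**
 * nilfs_palloc_block_get_group_desc - get kernel address of the descriptor
 *      of @group within a mapped descriptor block
 * @inode: inode of the metadata file using this allocator
 * @group: group number
 * @bh: buffer head of the descriptor block
 * @kaddr: kernel address mapped for the page including the block
 */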
static struct nilfs_palloc_group_desc *
nilfs_palloc_block_get_group_desc(const struct inode *inode,
                                  unsigned long group,
                                  const struct buffer_head *bh, void *kaddr)
{
        return (struct nilfs_palloc_group_desc *)(kaddr + bh_offset(bh)) +
                group % nilfs_palloc_groups_per_desc_block(inode);
}
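/**
 * nilfs_palloc_block_get_entry - get kernel address of entry @nr within a
 *      mapped entry block
 * @inode: inode of the metadata file using this allocator
 * @nr: serial number of the entry
 * @bh: buffer head of the entry block
 * @kaddr: kernel address mapped for the page including the block
 */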
void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
                                   const struct buffer_head *bh, void *kaddr)
{
        unsigned long entry_offset, group_offset;

        nilfs_palloc_group(inode, nr, &group_offset);
        entry_offset = group_offset % NILFS_MDT(inode)->mi_entries_per_block;

        return kaddr + bh_offset(bh) +
                entry_offset * NILFS_MDT(inode)->mi_entry_size;
}
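/**
 * nilfs_palloc_find_available_slot - find and atomically claim a free bit
 *      in a group bitmap
 * @inode: inode of the metadata file using this allocator
 * @group: group number
 * @target: preferred bit offset to start searching from
 * @bitmap: bitmap of the group
 * @bsize: size of the bitmap in bits
 *
 * Returns the claimed bit position on success, or -ENOSPC if the group
 * has no free slot left.
 */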
static int nilfs_palloc_find_available_slot(struct inode *inode,
                                            unsigned long group,
                                            unsigned long target,
                                            unsigned char *bitmap,
                                            int bsize)
{
        int curr, pos, end, i;

        if (target > 0) {
                end = (target + BITS_PER_LONG - 1) & ~(BITS_PER_LONG - 1);
                if (end > bsize)
                        end = bsize;
                pos = nilfs_find_next_zero_bit(bitmap, end, target);
                if (pos < end &&
                    !nilfs_set_bit_atomic(
                            nilfs_mdt_bgl_lock(inode, group), pos, bitmap))
                        return pos;
        } else
                end = 0;

        for (i = 0, curr = end;
             i < bsize;
             i += BITS_PER_LONG, curr += BITS_PER_LONG) {
                /* wrap around */
                if (curr >= bsize)
                        curr = 0;
                while (*((unsigned long *)bitmap + curr / BITS_PER_LONG)
                       != ~0UL) {
                        end = curr + BITS_PER_LONG;
                        if (end > bsize)
                                end = bsize;
                        pos = nilfs_find_next_zero_bit(bitmap, end, curr);
                        if ((pos < end) &&
                            !nilfs_set_bit_atomic(
                                    nilfs_mdt_bgl_lock(inode, group), pos,
                                    bitmap))
                                return pos;
                }
        }
        return -ENOSPC;
}
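/**
 * nilfs_palloc_rest_groups_in_desc_block - number of remaining groups in
 *      the descriptor block covering @curr, capped at @max
 * @inode: inode of the metadata file using this allocator
 * @curr: current group number
 * @max: maximum group number to consider
 */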
static unsigned long
nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode,
                                       unsigned long curr, unsigned long max)
{
        return min_t(unsigned long,
                     nilfs_palloc_groups_per_desc_block(inode) -
                     curr % nilfs_palloc_groups_per_desc_block(inode),
                     max - curr + 1);
}
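/**
 * nilfs_palloc_count_desc_blocks - count the descriptor blocks currently
 *      covered by the metadata file, based on its last mapped block
 * @inode: inode of the metadata file using this allocator
 * @desc_blocks: place to store the result
 */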
static int nilfs_palloc_count_desc_blocks(struct inode *inode,
                                          unsigned long *desc_blocks)
{
        unsigned long blknum;
        int ret;

        ret = nilfs_bmap_last_key(NILFS_I(inode)->i_bmap, &blknum);
        if (likely(!ret))
                *desc_blocks = DIV_ROUND_UP(
                        blknum, NILFS_MDT(inode)->mi_blocks_per_desc_block);
        return ret;
}
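/**
 * nilfs_palloc_mdt_file_can_grow - check whether another descriptor block
 *      can still be added to the metadata file
 * @inode: inode of the metadata file using this allocator
 * @desc_blocks: number of descriptor blocks currently in use
 */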
static inline bool nilfs_palloc_mdt_file_can_grow(struct inode *inode,
                                                  unsigned long desc_blocks)
{
        return (nilfs_palloc_groups_per_desc_block(inode) * desc_blocks) <
                nilfs_palloc_groups_count(inode);
}
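/**
 * nilfs_palloc_count_max_entries - count the maximum number of entries the
 *      metadata file can currently hold
 * @inode: inode of the metadata file using this allocator
 * @nused: number of entries currently in use
 * @nmaxp: place to store the maximum number of entries
 *
 * Returns 0 on success, or -ERANGE if @nused exceeds the computed maximum.
 */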
int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp)
{
        unsigned long desc_blocks = 0;
        u64 entries_per_desc_block, nmax;
        int err;

        err = nilfs_palloc_count_desc_blocks(inode, &desc_blocks);
        if (unlikely(err))
                return err;

        entries_per_desc_block = (u64)nilfs_palloc_entries_per_group(inode) *
                nilfs_palloc_groups_per_desc_block(inode);
        nmax = entries_per_desc_block * desc_blocks;

        if (nused == nmax &&
            nilfs_palloc_mdt_file_can_grow(inode, desc_blocks))
                nmax += entries_per_desc_block;

        if (nused > nmax)
                return -ERANGE;

        *nmaxp = nmax;
        return 0;
}
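/**
 * nilfs_palloc_prepare_alloc_entry - prepare allocation of a persistent
 *      object
 * @inode: inode of the metadata file using this allocator
 * @req: allocation request; pr_entry_nr holds the preferred entry number
 *      on entry and the allocated entry number on success
 *
 * Scans the group descriptors for a group with free entries, atomically
 * claims a bit in that group's bitmap, and pins the descriptor and bitmap
 * buffers in @req. Returns 0 on success or a negative error code
 * (-ENOSPC when no free entry is available).
 */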
int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
                                     struct nilfs_palloc_req *req)
{
        struct buffer_head *desc_bh, *bitmap_bh;
        struct nilfs_palloc_group_desc *desc;
        unsigned char *bitmap;
        void *desc_kaddr, *bitmap_kaddr;
        unsigned long group, maxgroup, ngroups;
        unsigned long group_offset, maxgroup_offset;
        unsigned long n, entries_per_group, groups_per_desc_block;
        unsigned long i, j;
        int pos, ret;

        ngroups = nilfs_palloc_groups_count(inode);
        maxgroup = ngroups - 1;
        group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
        entries_per_group = nilfs_palloc_entries_per_group(inode);
        groups_per_desc_block = nilfs_palloc_groups_per_desc_block(inode);

        for (i = 0; i < ngroups; i += n) {
                if (group >= ngroups) {
                        /* wrap around */
                        group = 0;
                        maxgroup = nilfs_palloc_group(inode, req->pr_entry_nr,
                                                      &maxgroup_offset) - 1;
                }
                ret = nilfs_palloc_get_desc_block(inode, group, 1, &desc_bh);
                if (ret < 0)
                        return ret;
                desc_kaddr = kmap(desc_bh->b_page);
                desc = nilfs_palloc_block_get_group_desc(
                        inode, group, desc_bh, desc_kaddr);
                n = nilfs_palloc_rest_groups_in_desc_block(inode, group,
                                                           maxgroup);
                for (j = 0; j < n; j++, desc++, group++) {
                        if (nilfs_palloc_group_desc_nfrees(inode, group, desc)
                            > 0) {
                                ret = nilfs_palloc_get_bitmap_block(
                                        inode, group, 1, &bitmap_bh);
                                if (ret < 0)
                                        goto out_desc;
                                bitmap_kaddr = kmap(bitmap_bh->b_page);
                                bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
                                pos = nilfs_palloc_find_available_slot(
                                        inode, group, group_offset, bitmap,
                                        entries_per_group);
                                if (pos >= 0) {
                                        /* found a free entry */
                                        nilfs_palloc_group_desc_add_entries(
                                                inode, group, desc, -1);
                                        req->pr_entry_nr =
                                                entries_per_group * group + pos;
                                        kunmap(desc_bh->b_page);
                                        kunmap(bitmap_bh->b_page);

                                        req->pr_desc_bh = desc_bh;
                                        req->pr_bitmap_bh = bitmap_bh;
                                        return 0;
                                }
                                kunmap(bitmap_bh->b_page);
                                brelse(bitmap_bh);
                        }

                        group_offset = 0;
                }

                kunmap(desc_bh->b_page);
                brelse(desc_bh);
        }

        /* no entries left */
        return -ENOSPC;

 out_desc:
        kunmap(desc_bh->b_page);
        brelse(desc_bh);
        return ret;
}
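/**
 * nilfs_palloc_commit_alloc_entry - finish allocation of a persistent object
 * @inode: inode of the metadata file using this allocator
 * @req: request prepared by nilfs_palloc_prepare_alloc_entry()
 */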
void nilfs_palloc_commit_alloc_entry(struct inode *inode,
                                     struct nilfs_palloc_req *req)
{
        mark_buffer_dirty(req->pr_bitmap_bh);
        mark_buffer_dirty(req->pr_desc_bh);
        nilfs_mdt_mark_dirty(inode);

        brelse(req->pr_bitmap_bh);
        brelse(req->pr_desc_bh);
}
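/**
 * nilfs_palloc_commit_free_entry - finish deallocating a persistent object
 * @inode: inode of the metadata file using this allocator
 * @req: request prepared by nilfs_palloc_prepare_free_entry()
 */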
void nilfs_palloc_commit_free_entry(struct inode *inode,
                                    struct nilfs_palloc_req *req)
{
        struct nilfs_palloc_group_desc *desc;
        unsigned long group, group_offset;
        unsigned char *bitmap;
        void *desc_kaddr, *bitmap_kaddr;

        group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
        desc_kaddr = kmap(req->pr_desc_bh->b_page);
        desc = nilfs_palloc_block_get_group_desc(inode, group,
                                                 req->pr_desc_bh, desc_kaddr);
        bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
        bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);

        if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group),
                                    group_offset, bitmap))
                printk(KERN_WARNING "%s: entry number %llu already freed\n",
                       __func__, (unsigned long long)req->pr_entry_nr);
        else
                nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);

        kunmap(req->pr_bitmap_bh->b_page);
        kunmap(req->pr_desc_bh->b_page);

        mark_buffer_dirty(req->pr_desc_bh);
        mark_buffer_dirty(req->pr_bitmap_bh);
        nilfs_mdt_mark_dirty(inode);

        brelse(req->pr_bitmap_bh);
        brelse(req->pr_desc_bh);
}
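/**
 * nilfs_palloc_abort_alloc_entry - cancel a prepared allocation, clearing
 *      the claimed bitmap bit and releasing the pinned buffers
 * @inode: inode of the metadata file using this allocator
 * @req: request prepared by nilfs_palloc_prepare_alloc_entry()
 */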
void nilfs_palloc_abort_alloc_entry(struct inode *inode,
                                    struct nilfs_palloc_req *req)
{
        struct nilfs_palloc_group_desc *desc;
        void *desc_kaddr, *bitmap_kaddr;
        unsigned char *bitmap;
        unsigned long group, group_offset;

        group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
        desc_kaddr = kmap(req->pr_desc_bh->b_page);
        desc = nilfs_palloc_block_get_group_desc(inode, group,
                                                 req->pr_desc_bh, desc_kaddr);
        bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
        bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
        if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group),
                                    group_offset, bitmap))
                printk(KERN_WARNING "%s: entry number %llu already freed\n",
                       __func__, (unsigned long long)req->pr_entry_nr);
        else
                nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);

        kunmap(req->pr_bitmap_bh->b_page);
        kunmap(req->pr_desc_bh->b_page);

        brelse(req->pr_bitmap_bh);
        brelse(req->pr_desc_bh);

        req->pr_entry_nr = 0;
        req->pr_bitmap_bh = NULL;
        req->pr_desc_bh = NULL;
}
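/**
 * nilfs_palloc_prepare_free_entry - prepare to deallocate a persistent
 *      object, pinning its descriptor and bitmap buffers in @req
 * @inode: inode of the metadata file using this allocator
 * @req: deallocation request; pr_entry_nr gives the entry to be freed
 */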
int nilfs_palloc_prepare_free_entry(struct inode *inode,
                                    struct nilfs_palloc_req *req)
{
        struct buffer_head *desc_bh, *bitmap_bh;
        unsigned long group, group_offset;
        int ret;

        group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
        ret = nilfs_palloc_get_desc_block(inode, group, 1, &desc_bh);
        if (ret < 0)
                return ret;
        ret = nilfs_palloc_get_bitmap_block(inode, group, 1, &bitmap_bh);
        if (ret < 0) {
                brelse(desc_bh);
                return ret;
        }

        req->pr_desc_bh = desc_bh;
        req->pr_bitmap_bh = bitmap_bh;
        return 0;
}
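/**
 * nilfs_palloc_abort_free_entry - cancel a prepared deallocation, releasing
 *      the pinned buffers without touching the bitmap
 * @inode: inode of the metadata file using this allocator
 * @req: request prepared by nilfs_palloc_prepare_free_entry()
 */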
void nilfs_palloc_abort_free_entry(struct inode *inode,
                                   struct nilfs_palloc_req *req)
{
        brelse(req->pr_bitmap_bh);
        brelse(req->pr_desc_bh);

        req->pr_entry_nr = 0;
        req->pr_bitmap_bh = NULL;
        req->pr_desc_bh = NULL;
}
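/**
 * nilfs_palloc_group_is_in - test whether entry @nr belongs to @group
 * @inode: inode of the metadata file using this allocator
 * @group: group number
 * @nr: serial number of the entry
 */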
static int
nilfs_palloc_group_is_in(struct inode *inode, unsigned long group, __u64 nr)
{
        __u64 first, last;

        first = group * nilfs_palloc_entries_per_group(inode);
        last = first + nilfs_palloc_entries_per_group(inode) - 1;
        return (nr >= first) && (nr <= last);
}
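/**
 * nilfs_palloc_freev - deallocate a set of persistent objects
 * @inode: inode of the metadata file using this allocator
 * @entry_nrs: array of entry numbers to be deallocated
 * @nitems: number of entries in the array
 *
 * Consecutive entry numbers belonging to the same group are processed
 * together so that each descriptor and bitmap block is updated only once
 * per group.
 */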
int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
{
        struct buffer_head *desc_bh, *bitmap_bh;
        struct nilfs_palloc_group_desc *desc;
        unsigned char *bitmap;
        void *desc_kaddr, *bitmap_kaddr;
        unsigned long group, group_offset;
        int i, j, n, ret;

        for (i = 0; i < nitems; i = j) {
                group = nilfs_palloc_group(inode, entry_nrs[i], &group_offset);
                ret = nilfs_palloc_get_desc_block(inode, group, 0, &desc_bh);
                if (ret < 0)
                        return ret;
                ret = nilfs_palloc_get_bitmap_block(inode, group, 0,
                                                    &bitmap_bh);
                if (ret < 0) {
                        brelse(desc_bh);
                        return ret;
                }
                desc_kaddr = kmap(desc_bh->b_page);
                desc = nilfs_palloc_block_get_group_desc(
                        inode, group, desc_bh, desc_kaddr);
                bitmap_kaddr = kmap(bitmap_bh->b_page);
                bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
                for (j = i, n = 0;
                     (j < nitems) && nilfs_palloc_group_is_in(inode, group,
                                                              entry_nrs[j]);
                     j++) {
                        nilfs_palloc_group(inode, entry_nrs[j], &group_offset);
                        if (!nilfs_clear_bit_atomic(
                                    nilfs_mdt_bgl_lock(inode, group),
                                    group_offset, bitmap)) {
                                printk(KERN_WARNING
                                       "%s: entry number %llu already freed\n",
                                       __func__,
                                       (unsigned long long)entry_nrs[j]);
                        } else {
                                n++;
                        }
                }
                nilfs_palloc_group_desc_add_entries(inode, group, desc, n);

                kunmap(bitmap_bh->b_page);
                kunmap(desc_bh->b_page);

                mark_buffer_dirty(desc_bh);
                mark_buffer_dirty(bitmap_bh);
                nilfs_mdt_mark_dirty(inode);

                brelse(bitmap_bh);
                brelse(desc_bh);
        }
        return 0;
}
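/*
 * nilfs_palloc_setup_cache, nilfs_palloc_clear_cache and
 * nilfs_palloc_destroy_cache manage the single-entry buffer head cache
 * (last descriptor, bitmap and entry block) attached to the metadata file.
 */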
void nilfs_palloc_setup_cache(struct inode *inode,
                              struct nilfs_palloc_cache *cache)
{
        NILFS_MDT(inode)->mi_palloc_cache = cache;
        spin_lock_init(&cache->lock);
}

void nilfs_palloc_clear_cache(struct inode *inode)
{
        struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;

        spin_lock(&cache->lock);
        brelse(cache->prev_desc.bh);
        brelse(cache->prev_bitmap.bh);
        brelse(cache->prev_entry.bh);
        cache->prev_desc.bh = NULL;
        cache->prev_bitmap.bh = NULL;
        cache->prev_entry.bh = NULL;
        spin_unlock(&cache->lock);
}

void nilfs_palloc_destroy_cache(struct inode *inode)
{
        nilfs_palloc_clear_cache(inode);
        NILFS_MDT(inode)->mi_palloc_cache = NULL;
}