/*
 * alloc.c - NILFS dat/inode allocator
 */
#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "mdt.h"
#include "alloc.h"

/**
 * nilfs_palloc_groups_per_desc_block - get the number of groups that a group
 *                                      descriptor block can maintain
 * @inode: inode of metadata file using this allocator
 */
static inline unsigned long
nilfs_palloc_groups_per_desc_block(const struct inode *inode)
{
	return (1UL << inode->i_blkbits) /
		sizeof(struct nilfs_palloc_group_desc);
}

/**
 * nilfs_palloc_groups_count - get maximum number of groups
 * @inode: inode of metadata file using this allocator
 */
static inline unsigned long
nilfs_palloc_groups_count(const struct inode *inode)
{
	return 1UL << (BITS_PER_LONG - (inode->i_blkbits + 3 /* log2(8) */));
}

/**
 * nilfs_palloc_init_blockgroup - initialize private variables for allocator
 * @inode: inode of metadata file using this allocator
 * @entry_size: size of the persistent object
 */
int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned entry_size)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);

	mi->mi_bgl = kmalloc(sizeof(*mi->mi_bgl), GFP_NOFS);
	if (!mi->mi_bgl)
		return -ENOMEM;

	bgl_lock_init(mi->mi_bgl);

	nilfs_mdt_set_entry_size(inode, entry_size, 0);

	mi->mi_blocks_per_group =
		DIV_ROUND_UP(nilfs_palloc_entries_per_group(inode),
			     mi->mi_entries_per_block) + 1;
		/* Number of blocks in a group including entry blocks and
		   a bitmap block */
	mi->mi_blocks_per_desc_block =
		nilfs_palloc_groups_per_desc_block(inode) *
		mi->mi_blocks_per_group + 1;
		/* Number of blocks per descriptor including the
		   descriptor block */
	return 0;
}

/**
 * nilfs_palloc_group - get group number and offset from an entry number
 * @inode: inode of metadata file using this allocator
 * @nr: serial number of the entry (e.g. inode number)
 * @offset: pointer to store offset number in the group
 */
static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr,
					unsigned long *offset)
{
	__u64 group = nr;

	*offset = do_div(group, nilfs_palloc_entries_per_group(inode));
	return group;
}

/**
 * nilfs_palloc_desc_blkoff - get block offset of a group descriptor block
 * @inode: inode of metadata file using this allocator
 * @group: group number
 *
 * nilfs_palloc_desc_blkoff() returns the block offset of the descriptor
 * block which contains the descriptor of the specified group.
 */
static unsigned long
nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group)
{
	unsigned long desc_block =
		group / nilfs_palloc_groups_per_desc_block(inode);
	return desc_block * NILFS_MDT(inode)->mi_blocks_per_desc_block;
}

/**
 * nilfs_palloc_bitmap_blkoff - get block offset of a bitmap block
 * @inode: inode of metadata file using this allocator
 * @group: group number
 *
 * nilfs_palloc_bitmap_blkoff() returns the block offset of the bitmap
 * block used to allocate and deallocate entries in the specified group.
 */
static unsigned long
nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group)
{
	unsigned long desc_offset =
		group % nilfs_palloc_groups_per_desc_block(inode);
	return nilfs_palloc_desc_blkoff(inode, group) + 1 +
		desc_offset * NILFS_MDT(inode)->mi_blocks_per_group;
}

/**
 * nilfs_palloc_group_desc_nfrees - get the number of free entries in a group
 * @inode: inode of metadata file using this allocator
 * @group: group number
 * @desc: pointer to descriptor structure for the group
 */
static unsigned long
nilfs_palloc_group_desc_nfrees(struct inode *inode, unsigned long group,
			       const struct nilfs_palloc_group_desc *desc)
{
	unsigned long nfree;

	spin_lock(nilfs_mdt_bgl_lock(inode, group));
	nfree = le32_to_cpu(desc->pg_nfrees);
	spin_unlock(nilfs_mdt_bgl_lock(inode, group));
	return nfree;
}

/**
 * nilfs_palloc_group_desc_add_entries - adjust the count of free entries
 * @inode: inode of metadata file using this allocator
 * @group: group number
 * @desc: pointer to descriptor structure for the group
 * @n: delta to be added to the free-entry count
 */
static void
nilfs_palloc_group_desc_add_entries(struct inode *inode,
				    unsigned long group,
				    struct nilfs_palloc_group_desc *desc,
				    u32 n)
{
	spin_lock(nilfs_mdt_bgl_lock(inode, group));
	le32_add_cpu(&desc->pg_nfrees, n);
	spin_unlock(nilfs_mdt_bgl_lock(inode, group));
}

/**
 * nilfs_palloc_entry_blkoff - get block offset of an entry block
 * @inode: inode of metadata file using this allocator
 * @nr: serial number of the entry (e.g. inode number)
 */
static unsigned long
nilfs_palloc_entry_blkoff(const struct inode *inode, __u64 nr)
{
	unsigned long group, group_offset;

	group = nilfs_palloc_group(inode, nr, &group_offset);

	return nilfs_palloc_bitmap_blkoff(inode, group) + 1 +
		group_offset / NILFS_MDT(inode)->mi_entries_per_block;
}

/**
 * nilfs_palloc_desc_block_init - initialize buffer of a group descriptor block
 * @inode: inode of metadata file using this allocator
 * @bh: buffer head of the descriptor block
 * @kaddr: kernel address mapped for the page including the buffer
 */
static void nilfs_palloc_desc_block_init(struct inode *inode,
					 struct buffer_head *bh, void *kaddr)
{
	struct nilfs_palloc_group_desc *desc = kaddr + bh_offset(bh);
	unsigned long n = nilfs_palloc_groups_per_desc_block(inode);
	__le32 nfrees;

	nfrees = cpu_to_le32(nilfs_palloc_entries_per_group(inode));
	while (n-- > 0) {
		desc->pg_nfrees = nfrees;
		desc++;
	}
}

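/*
 * nilfs_palloc_get_block() looks up a metadata block, consulting the
 * single-entry lookup cache (@prev) first.  On a cache hit it returns the
 * cached buffer head with an extra reference; on a miss it reads the block
 * through nilfs_mdt_get_block() and refreshes the cache under @lock.
 */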
static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff,
				  int create,
				  void (*init_block)(struct inode *,
						     struct buffer_head *,
						     void *),
				  struct buffer_head **bhp,
				  struct nilfs_bh_assoc *prev,
				  spinlock_t *lock)
{
	int ret;

	spin_lock(lock);
	if (prev->bh && blkoff == prev->blkoff) {
		get_bh(prev->bh);
		*bhp = prev->bh;
		spin_unlock(lock);
		return 0;
	}
	spin_unlock(lock);

	ret = nilfs_mdt_get_block(inode, blkoff, create, init_block, bhp);
	if (!ret) {
		spin_lock(lock);
		/*
		 * Another task may have changed the cache while the lock
		 * was dropped; release the previously cached buffer and
		 * cache the buffer just obtained, taking an extra
		 * reference for the cache.
		 */
		brelse(prev->bh);
		get_bh(*bhp);
		prev->bh = *bhp;
		prev->blkoff = blkoff;
		spin_unlock(lock);
	}
	return ret;
}

/**
 * nilfs_palloc_get_desc_block - get buffer head of a group descriptor block
 * @inode: inode of metadata file using this allocator
 * @group: group number
 * @create: create flag
 * @bhp: pointer to store the resultant buffer head
 */
static int nilfs_palloc_get_desc_block(struct inode *inode,
				       unsigned long group,
				       int create, struct buffer_head **bhp)
{
	struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;

	return nilfs_palloc_get_block(inode,
				      nilfs_palloc_desc_blkoff(inode, group),
				      create, nilfs_palloc_desc_block_init,
				      bhp, &cache->prev_desc, &cache->lock);
}

/**
 * nilfs_palloc_get_bitmap_block - get buffer head of a bitmap block
 * @inode: inode of metadata file using this allocator
 * @group: group number
 * @create: create flag
 * @bhp: pointer to store the resultant buffer head
 */
static int nilfs_palloc_get_bitmap_block(struct inode *inode,
					 unsigned long group,
					 int create, struct buffer_head **bhp)
{
	struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;

	return nilfs_palloc_get_block(inode,
				      nilfs_palloc_bitmap_blkoff(inode, group),
				      create, NULL, bhp,
				      &cache->prev_bitmap, &cache->lock);
}

/**
 * nilfs_palloc_get_entry_block - get buffer head of the block containing
 *                                an entry
 * @inode: inode of metadata file using this allocator
 * @nr: serial number of the entry (e.g. inode number)
 * @create: create flag
 * @bhp: pointer to store the resultant buffer head
 */
int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr,
				 int create, struct buffer_head **bhp)
{
	struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;

	return nilfs_palloc_get_block(inode,
				      nilfs_palloc_entry_blkoff(inode, nr),
				      create, NULL, bhp,
				      &cache->prev_entry, &cache->lock);
}

/**
 * nilfs_palloc_block_get_group_desc - get kernel address of a group descriptor
 * @inode: inode of metadata file using this allocator
 * @group: group number
 * @bh: buffer head of the descriptor block
 * @kaddr: kernel address mapped for the page including the buffer
 */
static struct nilfs_palloc_group_desc *
nilfs_palloc_block_get_group_desc(const struct inode *inode,
				  unsigned long group,
				  const struct buffer_head *bh, void *kaddr)
{
	return (struct nilfs_palloc_group_desc *)(kaddr + bh_offset(bh)) +
		group % nilfs_palloc_groups_per_desc_block(inode);
}

/**
 * nilfs_palloc_block_get_entry - get kernel address of an entry
 * @inode: inode of metadata file using this allocator
 * @nr: serial number of the entry (e.g. inode number)
 * @bh: buffer head of the entry block
 * @kaddr: kernel address mapped for the page including the buffer
 */
void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
				   const struct buffer_head *bh, void *kaddr)
{
	unsigned long entry_offset, group_offset;

	nilfs_palloc_group(inode, nr, &group_offset);
	entry_offset = group_offset % NILFS_MDT(inode)->mi_entries_per_block;

	return kaddr + bh_offset(bh) +
		entry_offset * NILFS_MDT(inode)->mi_entry_size;
}

/**
 * nilfs_palloc_find_available_slot - find available slot in a bitmap block
 * @inode: inode of metadata file using this allocator
 * @group: group number
 * @target: offset number in the group at which the search starts
 * @bitmap: bitmap of the group
 * @bsize: size in bits of the bitmap
 *
 * Searches the bitmap for a zero bit, starting at @target and wrapping
 * around the whole bitmap, and atomically sets the bit that was found.
 *
 * Return: offset of the bit set on success, or -ENOSPC if the group is full.
 */
static int nilfs_palloc_find_available_slot(struct inode *inode,
					    unsigned long group,
					    unsigned long target,
					    unsigned char *bitmap,
					    int bsize)
{
	int curr, pos, end, i;

	if (target > 0) {
		/* try the word containing the target bit first */
		end = (target + BITS_PER_LONG - 1) & ~(BITS_PER_LONG - 1);
		if (end > bsize)
			end = bsize;
		pos = nilfs_find_next_zero_bit(bitmap, end, target);
		if (pos < end &&
		    !nilfs_set_bit_atomic(
			    nilfs_mdt_bgl_lock(inode, group), pos, bitmap))
			return pos;
	} else {
		end = 0;
	}

	for (i = 0, curr = end;
	     i < bsize;
	     i += BITS_PER_LONG, curr += BITS_PER_LONG) {
		/* wrap around to the head of the bitmap */
		if (curr >= bsize)
			curr = 0;
		while (*((unsigned long *)bitmap + curr / BITS_PER_LONG)
		       != ~0UL) {
			end = curr + BITS_PER_LONG;
			if (end > bsize)
				end = bsize;
			pos = nilfs_find_next_zero_bit(bitmap, end, curr);
			if ((pos < end) &&
			    !nilfs_set_bit_atomic(
				    nilfs_mdt_bgl_lock(inode, group), pos,
				    bitmap))
				return pos;
		}
	}
	return -ENOSPC;
}

/**
 * nilfs_palloc_rest_groups_in_desc_block - get the remaining number of groups
 *                                          in a group descriptor block
 * @inode: inode of metadata file using this allocator
 * @curr: current group number
 * @max: maximum number of groups
 */
static unsigned long
nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode,
				       unsigned long curr, unsigned long max)
{
	return min_t(unsigned long,
		     nilfs_palloc_groups_per_desc_block(inode) -
		     curr % nilfs_palloc_groups_per_desc_block(inode),
		     max - curr + 1);
}

/**
 * nilfs_palloc_prepare_alloc_entry - prepare to allocate a persistent object
 * @inode: inode of metadata file using this allocator
 * @req: nilfs_palloc_req structure exchanged for the allocation
 */
int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
				     struct nilfs_palloc_req *req)
{
	struct buffer_head *desc_bh, *bitmap_bh;
	struct nilfs_palloc_group_desc *desc;
	unsigned char *bitmap;
	void *desc_kaddr, *bitmap_kaddr;
	unsigned long group, maxgroup, ngroups;
	unsigned long group_offset, maxgroup_offset;
	unsigned long n, entries_per_group, groups_per_desc_block;
	unsigned long i, j;
	int pos, ret;

	ngroups = nilfs_palloc_groups_count(inode);
	maxgroup = ngroups - 1;
	group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
	entries_per_group = nilfs_palloc_entries_per_group(inode);
	groups_per_desc_block = nilfs_palloc_groups_per_desc_block(inode);

	for (i = 0; i < ngroups; i += n) {
		if (group >= ngroups) {
			/* wrap around */
			group = 0;
			maxgroup = nilfs_palloc_group(inode, req->pr_entry_nr,
						      &maxgroup_offset) - 1;
		}
		ret = nilfs_palloc_get_desc_block(inode, group, 1, &desc_bh);
		if (ret < 0)
			return ret;
		desc_kaddr = kmap(desc_bh->b_page);
		desc = nilfs_palloc_block_get_group_desc(
			inode, group, desc_bh, desc_kaddr);
		n = nilfs_palloc_rest_groups_in_desc_block(inode, group,
							   maxgroup);
		for (j = 0; j < n; j++, desc++, group++) {
			if (nilfs_palloc_group_desc_nfrees(inode, group, desc)
			    > 0) {
				ret = nilfs_palloc_get_bitmap_block(
					inode, group, 1, &bitmap_bh);
				if (ret < 0)
					goto out_desc;
				bitmap_kaddr = kmap(bitmap_bh->b_page);
				bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
				pos = nilfs_palloc_find_available_slot(
					inode, group, group_offset, bitmap,
					entries_per_group);
				if (pos >= 0) {
					/* found a free entry */
					nilfs_palloc_group_desc_add_entries(
						inode, group, desc, -1);
					req->pr_entry_nr =
						entries_per_group * group + pos;
					kunmap(desc_bh->b_page);
					kunmap(bitmap_bh->b_page);

					req->pr_desc_bh = desc_bh;
					req->pr_bitmap_bh = bitmap_bh;
					return 0;
				}
				kunmap(bitmap_bh->b_page);
				brelse(bitmap_bh);
			}

			group_offset = 0;
		}

		kunmap(desc_bh->b_page);
		brelse(desc_bh);
	}

	/* no entries left */
	return -ENOSPC;

 out_desc:
	kunmap(desc_bh->b_page);
	brelse(desc_bh);
	return ret;
}

/**
 * nilfs_palloc_commit_alloc_entry - finish allocation of a persistent object
 * @inode: inode of metadata file using this allocator
 * @req: nilfs_palloc_req structure exchanged for the allocation
 */
void nilfs_palloc_commit_alloc_entry(struct inode *inode,
				     struct nilfs_palloc_req *req)
{
	mark_buffer_dirty(req->pr_bitmap_bh);
	mark_buffer_dirty(req->pr_desc_bh);
	nilfs_mdt_mark_dirty(inode);

	brelse(req->pr_bitmap_bh);
	brelse(req->pr_desc_bh);
}

/**
 * nilfs_palloc_commit_free_entry - finish deallocating a persistent object
 * @inode: inode of metadata file using this allocator
 * @req: nilfs_palloc_req structure exchanged for the removal
 */
void nilfs_palloc_commit_free_entry(struct inode *inode,
				    struct nilfs_palloc_req *req)
{
	struct nilfs_palloc_group_desc *desc;
	unsigned long group, group_offset;
	unsigned char *bitmap;
	void *desc_kaddr, *bitmap_kaddr;

	group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
	desc_kaddr = kmap(req->pr_desc_bh->b_page);
	desc = nilfs_palloc_block_get_group_desc(inode, group,
						 req->pr_desc_bh, desc_kaddr);
	bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
	bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);

	if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group),
				    group_offset, bitmap))
		printk(KERN_WARNING "%s: entry number %llu already freed\n",
		       __func__, (unsigned long long)req->pr_entry_nr);
	else
		nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);

	kunmap(req->pr_bitmap_bh->b_page);
	kunmap(req->pr_desc_bh->b_page);

	mark_buffer_dirty(req->pr_desc_bh);
	mark_buffer_dirty(req->pr_bitmap_bh);
	nilfs_mdt_mark_dirty(inode);

	brelse(req->pr_bitmap_bh);
	brelse(req->pr_desc_bh);
}

/**
 * nilfs_palloc_abort_alloc_entry - cancel allocation of a persistent object
 * @inode: inode of metadata file using this allocator
 * @req: nilfs_palloc_req structure exchanged for the allocation
 */
void nilfs_palloc_abort_alloc_entry(struct inode *inode,
				    struct nilfs_palloc_req *req)
{
	struct nilfs_palloc_group_desc *desc;
	void *desc_kaddr, *bitmap_kaddr;
	unsigned char *bitmap;
	unsigned long group, group_offset;

	group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
	desc_kaddr = kmap(req->pr_desc_bh->b_page);
	desc = nilfs_palloc_block_get_group_desc(inode, group,
						 req->pr_desc_bh, desc_kaddr);
	bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
	bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
	if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group),
				    group_offset, bitmap))
		printk(KERN_WARNING "%s: entry number %llu already freed\n",
		       __func__, (unsigned long long)req->pr_entry_nr);
	else
		nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);

	kunmap(req->pr_bitmap_bh->b_page);
	kunmap(req->pr_desc_bh->b_page);

	brelse(req->pr_bitmap_bh);
	brelse(req->pr_desc_bh);

	req->pr_entry_nr = 0;
	req->pr_bitmap_bh = NULL;
	req->pr_desc_bh = NULL;
}

/**
 * nilfs_palloc_prepare_free_entry - prepare to deallocate a persistent object
 * @inode: inode of metadata file using this allocator
 * @req: nilfs_palloc_req structure exchanged for the removal
 */
int nilfs_palloc_prepare_free_entry(struct inode *inode,
				    struct nilfs_palloc_req *req)
{
	struct buffer_head *desc_bh, *bitmap_bh;
	unsigned long group, group_offset;
	int ret;

	group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
	ret = nilfs_palloc_get_desc_block(inode, group, 1, &desc_bh);
	if (ret < 0)
		return ret;
	ret = nilfs_palloc_get_bitmap_block(inode, group, 1, &bitmap_bh);
	if (ret < 0) {
		brelse(desc_bh);
		return ret;
	}

	req->pr_desc_bh = desc_bh;
	req->pr_bitmap_bh = bitmap_bh;
	return 0;
}

/**
 * nilfs_palloc_abort_free_entry - cancel deallocating a persistent object
 * @inode: inode of metadata file using this allocator
 * @req: nilfs_palloc_req structure exchanged for the removal
 */
void nilfs_palloc_abort_free_entry(struct inode *inode,
				   struct nilfs_palloc_req *req)
{
	brelse(req->pr_bitmap_bh);
	brelse(req->pr_desc_bh);

	req->pr_entry_nr = 0;
	req->pr_bitmap_bh = NULL;
	req->pr_desc_bh = NULL;
}

/**
 * nilfs_palloc_group_is_in - test if an entry is in the specified group
 * @inode: inode of metadata file using this allocator
 * @group: group number
 * @nr: serial number of the entry (e.g. inode number)
 */
static int
nilfs_palloc_group_is_in(struct inode *inode, unsigned long group, __u64 nr)
{
	__u64 first, last;

	first = group * nilfs_palloc_entries_per_group(inode);
	last = first + nilfs_palloc_entries_per_group(inode) - 1;
	return (nr >= first) && (nr <= last);
}

/**
 * nilfs_palloc_freev - deallocate a set of persistent objects
 * @inode: inode of metadata file using this allocator
 * @entry_nrs: array of entry numbers to be deallocated
 * @nitems: number of entries stored in the array
 */
int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
{
	struct buffer_head *desc_bh, *bitmap_bh;
	struct nilfs_palloc_group_desc *desc;
	unsigned char *bitmap;
	void *desc_kaddr, *bitmap_kaddr;
	unsigned long group, group_offset;
	int i, j, n, ret;

	for (i = 0; i < nitems; i = j) {
		group = nilfs_palloc_group(inode, entry_nrs[i], &group_offset);
		ret = nilfs_palloc_get_desc_block(inode, group, 0, &desc_bh);
		if (ret < 0)
			return ret;
		ret = nilfs_palloc_get_bitmap_block(inode, group, 0,
						    &bitmap_bh);
		if (ret < 0) {
			brelse(desc_bh);
			return ret;
		}
		desc_kaddr = kmap(desc_bh->b_page);
		desc = nilfs_palloc_block_get_group_desc(
			inode, group, desc_bh, desc_kaddr);
		bitmap_kaddr = kmap(bitmap_bh->b_page);
		bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
		for (j = i, n = 0;
		     (j < nitems) && nilfs_palloc_group_is_in(inode, group,
							      entry_nrs[j]);
		     j++) {
			nilfs_palloc_group(inode, entry_nrs[j], &group_offset);
			if (!nilfs_clear_bit_atomic(
				    nilfs_mdt_bgl_lock(inode, group),
				    group_offset, bitmap)) {
				printk(KERN_WARNING
				       "%s: entry number %llu already freed\n",
				       __func__,
				       (unsigned long long)entry_nrs[j]);
			} else {
				n++;
			}
		}
		nilfs_palloc_group_desc_add_entries(inode, group, desc, n);

		kunmap(bitmap_bh->b_page);
		kunmap(desc_bh->b_page);

		mark_buffer_dirty(desc_bh);
		mark_buffer_dirty(bitmap_bh);
		nilfs_mdt_mark_dirty(inode);

		brelse(bitmap_bh);
		brelse(desc_bh);
	}
	return 0;
}

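/*
 * nilfs_palloc_setup_cache() attaches the given single-block lookup cache
 * to the metadata file and initializes its lock.
 */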
void nilfs_palloc_setup_cache(struct inode *inode,
			      struct nilfs_palloc_cache *cache)
{
	NILFS_MDT(inode)->mi_palloc_cache = cache;
	spin_lock_init(&cache->lock);
}

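/*
 * nilfs_palloc_clear_cache() releases the buffer heads held by the lookup
 * cache so that the underlying buffers can be reclaimed.
 */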
void nilfs_palloc_clear_cache(struct inode *inode)
{
	struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;

	spin_lock(&cache->lock);
	brelse(cache->prev_desc.bh);
	brelse(cache->prev_bitmap.bh);
	brelse(cache->prev_entry.bh);
	cache->prev_desc.bh = NULL;
	cache->prev_bitmap.bh = NULL;
	cache->prev_entry.bh = NULL;
	spin_unlock(&cache->lock);
}

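/*
 * nilfs_palloc_destroy_cache() drops any cached buffers and detaches the
 * lookup cache from the metadata file.
 */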
void nilfs_palloc_destroy_cache(struct inode *inode)
{
	nilfs_palloc_clear_cache(inode);
	NILFS_MDT(inode)->mi_palloc_cache = NULL;
}