/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "log.h"
#include "super.h"
#include "trans.h"
#include "dir.h"
#include "util.h"
#include "trace_gfs2.h"

/* This doesn't use iov as much as it could, to avoid memcpy()'s */

struct metapath {
	struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
	__u16 mp_list[GFS2_MAX_META_HEIGHT];
};

struct strip_mine {
	int sm_first;
	unsigned int sm_height;
};

/**
 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
 * @ip: the inode
 * @dibh: the dinode buffer
 * @block: the block number that was allocated
 * @page: The (optional) page. This is looked up if @page is NULL
 *
 * Returns: errno
 */
static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
			       u64 block, struct page *page)
{
	struct inode *inode = &ip->i_inode;
	struct buffer_head *bh;
	int release = 0;

	if (!page || page->index) {
		page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
		if (!page)
			return -ENOMEM;
		release = 1;
	}

	if (!PageUptodate(page)) {
		void *kaddr = kmap(page);
		u64 dsize = i_size_read(inode);

		if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
			dsize = dibh->b_size - sizeof(struct gfs2_dinode);

		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
		memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
		kunmap(page);

		SetPageUptodate(page);
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits,
				     (1 << BH_Uptodate));

	bh = page_buffers(page);

	if (!buffer_mapped(bh))
		map_bh(bh, inode->i_sb, block);

	set_buffer_uptodate(bh);
	if (!gfs2_is_jdata(ip))
		mark_buffer_dirty(bh);
	if (!gfs2_is_writeback(ip))
		gfs2_trans_add_data(ip->i_gl, bh);

	if (release) {
		unlock_page(page);
		page_cache_release(page);
	}

	return 0;
}

/**
 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
 * @ip: The GFS2 inode to unstuff
 * @page: The (optional) page. This is looked up if @page is NULL
 *
 * This routine unstuffs a dinode and returns it to a "normal" state such
 * that the height can be grown in the traditional way.
 *
 * Returns: errno
 */
int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *bh, *dibh;
	struct gfs2_dinode *di;
	u64 block = 0;
	int isdir = gfs2_is_dir(ip);
	int error;

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (i_size_read(&ip->i_inode)) {
		/* Get a free block, fill it with the stuffed data,
		   and write it out to disk */

		unsigned int n = 1;
		error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
		if (error)
			goto out_brelse;
		if (isdir) {
			gfs2_trans_add_unrevoke(GFS2_SB(&ip->i_inode), block, 1);
			error = gfs2_dir_get_new_buffer(ip, block, &bh);
			if (error)
				goto out_brelse;
			gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
					      dibh, sizeof(struct gfs2_dinode));
			brelse(bh);
		} else {
			error = gfs2_unstuffer_page(ip, dibh, block, page);
			if (error)
				goto out_brelse;
		}
	}

	/*  Set up the pointer to the new block  */
	gfs2_trans_add_meta(ip->i_gl, dibh);
	di = (struct gfs2_dinode *)dibh->b_data;
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));

	if (i_size_read(&ip->i_inode)) {
		*(__be64 *)(di + 1) = cpu_to_be64(block);
		gfs2_add_inode_blocks(&ip->i_inode, 1);
		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	}

	ip->i_height = 1;
	di->di_height = cpu_to_be16(1);

out_brelse:
	brelse(dibh);
out:
	up_write(&ip->i_rw_mutex);
	return error;
}

/**
 * find_metapath - Find path through the metadata tree
 * @sdp: The superblock
 * @block: The disk block to look up
 * @mp: The metapath to return the result in
 * @height: The pre-calculated height of the metadata tree
 *
 * This routine fills in the mp_list array of a struct metapath, which
 * defines the path through the inode's metadata tree to the given
 * logical block. mp_list[0] is the index of the pointer to follow in
 * the dinode, mp_list[1] the index within the first level of indirect
 * blocks, and so on down to mp_list[height - 1], which indexes the
 * block of data pointers. The indices are obtained by repeated
 * division of the block number by the number of pointers which fit
 * in an indirect block (sd_inptrs).
 */
static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
			  struct metapath *mp, unsigned int height)
{
	unsigned int i;

	for (i = height; i--;)
		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
}

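/*
 * metapath_branch_start - find the height at which a new branch begins
 *
 * Used by gfs2_bmap_alloc() when growing the tree height: the old tree
 * is relocated below pointer 0 of the new top-level block, so if the
 * target block is itself reached through index 0 (mp_list[0] == 0) the
 * new branch shares that path and building starts at height 2; in all
 * other cases the branch diverges directly below the dinode, at height 1.
 */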
static inline unsigned int metapath_branch_start(const struct metapath *mp)
{
	if (mp->mp_list[0] == 0)
		return 2;
	return 1;
}

/**
 * metapointer - Return pointer to start of metadata in a buffer
 * @height: The metadata height (0 = dinode)
 * @mp: The metapath
 *
 * Return a pointer to the block number of the next height of the metadata
 * tree given a buffer containing the pointer to the current height of the
 * metadata tree.
 */
static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
{
	struct buffer_head *bh = mp->mp_bh[height];
	unsigned int head_size = (height > 0) ?
		sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode);
	return ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height];
}

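/*
 * gfs2_metapath_ra - issue readahead for the pointers in an indirect block
 *
 * Kicks off asynchronous reads (READA | REQ_META) for every non-zero
 * block pointer from @pos to the end of @bh, so that a subsequent
 * sequential walk of the tree finds the buffers already in the cache.
 */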
static void gfs2_metapath_ra(struct gfs2_glock *gl,
			     const struct buffer_head *bh, const __be64 *pos)
{
	struct buffer_head *rabh;
	const __be64 *endp = (const __be64 *)(bh->b_data + bh->b_size);
	const __be64 *t;

	for (t = pos; t < endp; t++) {
		if (!*t)
			continue;

		rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
		if (trylock_buffer(rabh)) {
			if (!buffer_uptodate(rabh)) {
				rabh->b_end_io = end_buffer_read_sync;
				submit_bh(READA | REQ_META, rabh);
				continue;
			}
			unlock_buffer(rabh);
		}
		brelse(rabh);
	}
}

/**
 * lookup_metapath - Walk the metadata tree to a specific point
 * @ip: The inode
 * @mp: The metapath
 *
 * Assumes that the inode's buffer has already been looked up and
 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
 * to point to the correct location in the metadata tree (see
 * find_metapath()).
 *
 * If this function encounters part of the tree which has not been
 * allocated, it returns the current height of the tree at the point
 * at which it found the unallocated block. Blocks which are found are
 * added to the mp->mp_bh[] list.
 *
 * Returns: error or height of metadata tree
 */
static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
{
	unsigned int end_of_metadata = ip->i_height - 1;
	unsigned int x;
	__be64 *ptr;
	u64 dblock;
	int ret;

	for (x = 0; x < end_of_metadata; x++) {
		ptr = metapointer(x, mp);
		dblock = be64_to_cpu(*ptr);
		if (!dblock)
			return x + 1;

		ret = gfs2_meta_indirect_buffer(ip, x+1, dblock, &mp->mp_bh[x+1]);
		if (ret)
			return ret;
	}

	return ip->i_height;
}

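/* Release all buffers pinned by lookup_metapath(). The mp_bh array is
   filled from index 0 upwards, so stop at the first NULL entry. */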
static inline void release_metapath(struct metapath *mp)
{
	int i;

	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
		if (mp->mp_bh[i] == NULL)
			break;
		brelse(mp->mp_bh[i]);
	}
}

/**
 * gfs2_extent_length - Returns length of an extent of blocks
 * @start: Start of the buffer
 * @len: Length of the buffer in bytes
 * @ptr: Current position in the buffer
 * @limit: Max extent length to return (0 = unlimited)
 * @eob: Set to 1 if we hit "end of block"
 *
 * If the first block is zero (unallocated) it will return the number of
 * unallocated blocks in the extent, otherwise it will return the number
 * of contiguous blocks in the extent.
 *
 * Returns: The length of the extent (minimum of one block)
 */
static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __be64 *ptr, size_t limit, int *eob)
{
	const __be64 *end = (start + len);
	const __be64 *first = ptr;
	u64 d = be64_to_cpu(*ptr);

	*eob = 0;
	do {
		ptr++;
		if (ptr >= end)
			break;
		if (limit && --limit == 0)
			break;
		if (d)
			d++;
	} while(be64_to_cpu(*ptr) == d);
	if (ptr >= end)
		*eob = 1;
	return (ptr - first);
}

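/* ip->i_rw_mutex serialises changes to the metadata tree: writers
   (i.e. block allocation, create != 0) take it exclusively, plain
   lookups take it shared. */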
static inline void bmap_lock(struct gfs2_inode *ip, int create)
{
	if (create)
		down_write(&ip->i_rw_mutex);
	else
		down_read(&ip->i_rw_mutex);
}

static inline void bmap_unlock(struct gfs2_inode *ip, int create)
{
	if (create)
		up_write(&ip->i_rw_mutex);
	else
		up_read(&ip->i_rw_mutex);
}

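/*
 * gfs2_indirect_init - create a new indirect block at height @i of the tree
 *
 * Initialises the new buffer with a GFS2 metadata header, clears its
 * pointer area, and links it into the parent block (mp->mp_bh[i - 1])
 * at pointer slot @offset.
 *
 * Returns: a pointer to the slot in the parent which was filled in
 */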
static inline __be64 *gfs2_indirect_init(struct metapath *mp,
					 struct gfs2_glock *gl, unsigned int i,
					 unsigned offset, u64 bn)
{
	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
				 ((i > 1) ? sizeof(struct gfs2_meta_header) :
					    sizeof(struct gfs2_dinode)));
	BUG_ON(i < 1);
	BUG_ON(mp->mp_bh[i] != NULL);
	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
	gfs2_trans_add_meta(gl, mp->mp_bh[i]);
	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
	ptr += offset;
	*ptr = cpu_to_be64(bn);
	return ptr;
}

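/* States for the block allocation state machine in gfs2_bmap_alloc() */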
enum alloc_state {
	ALLOC_DATA = 0,
	ALLOC_GROW_DEPTH = 1,
	ALLOC_GROW_HEIGHT = 2,
	/* ALLOC_UNSTUFF = 3,   TBD and rather complicated */
};

/**
 * gfs2_bmap_alloc - Build a metadata tree of the requested height
 * @inode: The GFS2 inode
 * @lblock: The logical starting block of the extent
 * @bh_map: This is used to return the mapping details
 * @mp: The metapath
 * @sheight: The starting height (i.e. whats already mapped)
 * @height: The height to build to
 * @maxlen: The max number of data blocks to alloc
 *
 * In this routine we may have to alloc:
 *   i) Indirect blocks to grow the metadata tree height
 *  ii) Indirect blocks to fill in lower part of the metadata tree
 * iii) Data blocks
 *
 * The function is in two parts. The first part works out the total
 * number of blocks which we need. The second part does the actual
 * allocation asking for an extent at a time (if enough contiguous free
 * blocks are available, there will only be one request per bmap call)
 * and uses the state machine to initialise the blocks in order.
 *
 * Returns: errno on error
 */
static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
			   struct buffer_head *bh_map, struct metapath *mp,
			   const unsigned int sheight,
			   const unsigned int height,
			   const size_t maxlen)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct super_block *sb = sdp->sd_vfs;
	struct buffer_head *dibh = mp->mp_bh[0];
	u64 bn, dblock = 0;
	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
	unsigned dblks = 0;
	unsigned ptrs_per_blk;
	const unsigned end_of_metadata = height - 1;
	int ret;
	int eob = 0;
	enum alloc_state state;
	__be64 *ptr;
	__be64 zero_bn = 0;

	BUG_ON(sheight < 1);
	BUG_ON(dibh == NULL);

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (height == sheight) {
		struct buffer_head *bh;
		/* Bottom indirect block exists, find unalloced extent size */
		ptr = metapointer(end_of_metadata, mp);
		bh = mp->mp_bh[end_of_metadata];
		dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen,
					   &eob);
		BUG_ON(dblks < 1);
		state = ALLOC_DATA;
	} else {
		/* Need to allocate indirect blocks */
		ptrs_per_blk = height > 1 ? sdp->sd_inptrs : sdp->sd_diptrs;
		dblks = min(maxlen, (size_t)(ptrs_per_blk -
					     mp->mp_list[end_of_metadata]));
		if (height == ip->i_height) {
			/* Writing into existing tree, extend tree down */
			iblks = height - sheight;
			state = ALLOC_GROW_DEPTH;
		} else {
			/* Building up tree height */
			state = ALLOC_GROW_HEIGHT;
			iblks = height - ip->i_height;
			branch_start = metapath_branch_start(mp);
			iblks += (height - branch_start);
		}
	}

	/* start of the second part of the function (state machine) */

	blks = dblks + iblks;
	i = sheight;
	do {
		int error;
		n = blks - alloced;
		error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
		if (error)
			return error;
		alloced += n;
		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
			gfs2_trans_add_unrevoke(sdp, bn, n);
		switch (state) {
		/* Growing height of tree */
		case ALLOC_GROW_HEIGHT:
			if (i == 1) {
				ptr = (__be64 *)(dibh->b_data +
						 sizeof(struct gfs2_dinode));
				zero_bn = *ptr;
			}
			for (; i - 1 < height - ip->i_height && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
			if (i - 1 == height - ip->i_height) {
				i--;
				gfs2_buffer_copy_tail(mp->mp_bh[i],
						sizeof(struct gfs2_meta_header),
						dibh, sizeof(struct gfs2_dinode));
				gfs2_buffer_clear_tail(dibh,
						sizeof(struct gfs2_dinode) +
						sizeof(__be64));
				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
					sizeof(struct gfs2_meta_header));
				*ptr = zero_bn;
				state = ALLOC_GROW_DEPTH;
				for(i = branch_start; i < height; i++) {
					if (mp->mp_bh[i] == NULL)
						break;
					brelse(mp->mp_bh[i]);
					mp->mp_bh[i] = NULL;
				}
				i = branch_start;
			}
			if (n == 0)
				break;
		/* Branching from existing tree */
		case ALLOC_GROW_DEPTH:
			if (i > 1 && i < height)
				gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
			for (; i < height && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i,
						   mp->mp_list[i-1], bn++);
			if (i == height)
				state = ALLOC_DATA;
			if (n == 0)
				break;
		/* Tree complete, adding data blocks */
		case ALLOC_DATA:
			BUG_ON(n > dblks);
			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
			dblks = n;
			ptr = metapointer(end_of_metadata, mp);
			dblock = bn;
			while (n-- > 0)
				*ptr++ = cpu_to_be64(bn++);
			if (buffer_zeronew(bh_map)) {
				ret = sb_issue_zeroout(sb, dblock, dblks,
						       GFP_NOFS);
				if (ret) {
					fs_err(sdp,
					       "Failed to zero data buffers\n");
					clear_buffer_zeronew(bh_map);
				}
			}
			break;
		}
	} while ((state != ALLOC_DATA) || !dblock);

	ip->i_height = height;
	gfs2_add_inode_blocks(&ip->i_inode, alloced);
	gfs2_dinode_out(ip, mp->mp_bh[0]->b_data);
	map_bh(bh_map, inode->i_sb, dblock);
	bh_map->b_size = dblks << inode->i_blkbits;
	set_buffer_new(bh_map);
	return 0;
}

/**
 * gfs2_block_map - Map a block from an inode to a disk block
 * @inode: The inode
 * @lblock: The logical block number
 * @bh_map: The bh to be mapped
 * @create: True if its ok to alloc blocks to satisfy the request
 *
 * Sets buffer_mapped() if successful, sets buffer_boundary() if a
 * read of metadata will be required before the next block can be
 * mapped. Sets buffer_new() if new blocks were allocated.
 *
 * Returns: errno
 */
int gfs2_block_map(struct inode *inode, sector_t lblock,
		   struct buffer_head *bh_map, int create)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned int bsize = sdp->sd_sb.sb_bsize;
	const size_t maxlen = bh_map->b_size >> inode->i_blkbits;
	const u64 *arr = sdp->sd_heightsize;
	__be64 *ptr;
	u64 size;
	struct metapath mp;
	int ret;
	int eob;
	unsigned int len;
	struct buffer_head *bh;
	u8 height;

	BUG_ON(maxlen == 0);

	memset(mp.mp_bh, 0, sizeof(mp.mp_bh));
	bmap_lock(ip, create);
	clear_buffer_mapped(bh_map);
	clear_buffer_new(bh_map);
	clear_buffer_boundary(bh_map);
	trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
	if (gfs2_is_dir(ip)) {
		bsize = sdp->sd_jbsize;
		arr = sdp->sd_jheightsize;
	}

	ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]);
	if (ret)
		goto out;

	height = ip->i_height;
	size = (lblock + 1) * bsize;
	while (size > arr[height])
		height++;
	find_metapath(sdp, lblock, &mp, height);
	ret = 1;
	if (height > ip->i_height || gfs2_is_stuffed(ip))
		goto do_alloc;
	ret = lookup_metapath(ip, &mp);
	if (ret < 0)
		goto out;
	if (ret != ip->i_height)
		goto do_alloc;
	ptr = metapointer(ip->i_height - 1, &mp);
	if (*ptr == 0)
		goto do_alloc;
	map_bh(bh_map, inode->i_sb, be64_to_cpu(*ptr));
	bh = mp.mp_bh[ip->i_height - 1];
	len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, &eob);
	bh_map->b_size = (len << inode->i_blkbits);
	if (eob)
		set_buffer_boundary(bh_map);
	ret = 0;
out:
	release_metapath(&mp);
	trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
	bmap_unlock(ip, create);
	return ret;

do_alloc:
	/* All allocations are done here, firstly check create flag */
	if (!create) {
		BUG_ON(gfs2_is_stuffed(ip));
		ret = 0;
		goto out;
	}

	/* At this point ret is the tree depth of already allocated blocks */
	ret = gfs2_bmap_alloc(inode, lblock, bh_map, &mp, ret, height, maxlen);
	goto out;
}

/*
 * Deprecated: do not use in new code
 */
int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
{
	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
	int ret;
	int create = *new;

	BUG_ON(!extlen);
	BUG_ON(!dblock);
	BUG_ON(!new);

	bh.b_size = 1 << (inode->i_blkbits + (create ? 0 : 5));
	ret = gfs2_block_map(inode, lblock, &bh, create);
	*extlen = bh.b_size >> inode->i_blkbits;
	*dblock = bh.b_blocknr;
	if (buffer_new(&bh))
		*new = 1;
	else
		*new = 0;
	return ret;
}

/**
 * do_strip - Look for and strip off a particular layer of the file
 * @ip: the inode
 * @dibh: the dinode buffer
 * @bh: A buffer of pointers
 * @top: The first pointer in the buffer
 * @bottom: One more than the last pointer
 * @height: the height this buffer is at
 * @sm: a pointer to a struct strip_mine
 *
 * Returns: errno
 */
static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
		    struct buffer_head *bh, __be64 *top, __be64 *bottom,
		    unsigned int height, struct strip_mine *sm)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrp_list rlist;
	u64 bn, bstart;
	u32 blen, btotal;
	__be64 *p;
	unsigned int rg_blocks = 0;
	int metadata;
	unsigned int revokes = 0;
	int x;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	if (!*top)
		sm->sm_first = 0;

	if (height != sm->sm_height)
		return 0;

	if (sm->sm_first) {
		top++;
		sm->sm_first = 0;
	}

	metadata = (height != ip->i_height - 1);
	if (metadata)
		revokes = (height) ? sdp->sd_inptrs : sdp->sd_diptrs;
	else if (ip->i_depth)
		revokes = sdp->sd_inptrs;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
	bstart = 0;
	blen = 0;

	for (p = top; p < bottom; p++) {
		if (!*p)
			continue;

		bn = be64_to_cpu(*p);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_rlist_add(ip, &rlist, bstart);

			bstart = bn;
			blen = 1;
		}
	}

	if (bstart)
		gfs2_rlist_add(ip, &rlist, bstart);
	else
		goto out;

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);

	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd;
		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
		rg_blocks += rgd->rd_length;
	}

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist;
	if (gfs2_rs_active(ip->i_res)) /* needs to be done with the rgrp glock held */
		gfs2_rs_deltree(ip->i_res);

	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE +
				 RES_INDIRECT + RES_STATFS + RES_QUOTA,
				 revokes);
	if (error)
		goto out_rg_gunlock;

	down_write(&ip->i_rw_mutex);

	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_trans_add_meta(ip->i_gl, bh);

	bstart = 0;
	blen = 0;
	btotal = 0;

	for (p = top; p < bottom; p++) {
		if (!*p)
			continue;

		bn = be64_to_cpu(*p);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart) {
				__gfs2_free_blocks(ip, bstart, blen, metadata);
				btotal += blen;
			}

			bstart = bn;
			blen = 1;
		}

		*p = 0;
		gfs2_add_inode_blocks(&ip->i_inode, -1);
	}
	if (bstart) {
		__gfs2_free_blocks(ip, bstart, blen, metadata);
		btotal += blen;
	}

	gfs2_statfs_change(sdp, 0, +btotal, 0);
	gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
			  ip->i_inode.i_gid);

	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;

	gfs2_dinode_out(ip, dibh->b_data);

	up_write(&ip->i_rw_mutex);

	gfs2_trans_end(sdp);

out_rg_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist:
	gfs2_rlist_free(&rlist);
out:
	return error;
}

/**
 * recursive_scan - recursively scan through the end of a file
 * @ip: the inode
 * @dibh: the dinode buffer
 * @mp: the path through the metadata to the point to start
 * @height: the height the recursion is at
 * @block: the indirect block to look at
 * @first: 1 if this is the first block
 * @sm: a pointer to a struct strip_mine
 *
 * When this is first called @height and @block should be zero and
 * @first should be 1.
 *
 * Returns: errno
 */
static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
			  struct metapath *mp, unsigned int height,
			  u64 block, int first, struct strip_mine *sm)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh = NULL;
	__be64 *top, *bottom;
	u64 bn;
	int error;
	int mh_size = sizeof(struct gfs2_meta_header);

	if (!height) {
		error = gfs2_meta_inode_buffer(ip, &bh);
		if (error)
			return error;
		dibh = bh;

		top = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0];
		bottom = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs;
	} else {
		error = gfs2_meta_indirect_buffer(ip, height, block, &bh);
		if (error)
			return error;

		top = (__be64 *)(bh->b_data + mh_size) +
				  (first ? mp->mp_list[height] : 0);

		bottom = (__be64 *)(bh->b_data + mh_size) + sdp->sd_inptrs;
	}

	error = do_strip(ip, dibh, bh, top, bottom, height, sm);
	if (error)
		goto out;

	if (height < ip->i_height - 1) {

		gfs2_metapath_ra(ip->i_gl, bh, top);

		for (; top < bottom; top++, first = 0) {
			if (!*top)
				continue;

			bn = be64_to_cpu(*top);

			error = recursive_scan(ip, dibh, mp, height + 1, bn,
					       first, sm);
			if (error)
				break;
		}
	}
out:
	brelse(bh);
	return error;
}

/**
 * gfs2_block_truncate_page - Deal with zeroing out data
 *
 * This is partly borrowed from ext3.
 */
static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned long index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize, iblock, length, pos;
	struct buffer_head *bh;
	struct page *page;
	int err;

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return 0;

	blocksize = inode->i_sb->s_blocksize;
	length = blocksize - (offset & (blocksize - 1));
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
		err = 0;
	}

	if (!gfs2_is_writeback(ip))
		gfs2_trans_add_data(ip->i_gl, bh);

	zero_user(page, offset, length);
	mark_buffer_dirty(bh);
unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}

#define GFS2_JTRUNC_REVOKES 8192

/**
 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
 * @inode: The inode being truncated
 * @oldsize: The original (larger) size
 * @newsize: The new smaller size
 *
 * With jdata files, we have to journal a revoke for each block which is
 * truncated. As a result, we need to split this into separate transactions
 * if the number of pages being truncated gets too large.
 */
static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
	u64 chunk;
	int error;

	while (oldsize != newsize) {
		chunk = oldsize - newsize;
		if (chunk > max_chunk)
			chunk = max_chunk;
		truncate_pagecache(inode, oldsize - chunk);
		oldsize -= chunk;
		gfs2_trans_end(sdp);
		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
		if (error)
			return error;
	}

	return 0;
}

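/*
 * trunc_start - first phase of a shrinking truncate
 *
 * Zeroes the partial block at the new end of file (for unstuffed files),
 * updates the on-disk inode size and truncates the page cache. Freeing
 * the blocks beyond the new size is left to trunc_dealloc().
 */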
static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct buffer_head *dibh;
	int journaled = gfs2_is_jdata(ip);
	int error;

	if (journaled)
		error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
	else
		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
	} else {
		if (newsize & (u64)(sdp->sd_sb.sb_bsize - 1)) {
			error = gfs2_block_truncate_page(mapping, newsize);
			if (error)
				goto out_brelse;
		}
		ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
	}

	i_size_write(inode, newsize);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
	gfs2_dinode_out(ip, dibh->b_data);

	if (journaled)
		error = gfs2_journaled_truncate(inode, oldsize, newsize);
	else
		truncate_pagecache(inode, newsize);

	if (error) {
		brelse(dibh);
		return error;
	}

out_brelse:
	brelse(dibh);
out:
	gfs2_trans_end(sdp);
	return error;
}

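/*
 * trunc_dealloc - free the metadata tree beyond the new file size
 *
 * Strips the tree one level at a time, from the bottom (the blocks of
 * data pointers) upwards, using recursive_scan()/do_strip() to free
 * every block past the last one still covered by @size.
 */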
static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int height = ip->i_height;
	u64 lblock;
	struct metapath mp;
	int error;

	if (!size)
		lblock = 0;
	else
		lblock = (size - 1) >> sdp->sd_sb.sb_bsize_shift;

	find_metapath(sdp, lblock, &mp, ip->i_height);
	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (error)
		return error;

	while (height--) {
		struct strip_mine sm;
		sm.sm_first = !!size;
		sm.sm_height = height;

		error = recursive_scan(ip, NULL, &mp, 0, 0, 1, &sm);
		if (error)
			break;
	}

	gfs2_quota_unhold(ip);

	return error;
}

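/*
 * trunc_end - final phase of truncate: reset the inode metadata
 *
 * If the file is now empty, the height is reset to zero and the
 * allocation goal is reset to the inode's own block. In all cases the
 * GFS2_DIF_TRUNC_IN_PROG flag is cleared and the dinode written back.
 */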
static int trunc_end(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (!i_size_read(&ip->i_inode)) {
		ip->i_height = 0;
		ip->i_goal = ip->i_no_addr;
		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
		gfs2_ordered_del_inode(ip);
	}
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
	ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;

	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);

out:
	up_write(&ip->i_rw_mutex);
	gfs2_trans_end(sdp);
	return error;
}

/**
 * do_shrink - make a file smaller
 * @inode: the inode
 * @oldsize: the current inode size
 * @newsize: the size to make the file
 *
 * Called with an exclusive lock on @inode. The @newsize must
 * be equal to or smaller than the current inode size.
 *
 * Returns: errno
 */
static int do_shrink(struct inode *inode, u64 oldsize, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int error;

	error = trunc_start(inode, oldsize, newsize);
	if (error < 0)
		return error;
	if (gfs2_is_stuffed(ip))
		return 0;

	error = trunc_dealloc(ip, newsize);
	if (error == 0)
		error = trunc_end(ip);

	return error;
}

void gfs2_trim_blocks(struct inode *inode)
{
	u64 size = inode->i_size;
	int ret;

	ret = do_shrink(inode, size, size);
	WARN_ON(ret != 0);
}

/**
 * do_grow - Touch and update inode size
 * @inode: The inode
 * @size: The new size
 *
 * This function updates the timestamps on the inode and writes any new
 * size to the inode. If the inode is stuffed and the new size no longer
 * fits in the inode block, the inode is unstuffed first, which requires
 * holding a block reservation and the quota lock across the transaction.
 *
 * Returns: 0 on success, or -ve on error
 */
static int do_grow(struct inode *inode, u64 size)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .target = 1, };
	struct buffer_head *dibh;
	int error;
	int unstuff = 0;

	if (gfs2_is_stuffed(ip) &&
	    (size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
		error = gfs2_quota_lock_check(ip);
		if (error)
			return error;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto do_grow_qunlock;
		unstuff = 1;
	}

	error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
				 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
				  0 : RES_QUOTA), 0);
	if (error)
		goto do_grow_release;

	if (unstuff) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (error)
			goto do_end_trans;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto do_end_trans;

	i_size_write(inode, size);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);

do_end_trans:
	gfs2_trans_end(sdp);
do_grow_release:
	if (unstuff) {
		gfs2_inplace_release(ip);
do_grow_qunlock:
		gfs2_quota_unlock(ip);
	}
	return error;
}

/**
 * gfs2_setattr_size - make a file a given size
 * @inode: the inode
 * @newsize: the size to make the file
 *
 * The file size can grow, shrink, or stay the same size. This
 * is called holding i_mutex and an exclusive glock on the inode
 * in question.
 *
 * Returns: errno
 */
int gfs2_setattr_size(struct inode *inode, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;
	u64 oldsize;

	BUG_ON(!S_ISREG(inode->i_mode));

	ret = inode_newsize_ok(inode, newsize);
	if (ret)
		return ret;

	ret = get_write_access(inode);
	if (ret)
		return ret;

	inode_dio_wait(inode);

	ret = gfs2_rs_alloc(ip);
	if (ret)
		goto out;

	oldsize = inode->i_size;
	if (newsize >= oldsize) {
		ret = do_grow(inode, newsize);
		goto out;
	}

	gfs2_rs_deltree(ip->i_res);
	ret = do_shrink(inode, oldsize, newsize);
out:
	put_write_access(inode);
	return ret;
}

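/*
 * gfs2_truncatei_resume - finish an interrupted truncate
 *
 * Called for a dinode which still has GFS2_DIF_TRUNC_IN_PROG set:
 * the deallocation is re-run from the current inode size and the
 * truncate is then completed by trunc_end().
 */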
int gfs2_truncatei_resume(struct gfs2_inode *ip)
{
	int error;
	error = trunc_dealloc(ip, i_size_read(&ip->i_inode));
	if (!error)
		error = trunc_end(ip);
	return error;
}

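/* Deallocate all data and metadata blocks of an inode (truncate to zero) */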
int gfs2_file_dealloc(struct gfs2_inode *ip)
{
	return trunc_dealloc(ip, 0);
}

/**
 * gfs2_free_journal_extents - Free cached journal bmap info
 * @jd: The journal
 *
 */
void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
{
	struct gfs2_journal_extent *jext;

	while(!list_empty(&jd->extent_list)) {
		jext = list_entry(jd->extent_list.next, struct gfs2_journal_extent, list);
		list_del(&jext->list);
		kfree(jext);
	}
}

/**
 * gfs2_add_jextent - Add or merge a new extent to extent cache
 * @jd: The journal descriptor
 * @lblock: The logical block at start of new extent
 * @dblock: The physical block at start of new extent
 * @blocks: Size of extent in fs blocks
 *
 * Returns: 0 on success or -ENOMEM
 */
static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
{
	struct gfs2_journal_extent *jext;

	if (!list_empty(&jd->extent_list)) {
		jext = list_entry(jd->extent_list.prev, struct gfs2_journal_extent, list);
		if ((jext->dblock + jext->blocks) == dblock) {
			jext->blocks += blocks;
			return 0;
		}
	}

	jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
	if (jext == NULL)
		return -ENOMEM;
	jext->dblock = dblock;
	jext->lblock = lblock;
	jext->blocks = blocks;
	list_add_tail(&jext->list, &jd->extent_list);
	jd->nr_extents++;
	return 0;
}

/**
 * gfs2_map_journal_extents - Cache journal bmap info
 * @sdp: The super block
 * @jd: The journal to map
 *
 * Create a reusable "extent" mapping from all logical
 * blocks to all physical blocks for the given journal. This will save
 * us time when writing journal blocks. Most journals will have only one
 * extent that maps all their logical blocks. That's because gfs2.mkfs
 * allocates the blocks for the journals with "preferred" allocation only
 * when it's been completely written. In that case, it likely allocated
 * all of them contiguously.
 *
 * Returns: 0 on success, or error on failure
 */
int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
{
	u64 lblock = 0;
	u64 lblock_stop;
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct buffer_head bh;
	unsigned int shift = sdp->sd_sb.sb_bsize_shift;
	u64 size;
	int rc;

	lblock_stop = i_size_read(jd->jd_inode) >> shift;
	size = (lblock_stop - lblock) << shift;
	jd->nr_extents = 0;
	WARN_ON(!list_empty(&jd->extent_list));

	do {
		bh.b_state = 0;
		bh.b_blocknr = 0;
		bh.b_size = size;
		rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
		if (rc || !buffer_mapped(&bh))
			goto fail;
		rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
		if (rc)
			goto fail;
		size -= bh.b_size;
		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
	} while(size > 0);

	fs_info(sdp, "journal %d mapped with %u extents\n", jd->jd_jid,
		jd->nr_extents);
	return 0;

fail:
	fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
		rc, jd->jd_jid,
		(unsigned long long)(i_size_read(jd->jd_inode) - size),
		jd->nr_extents);
	fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
		rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
		bh.b_state, (unsigned long long)bh.b_size);
	gfs2_free_journal_extents(jd);
	return rc;
}

/**
 * gfs2_write_alloc_required - figure out if a write will require an allocation
 * @ip: the file being written to
 * @offset: the offset to write to
 * @len: the number of bytes being written
 *
 * Returns: 1 if an alloc is required, 0 otherwise
 */
int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
			      unsigned int len)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head bh;
	unsigned int shift;
	u64 lblock, lblock_stop, size;
	u64 end_of_file;

	if (!len)
		return 0;

	if (gfs2_is_stuffed(ip)) {
		if (offset + len >
		    sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
			return 1;
		return 0;
	}

	shift = sdp->sd_sb.sb_bsize_shift;
	BUG_ON(gfs2_is_dir(ip));
	end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
	lblock = offset >> shift;
	lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
	if (lblock_stop > end_of_file)
		return 1;

	size = (lblock_stop - lblock) << shift;
	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
		if (!buffer_mapped(&bh))
			return 1;
		size -= bh.b_size;
		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
	} while(size > 0);

	return 0;
}