/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_name.ln_sbd,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n");
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
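		/*
		 * A buffer that is still dirty, pinned, or locked has not
		 * been fully written back.  During fsync that is expected
		 * and the buffer is simply skipped; for any other caller
		 * it means the AIL is inconsistent, so report the buffer
		 * and withdraw the filesystem.
		 */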
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}

/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	INIT_LIST_HEAD(&tr.tr_buf);
	INIT_LIST_HEAD(&tr.tr_databuf);
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin() */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = (unsigned long)__builtin_return_address(0);
	sb_start_intwrite(sdp->sd_vfs);
	if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0) {
		sb_end_intwrite(sdp->sd_vfs);
		return;
	}
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, 0, tr.tr_revokes);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

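	/*
	 * The first journal block is a log descriptor and holds max_revokes
	 * revokes; each continuation block carries only a meta header and
	 * so holds slightly more.  Grow the reservation one block at a time
	 * until it covers everything currently on the AIL.
	 */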
	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	struct gfs2_rgrpd *rgd;
	int error;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_rgrp_brelse(rgd);
	spin_unlock(&gl->gl_lockref.lock);

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl);
	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO flags
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);

	if (rgd)
		gfs2_rgrp_brelse(rgd);

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages(mapping, 0);

	if (rgd)
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}

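/*
 * gfs2_glock2inode - safely fetch the inode behind an inode glock
 *
 * GIF_GLOP_PENDING is set under the lockref spin lock so that anyone
 * tearing the inode down must wait, via gfs2_clear_glop_pending(),
 * until the glock operation using the inode has finished.
 */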
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

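	/*
	 * Flush the log first so that all journaled changes for this glock
	 * are on disk, then write back and wait on the metadata mapping
	 * and, for regular files, the data mapping, before emptying the
	 * AIL so that the revokes make it into the log.
	 */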
	gfs2_log_flush(gl->gl_name.ln_sbd, gl);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO flags
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

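	/*
	 * The rindex inode is special: invalidating it means the in-core
	 * resource group index is stale, so flush the log and force the
	 * rindex to be re-read on its next use.
	 */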
	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_holder *gh;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
		if (gh->gh_list.next != &gl->gl_holders)
			return 0;
	}

	return 1;
}

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);

	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

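	/*
	 * If a truncate was interrupted (GFS2_DIF_TRUNC_IN_PROG is still
	 * set) and the lock is now held exclusively, queue the inode for
	 * the quota daemon, which completes the truncate; the nonzero
	 * return tells the glock code this holder cannot be granted yet.
	 */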
	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 *
 * Returns: 0 on success
 */

static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_inode *ip = gl->gl_object;

	if (ip == NULL)
		return 0;
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(&ip->i_inode));
	return 0;
}

/**
 * trans_go_sync - promote/demote the transaction glock
 * @gl: the glock
 *
 * Syncs all metadata and shuts down the log when the transaction
 * glock is demoted while the journal is live.
 */

static void trans_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 * @gh: the holder
 *
 */

static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

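	/*
	 * When the transaction glock is reacquired with the journal still
	 * live, invalidate the cached journal metadata, re-read the journal
	 * head, and make sure the log was cleanly shut down before the log
	 * pointers are reinitialized.
	 */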
	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/*  Initialize some head of the log stuff  */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

/**
 * trans_go_demote_ok - the transaction glock is never demoted
 * @gl: the glock
 *
 * Returns: always 0
 */

static int trans_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock lock is held while calling this
 */

static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
		return;

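	/*
	 * A remote demote request to LM_ST_UNLOCKED on an iopen glock means
	 * another node has unlinked this inode.  Take an extra glock
	 * reference for the work item and queue the delete work so the
	 * inode can be evicted locally; drop the reference again if the
	 * work was already queued.
	 */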
	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gl->gl_lockref.count--;
	}
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_unlock = gfs2_rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_ASPACE | GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_sync = trans_go_sync,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_demote_ok = trans_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};