// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_name.ln_sbd,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(gl->gl_name.ln_sbd, "AIL error\n");
	gfs2_withdraw(gl->gl_name.ln_sbd);
}
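/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */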
static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}
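/*
 * gfs2_ail_empty_gl - revoke all remaining AIL buffers for a glock using a
 * dedicated, on-stack transaction, then flush the log.
 */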
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	int ret;

	memset(&tr, 0, sizeof(tr));
	INIT_LIST_HEAD(&tr.tr_buf);
	INIT_LIST_HEAD(&tr.tr_databuf);
	INIT_LIST_HEAD(&tr.tr_ail1_list);
	INIT_LIST_HEAD(&tr.tr_ail2_list);
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes) {
		bool have_revokes;
		bool log_in_flight;
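		/*
		 * Nothing is on the AIL, but there may still be revokes
		 * queued on the sdp revoke list; if so, flush the log.
		 * If revoke I/O is already in flight, wait for it to
		 * finish before returning.
		 */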
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}
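	/*
	 * A shortened, inline version of gfs2_trans_begin(): reserve one
	 * header block plus enough blocks to hold the revokes.  The
	 * transaction structure lives on the stack.
	 */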
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes);
	tr.tr_ip = _RET_IP_;
	ret = gfs2_log_reserve(sdp, tr.tr_reserved);
	if (ret < 0)
		return ret;
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, 0, tr.tr_revokes);

	gfs2_trans_end(sdp);
flush:
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_EMPTY_GL);
	return 0;
}
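/*
 * gfs2_ail_flush - revoke the buffers left on the glock's AIL list, sizing
 * the transaction so that all the revokes fit in whole log blocks.
 */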
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}
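/**
 * gfs2_rgrp_metasync - sync out the metadata of this resource group
 * @gl: the glock protecting the resource group
 */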
static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *metamapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
	WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}
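/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */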
static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}
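/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* flags
 *
 * Resource groups are never demoted to LM_ST_DEFERRED, so DIO_METADATA
 * should always be set here.
 */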
static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;

	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, start, end);
	rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}
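/*
 * gfs2_rgrp_go_dump - dump the state of a resource group glock (debugfs).
 */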
static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}
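/*
 * gfs2_glock2inode - dereference gl_object as an inode under gl_lockref.lock,
 * setting GIF_GLOP_PENDING so the inode is not torn down until
 * gfs2_clear_glop_pending() is called.
 */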
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}
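/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 */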
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return error;
}
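/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 */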
static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	gfs2_ail_empty_gl(gl);
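	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * again, so it has to be cleared once more here.
	 */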
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}
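/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_* flags
 *
 * Normally everything is invalidated, but when moving into LM_ST_DEFERRED
 * from LM_ST_SHARED or LM_ST_EXCLUSIVE the metadata can be kept, since it
 * won't have changed.
 */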
static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}
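/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */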
static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}
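/*
 * gfs2_dinode_in - copy an on-disk dinode into the incore inode, validating
 * the block number, metadata tree height and directory depth along the way.
 */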
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}
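/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */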
int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}
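/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: The glock holder
 *
 * Returns: errno
 */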
static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}
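/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 */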
static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode = &ip->i_inode;
	unsigned long nrpages;

	if (ip == NULL)
		return;

	xa_lock_irq(&inode->i_data.i_pages);
	nrpages = inode->i_data.nrpages;
	xa_unlock_irq(&inode->i_data.i_pages);

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(inode), nrpages);
}
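/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 */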
static int freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
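	/*
	 * gl_state == LM_ST_SHARED is checked here (rather than a request
	 * for LM_ST_EXCLUSIVE) because when any node freezes the filesystem,
	 * all nodes end up holding the freeze glock in SH mode, and each of
	 * them must freeze locally and queue freeze work.  The work item
	 * then tries to reacquire the freeze glock in SH, effectively
	 * waiting for the node that holds it in EX to thaw.
	 */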
	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
	    !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			if (gfs2_withdrawn(sdp)) {
				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
				return 0;
			}
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
				       GFS2_LFC_FREEZE_GO_SYNC);
		else
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}
	return 0;
}
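/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 * @gh: the glock holder
 *
 * Returns: errno
 */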
static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the in-core log head from the journal head */
		if (!gfs2_withdrawn(sdp)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}
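/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */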
static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}
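/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock lock is held while calling this
 */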
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (!queue_delayed_work(gfs2_delete_workqueue,
					&gl->gl_delete, 0))
			gl->gl_lockref.count--;
	}
}

static int iopen_go_demote_ok(const struct gfs2_glock *gl)
{
	return !gfs2_delete_work_queued(gl);
}
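/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be freed so that we know
 * other nodes may proceed with recovery / journal replay.
 */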
static void inode_go_free(struct gfs2_glock *gl)
{
	/* Careful here: there is no reference to the glock other than the
	   fact that it is being freed. */
	if (!test_bit(GLF_FREEING, &gl->gl_flags))
		return;
	clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_FREEING);
}
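/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 */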
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	   live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the callback if we're withdrawn, shut down or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);

	/*
	 * We can't do the remote withdraw or journal recovery directly here,
	 * because this is called from the glock unlock path while we hold the
	 * "live" glock, and recovering it ourselves would deadlock on that
	 * same glock. Instead, queue a work item to do the work.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
	.go_free = inode_go_free,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_demote_ok = iopen_go_demote_ok,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
	.go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};
820