1
2
3
4
5
6
7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
9#include <linux/sched.h>
10#include <linux/slab.h>
11#include <linux/spinlock.h>
12#include <linux/completion.h>
13#include <linux/buffer_head.h>
14#include <linux/blkdev.h>
15#include <linux/kthread.h>
16#include <linux/export.h>
17#include <linux/namei.h>
18#include <linux/mount.h>
19#include <linux/gfs2_ondisk.h>
20#include <linux/quotaops.h>
21#include <linux/lockdep.h>
22#include <linux/module.h>
23#include <linux/backing-dev.h>
24#include <linux/fs_parser.h>
25
26#include "gfs2.h"
27#include "incore.h"
28#include "bmap.h"
29#include "glock.h"
30#include "glops.h"
31#include "inode.h"
32#include "recovery.h"
33#include "rgrp.h"
34#include "super.h"
35#include "sys.h"
36#include "util.h"
37#include "log.h"
38#include "quota.h"
39#include "dir.h"
40#include "meta_io.h"
41#include "trace_gfs2.h"
42#include "lops.h"
43
44#define DO 0
45#define UNDO 1
46
47
48
49
50
51
52
53static void gfs2_tune_init(struct gfs2_tune *gt)
54{
55 spin_lock_init(>->gt_spin);
56
57 gt->gt_quota_warn_period = 10;
58 gt->gt_quota_scale_num = 1;
59 gt->gt_quota_scale_den = 1;
60 gt->gt_new_files_jdata = 0;
61 gt->gt_max_readahead = BIT(18);
62 gt->gt_complain_secs = 10;
63}
64
65void free_sbd(struct gfs2_sbd *sdp)
66{
67 if (sdp->sd_lkstats)
68 free_percpu(sdp->sd_lkstats);
69 kfree(sdp);
70}
71
/**
 * init_sbd - allocate and initialize an in-core superblock
 * @sb: the VFS super block
 *
 * Allocates the gfs2_sbd plus its per-cpu lock statistics and sets up
 * every lock, list, waitqueue and completion used during the rest of
 * the mount.  No disk I/O happens here.
 *
 * Returns: the new gfs2_sbd, or NULL on allocation failure
 */
static struct gfs2_sbd *init_sbd(struct super_block *sb)
{
	struct gfs2_sbd *sdp;
	struct address_space *mapping;

	sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
	if (!sdp)
		return NULL;

	sdp->sd_vfs = sb;
	sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats);
	if (!sdp->sd_lkstats)
		goto fail;
	sb->s_fs_info = sdp;

	/* Cleared once a journal id is known; wait_on_journal() sleeps on it */
	set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
	gfs2_tune_init(&sdp->sd_tune);

	init_waitqueue_head(&sdp->sd_glock_wait);
	init_waitqueue_head(&sdp->sd_async_glock_wait);
	atomic_set(&sdp->sd_glock_disposal, 0);
	init_completion(&sdp->sd_locking_init);
	init_completion(&sdp->sd_wdack);
	spin_lock_init(&sdp->sd_statfs_spin);

	spin_lock_init(&sdp->sd_rindex_spin);
	sdp->sd_rindex_tree.rb_node = NULL;

	INIT_LIST_HEAD(&sdp->sd_jindex_list);
	spin_lock_init(&sdp->sd_jindex_spin);
	mutex_init(&sdp->sd_jindex_mutex);
	init_completion(&sdp->sd_journal_ready);

	INIT_LIST_HEAD(&sdp->sd_quota_list);
	mutex_init(&sdp->sd_quota_mutex);
	mutex_init(&sdp->sd_quota_sync_mutex);
	init_waitqueue_head(&sdp->sd_quota_wait);
	INIT_LIST_HEAD(&sdp->sd_trunc_list);
	spin_lock_init(&sdp->sd_trunc_lock);
	spin_lock_init(&sdp->sd_bitmap_lock);

	INIT_LIST_HEAD(&sdp->sd_sc_inodes_list);

	/* Private address space used for resource-group metadata pages */
	mapping = &sdp->sd_aspace;

	address_space_init_once(mapping);
	mapping->a_ops = &gfs2_rgrp_aops;
	mapping->host = sb->s_bdev->bd_inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_NOFS);	/* avoid fs recursion */
	mapping->private_data = NULL;
	mapping->writeback_index = 0;

	spin_lock_init(&sdp->sd_log_lock);
	atomic_set(&sdp->sd_log_pinned, 0);
	INIT_LIST_HEAD(&sdp->sd_log_revokes);
	INIT_LIST_HEAD(&sdp->sd_log_ordered);
	spin_lock_init(&sdp->sd_ordered_lock);

	init_waitqueue_head(&sdp->sd_log_waitq);
	init_waitqueue_head(&sdp->sd_logd_waitq);
	spin_lock_init(&sdp->sd_ail_lock);
	INIT_LIST_HEAD(&sdp->sd_ail1_list);
	INIT_LIST_HEAD(&sdp->sd_ail2_list);

	init_rwsem(&sdp->sd_log_flush_lock);
	atomic_set(&sdp->sd_log_in_flight, 0);
	init_waitqueue_head(&sdp->sd_log_flush_wait);
	atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
	mutex_init(&sdp->sd_freeze_mutex);

	return sdp;

fail:
	free_sbd(sdp);
	return NULL;
}
149
150
151
152
153
154
155
156
157
158
159
160static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
161{
162 struct gfs2_sb_host *sb = &sdp->sd_sb;
163
164 if (sb->sb_magic != GFS2_MAGIC ||
165 sb->sb_type != GFS2_METATYPE_SB) {
166 if (!silent)
167 pr_warn("not a GFS2 filesystem\n");
168 return -EINVAL;
169 }
170
171 if (sb->sb_fs_format < GFS2_FS_FORMAT_MIN ||
172 sb->sb_fs_format > GFS2_FS_FORMAT_MAX ||
173 sb->sb_multihost_format != GFS2_FORMAT_MULTI) {
174 fs_warn(sdp, "Unknown on-disk format, unable to mount\n");
175 return -EINVAL;
176 }
177
178 if (sb->sb_bsize < 512 || sb->sb_bsize > PAGE_SIZE ||
179 (sb->sb_bsize & (sb->sb_bsize - 1))) {
180 pr_warn("Invalid block size\n");
181 return -EINVAL;
182 }
183
184 return 0;
185}
186
187static void end_bio_io_page(struct bio *bio)
188{
189 struct page *page = bio->bi_private;
190
191 if (!bio->bi_status)
192 SetPageUptodate(page);
193 else
194 pr_warn("error %d reading superblock\n", bio->bi_status);
195 unlock_page(page);
196}
197
/**
 * gfs2_sb_in - convert an on-disk superblock into its in-core form
 * @sdp: the in-core superblock to fill
 * @buf: raw superblock buffer (big-endian struct gfs2_sb)
 *
 * Byte-swaps each field into sdp->sd_sb and copies the filesystem
 * UUID into the VFS super block.
 */
static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)
{
	struct gfs2_sb_host *sb = &sdp->sd_sb;
	struct super_block *s = sdp->sd_vfs;
	const struct gfs2_sb *str = buf;

	sb->sb_magic = be32_to_cpu(str->sb_header.mh_magic);
	sb->sb_type = be32_to_cpu(str->sb_header.mh_type);
	sb->sb_fs_format = be32_to_cpu(str->sb_fs_format);
	sb->sb_multihost_format = be32_to_cpu(str->sb_multihost_format);
	sb->sb_bsize = be32_to_cpu(str->sb_bsize);
	sb->sb_bsize_shift = be32_to_cpu(str->sb_bsize_shift);
	sb->sb_master_dir.no_addr = be64_to_cpu(str->sb_master_dir.no_addr);
	sb->sb_master_dir.no_formal_ino = be64_to_cpu(str->sb_master_dir.no_formal_ino);
	sb->sb_root_dir.no_addr = be64_to_cpu(str->sb_root_dir.no_addr);
	sb->sb_root_dir.no_formal_ino = be64_to_cpu(str->sb_root_dir.no_formal_ino);

	memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN);
	memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
	memcpy(&s->s_uuid, str->sb_uuid, 16);
}
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
/**
 * gfs2_read_super - Read the gfs2 super block from disk
 * @sdp: The GFS2 super block
 * @sector: The location of the super block
 * @silent: Don't print a message if the check fails
 *
 * This uses the bio functions to read the super block from disk
 * because we want to be 100% sure that we never read cached data.
 * A super block is read twice only during each GFS2 mount and is
 * never written to by the filesystem. The first time its read no
 * locks are held, and the only details which are looked at are those
 * relating to the locking protocol. Once locking is up and running,
 * the sb is read again under the lock to establish the location of
 * the master directory (contains pointers to journals etc) and the
 * root directory.
 *
 * Returns: 0 on success or error
 */
static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
{
	struct super_block *sb = sdp->sd_vfs;
	struct gfs2_sb *p;
	struct page *page;
	struct bio *bio;

	page = alloc_page(GFP_NOFS);
	if (unlikely(!page))
		return -ENOMEM;

	ClearPageUptodate(page);
	ClearPageDirty(page);
	lock_page(page);	/* end_bio_io_page() unlocks it on completion */

	bio = bio_alloc(GFP_NOFS, 1);
	/* convert filesystem block number to a 512-byte sector number */
	bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
	bio_set_dev(bio, sb->s_bdev);
	bio_add_page(bio, page, PAGE_SIZE, 0);

	bio->bi_end_io = end_bio_io_page;
	bio->bi_private = page;
	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META);
	submit_bio(bio);
	wait_on_page_locked(page);	/* synchronous: wait for the end_io */
	bio_put(bio);
	if (!PageUptodate(page)) {
		__free_page(page);
		return -EIO;
	}
	p = kmap(page);
	gfs2_sb_in(sdp, p);
	kunmap(page);
	__free_page(page);
	return gfs2_check_sb(sdp, silent);
}
275
276
277
278
279
280
281
282
/**
 * gfs2_read_sb - Read super block and compute derived constants
 * @sdp: The GFS2 superblock
 * @silent: Don't print a message if the check fails
 *
 * Reads the superblock from disk, then derives the per-mount geometry
 * constants from it: pointers per block for each metadata type, the
 * journaled-data block size, directory hash sizes, the metadata height
 * table, and the maximum reservation needed for a directory insert.
 *
 * Returns: errno
 */
static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
{
	u32 hash_blocks, ind_blocks, leaf_blocks;
	u32 tmp_blocks;
	unsigned int x;
	int error;

	error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
	if (error) {
		if (!silent)
			fs_err(sdp, "can't read superblock\n");
		return error;
	}

	/* Filesystem-block to 512-byte basic-block conversion factors */
	sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
			       GFS2_BASIC_BLOCK_SHIFT;
	sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
	/* Block pointers that fit after each metadata header type */
	sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_dinode)) / sizeof(u64);
	sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_meta_header)) / sizeof(u64);
	sdp->sd_ldptrs = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
	sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2;
	sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1;
	sdp->sd_hash_ptrs = sdp->sd_hash_bsize / sizeof(u64);
	sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header)) /
			        sizeof(struct gfs2_quota_change);
	sdp->sd_blocks_per_bitmap = (sdp->sd_sb.sb_bsize -
				     sizeof(struct gfs2_meta_header))
				     * GFS2_NBBY; /* bits per bitmap block */

	/* One log descriptor block's worth of revokes is always available */
	atomic_set(&sdp->sd_log_revokes_available, sdp->sd_ldptrs);

	/* Compute maximum reservation required to add a directory entry */
	hash_blocks = DIV_ROUND_UP(sizeof(u64) * BIT(GFS2_DIR_MAX_DEPTH),
				   sdp->sd_jbsize);

	ind_blocks = 0;
	for (tmp_blocks = hash_blocks; tmp_blocks > sdp->sd_diptrs;) {
		tmp_blocks = DIV_ROUND_UP(tmp_blocks, sdp->sd_inptrs);
		ind_blocks += tmp_blocks;
	}

	leaf_blocks = 2 + GFS2_DIR_MAX_DEPTH;

	sdp->sd_max_dirres = hash_blocks + ind_blocks + leaf_blocks;

	/* Bytes addressable at each metadata tree height; stop on overflow */
	sdp->sd_heightsize[0] = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_dinode);
	sdp->sd_heightsize[1] = sdp->sd_sb.sb_bsize * sdp->sd_diptrs;
	for (x = 2;; x++) {
		u64 space, d;
		u32 m;

		space = sdp->sd_heightsize[x - 1] * sdp->sd_inptrs;
		d = space;
		m = do_div(d, sdp->sd_inptrs);

		/* dividing back must reproduce the factor, else we overflowed */
		if (d != sdp->sd_heightsize[x - 1] || m)
			break;
		sdp->sd_heightsize[x] = space;
	}
	sdp->sd_max_height = x;
	sdp->sd_heightsize[x] = ~0;
	gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT);

	sdp->sd_max_dents_per_leaf = (sdp->sd_sb.sb_bsize -
				      sizeof(struct gfs2_leaf)) /
				     GFS2_MIN_DIRENT_SIZE;
	return 0;
}
363
364static int init_names(struct gfs2_sbd *sdp, int silent)
365{
366 char *proto, *table;
367 int error = 0;
368
369 proto = sdp->sd_args.ar_lockproto;
370 table = sdp->sd_args.ar_locktable;
371
372
373
374 if (!proto[0] || !table[0]) {
375 error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
376 if (error)
377 return error;
378
379 if (!proto[0])
380 proto = sdp->sd_sb.sb_lockproto;
381 if (!table[0])
382 table = sdp->sd_sb.sb_locktable;
383 }
384
385 if (!table[0])
386 table = sdp->sd_vfs->s_id;
387
388 strlcpy(sdp->sd_proto_name, proto, GFS2_FSNAME_LEN);
389 strlcpy(sdp->sd_table_name, table, GFS2_FSNAME_LEN);
390
391 table = sdp->sd_table_name;
392 while ((table = strchr(table, '/')))
393 *table = '_';
394
395 return error;
396}
397
/**
 * init_locking - acquire the mount-wide non-disk glocks, or undo that
 * @sdp: the filesystem
 * @mount_gh: holder for the mount glock, filled in here
 * @undo: if true, skip straight to the teardown ladder
 *
 * Takes the mount glock (exclusive, uncached), the live glock (shared,
 * held for the life of the mount), and creates the rename and freeze
 * glocks.  The fall-through labels undo the steps in reverse order;
 * calling with undo == 1 releases everything a successful call took.
 *
 * Returns: errno
 */
static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
			int undo)
{
	int error = 0;

	if (undo)
		goto fail_trans;

	error = gfs2_glock_nq_num(sdp,
				  GFS2_MOUNT_LOCK, &gfs2_nondisk_glops,
				  LM_ST_EXCLUSIVE, LM_FLAG_NOEXP | GL_NOCACHE,
				  mount_gh);
	if (error) {
		fs_err(sdp, "can't acquire mount glock: %d\n", error);
		goto fail;
	}

	/* Held shared until unmount; losing it means another node
	   believes we have failed */
	error = gfs2_glock_nq_num(sdp,
				  GFS2_LIVE_LOCK, &gfs2_nondisk_glops,
				  LM_ST_SHARED,
				  LM_FLAG_NOEXP | GL_EXACT,
				  &sdp->sd_live_gh);
	if (error) {
		fs_err(sdp, "can't acquire live glock: %d\n", error);
		goto fail_mount;
	}

	error = gfs2_glock_get(sdp, GFS2_RENAME_LOCK, &gfs2_nondisk_glops,
			       CREATE, &sdp->sd_rename_gl);
	if (error) {
		fs_err(sdp, "can't create rename glock: %d\n", error);
		goto fail_live;
	}

	error = gfs2_glock_get(sdp, GFS2_FREEZE_LOCK, &gfs2_freeze_glops,
			       CREATE, &sdp->sd_freeze_gl);
	if (error) {
		fs_err(sdp, "can't create transaction glock: %d\n", error);
		goto fail_rename;
	}

	return 0;

	/* Teardown ladder: each label undoes one acquisition above and
	   falls through to the next */
fail_trans:
	gfs2_glock_put(sdp->sd_freeze_gl);
fail_rename:
	gfs2_glock_put(sdp->sd_rename_gl);
fail_live:
	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
fail_mount:
	gfs2_glock_dq_uninit(mount_gh);
fail:
	return error;
}
452
453static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
454 u64 no_addr, const char *name)
455{
456 struct gfs2_sbd *sdp = sb->s_fs_info;
457 struct dentry *dentry;
458 struct inode *inode;
459
460 inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0,
461 GFS2_BLKST_FREE );
462 if (IS_ERR(inode)) {
463 fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
464 return PTR_ERR(inode);
465 }
466 dentry = d_make_root(inode);
467 if (!dentry) {
468 fs_err(sdp, "can't alloc %s dentry\n", name);
469 return -ENOMEM;
470 }
471 *dptr = dentry;
472 return 0;
473}
474
/**
 * init_sb - read the superblock under its glock and set up the VFS sb
 * @sdp: the filesystem
 * @silent: suppress warnings if this isn't a GFS2 filesystem
 *
 * Re-reads the superblock with the sb glock held, selects the xattr
 * handlers for the on-disk format, validates the block size against
 * the device and page size, and instantiates the root and master
 * directory dentries.
 *
 * Returns: errno
 */
static int init_sb(struct gfs2_sbd *sdp, int silent)
{
	struct super_block *sb = sdp->sd_vfs;
	struct gfs2_holder sb_gh;
	u64 no_addr;
	int ret;

	ret = gfs2_glock_nq_num(sdp, GFS2_SB_LOCK, &gfs2_meta_glops,
				LM_ST_SHARED, 0, &sb_gh);
	if (ret) {
		fs_err(sdp, "can't acquire superblock glock: %d\n", ret);
		return ret;
	}

	ret = gfs2_read_sb(sdp, silent);
	if (ret) {
		fs_err(sdp, "can't read superblock: %d\n", ret);
		goto out;
	}

	/* gfs2_check_sb() already limited the format to this range */
	switch(sdp->sd_sb.sb_fs_format) {
	case GFS2_FS_FORMAT_MAX:
		sb->s_xattr = gfs2_xattr_handlers_max;
		break;

	case GFS2_FS_FORMAT_MIN:
		sb->s_xattr = gfs2_xattr_handlers_min;
		break;

	default:
		BUG();
	}

	/* Set up the buffer cache and SB for real */
	if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) {
		ret = -EINVAL;
		fs_err(sdp, "FS block size (%u) is too small for device "
		       "block size (%u)\n",
		       sdp->sd_sb.sb_bsize, bdev_logical_block_size(sb->s_bdev));
		goto out;
	}
	if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
		ret = -EINVAL;
		fs_err(sdp, "FS block size (%u) is too big for machine "
		       "page size (%u)\n",
		       sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE);
		goto out;
	}
	sb_set_blocksize(sb, sdp->sd_sb.sb_bsize);

	/* Get the root inode */
	no_addr = sdp->sd_sb.sb_root_dir.no_addr;
	ret = gfs2_lookup_root(sb, &sdp->sd_root_dir, no_addr, "root");
	if (ret)
		goto out;

	/* Get the master inode */
	no_addr = sdp->sd_sb.sb_master_dir.no_addr;
	ret = gfs2_lookup_root(sb, &sdp->sd_master_dir, no_addr, "master");
	if (ret) {
		dput(sdp->sd_root_dir);
		goto out;
	}
	/* meta mounts expose the master dir as the visible root */
	sb->s_root = dget(sdp->sd_args.ar_meta ? sdp->sd_master_dir : sdp->sd_root_dir);
out:
	gfs2_glock_dq_uninit(&sb_gh);
	return ret;
}
543
544static void gfs2_others_may_mount(struct gfs2_sbd *sdp)
545{
546 char *message = "FIRSTMOUNT=Done";
547 char *envp[] = { message, NULL };
548
549 fs_info(sdp, "first mount done, others may mount\n");
550
551 if (sdp->sd_lockstruct.ls_ops->lm_first_done)
552 sdp->sd_lockstruct.ls_ops->lm_first_done(sdp);
553
554 kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp);
555}
556
557
558
559
560
561
562
563
564
/**
 * gfs2_jindex_hold - hold the jindex glock and build the journal list
 * @sdp: The GFS2 superblock
 * @ji_gh: the holder for the jindex glock; left held when 0 is returned
 *
 * Repeatedly takes the jindex glock shared, probes for "journal<N>"
 * where N is the next unseen index, and looks the inode up.  The glock
 * is dropped and re-taken around each lookup so the dir code can take
 * its own locks.  The loop exits leaving the glock held once the probe
 * returns -ENOENT (no more journals), which is converted to success.
 *
 * Returns: 0 on success (ji_gh held), errno otherwise
 */
static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
{
	struct gfs2_inode *dip = GFS2_I(sdp->sd_jindex);
	struct qstr name;
	char buf[20];
	struct gfs2_jdesc *jd;
	int error;

	name.name = buf;

	mutex_lock(&sdp->sd_jindex_mutex);

	for (;;) {
		struct gfs2_inode *jip;

		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
		if (error)
			break;

		/* sd_journals counts the journals found so far, so it is
		   also the index of the next one to probe for */
		name.len = sprintf(buf, "journal%u", sdp->sd_journals);
		name.hash = gfs2_disk_hash(name.name, name.len);

		error = gfs2_dir_check(sdp->sd_jindex, &name, NULL);
		if (error == -ENOENT) {
			/* normal termination: glock stays held */
			error = 0;
			break;
		}

		gfs2_glock_dq_uninit(ji_gh);

		if (error)
			break;

		error = -ENOMEM;
		jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL);
		if (!jd)
			break;

		INIT_LIST_HEAD(&jd->extent_list);
		INIT_LIST_HEAD(&jd->jd_revoke_list);

		INIT_WORK(&jd->jd_work, gfs2_recover_func);
		jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1);
		if (IS_ERR_OR_NULL(jd->jd_inode)) {
			if (!jd->jd_inode)
				error = -ENOENT;
			else
				error = PTR_ERR(jd->jd_inode);
			kfree(jd);
			break;
		}

		spin_lock(&sdp->sd_jindex_spin);
		jd->jd_jid = sdp->sd_journals++;
		jip = GFS2_I(jd->jd_inode);
		jd->jd_no_addr = jip->i_no_addr;
		list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
		spin_unlock(&sdp->sd_jindex_spin);
	}

	mutex_unlock(&sdp->sd_jindex_mutex);

	return error;
}
629
630
631
632
633
634
635
636
637
638
639
/**
 * init_statfs - look up and lock master and local (per-node) statfs inodes
 * @sdp: the filesystem
 *
 * This should be called after the jindex is initialized in init_journal()
 * and before gfs2_journal_recovery() is called because we need to be able
 * to write to these inodes during recovery.  Spectator mounts stop after
 * the master statfs inode: they never write, so need no per-node files.
 *
 * Returns: errno
 */
static int init_statfs(struct gfs2_sbd *sdp)
{
	int error = 0;
	struct inode *master = d_inode(sdp->sd_master_dir);
	struct inode *pn = NULL;
	char buf[30];
	struct gfs2_jdesc *jd;
	struct gfs2_inode *ip;

	sdp->sd_statfs_inode = gfs2_lookup_simple(master, "statfs");
	if (IS_ERR(sdp->sd_statfs_inode)) {
		error = PTR_ERR(sdp->sd_statfs_inode);
		fs_err(sdp, "can't read in statfs inode: %d\n", error);
		goto out;
	}
	if (sdp->sd_args.ar_spectator)
		goto out;

	pn = gfs2_lookup_simple(master, "per_node");
	if (IS_ERR(pn)) {
		error = PTR_ERR(pn);
		fs_err(sdp, "can't find per_node directory: %d\n", error);
		goto put_statfs;
	}

	/* For each journal, lookup the corresponding local statfs inode in
	   the per_node metafs directory. */
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		struct local_statfs_inode *lsi =
			kmalloc(sizeof(struct local_statfs_inode), GFP_NOFS);
		if (!lsi) {
			error = -ENOMEM;
			goto free_local;
		}
		sprintf(buf, "statfs_change%u", jd->jd_jid);
		lsi->si_sc_inode = gfs2_lookup_simple(pn, buf);
		if (IS_ERR(lsi->si_sc_inode)) {
			error = PTR_ERR(lsi->si_sc_inode);
			fs_err(sdp, "can't find local \"sc\" file#%u: %d\n",
			       jd->jd_jid, error);
			goto free_local;
		}
		lsi->si_jid = jd->jd_jid;
		/* remember this node's own statfs_change inode */
		if (jd->jd_jid == sdp->sd_jdesc->jd_jid)
			sdp->sd_sc_inode = lsi->si_sc_inode;

		list_add_tail(&lsi->si_list, &sdp->sd_sc_inodes_list);
	}

	iput(pn);
	pn = NULL;
	ip = GFS2_I(sdp->sd_sc_inode);
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
				   &sdp->sd_sc_gh);
	if (error) {
		fs_err(sdp, "can't lock local \"sc\" file: %d\n", error);
		goto free_local;
	}
	return 0;

free_local:
	free_local_statfs_inodes(sdp);
	iput(pn);
put_statfs:
	iput(sdp->sd_statfs_inode);
out:
	return error;
}
708
709
/* Undo init_statfs: on non-spectator mounts drop the local statfs glock
   and inodes, then release the master statfs inode. */
static void uninit_statfs(struct gfs2_sbd *sdp)
{
	if (!sdp->sd_args.ar_spectator) {
		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
		free_local_statfs_inodes(sdp);
	}
	iput(sdp->sd_statfs_inode);
}
718
719static int init_journal(struct gfs2_sbd *sdp, int undo)
720{
721 struct inode *master = d_inode(sdp->sd_master_dir);
722 struct gfs2_holder ji_gh;
723 struct gfs2_inode *ip;
724 int jindex = 1;
725 int error = 0;
726
727 if (undo) {
728 jindex = 0;
729 goto fail_statfs;
730 }
731
732 sdp->sd_jindex = gfs2_lookup_simple(master, "jindex");
733 if (IS_ERR(sdp->sd_jindex)) {
734 fs_err(sdp, "can't lookup journal index: %d\n", error);
735 return PTR_ERR(sdp->sd_jindex);
736 }
737
738
739
740 error = gfs2_jindex_hold(sdp, &ji_gh);
741 if (error) {
742 fs_err(sdp, "can't read journal index: %d\n", error);
743 goto fail;
744 }
745
746 error = -EUSERS;
747 if (!gfs2_jindex_size(sdp)) {
748 fs_err(sdp, "no journals!\n");
749 goto fail_jindex;
750 }
751
752 atomic_set(&sdp->sd_log_blks_needed, 0);
753 if (sdp->sd_args.ar_spectator) {
754 sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0);
755 atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
756 atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
757 atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
758 } else {
759 if (sdp->sd_lockstruct.ls_jid >= gfs2_jindex_size(sdp)) {
760 fs_err(sdp, "can't mount journal #%u\n",
761 sdp->sd_lockstruct.ls_jid);
762 fs_err(sdp, "there are only %u journals (0 - %u)\n",
763 gfs2_jindex_size(sdp),
764 gfs2_jindex_size(sdp) - 1);
765 goto fail_jindex;
766 }
767 sdp->sd_jdesc = gfs2_jdesc_find(sdp, sdp->sd_lockstruct.ls_jid);
768
769 error = gfs2_glock_nq_num(sdp, sdp->sd_lockstruct.ls_jid,
770 &gfs2_journal_glops,
771 LM_ST_EXCLUSIVE,
772 LM_FLAG_NOEXP | GL_NOCACHE,
773 &sdp->sd_journal_gh);
774 if (error) {
775 fs_err(sdp, "can't acquire journal glock: %d\n", error);
776 goto fail_jindex;
777 }
778
779 ip = GFS2_I(sdp->sd_jdesc->jd_inode);
780 sdp->sd_jinode_gl = ip->i_gl;
781 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
782 LM_FLAG_NOEXP | GL_EXACT | GL_NOCACHE,
783 &sdp->sd_jinode_gh);
784 if (error) {
785 fs_err(sdp, "can't acquire journal inode glock: %d\n",
786 error);
787 goto fail_journal_gh;
788 }
789
790 error = gfs2_jdesc_check(sdp->sd_jdesc);
791 if (error) {
792 fs_err(sdp, "my journal (%u) is bad: %d\n",
793 sdp->sd_jdesc->jd_jid, error);
794 goto fail_jinode_gh;
795 }
796 atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
797 atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
798 atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
799
800
801 gfs2_map_journal_extents(sdp, sdp->sd_jdesc);
802 }
803 trace_gfs2_log_blocks(sdp, atomic_read(&sdp->sd_log_blks_free));
804
805
806 error = init_statfs(sdp);
807 if (error)
808 goto fail_jinode_gh;
809
810 if (sdp->sd_lockstruct.ls_first) {
811 unsigned int x;
812 for (x = 0; x < sdp->sd_journals; x++) {
813 struct gfs2_jdesc *jd = gfs2_jdesc_find(sdp, x);
814
815 if (sdp->sd_args.ar_spectator) {
816 error = check_journal_clean(sdp, jd, true);
817 if (error)
818 goto fail_statfs;
819 continue;
820 }
821 error = gfs2_recover_journal(jd, true);
822 if (error) {
823 fs_err(sdp, "error recovering journal %u: %d\n",
824 x, error);
825 goto fail_statfs;
826 }
827 }
828
829 gfs2_others_may_mount(sdp);
830 } else if (!sdp->sd_args.ar_spectator) {
831 error = gfs2_recover_journal(sdp->sd_jdesc, true);
832 if (error) {
833 fs_err(sdp, "error recovering my journal: %d\n", error);
834 goto fail_statfs;
835 }
836 }
837
838 sdp->sd_log_idle = 1;
839 set_bit(SDF_JOURNAL_CHECKED, &sdp->sd_flags);
840 gfs2_glock_dq_uninit(&ji_gh);
841 jindex = 0;
842 INIT_WORK(&sdp->sd_freeze_work, gfs2_freeze_func);
843 return 0;
844
845fail_statfs:
846 uninit_statfs(sdp);
847fail_jinode_gh:
848
849 if (!sdp->sd_args.ar_spectator &&
850 gfs2_holder_initialized(&sdp->sd_jinode_gh))
851 gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
852fail_journal_gh:
853 if (!sdp->sd_args.ar_spectator &&
854 gfs2_holder_initialized(&sdp->sd_journal_gh))
855 gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
856fail_jindex:
857 gfs2_jindex_free(sdp);
858 if (jindex)
859 gfs2_glock_dq_uninit(&ji_gh);
860fail:
861 iput(sdp->sd_jindex);
862 return error;
863}
864
/* Separate lockdep class for the quota inode's i_rwsem: it is taken in
   contexts where lockdep would otherwise see a false inversion. */
static struct lock_class_key gfs2_quota_imutex_key;

/**
 * init_inodes - set up the journal, rindex and quota inodes, or undo it
 * @sdp: the filesystem
 * @undo: if true, jump straight to the teardown ladder
 *
 * Returns: errno
 */
static int init_inodes(struct gfs2_sbd *sdp, int undo)
{
	int error = 0;
	struct inode *master = d_inode(sdp->sd_master_dir);

	if (undo)
		goto fail_qinode;

	error = init_journal(sdp, undo);
	/* wake anyone waiting for journal setup, even on failure */
	complete_all(&sdp->sd_journal_ready);
	if (error)
		goto fail;

	/* Read in the resource index inode */
	sdp->sd_rindex = gfs2_lookup_simple(master, "rindex");
	if (IS_ERR(sdp->sd_rindex)) {
		error = PTR_ERR(sdp->sd_rindex);
		fs_err(sdp, "can't get resource index inode: %d\n", error);
		goto fail_journal;
	}
	sdp->sd_rindex_uptodate = 0;

	/* Read in the quota inode */
	sdp->sd_quota_inode = gfs2_lookup_simple(master, "quota");
	if (IS_ERR(sdp->sd_quota_inode)) {
		error = PTR_ERR(sdp->sd_quota_inode);
		fs_err(sdp, "can't get quota file inode: %d\n", error);
		goto fail_rindex;
	}

	/*
	 * i_rwsem on quota files is special. Since this inode is hidden system
	 * file, we are safe to define locking ourselves.
	 */
	lockdep_set_class(&sdp->sd_quota_inode->i_rwsem,
			  &gfs2_quota_imutex_key);

	error = gfs2_rindex_update(sdp);
	if (error)
		goto fail_qinode;

	return 0;

fail_qinode:
	iput(sdp->sd_quota_inode);
fail_rindex:
	gfs2_clear_rgrpd(sdp);
	iput(sdp->sd_rindex);
fail_journal:
	init_journal(sdp, UNDO);
fail:
	return error;
}
919
/**
 * init_per_node - set up this node's quota_change inode, or undo it
 * @sdp: the filesystem
 * @undo: if true, jump straight to the teardown ladder
 *
 * Spectator mounts never write, so they skip the per-node files
 * entirely.  The "qc" inode is looked up under per_node and locked
 * exclusively for the life of the mount.
 *
 * Returns: errno
 */
static int init_per_node(struct gfs2_sbd *sdp, int undo)
{
	struct inode *pn = NULL;
	char buf[30];
	int error = 0;
	struct gfs2_inode *ip;
	struct inode *master = d_inode(sdp->sd_master_dir);

	if (sdp->sd_args.ar_spectator)
		return 0;

	if (undo)
		goto fail_qc_gh;

	pn = gfs2_lookup_simple(master, "per_node");
	if (IS_ERR(pn)) {
		error = PTR_ERR(pn);
		fs_err(sdp, "can't find per_node directory: %d\n", error);
		return error;
	}

	sprintf(buf, "quota_change%u", sdp->sd_jdesc->jd_jid);
	sdp->sd_qc_inode = gfs2_lookup_simple(pn, buf);
	if (IS_ERR(sdp->sd_qc_inode)) {
		error = PTR_ERR(sdp->sd_qc_inode);
		fs_err(sdp, "can't find local \"qc\" file: %d\n", error);
		goto fail_ut_i;
	}

	iput(pn);
	pn = NULL;

	ip = GFS2_I(sdp->sd_qc_inode);
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
				   &sdp->sd_qc_gh);
	if (error) {
		fs_err(sdp, "can't lock local \"qc\" file: %d\n", error);
		goto fail_qc_i;
	}

	return 0;

	/* undo entry point: release in reverse order of acquisition */
fail_qc_gh:
	gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
fail_qc_i:
	iput(sdp->sd_qc_inode);
fail_ut_i:
	iput(pn);
	return error;
}
970
/* Hostdata tokens understood when mounting with lock_nolock; only jid=
   is meaningful without a cluster lock manager. */
static const match_table_t nolock_tokens = {
	{ Opt_jid, "jid=%d", },
	{ Opt_err, NULL },
};

/* Minimal lock operations for single-node (lock_nolock) mounts: locks
   are granted locally, and releasing one just frees the glock. */
static const struct lm_lockops nolock_ops = {
	.lm_proto_name = "lock_nolock",
	.lm_put_lock = gfs2_glock_free,
	.lm_tokens = &nolock_tokens,
};
981
982
983
984
985
986
987
988
989
/**
 * gfs2_lm_mount - mount a locking protocol
 * @sdp: the filesystem
 * @silent: if 1, don't complain if the FS isn't a GFS2 fs
 *
 * Selects the lock module from the protocol name, parses the
 * colon-separated "hostdata" mount option, then calls the module's
 * mount routine (if it has one) and completes sd_locking_init.
 *
 * Returns: errno
 */
static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
{
	const struct lm_lockops *lm;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	struct gfs2_args *args = &sdp->sd_args;
	const char *proto = sdp->sd_proto_name;
	const char *table = sdp->sd_table_name;
	char *o, *options;
	int ret;

	if (!strcmp("lock_nolock", proto)) {
		lm = &nolock_ops;
		sdp->sd_args.ar_localflocks = 1;
#ifdef CONFIG_GFS2_FS_LOCKING_DLM
	} else if (!strcmp("lock_dlm", proto)) {
		lm = &gfs2_dlm_ops;
#endif
	} else {
		pr_info("can't find protocol %s\n", proto);
		return -ENOENT;
	}

	fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table);

	ls->ls_ops = lm;
	ls->ls_first = 1;

	for (options = args->ar_hostdata; (o = strsep(&options, ":")); ) {
		substring_t tmp[MAX_OPT_ARGS];
		int token, option;

		if (!o || !*o)
			continue;

		token = match_token(o, *lm->lm_tokens, tmp);
		switch (token) {
		case Opt_jid:
			ret = match_int(&tmp[0], &option);
			if (ret || option < 0)
				goto hostdata_error;
			/* only accept a jid if none has been set yet */
			if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags))
				ls->ls_jid = option;
			break;
		case Opt_id:
		case Opt_nodir:
			/* obsolete options: accepted but ignored */
			break;
		case Opt_first:
			ret = match_int(&tmp[0], &option);
			if (ret || (option != 0 && option != 1))
				goto hostdata_error;
			ls->ls_first = option;
			break;
		case Opt_err:
		default:
hostdata_error:
			fs_info(sdp, "unknown hostdata (%s)\n", o);
			return -EINVAL;
		}
	}

	if (lm->lm_mount == NULL) {
		fs_info(sdp, "Now mounting FS (format %u)...\n", sdp->sd_sb.sb_fs_format);
		complete_all(&sdp->sd_locking_init);
		return 0;
	}
	ret = lm->lm_mount(sdp, table);
	if (ret == 0)
		fs_info(sdp, "Joined cluster. Now mounting FS (format %u)...\n",
		        sdp->sd_sb.sb_fs_format);
	complete_all(&sdp->sd_locking_init);
	return ret;
}
1063
1064void gfs2_lm_unmount(struct gfs2_sbd *sdp)
1065{
1066 const struct lm_lockops *lm = sdp->sd_lockstruct.ls_ops;
1067 if (likely(!gfs2_withdrawn(sdp)) && lm->lm_unmount)
1068 lm->lm_unmount(sdp);
1069}
1070
1071static int wait_on_journal(struct gfs2_sbd *sdp)
1072{
1073 if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
1074 return 0;
1075
1076 return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, TASK_INTERRUPTIBLE)
1077 ? -EINTR : 0;
1078}
1079
1080void gfs2_online_uevent(struct gfs2_sbd *sdp)
1081{
1082 struct super_block *sb = sdp->sd_vfs;
1083 char ro[20];
1084 char spectator[20];
1085 char *envp[] = { ro, spectator, NULL };
1086 sprintf(ro, "RDONLY=%d", sb_rdonly(sb));
1087 sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);
1088 kobject_uevent_env(&sdp->sd_kobj, KOBJ_ONLINE, envp);
1089}
1090
1091
1092
1093
1094
1095
1096
1097
/**
 * gfs2_fill_super - Read in superblock
 * @sb: The VFS superblock
 * @fc: Mount arguments and flags
 *
 * Drives the whole mount: allocates the in-core superblock, reads the
 * on-disk one, joins the cluster, takes the mount-wide glocks, sets up
 * journals/inodes/per-node files and finally flips the fs read-write.
 * Each fail_* label unwinds one stage, in reverse order.
 *
 * Returns: errno
 */
static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct gfs2_args *args = fc->fs_private;
	int silent = fc->sb_flags & SB_SILENT;
	struct gfs2_sbd *sdp;
	struct gfs2_holder mount_gh;
	struct gfs2_holder freeze_gh;
	int error;

	sdp = init_sbd(sb);
	if (!sdp) {
		pr_warn("can't alloc struct gfs2_sbd\n");
		return -ENOMEM;
	}
	sdp->sd_args = *args;

	if (sdp->sd_args.ar_spectator) {
		sb->s_flags |= SB_RDONLY;
		set_bit(SDF_RORECOVERY, &sdp->sd_flags);
	}
	if (sdp->sd_args.ar_posix_acl)
		sb->s_flags |= SB_POSIXACL;
	if (sdp->sd_args.ar_nobarrier)
		set_bit(SDF_NOBARRIERS, &sdp->sd_flags);

	sb->s_flags |= SB_NOSEC;
	sb->s_magic = GFS2_MAGIC;
	sb->s_op = &gfs2_super_ops;
	sb->s_d_op = &gfs2_dops;
	sb->s_export_op = &gfs2_export_ops;
	sb->s_qcop = &gfs2_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
	sb->s_time_gran = 1;
	sb->s_maxbytes = MAX_LFS_FILESIZE;

	/* Set up the buffer cache and fill in some fake block size values
	   to allow us to read-in the on-disk superblock. */
	sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, GFS2_BASIC_BLOCK);
	sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits;
	sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
			       GFS2_BASIC_BLOCK_SHIFT;
	sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);

	sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit;
	sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
	if (sdp->sd_args.ar_statfs_quantum) {
		sdp->sd_tune.gt_statfs_slow = 0;
		sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum;
	} else {
		sdp->sd_tune.gt_statfs_slow = 1;
		sdp->sd_tune.gt_statfs_quantum = 30;
	}

	error = init_names(sdp, silent);
	if (error)
		goto fail_free;

	snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s", sdp->sd_table_name);

	error = gfs2_sys_fs_add(sdp);
	if (error)
		goto fail_free;

	gfs2_create_debugfs_file(sdp);

	error = gfs2_lm_mount(sdp, silent);
	if (error)
		goto fail_debug;

	error = init_locking(sdp, &mount_gh, DO);
	if (error)
		goto fail_lm;

	error = init_sb(sdp, silent);
	if (error)
		goto fail_locking;

	/* Turn rgrplvb on by default if fs format is recent enough */
	if (!sdp->sd_args.ar_got_rgrplvb && sdp->sd_sb.sb_fs_format > 1801)
		sdp->sd_args.ar_rgrplvb = 1;

	error = wait_on_journal(sdp);
	if (error)
		goto fail_sb;

	/*
	 * If user space has failed to join the cluster or some similar
	 * failure has occurred, then the journal id will contain a
	 * negative (error) number. This will then be returned to the
	 * caller (of the mount syscall). We do this even for spectator
	 * mounts (which just write a jid of 0 to indicate "ok" even though
	 * the jid is unused in the spectator case)
	 */
	if (sdp->sd_lockstruct.ls_jid < 0) {
		error = sdp->sd_lockstruct.ls_jid;
		sdp->sd_lockstruct.ls_jid = 0;
		goto fail_sb;
	}

	if (sdp->sd_args.ar_spectator)
		snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s.s",
			 sdp->sd_table_name);
	else
		snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s.%u",
			 sdp->sd_table_name, sdp->sd_lockstruct.ls_jid);

	error = init_inodes(sdp, DO);
	if (error)
		goto fail_sb;

	error = init_per_node(sdp, DO);
	if (error)
		goto fail_inodes;

	error = gfs2_statfs_init(sdp);
	if (error) {
		fs_err(sdp, "can't initialize statfs subsystem: %d\n", error);
		goto fail_per_node;
	}

	/* Hold the freeze lock across the transition to read-write */
	error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
	if (error)
		goto fail_per_node;

	if (!sb_rdonly(sb))
		error = gfs2_make_fs_rw(sdp);

	gfs2_freeze_unlock(&freeze_gh);
	if (error) {
		fs_err(sdp, "can't make FS RW: %d\n", error);
		goto fail_per_node;
	}
	gfs2_glock_dq_uninit(&mount_gh);
	gfs2_online_uevent(sdp);
	return 0;

fail_per_node:
	init_per_node(sdp, UNDO);
fail_inodes:
	init_inodes(sdp, UNDO);
fail_sb:
	if (sdp->sd_root_dir)
		dput(sdp->sd_root_dir);
	if (sdp->sd_master_dir)
		dput(sdp->sd_master_dir);
	if (sb->s_root)
		dput(sb->s_root);
	sb->s_root = NULL;
fail_locking:
	init_locking(sdp, &mount_gh, UNDO);
fail_lm:
	complete_all(&sdp->sd_journal_ready);
	gfs2_gl_hash_clear(sdp);
	gfs2_lm_unmount(sdp);
fail_debug:
	gfs2_delete_debugfs_file(sdp);
	gfs2_sys_fs_del(sdp);
fail_free:
	free_sbd(sdp);
	sb->s_fs_info = NULL;
	return error;
}
1261
1262
1263
1264
1265
1266
1267
1268static int gfs2_get_tree(struct fs_context *fc)
1269{
1270 struct gfs2_args *args = fc->fs_private;
1271 struct gfs2_sbd *sdp;
1272 int error;
1273
1274 error = get_tree_bdev(fc, gfs2_fill_super);
1275 if (error)
1276 return error;
1277
1278 sdp = fc->root->d_sb->s_fs_info;
1279 dput(fc->root);
1280 if (args->ar_meta)
1281 fc->root = dget(sdp->sd_master_dir);
1282 else
1283 fc->root = dget(sdp->sd_root_dir);
1284 return 0;
1285}
1286
1287static void gfs2_fc_free(struct fs_context *fc)
1288{
1289 struct gfs2_args *args = fc->fs_private;
1290
1291 kfree(args);
1292}
1293
/* Tokens returned by fs_parse() for each recognised mount option.
 * Several of them (ignore_local_fs, localcaching, upgrade) are accepted
 * only for backwards compatibility and are no-ops in gfs2_parse_param(). */
enum gfs2_param {
	Opt_lockproto,
	Opt_locktable,
	Opt_hostdata,
	Opt_spectator,
	Opt_ignore_local_fs,
	Opt_localflocks,
	Opt_localcaching,
	Opt_debug,
	Opt_upgrade,
	Opt_acl,
	Opt_quota,
	Opt_quota_flag,
	Opt_suiddir,
	Opt_data,
	Opt_meta,
	Opt_discard,
	Opt_commit,
	Opt_errors,
	Opt_statfs_quantum,
	Opt_statfs_percent,
	Opt_quota_quantum,
	Opt_barrier,
	Opt_rgrplvb,
	Opt_loccookie,
};
1320
/* Values accepted by "quota=" (used with fsparam_enum below). */
static const struct constant_table gfs2_param_quota[] = {
	{"off",        GFS2_QUOTA_OFF},
	{"account",    GFS2_QUOTA_ACCOUNT},
	{"on",         GFS2_QUOTA_ON},
	{}
};
1327
/* "data=" option values, aliased onto the on-disk GFS2_DATA_* modes. */
enum opt_data {
	Opt_data_writeback = GFS2_DATA_WRITEBACK,
	Opt_data_ordered   = GFS2_DATA_ORDERED,
};
1332
/* Values accepted by "data=". */
static const struct constant_table gfs2_param_data[] = {
	{"writeback",  Opt_data_writeback },
	{"ordered",    Opt_data_ordered },
	{}
};
1338
/* "errors=" option values, aliased onto the GFS2_ERRORS_* policies. */
enum opt_errors {
	Opt_errors_withdraw = GFS2_ERRORS_WITHDRAW,
	Opt_errors_panic    = GFS2_ERRORS_PANIC,
};
1343
/* Values accepted by "errors=". */
static const struct constant_table gfs2_param_errors[] = {
	{"withdraw",   Opt_errors_withdraw },
	{"panic",      Opt_errors_panic },
	{}
};
1349
/* Mount parameter table handed to fs_parse().  Note that "norecovery"
 * is an alias for "spectator", and that "quota" appears twice: the
 * flag form ("quota"/"noquota") and the enum form ("quota=off|account|on")
 * are tried in table order, so the flag entry must come first. */
static const struct fs_parameter_spec gfs2_fs_parameters[] = {
	fsparam_string ("lockproto",          Opt_lockproto),
	fsparam_string ("locktable",          Opt_locktable),
	fsparam_string ("hostdata",           Opt_hostdata),
	fsparam_flag   ("spectator",          Opt_spectator),
	fsparam_flag   ("norecovery",         Opt_spectator),
	fsparam_flag   ("ignore_local_fs",    Opt_ignore_local_fs),
	fsparam_flag   ("localflocks",        Opt_localflocks),
	fsparam_flag   ("localcaching",       Opt_localcaching),
	fsparam_flag_no("debug",              Opt_debug),
	fsparam_flag   ("upgrade",            Opt_upgrade),
	fsparam_flag_no("acl",                Opt_acl),
	fsparam_flag_no("suiddir",            Opt_suiddir),
	fsparam_enum   ("data",               Opt_data, gfs2_param_data),
	fsparam_flag   ("meta",               Opt_meta),
	fsparam_flag_no("discard",            Opt_discard),
	fsparam_s32    ("commit",             Opt_commit),
	fsparam_enum   ("errors",             Opt_errors, gfs2_param_errors),
	fsparam_s32    ("statfs_quantum",     Opt_statfs_quantum),
	fsparam_s32    ("statfs_percent",     Opt_statfs_percent),
	fsparam_s32    ("quota_quantum",      Opt_quota_quantum),
	fsparam_flag_no("barrier",            Opt_barrier),
	fsparam_flag_no("rgrplvb",            Opt_rgrplvb),
	fsparam_flag_no("loccookie",          Opt_loccookie),
	/* quota can be a flag or an enum so it gets special treatment */
	fsparam_flag_no("quota",	      Opt_quota_flag),
	fsparam_enum("quota",		      Opt_quota, gfs2_param_quota),
	{}
};
1379
1380
1381static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
1382{
1383 struct gfs2_args *args = fc->fs_private;
1384 struct fs_parse_result result;
1385 int o;
1386
1387 o = fs_parse(fc, gfs2_fs_parameters, param, &result);
1388 if (o < 0)
1389 return o;
1390
1391 switch (o) {
1392 case Opt_lockproto:
1393 strlcpy(args->ar_lockproto, param->string, GFS2_LOCKNAME_LEN);
1394 break;
1395 case Opt_locktable:
1396 strlcpy(args->ar_locktable, param->string, GFS2_LOCKNAME_LEN);
1397 break;
1398 case Opt_hostdata:
1399 strlcpy(args->ar_hostdata, param->string, GFS2_LOCKNAME_LEN);
1400 break;
1401 case Opt_spectator:
1402 args->ar_spectator = 1;
1403 break;
1404 case Opt_ignore_local_fs:
1405
1406 break;
1407 case Opt_localflocks:
1408 args->ar_localflocks = 1;
1409 break;
1410 case Opt_localcaching:
1411
1412 break;
1413 case Opt_debug:
1414 if (result.boolean && args->ar_errors == GFS2_ERRORS_PANIC)
1415 return invalfc(fc, "-o debug and -o errors=panic are mutually exclusive");
1416 args->ar_debug = result.boolean;
1417 break;
1418 case Opt_upgrade:
1419
1420 break;
1421 case Opt_acl:
1422 args->ar_posix_acl = result.boolean;
1423 break;
1424 case Opt_quota_flag:
1425 args->ar_quota = result.negated ? GFS2_QUOTA_OFF : GFS2_QUOTA_ON;
1426 break;
1427 case Opt_quota:
1428 args->ar_quota = result.int_32;
1429 break;
1430 case Opt_suiddir:
1431 args->ar_suiddir = result.boolean;
1432 break;
1433 case Opt_data:
1434
1435 args->ar_data = result.uint_32;
1436 break;
1437 case Opt_meta:
1438 args->ar_meta = 1;
1439 break;
1440 case Opt_discard:
1441 args->ar_discard = result.boolean;
1442 break;
1443 case Opt_commit:
1444 if (result.int_32 <= 0)
1445 return invalfc(fc, "commit mount option requires a positive numeric argument");
1446 args->ar_commit = result.int_32;
1447 break;
1448 case Opt_statfs_quantum:
1449 if (result.int_32 < 0)
1450 return invalfc(fc, "statfs_quantum mount option requires a non-negative numeric argument");
1451 args->ar_statfs_quantum = result.int_32;
1452 break;
1453 case Opt_quota_quantum:
1454 if (result.int_32 <= 0)
1455 return invalfc(fc, "quota_quantum mount option requires a positive numeric argument");
1456 args->ar_quota_quantum = result.int_32;
1457 break;
1458 case Opt_statfs_percent:
1459 if (result.int_32 < 0 || result.int_32 > 100)
1460 return invalfc(fc, "statfs_percent mount option requires a numeric argument between 0 and 100");
1461 args->ar_statfs_percent = result.int_32;
1462 break;
1463 case Opt_errors:
1464 if (args->ar_debug && result.uint_32 == GFS2_ERRORS_PANIC)
1465 return invalfc(fc, "-o debug and -o errors=panic are mutually exclusive");
1466 args->ar_errors = result.uint_32;
1467 break;
1468 case Opt_barrier:
1469 args->ar_nobarrier = result.boolean;
1470 break;
1471 case Opt_rgrplvb:
1472 args->ar_rgrplvb = result.boolean;
1473 args->ar_got_rgrplvb = 1;
1474 break;
1475 case Opt_loccookie:
1476 args->ar_loccookie = result.boolean;
1477 break;
1478 default:
1479 return invalfc(fc, "invalid mount option: %s", param->key);
1480 }
1481 return 0;
1482}
1483
1484static int gfs2_reconfigure(struct fs_context *fc)
1485{
1486 struct super_block *sb = fc->root->d_sb;
1487 struct gfs2_sbd *sdp = sb->s_fs_info;
1488 struct gfs2_args *oldargs = &sdp->sd_args;
1489 struct gfs2_args *newargs = fc->fs_private;
1490 struct gfs2_tune *gt = &sdp->sd_tune;
1491 int error = 0;
1492
1493 sync_filesystem(sb);
1494
1495 spin_lock(>->gt_spin);
1496 oldargs->ar_commit = gt->gt_logd_secs;
1497 oldargs->ar_quota_quantum = gt->gt_quota_quantum;
1498 if (gt->gt_statfs_slow)
1499 oldargs->ar_statfs_quantum = 0;
1500 else
1501 oldargs->ar_statfs_quantum = gt->gt_statfs_quantum;
1502 spin_unlock(>->gt_spin);
1503
1504 if (strcmp(newargs->ar_lockproto, oldargs->ar_lockproto)) {
1505 errorfc(fc, "reconfiguration of locking protocol not allowed");
1506 return -EINVAL;
1507 }
1508 if (strcmp(newargs->ar_locktable, oldargs->ar_locktable)) {
1509 errorfc(fc, "reconfiguration of lock table not allowed");
1510 return -EINVAL;
1511 }
1512 if (strcmp(newargs->ar_hostdata, oldargs->ar_hostdata)) {
1513 errorfc(fc, "reconfiguration of host data not allowed");
1514 return -EINVAL;
1515 }
1516 if (newargs->ar_spectator != oldargs->ar_spectator) {
1517 errorfc(fc, "reconfiguration of spectator mode not allowed");
1518 return -EINVAL;
1519 }
1520 if (newargs->ar_localflocks != oldargs->ar_localflocks) {
1521 errorfc(fc, "reconfiguration of localflocks not allowed");
1522 return -EINVAL;
1523 }
1524 if (newargs->ar_meta != oldargs->ar_meta) {
1525 errorfc(fc, "switching between gfs2 and gfs2meta not allowed");
1526 return -EINVAL;
1527 }
1528 if (oldargs->ar_spectator)
1529 fc->sb_flags |= SB_RDONLY;
1530
1531 if ((sb->s_flags ^ fc->sb_flags) & SB_RDONLY) {
1532 struct gfs2_holder freeze_gh;
1533
1534 error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
1535 if (error)
1536 return -EINVAL;
1537
1538 if (fc->sb_flags & SB_RDONLY) {
1539 gfs2_make_fs_ro(sdp);
1540 } else {
1541 error = gfs2_make_fs_rw(sdp);
1542 if (error)
1543 errorfc(fc, "unable to remount read-write");
1544 }
1545 gfs2_freeze_unlock(&freeze_gh);
1546 }
1547 sdp->sd_args = *newargs;
1548
1549 if (sdp->sd_args.ar_posix_acl)
1550 sb->s_flags |= SB_POSIXACL;
1551 else
1552 sb->s_flags &= ~SB_POSIXACL;
1553 if (sdp->sd_args.ar_nobarrier)
1554 set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1555 else
1556 clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1557 spin_lock(>->gt_spin);
1558 gt->gt_logd_secs = newargs->ar_commit;
1559 gt->gt_quota_quantum = newargs->ar_quota_quantum;
1560 if (newargs->ar_statfs_quantum) {
1561 gt->gt_statfs_slow = 0;
1562 gt->gt_statfs_quantum = newargs->ar_statfs_quantum;
1563 }
1564 else {
1565 gt->gt_statfs_slow = 1;
1566 gt->gt_statfs_quantum = 30;
1567 }
1568 spin_unlock(>->gt_spin);
1569
1570 gfs2_online_uevent(sdp);
1571 return error;
1572}
1573
/* fs_context operations for regular "gfs2" mounts. */
static const struct fs_context_operations gfs2_context_ops = {
	.free        = gfs2_fc_free,
	.parse_param = gfs2_parse_param,
	.get_tree    = gfs2_get_tree,
	.reconfigure = gfs2_reconfigure,
};
1580
1581
1582static int gfs2_init_fs_context(struct fs_context *fc)
1583{
1584 struct gfs2_args *args;
1585
1586 args = kmalloc(sizeof(*args), GFP_KERNEL);
1587 if (args == NULL)
1588 return -ENOMEM;
1589
1590 if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
1591 struct gfs2_sbd *sdp = fc->root->d_sb->s_fs_info;
1592
1593 *args = sdp->sd_args;
1594 } else {
1595 memset(args, 0, sizeof(*args));
1596 args->ar_quota = GFS2_QUOTA_DEFAULT;
1597 args->ar_data = GFS2_DATA_DEFAULT;
1598 args->ar_commit = 30;
1599 args->ar_statfs_quantum = 30;
1600 args->ar_quota_quantum = 60;
1601 args->ar_errors = GFS2_ERRORS_DEFAULT;
1602 }
1603 fc->fs_private = args;
1604 fc->ops = &gfs2_context_ops;
1605 return 0;
1606}
1607
/* sget_fc() "set" callback for gfs2meta: a meta mount must always
 * piggyback an existing gfs2 superblock, so creating a new one is
 * deliberately refused. */
static int set_meta_super(struct super_block *s, struct fs_context *fc)
{
	return -EINVAL;
}
1612
1613static int test_meta_super(struct super_block *s, struct fs_context *fc)
1614{
1615 return (fc->sget_key == s->s_bdev);
1616}
1617
/**
 * gfs2_meta_get_tree - Get the root of the gfs2 meta filesystem
 * @fc: The filesystem context
 *
 * A gfs2meta mount never creates its own superblock.  The source path
 * is resolved to its block device, an already-mounted gfs2 superblock
 * on that device is looked up via sget_fc(), and that mount's meta
 * root (sd_master_dir) becomes the root of this mount.
 *
 * Returns: 0 on success or a negative errno on failure.
 */
static int gfs2_meta_get_tree(struct fs_context *fc)
{
	struct super_block *s;
	struct gfs2_sbd *sdp;
	struct path path;
	int error;

	if (!fc->source || !*fc->source)
		return -EINVAL;

	error = kern_path(fc->source, LOOKUP_FOLLOW, &path);
	if (error) {
		pr_warn("path_lookup on %s returned error %d\n",
			fc->source, error);
		return error;
	}
	fc->fs_type = &gfs2_fs_type;
	/* Identify the existing superblock by its block device. */
	fc->sget_key = path.dentry->d_sb->s_bdev;
	s = sget_fc(fc, test_meta_super, set_meta_super);
	path_put(&path);
	if (IS_ERR(s)) {
		/* set_meta_super() refuses to create a new super, so an
		 * error here means no gfs2 mount exists on that device. */
		pr_warn("gfs2 mount does not exist\n");
		return PTR_ERR(s);
	}
	if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
		/* ro/rw mismatch with the existing mount; drop the
		 * reference sget_fc() gave us and bail out. */
		deactivate_locked_super(s);
		return -EBUSY;
	}
	sdp = s->s_fs_info;
	fc->root = dget(sdp->sd_master_dir);
	return 0;
}
1650
/* fs_context operations for "gfs2meta" mounts; no parse_param or
 * reconfigure — the meta mount reuses an existing gfs2 super. */
static const struct fs_context_operations gfs2_meta_context_ops = {
	.free        = gfs2_fc_free,
	.get_tree    = gfs2_meta_get_tree,
};
1655
1656static int gfs2_meta_init_fs_context(struct fs_context *fc)
1657{
1658 int ret = gfs2_init_fs_context(fc);
1659
1660 if (ret)
1661 return ret;
1662
1663 fc->ops = &gfs2_meta_context_ops;
1664 return 0;
1665}
1666
/**
 * gfs2_kill_sb - Kill a gfs2 superblock at unmount
 * @sb: The superblock being torn down
 *
 * Flushes the log, drops the extra root/master dentry references so the
 * dcache can be emptied, and hands off to the generic block-super
 * teardown.  The ordering here matters: the dputs and
 * shrink_dcache_sb() must precede kill_block_super().
 */
static void gfs2_kill_sb(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	/* Mount failed before s_fs_info was set up: nothing of ours to
	 * tear down, just do the generic cleanup. */
	if (sdp == NULL) {
		kill_block_super(sb);
		return;
	}

	/* Synchronously flush the journal before tearing anything down. */
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SYNC | GFS2_LFC_KILL_SB);
	/* Drop the long-held references to the root and master dentries
	 * (presumably taken at mount time — see gfs2_get_tree) and clear
	 * the pointers so nothing uses them after this point. */
	dput(sdp->sd_root_dir);
	dput(sdp->sd_master_dir);
	sdp->sd_root_dir = NULL;
	sdp->sd_master_dir = NULL;
	/* Empty the dcache for this super before the generic teardown. */
	shrink_dcache_sb(sb);
	kill_block_super(sb);
}
1684
/* The regular gfs2 filesystem type; requires a block device. */
struct file_system_type gfs2_fs_type = {
	.name = "gfs2",
	.fs_flags = FS_REQUIRES_DEV,
	.init_fs_context = gfs2_init_fs_context,
	.parameters = gfs2_fs_parameters,
	.kill_sb = gfs2_kill_sb,
	.owner = THIS_MODULE,
};
MODULE_ALIAS_FS("gfs2");
1694
/* The gfs2meta filesystem type: exposes the metafs of an existing gfs2
 * mount.  No .parameters and no .kill_sb — it borrows the gfs2 super
 * rather than owning one. */
struct file_system_type gfs2meta_fs_type = {
	.name = "gfs2meta",
	.fs_flags = FS_REQUIRES_DEV,
	.init_fs_context = gfs2_meta_init_fs_context,
	.owner = THIS_MODULE,
};
MODULE_ALIAS_FS("gfs2meta");
1702