#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/kernel.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "sys.h"
#include "xattr.h"
#include "lops.h"
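
/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 */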
void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list;
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	sdp->sd_journals = 0;
	spin_unlock(&sdp->sd_jindex_spin);

	while (!list_empty(&list)) {
		jd = list_entry(list.next, struct gfs2_jdesc, jd_list);
		gfs2_free_journal_extents(jd);
		list_del(&jd->jd_list);
		iput(jd->jd_inode);
		kfree(jd);
	}
}

static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
	struct gfs2_jdesc *jd;
	int found = 0;

	list_for_each_entry(jd, head, jd_list) {
		if (jd->jd_jid == jid) {
			found = 1;
			break;
		}
	}

	if (!found)
		jd = NULL;

	return jd;
}

struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	spin_unlock(&sdp->sd_jindex_spin);

	return jd;
}

int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	u64 size = i_size_read(jd->jd_inode);

	if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
		return -EIO;

	jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;

	if (gfs2_write_alloc_required(ip, 0, size)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	return 0;
}

static int init_threads(struct gfs2_sbd *sdp)
{
	struct task_struct *p;
	int error = 0;

	p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
	if (IS_ERR(p)) {
		error = PTR_ERR(p);
		fs_err(sdp, "can't start logd thread: %d\n", error);
		return error;
	}
	sdp->sd_logd_process = p;

	p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
	if (IS_ERR(p)) {
		error = PTR_ERR(p);
		fs_err(sdp, "can't start quotad thread: %d\n", error);
		goto fail;
	}
	sdp->sd_quotad_process = p;
	return 0;

fail:
	kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;
	return error;
}
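
/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */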
int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_holder freeze_gh;
	struct gfs2_log_header_host head;
	int error;

	error = init_threads(sdp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
				   &freeze_gh);
	if (error)
		goto fail_threads;

	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

	error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
	if (error)
		goto fail;

	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		gfs2_consist(sdp);
		error = -EIO;
		goto fail;
	}
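
	/*  Initialize some head of the log stuff  */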
	sdp->sd_log_sequence = head.lh_sequence + 1;
	gfs2_log_pointers_init(sdp, head.lh_blkno);

	error = gfs2_quota_init(sdp);
	if (error)
		goto fail;

	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	gfs2_glock_dq_uninit(&freeze_gh);

	return 0;

fail:
	freeze_gh.gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_uninit(&freeze_gh);
fail_threads:
	if (sdp->sd_quotad_process)
		kthread_stop(sdp->sd_quotad_process);
	sdp->sd_quotad_process = NULL;
	if (sdp->sd_logd_process)
		kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;
	return error;
}

void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
	const struct gfs2_statfs_change *str = buf;

	sc->sc_total = be64_to_cpu(str->sc_total);
	sc->sc_free = be64_to_cpu(str->sc_free);
	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}

static void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
	struct gfs2_statfs_change *str = buf;

	str->sc_total = cpu_to_be64(sc->sc_total);
	str->sc_free = cpu_to_be64(sc->sc_free);
	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}

int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	if (sdp->sd_args.ar_spectator) {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	} else {
		error = gfs2_meta_inode_buffer(l_ip, &l_bh);
		if (error)
			goto out_m_bh;

		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		gfs2_statfs_change_in(l_sc, l_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);

		brelse(l_bh);
	}

out_m_bh:
	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}

void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
			s64 dinodes)
{
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct buffer_head *l_bh;
	s64 x, y;
	int need_sync = 0;
	int error;

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		return;

	gfs2_trans_add_meta(l_ip->i_gl, l_bh);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
	if (sdp->sd_args.ar_statfs_percent) {
		x = 100 * l_sc->sc_free;
		y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
		if (x >= y || x <= -y)
			need_sync = 1;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	brelse(l_bh);
	if (need_sync)
		gfs2_wake_up_statfs(sdp);
}

void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
		   struct buffer_head *l_bh)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	gfs2_trans_add_meta(l_ip->i_gl, l_bh);
	gfs2_trans_add_meta(m_ip->i_gl, m_bh);

	spin_lock(&sdp->sd_statfs_spin);
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
	memset(l_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);
}

int gfs2_statfs_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh, *l_bh;
	int error;

	sb_start_write(sb);
	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		goto out;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out_unlock;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		goto out_bh;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh2;

	update_statfs(sdp, m_bh, l_bh);
	sdp->sd_statfs_force_sync = 0;

	gfs2_trans_end(sdp);

out_bh2:
	brelse(l_bh);
out_bh:
	brelse(m_bh);
out_unlock:
	gfs2_glock_dq_uninit(&gh);
out:
	sb_end_write(sb);
	return error;
}

struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};
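
/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 *
 * Returns: errno
 */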
static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	struct gfs2_jdesc *jd;
	struct lfcc *lfcc;
	LIST_HEAD(list);
	struct gfs2_log_header_host lh;
	int error;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		if (!lfcc) {
			error = -ENOMEM;
			goto out;
		}
		ip = GFS2_I(jd->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
		if (error) {
			kfree(lfcc);
			goto out;
		}
		list_add(&lfcc->list, &list);
	}

	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
				   GL_NOCACHE, &sdp->sd_freeze_gh);
	if (error)
		goto out;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		if (error)
			break;
		error = gfs2_find_jhead(jd, &lh, false);
		if (error)
			break;
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
			error = -EBUSY;
			break;
		}
	}

	if (error)
		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);

out:
	while (!list_empty(&list)) {
		lfcc = list_entry(list.next, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
		kfree(lfcc);
	}
	return error;
}

void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
	str->di_uid = cpu_to_be32(i_uid_read(&ip->i_inode));
	str->di_gid = cpu_to_be32(i_gid_read(&ip->i_inode));
	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
	str->di_size = cpu_to_be64(i_size_read(&ip->i_inode));
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);

	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(ip->i_generation);

	str->di_flags = cpu_to_be32(ip->i_diskflags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(ip->i_entries);

	str->di_eattr = cpu_to_be64(ip->i_eattr);
	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
}
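
/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @wbc: The writeback control structure
 *
 * Returns: errno
 */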
static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
	struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
	int ret = 0;
	bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));

	if (flush_all)
		gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_WRITE_INODE);
	if (bdi->wb.dirty_exceeded)
		gfs2_ail1_flush(sdp, wbc);
	else
		filemap_fdatawrite(metamapping);
	if (flush_all)
		ret = filemap_fdatawait(metamapping);
	if (ret)
		mark_inode_dirty_sync(inode);
	else {
		spin_lock(&inode->i_lock);
		if (!(inode->i_flags & I_DIRTY))
			gfs2_ordered_del_inode(ip);
		spin_unlock(&inode->i_lock);
	}
	return ret;
}
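
/**
 * gfs2_dirty_inode - check for atime updates
 * @inode: The inode in question
 * @flags: The type of dirty
 *
 * Unfortunately it can be called under any combination of inode
 * glock and transaction lock, so we have to check carefully.
 *
 * At the moment this deals only with atime - it should be possible
 * to expand that role in future, once a review of the locking has
 * been carried out.
 */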
static void gfs2_dirty_inode(struct inode *inode, int flags)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int need_unlock = 0;
	int need_endtrans = 0;
	int ret;

	if (!(flags & I_DIRTY_INODE))
		return;
	if (unlikely(gfs2_withdrawn(sdp)))
		return;
	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret) {
			fs_err(sdp, "dirty_inode: glock %d\n", ret);
			return;
		}
		need_unlock = 1;
	} else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
		return;

	if (current->journal_info == NULL) {
		ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
		if (ret) {
			fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
			goto out;
		}
		need_endtrans = 1;
	}

	ret = gfs2_meta_inode_buffer(ip, &bh);
	if (ret == 0) {
		gfs2_trans_add_meta(ip->i_gl, bh);
		gfs2_dinode_out(ip, bh->b_data);
		brelse(bh);
	}

	if (need_endtrans)
		gfs2_trans_end(sdp);
out:
	if (need_unlock)
		gfs2_glock_dq_uninit(&gh);
}
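
/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 *
 * Returns: errno
 */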
int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	struct gfs2_holder freeze_gh;
	int error;

	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, GL_NOCACHE,
				   &freeze_gh);
	if (error && !gfs2_withdrawn(sdp))
		return error;

	flush_workqueue(gfs2_delete_workqueue);
	if (sdp->sd_quotad_process)
		kthread_stop(sdp->sd_quotad_process);
	sdp->sd_quotad_process = NULL;
	if (sdp->sd_logd_process)
		kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;

	gfs2_quota_sync(sdp->sd_vfs, 0);
	gfs2_statfs_sync(sdp->sd_vfs, 0);

	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
		       GFS2_LFC_MAKE_FS_RO);
	wait_event(sdp->sd_reserving_log_wait,
		   atomic_read(&sdp->sd_reserving_log) == 0);
	gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) ==
			 sdp->sd_jdesc->jd_blocks);

	if (gfs2_holder_initialized(&freeze_gh))
		gfs2_glock_dq_uninit(&freeze_gh);

	gfs2_quota_cleanup(sdp);

	return error;
}
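
/**
 * gfs2_put_super - Unmount the filesystem
 * @sb: The VFS superblock
 */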
static void gfs2_put_super(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;
	struct gfs2_jdesc *jd;
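
	/* No more recovery requests */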
	set_bit(SDF_NORECOVERY, &sdp->sd_flags);
	smp_mb();
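
	/* Wait on outstanding recovery */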
restart:
	spin_lock(&sdp->sd_jindex_spin);
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
			continue;
		spin_unlock(&sdp->sd_jindex_spin);
		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
		goto restart;
	}
	spin_unlock(&sdp->sd_jindex_spin);

	if (!sb_rdonly(sb)) {
		error = gfs2_make_fs_ro(sdp);
		if (error)
			gfs2_io_error(sdp);
	}
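
	/*  At this point, we're through modifying the disk  */

	/*  Release stuff  */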
	iput(sdp->sd_jindex);
	iput(sdp->sd_statfs_inode);
	iput(sdp->sd_rindex);
	iput(sdp->sd_quota_inode);

	gfs2_glock_put(sdp->sd_rename_gl);
	gfs2_glock_put(sdp->sd_freeze_gl);

	if (!sdp->sd_args.ar_spectator) {
		gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
		gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
		iput(sdp->sd_sc_inode);
		iput(sdp->sd_qc_inode);
	}

	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
	gfs2_clear_rgrpd(sdp);
	gfs2_jindex_free(sdp);

	gfs2_gl_hash_clear(sdp);
	gfs2_delete_debugfs_file(sdp);

	gfs2_lm_unmount(sdp);
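
	/*  At this point, we're through participating in the lockspace  */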
	gfs2_sys_fs_del(sdp);
}
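
/**
 * gfs2_sync_fs - sync the filesystem
 * @sb: the superblock
 * @wait: true to wait for completion
 *
 * Flushes the log to disk.
 */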
static int gfs2_sync_fs(struct super_block *sb, int wait)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	gfs2_quota_sync(sb, -1);
	if (wait)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_SYNC_FS);
	return sdp->sd_log_error;
}

void gfs2_freeze_func(struct work_struct *work)
{
	int error;
	struct gfs2_holder freeze_gh;
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd,
					    sd_freeze_work);
	struct super_block *sb = sdp->sd_vfs;

	atomic_inc(&sb->s_active);
	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
				   &freeze_gh);
	if (error) {
		fs_info(sdp, "GFS2: couldn't get freeze lock : %d\n", error);
		gfs2_assert_withdraw(sdp, 0);
	} else {
		atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
		error = thaw_super(sb);
		if (error) {
			fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n",
				error);
			gfs2_assert_withdraw(sdp, 0);
		}
		if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
			freeze_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&freeze_gh);
	}
	deactivate_super(sb);
	clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
	wake_up_bit(&sdp->sd_flags, SDF_FS_FROZEN);
}
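
/**
 * gfs2_freeze - prevent further writes to the filesystem
 * @sb: the VFS structure for the filesystem
 */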
static int gfs2_freeze(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error = 0;

	mutex_lock(&sdp->sd_freeze_mutex);
	if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN)
		goto out;

	for (;;) {
		if (gfs2_withdrawn(sdp)) {
			error = -EINVAL;
			goto out;
		}

		error = gfs2_lock_fs_check_clean(sdp);
		if (!error)
			break;

		if (error == -EBUSY) {
			fs_err(sdp, "waiting for recovery before freeze\n");
		} else if (error == -EIO) {
			fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due "
			       "to recovery error.\n");
			goto out;
		} else {
			fs_err(sdp, "error freezing FS: %d\n", error);
		}
		fs_err(sdp, "retrying...\n");
		msleep(1000);
	}
	set_bit(SDF_FS_FROZEN, &sdp->sd_flags);
out:
	mutex_unlock(&sdp->sd_freeze_mutex);
	return error;
}
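
/**
 * gfs2_unfreeze - reallow writes to the filesystem
 * @sb: the VFS structure for the filesystem
 */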
static int gfs2_unfreeze(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	mutex_lock(&sdp->sd_freeze_mutex);
	if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
	    !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
		mutex_unlock(&sdp->sd_freeze_mutex);
		return 0;
	}

	gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
	mutex_unlock(&sdp->sd_freeze_mutex);
	return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
}
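
/**
 * statfs_slow_fill - fill in the sc structure for a given rgrp
 * @rgd: the resource group
 * @sc: the sc structure
 *
 * Returns: 0 on success
 */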
static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change_host *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_data;
	sc->sc_free += rgd->rd_free;
	sc->sc_dinodes += rgd->rd_dinodes;
	return 0;
}
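
/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */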
static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
	gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;
	for (x = 0; x < slots; x++)
		gfs2_holder_mark_uninitialized(gha + x);

	rgd_next = gfs2_rgrpd_get_first(sdp);

	for (;;) {
		done = 1;

		for (x = 0; x < slots; x++) {
			gh = gha + x;

			if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
				if (err) {
					gfs2_holder_uninit(gh);
					error = err;
				} else {
					if (!error) {
						struct gfs2_rgrpd *rgd =
							gfs2_glock2rgrp(gh->gh_gl);

						error = statfs_slow_fill(rgd, sc);
					}
					gfs2_glock_dq_uninit(gh);
				}
			}

			if (gfs2_holder_initialized(gh))
				done = 0;
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
							   LM_ST_SHARED,
							   GL_ASYNC,
							   gh);
				rgd_next = gfs2_rgrpd_get_next(rgd_next);
				done = 0;
			}

			if (signal_pending(current))
				error = -ERESTARTSYS;
		}

		if (done)
			break;

		yield();
	}

	kfree(gha);
	return error;
}
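
/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the sc structure
 *
 * Returns: errno
 */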
static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);

	*sc = *m_sc;
	sc->sc_total += l_sc->sc_total;
	sc->sc_free += l_sc->sc_free;
	sc->sc_dinodes += l_sc->sc_dinodes;

	spin_unlock(&sdp->sd_statfs_spin);

	if (sc->sc_free < 0)
		sc->sc_free = 0;
	if (sc->sc_free > sc->sc_total)
		sc->sc_free = sc->sc_total;
	if (sc->sc_dinodes < 0)
		sc->sc_dinodes = 0;

	return 0;
}
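
/**
 * gfs2_statfs - Gather and return stats about the filesystem
 * @dentry: The name of the link
 * @buf: The buffer
 *
 * Returns: 0 on success or error code
 */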
static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_statfs_change_host sc;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	if (gfs2_tune_get(sdp, gt_statfs_slow))
		error = gfs2_statfs_slow(sdp, &sc);
	else
		error = gfs2_statfs_i(sdp, &sc);

	if (error)
		return error;

	buf->f_type = GFS2_MAGIC;
	buf->f_bsize = sdp->sd_sb.sb_bsize;
	buf->f_blocks = sc.sc_total;
	buf->f_bfree = sc.sc_free;
	buf->f_bavail = sc.sc_free;
	buf->f_files = sc.sc_dinodes + sc.sc_free;
	buf->f_ffree = sc.sc_free;
	buf->f_namelen = GFS2_FNAMESIZE;

	return 0;
}
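
/**
 * gfs2_drop_inode - Drop an inode (possibly for deletion)
 * @inode: The inode to drop
 *
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this node
 * still having the inode open. Hence if we've got an iopen lock, we
 * should at least give priority to freeing it, even if it turns out
 * not to be deletable after all.
 *
 * Returns: 1 if the inode should be deleted from the cache, 0 otherwise.
 */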
static int gfs2_drop_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (!test_bit(GIF_FREE_VFS_INODE, &ip->i_flags) &&
	    inode->i_nlink &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			clear_nlink(inode);
	}
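
	/*
	 * When under memory pressure when an inode's link count has dropped
	 * to zero, defer deleting the inode to the delete workqueue.  This
	 * avoids calling into DLM under memory pressure, which can deadlock.
	 */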
	if (!inode->i_nlink &&
	    unlikely(current->flags & PF_MEMALLOC) &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		gfs2_glock_hold(gl);
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gfs2_glock_queue_put(gl);
		return false;
	}

	return generic_drop_inode(inode);
}

static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
{
	do {
		if (d1 == d2)
			return 1;
		d1 = d1->d_parent;
	} while (!IS_ROOT(d1));
	return 0;
}
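
/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @root: root of this (sub)tree
 *
 * Returns: 0 on success or error code
 */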
static int gfs2_show_options(struct seq_file *s, struct dentry *root)
{
	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
	struct gfs2_args *args = &sdp->sd_args;
	int val;

	if (is_ancestor(root, sdp->sd_master_dir))
		seq_puts(s, ",meta");
	if (args->ar_lockproto[0])
		seq_show_option(s, "lockproto", args->ar_lockproto);
	if (args->ar_locktable[0])
		seq_show_option(s, "locktable", args->ar_locktable);
	if (args->ar_hostdata[0])
		seq_show_option(s, "hostdata", args->ar_hostdata);
	if (args->ar_spectator)
		seq_puts(s, ",spectator");
	if (args->ar_localflocks)
		seq_puts(s, ",localflocks");
	if (args->ar_debug)
		seq_puts(s, ",debug");
	if (args->ar_posix_acl)
		seq_puts(s, ",acl");
	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
		char *state;
		switch (args->ar_quota) {
		case GFS2_QUOTA_OFF:
			state = "off";
			break;
		case GFS2_QUOTA_ACCOUNT:
			state = "account";
			break;
		case GFS2_QUOTA_ON:
			state = "on";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",quota=%s", state);
	}
	if (args->ar_suiddir)
		seq_puts(s, ",suiddir");
	if (args->ar_data != GFS2_DATA_DEFAULT) {
		char *state;
		switch (args->ar_data) {
		case GFS2_DATA_WRITEBACK:
			state = "writeback";
			break;
		case GFS2_DATA_ORDERED:
			state = "ordered";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",data=%s", state);
	}
	if (args->ar_discard)
		seq_puts(s, ",discard");
	val = sdp->sd_tune.gt_logd_secs;
	if (val != 30)
		seq_printf(s, ",commit=%d", val);
	val = sdp->sd_tune.gt_statfs_quantum;
	if (val != 30)
		seq_printf(s, ",statfs_quantum=%d", val);
	else if (sdp->sd_tune.gt_statfs_slow)
		seq_puts(s, ",statfs_quantum=0");
	val = sdp->sd_tune.gt_quota_quantum;
	if (val != 60)
		seq_printf(s, ",quota_quantum=%d", val);
	if (args->ar_statfs_percent)
		seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
	if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
		const char *state;

		switch (args->ar_errors) {
		case GFS2_ERRORS_WITHDRAW:
			state = "withdraw";
			break;
		case GFS2_ERRORS_PANIC:
			state = "panic";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",errors=%s", state);
	}
	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
		seq_puts(s, ",nobarrier");
	if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
		seq_puts(s, ",demote_interface_used");
	if (args->ar_rgrplvb)
		seq_puts(s, ",rgrplvb");
	if (args->ar_loccookie)
		seq_puts(s, ",loccookie");
	return 0;
}

static void gfs2_final_release_pages(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_glock *gl = ip->i_gl;

	truncate_inode_pages(gfs2_glock2aspace(ip->i_gl), 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (atomic_read(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		clear_bit(GLF_DIRTY, &gl->gl_flags);
	}
}

static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder gh;
	int error;

	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (error)
		return error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_qs;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_qs;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
				 sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_rg_gunlock;

	gfs2_free_di(rgd, ip);

	gfs2_final_release_pages(ip);

	gfs2_trans_end(sdp);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&gh);
out_qs:
	gfs2_quota_unhold(ip);
	return error;
}
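
/**
 * gfs2_glock_put_eventually - Put a glock, deferring if under memory pressure
 * @gl: The glock to put
 *
 * When under memory pressure, trigger a deferred glock put to make sure we
 * won't call into DLM and deadlock.  Otherwise, put the glock directly.
 */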
static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
{
	if (current->flags & PF_MEMALLOC)
		gfs2_glock_queue_put(gl);
	else
		gfs2_glock_put(gl);
}
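
/**
 * gfs2_evict_inode - Remove an inode from cache
 * @inode: The inode to evict
 *
 * There are three cases to consider:
 * 1. i_nlink == 0, we are the final opener (and must deallocate)
 * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
 * 3. i_nlink > 0
 *
 * If the fs is read only, then we have to treat all cases as per #3
 * since we are unable to do any deallocation. The inode will be
 * deallocated by the next read/write node to attempt an allocation
 * in the same resource group.
 *
 * We have to (at the moment) hold the inode's main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */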
static void gfs2_evict_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	struct address_space *metamapping;
	int error;

	if (test_bit(GIF_FREE_VFS_INODE, &ip->i_flags)) {
		clear_inode(inode);
		return;
	}

	if (inode->i_nlink || sb_rdonly(sb))
		goto out;

	if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
		BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
		gfs2_holder_mark_uninitialized(&gh);
		goto alloc_failed;
	}
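
	/* Deletes should never happen under memory pressure anymore.  */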
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
		goto out;
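
	/* Must not read inode block until block type has been verified */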
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
	if (unlikely(error)) {
		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		goto out;
	}

	error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
	if (error)
		goto out_truncate;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			goto out_truncate;
	}
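
	/*
	 * The inode may have been recreated in the meantime.
	 */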
	if (inode->i_nlink)
		goto out_truncate;

alloc_failed:
	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_wait(&ip->i_iopen_gh);
		gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE,
				   &ip->i_iopen_gh);
		error = gfs2_glock_nq(&ip->i_iopen_gh);
		if (error)
			goto out_truncate;
	}

	if (S_ISDIR(inode->i_mode) &&
	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
		error = gfs2_dir_exhash_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	if (ip->i_eattr) {
		error = gfs2_ea_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	if (!gfs2_is_stuffed(ip)) {
		error = gfs2_file_dealloc(ip);
		if (error)
			goto out_unlock;
	}
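
	/* We're about to clear the bitmap for the dinode, but as soon as we
	   do, gfs2_create_inode can create another inode at the same block
	   location and try to set gl_object again. We clear gl_object here so
	   that subsequent inode creates don't see an old gl_object. */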
	glock_clear_object(ip->i_gl, ip);
	error = gfs2_dinode_dealloc(ip);
	goto out_unlock;

out_truncate:
	gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_EVICT_INODE);
	metamapping = gfs2_glock2aspace(ip->i_gl);
	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
		filemap_fdatawrite(metamapping);
		filemap_fdatawait(metamapping);
	}
	write_inode_now(inode, 1);
	gfs2_ail_flush(ip->i_gl, 0);

	error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_unlock;

	truncate_inode_pages(&inode->i_data, 0);
	truncate_inode_pages(metamapping, 0);
	gfs2_trans_end(sdp);

out_unlock:
	if (gfs2_rs_active(&ip->i_res))
		gfs2_rs_deltree(&ip->i_res);

	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
		if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
			ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
			gfs2_glock_dq(&ip->i_iopen_gh);
		}
		gfs2_holder_uninit(&ip->i_iopen_gh);
	}
	if (gfs2_holder_initialized(&gh)) {
		glock_clear_object(ip->i_gl, ip);
		gfs2_glock_dq_uninit(&gh);
	}
	if (error && error != GLR_TRYFAILED && error != -EROFS)
		fs_warn(sdp, "gfs2_evict_inode: %d\n", error);
out:
	truncate_inode_pages_final(&inode->i_data);
	gfs2_rsqa_delete(ip, NULL);
	gfs2_ordered_del_inode(ip);
	clear_inode(inode);
	gfs2_dir_hash_inval(ip);
	glock_clear_object(ip->i_gl, ip);
	wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
	gfs2_glock_add_to_lru(ip->i_gl);
	gfs2_glock_put_eventually(ip->i_gl);
	ip->i_gl = NULL;
	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		glock_clear_object(gl, ip);
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_hold(gl);
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		gfs2_glock_put_eventually(gl);
	}
}

static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
	struct gfs2_inode *ip;

	ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
	if (!ip)
		return NULL;
	ip->i_flags = 0;
	ip->i_gl = NULL;
	memset(&ip->i_res, 0, sizeof(ip->i_res));
	RB_CLEAR_NODE(&ip->i_res.rs_node);
	ip->i_rahead = 0;
	return &ip->i_inode;
}

static void gfs2_free_inode(struct inode *inode)
{
	kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
}

const struct super_operations gfs2_super_ops = {
	.alloc_inode		= gfs2_alloc_inode,
	.free_inode		= gfs2_free_inode,
	.write_inode		= gfs2_write_inode,
	.dirty_inode		= gfs2_dirty_inode,
	.evict_inode		= gfs2_evict_inode,
	.put_super		= gfs2_put_super,
	.sync_fs		= gfs2_sync_fs,
	.freeze_super		= gfs2_freeze,
	.thaw_super		= gfs2_unfreeze,
	.statfs			= gfs2_statfs,
	.drop_inode		= gfs2_drop_inode,
	.show_options		= gfs2_show_options,
};