1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_types.h"
21#include "xfs_bit.h"
22#include "xfs_log.h"
23#include "xfs_inum.h"
24#include "xfs_trans.h"
25#include "xfs_trans_priv.h"
26#include "xfs_sb.h"
27#include "xfs_ag.h"
28#include "xfs_dir2.h"
29#include "xfs_mount.h"
30#include "xfs_bmap_btree.h"
31#include "xfs_alloc_btree.h"
32#include "xfs_ialloc_btree.h"
33#include "xfs_dinode.h"
34#include "xfs_inode.h"
35#include "xfs_btree.h"
36#include "xfs_ialloc.h"
37#include "xfs_alloc.h"
38#include "xfs_rtalloc.h"
39#include "xfs_bmap.h"
40#include "xfs_error.h"
41#include "xfs_quota.h"
42#include "xfs_fsops.h"
43#include "xfs_utils.h"
44#include "xfs_trace.h"
45#include "xfs_icache.h"
46
47
#ifdef HAVE_PERCPU_SB
/* Forward declarations for the per-cpu superblock counter helpers. */
STATIC void	xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
						int);
STATIC void	xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t,
						int);
STATIC void	xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
#else
/* Without per-cpu counters the balance operations compile away to no-ops. */
#define xfs_icsb_balance_counter(mp, a, b)		do { } while (0)
#define xfs_icsb_balance_counter_locked(mp, a, b)	do { } while (0)
#endif
59
/*
 * Table of superblock field byte offsets, indexed by xfs_sb_field_t bit
 * number.  Used by xfs_sb_to_disk() to locate each field: the size of
 * field f is xfs_sb_info[f + 1].offset - xfs_sb_info[f].offset, which is
 * why the table ends with a sizeof(xfs_sb_t) sentinel entry.
 */
static const struct {
	short offset;	/* byte offset of the field within xfs_sb_t */
	short type;	/* 0 = integer (endian-converted on write),
			 * 1 = binary blob (copied verbatim) */
} xfs_sb_info[] = {
	{ offsetof(xfs_sb_t, sb_magicnum),	0 },
	{ offsetof(xfs_sb_t, sb_blocksize),	0 },
	{ offsetof(xfs_sb_t, sb_dblocks),	0 },
	{ offsetof(xfs_sb_t, sb_rblocks),	0 },
	{ offsetof(xfs_sb_t, sb_rextents),	0 },
	{ offsetof(xfs_sb_t, sb_uuid),		1 },
	{ offsetof(xfs_sb_t, sb_logstart),	0 },
	{ offsetof(xfs_sb_t, sb_rootino),	0 },
	{ offsetof(xfs_sb_t, sb_rbmino),	0 },
	{ offsetof(xfs_sb_t, sb_rsumino),	0 },
	{ offsetof(xfs_sb_t, sb_rextsize),	0 },
	{ offsetof(xfs_sb_t, sb_agblocks),	0 },
	{ offsetof(xfs_sb_t, sb_agcount),	0 },
	{ offsetof(xfs_sb_t, sb_rbmblocks),	0 },
	{ offsetof(xfs_sb_t, sb_logblocks),	0 },
	{ offsetof(xfs_sb_t, sb_versionnum),	0 },
	{ offsetof(xfs_sb_t, sb_sectsize),	0 },
	{ offsetof(xfs_sb_t, sb_inodesize),	0 },
	{ offsetof(xfs_sb_t, sb_inopblock),	0 },
	{ offsetof(xfs_sb_t, sb_fname[0]),	1 },
	{ offsetof(xfs_sb_t, sb_blocklog),	0 },
	{ offsetof(xfs_sb_t, sb_sectlog),	0 },
	{ offsetof(xfs_sb_t, sb_inodelog),	0 },
	{ offsetof(xfs_sb_t, sb_inopblog),	0 },
	{ offsetof(xfs_sb_t, sb_agblklog),	0 },
	{ offsetof(xfs_sb_t, sb_rextslog),	0 },
	{ offsetof(xfs_sb_t, sb_inprogress),	0 },
	{ offsetof(xfs_sb_t, sb_imax_pct),	0 },
	{ offsetof(xfs_sb_t, sb_icount),	0 },
	{ offsetof(xfs_sb_t, sb_ifree),		0 },
	{ offsetof(xfs_sb_t, sb_fdblocks),	0 },
	{ offsetof(xfs_sb_t, sb_frextents),	0 },
	{ offsetof(xfs_sb_t, sb_uquotino),	0 },
	{ offsetof(xfs_sb_t, sb_gquotino),	0 },
	{ offsetof(xfs_sb_t, sb_qflags),	0 },
	{ offsetof(xfs_sb_t, sb_flags),		0 },
	{ offsetof(xfs_sb_t, sb_shared_vn),	0 },
	{ offsetof(xfs_sb_t, sb_inoalignmt),	0 },
	{ offsetof(xfs_sb_t, sb_unit),		0 },
	{ offsetof(xfs_sb_t, sb_width),		0 },
	{ offsetof(xfs_sb_t, sb_dirblklog),	0 },
	{ offsetof(xfs_sb_t, sb_logsectlog),	0 },
	{ offsetof(xfs_sb_t, sb_logsectsize),	0 },
	{ offsetof(xfs_sb_t, sb_logsunit),	0 },
	{ offsetof(xfs_sb_t, sb_features2),	0 },
	{ offsetof(xfs_sb_t, sb_bad_features2),	0 },
	{ sizeof(xfs_sb_t),			0 }	/* end-of-table sentinel */
};
114
/*
 * Global table of the UUIDs of all currently mounted XFS filesystems, used
 * by xfs_uuid_mount()/xfs_uuid_unmount() to reject duplicate-UUID mounts.
 * Nil entries are free slots; all access is serialized by the mutex.
 */
static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;
118
119
120
121
122
/*
 * Register the filesystem's UUID in the global table at mount time.
 *
 * Returns 0 on success (or when XFS_MOUNT_NOUUID suppresses the check),
 * EINVAL if the UUID is nil or already present in the table (i.e. another
 * mounted filesystem has the same UUID).  The table grows on demand when
 * no nil (free) slot is available.
 */
STATIC int
xfs_uuid_mount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			hole, i;

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return 0;

	if (uuid_is_nil(uuid)) {
		xfs_warn(mp, "Filesystem has nil UUID - can't mount");
		return XFS_ERROR(EINVAL);
	}

	mutex_lock(&xfs_uuid_table_mutex);
	/* Scan for a duplicate, remembering the last free slot seen. */
	for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
		if (uuid_is_nil(&xfs_uuid_table[i])) {
			hole = i;
			continue;
		}
		if (uuid_equal(uuid, &xfs_uuid_table[i]))
			goto out_duplicate;
	}

	if (hole < 0) {
		/* No free slot: grow the table by one entry. */
		xfs_uuid_table = kmem_realloc(xfs_uuid_table,
			(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
			xfs_uuid_table_size  * sizeof(*xfs_uuid_table),
			KM_SLEEP);
		hole = xfs_uuid_table_size++;
	}
	xfs_uuid_table[hole] = *uuid;
	mutex_unlock(&xfs_uuid_table_mutex);

	return 0;

 out_duplicate:
	mutex_unlock(&xfs_uuid_table_mutex);
	xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
	return XFS_ERROR(EINVAL);
}
165
166STATIC void
167xfs_uuid_unmount(
168 struct xfs_mount *mp)
169{
170 uuid_t *uuid = &mp->m_sb.sb_uuid;
171 int i;
172
173 if (mp->m_flags & XFS_MOUNT_NOUUID)
174 return;
175
176 mutex_lock(&xfs_uuid_table_mutex);
177 for (i = 0; i < xfs_uuid_table_size; i++) {
178 if (uuid_is_nil(&xfs_uuid_table[i]))
179 continue;
180 if (!uuid_equal(uuid, &xfs_uuid_table[i]))
181 continue;
182 memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
183 break;
184 }
185 ASSERT(i < xfs_uuid_table_size);
186 mutex_unlock(&xfs_uuid_table_mutex);
187}
188
189
190
191
192
193
194
/*
 * Look up and reference the per-AG structure for @agno.
 *
 * The lookup runs under rcu_read_lock() so it can race with per-ag
 * teardown; the reference is taken inside the RCU read section, before
 * any RCU grace period can free the structure.  Returns the referenced
 * perag, or NULL if no perag exists for that AG number.  Callers must
 * balance with xfs_perag_put().
 */
struct xfs_perag *
xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
{
	struct xfs_perag	*pag;
	int			ref = 0;

	rcu_read_lock();
	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
	if (pag) {
		ASSERT(atomic_read(&pag->pag_ref) >= 0);
		ref = atomic_inc_return(&pag->pag_ref);
	}
	rcu_read_unlock();
	trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
	return pag;
}
211
212
213
214
/*
 * Find and reference the first per-AG structure at or after @first that
 * has radix-tree tag @tag set.  Like xfs_perag_get(), the reference is
 * taken inside the RCU read section.  Returns NULL when no tagged perag
 * exists at or after @first; otherwise the caller must balance with
 * xfs_perag_put().
 */
struct xfs_perag *
xfs_perag_get_tag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		first,
	int			tag)
{
	struct xfs_perag	*pag;
	int			found;
	int			ref;

	rcu_read_lock();
	found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
					(void **)&pag, first, 1, tag);
	if (found <= 0) {
		rcu_read_unlock();
		return NULL;
	}
	ref = atomic_inc_return(&pag->pag_ref);
	rcu_read_unlock();
	trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_);
	return pag;
}
237
238void
239xfs_perag_put(struct xfs_perag *pag)
240{
241 int ref;
242
243 ASSERT(atomic_read(&pag->pag_ref) > 0);
244 ref = atomic_dec_return(&pag->pag_ref);
245 trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
246}
247
/*
 * RCU callback that actually frees a per-AG structure once all RCU read
 * sections that might still see it (xfs_perag_get lookups) have finished.
 * Scheduled by xfs_free_perag() via call_rcu().
 */
STATIC void
__xfs_free_perag(
	struct rcu_head	*head)
{
	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);

	/* no references may remain by the time the grace period expires */
	ASSERT(atomic_read(&pag->pag_ref) == 0);
	kmem_free(pag);
}
257
258
259
260
/*
 * Tear down all per-AG structures at unmount.  Each perag is removed from
 * the radix tree under the perag lock and then freed via an RCU grace
 * period so that concurrent RCU-protected lookups never touch freed
 * memory.
 */
STATIC void
xfs_free_perag(
	xfs_mount_t	*mp)
{
	xfs_agnumber_t	agno;
	struct xfs_perag *pag;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, agno);
		spin_unlock(&mp->m_perag_lock);
		ASSERT(pag);
		ASSERT(atomic_read(&pag->pag_ref) == 0);
		call_rcu(&pag->rcu_head, __xfs_free_perag);
	}
}
277
278
279
280
281
/*
 * Check that a filesystem-block count is addressable by the page cache on
 * this system.  Returns 0 if the block count fits, EFBIG otherwise.
 *
 * The ASSERTs document the shift preconditions: sb_blocklog must not
 * exceed PAGE_SHIFT and must be at least BBSHIFT, otherwise the shift
 * amounts below would be negative (undefined behavior).
 */
int
xfs_sb_validate_fsb_count(
	xfs_sb_t	*sbp,
	__uint64_t	nblocks)
{
	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
	ASSERT(sbp->sb_blocklog >= BBSHIFT);

#if XFS_BIG_BLKNOS	/* 64-bit page cache indices available */
	if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
		return EFBIG;
#else			/* 32-bit: limit is in basic (512 byte) blocks */
	if (nblocks << (sbp->sb_blocklog - BBSHIFT) > UINT_MAX)
		return EFBIG;
#endif
	return 0;
}
299
300
301
302
/*
 * Sanity-check the superblock read from disk before the mount proceeds.
 *
 * Verifies the magic number, version, internal/external log consistency
 * against the mount's devices, geometry field self-consistency, and a few
 * limits this kernel cannot support.  @check_inprogress selects whether a
 * set sb_inprogress flag is fatal (it is for the primary superblock, but
 * secondary superblocks may legitimately have it set during growfs).
 *
 * Returns 0 if the superblock looks valid, otherwise EWRONGFS, EINVAL,
 * EFSCORRUPTED, ENOSYS or EFBIG as appropriate.
 */
STATIC int
xfs_mount_validate_sb(
	xfs_mount_t	*mp,
	xfs_sb_t	*sbp,
	bool		check_inprogress)
{

	/*
	 * If the log device and data device have the
	 * same device number, the log is internal.
	 * Consequently, the sb_logstart should be non-zero.  If
	 * we have a zero sb_logstart in this case, we may be trying to mount
	 * a volume filesystem in a non-volume manner.
	 */
	if (sbp->sb_magicnum != XFS_SB_MAGIC) {
		xfs_warn(mp, "bad magic number");
		return XFS_ERROR(EWRONGFS);
	}

	if (!xfs_sb_good_version(sbp)) {
		xfs_warn(mp, "bad version");
		return XFS_ERROR(EWRONGFS);
	}

	if (unlikely(
	    sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) {
		xfs_warn(mp,
		"filesystem is marked as having an external log; "
		"specify logdev on the mount command line.");
		return XFS_ERROR(EINVAL);
	}

	if (unlikely(
	    sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) {
		xfs_warn(mp,
		"filesystem is marked as having an internal log; "
		"do not specify logdev on the mount command line.");
		return XFS_ERROR(EINVAL);
	}

	/*
	 * More sanity checking.  Most of these were stolen directly from
	 * xfs_repair.  Each size/log pair must agree (size == 1 << log),
	 * and each value must lie within its supported range.
	 */
	if (unlikely(
	    sbp->sb_agcount <= 0					||
	    sbp->sb_sectsize < XFS_MIN_SECTORSIZE			||
	    sbp->sb_sectsize > XFS_MAX_SECTORSIZE			||
	    sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG			||
	    sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG			||
	    sbp->sb_sectsize != (1 << sbp->sb_sectlog)			||
	    sbp->sb_blocksize < XFS_MIN_BLOCKSIZE			||
	    sbp->sb_blocksize > XFS_MAX_BLOCKSIZE			||
	    sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG			||
	    sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG			||
	    sbp->sb_blocksize != (1 << sbp->sb_blocklog)		||
	    sbp->sb_inodesize < XFS_DINODE_MIN_SIZE			||
	    sbp->sb_inodesize > XFS_DINODE_MAX_SIZE			||
	    sbp->sb_inodelog < XFS_DINODE_MIN_LOG			||
	    sbp->sb_inodelog > XFS_DINODE_MAX_LOG			||
	    sbp->sb_inodesize != (1 << sbp->sb_inodelog)		||
	    (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog)	||
	    (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE)	||
	    (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE)	||
	    (sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */)	||
	    sbp->sb_dblocks == 0					||
	    sbp->sb_dblocks > XFS_MAX_DBLOCKS(sbp)			||
	    sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp))) {
		XFS_CORRUPTION_ERROR("SB sanity check failed",
				XFS_ERRLEVEL_LOW, mp, sbp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
		xfs_warn(mp,
		"File system with blocksize %d bytes. "
		"Only pagesize (%ld) or less will currently work.",
				sbp->sb_blocksize, PAGE_SIZE);
		return XFS_ERROR(ENOSYS);
	}

	/*
	 * Currently only very few inode sizes are supported.
	 */
	switch (sbp->sb_inodesize) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		xfs_warn(mp, "inode size of %d bytes not supported",
				sbp->sb_inodesize);
		return XFS_ERROR(ENOSYS);
	}

	if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) ||
	    xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
		xfs_warn(mp,
		"file system too large to be mounted on this system.");
		return XFS_ERROR(EFBIG);
	}

	if (check_inprogress && sbp->sb_inprogress) {
		xfs_warn(mp, "Offline file system operation in progress!");
		return XFS_ERROR(EFSCORRUPTED);
	}

	/*
	 * Version 1 directory format has long since been deprecated;
	 * refuse to mount such filesystems.
	 */
	if (unlikely(!xfs_sb_version_hasdirv2(sbp))) {
		xfs_warn(mp, "file system using version 1 directory format");
		return XFS_ERROR(ENOSYS);
	}

	return 0;
}
424
425int
426xfs_initialize_perag(
427 xfs_mount_t *mp,
428 xfs_agnumber_t agcount,
429 xfs_agnumber_t *maxagi)
430{
431 xfs_agnumber_t index;
432 xfs_agnumber_t first_initialised = 0;
433 xfs_perag_t *pag;
434 xfs_agino_t agino;
435 xfs_ino_t ino;
436 xfs_sb_t *sbp = &mp->m_sb;
437 int error = -ENOMEM;
438
439
440
441
442
443
444 for (index = 0; index < agcount; index++) {
445 pag = xfs_perag_get(mp, index);
446 if (pag) {
447 xfs_perag_put(pag);
448 continue;
449 }
450 if (!first_initialised)
451 first_initialised = index;
452
453 pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
454 if (!pag)
455 goto out_unwind;
456 pag->pag_agno = index;
457 pag->pag_mount = mp;
458 spin_lock_init(&pag->pag_ici_lock);
459 mutex_init(&pag->pag_ici_reclaim_lock);
460 INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
461 spin_lock_init(&pag->pag_buf_lock);
462 pag->pag_buf_tree = RB_ROOT;
463
464 if (radix_tree_preload(GFP_NOFS))
465 goto out_unwind;
466
467 spin_lock(&mp->m_perag_lock);
468 if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
469 BUG();
470 spin_unlock(&mp->m_perag_lock);
471 radix_tree_preload_end();
472 error = -EEXIST;
473 goto out_unwind;
474 }
475 spin_unlock(&mp->m_perag_lock);
476 radix_tree_preload_end();
477 }
478
479
480
481
482
483 agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
484 ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
485
486 if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
487 mp->m_flags |= XFS_MOUNT_32BITINODES;
488 else
489 mp->m_flags &= ~XFS_MOUNT_32BITINODES;
490
491 if (mp->m_flags & XFS_MOUNT_32BITINODES)
492 index = xfs_set_inode32(mp);
493 else
494 index = xfs_set_inode64(mp);
495
496 if (maxagi)
497 *maxagi = index;
498 return 0;
499
500out_unwind:
501 kmem_free(pag);
502 for (; index > first_initialised; index--) {
503 pag = radix_tree_delete(&mp->m_perag_tree, index);
504 kmem_free(pag);
505 }
506 return error;
507}
508
/*
 * Copy a superblock from its on-disk (big-endian) format into the
 * in-core, host-endian xfs_sb structure.  Single-byte fields and binary
 * blobs (uuid, fname) are copied verbatim; everything else is
 * endian-converted.  This is the read-side counterpart of
 * xfs_sb_to_disk().
 */
void
xfs_sb_from_disk(
	struct xfs_sb	*to,
	xfs_dsb_t	*from)
{
	to->sb_magicnum = be32_to_cpu(from->sb_magicnum);
	to->sb_blocksize = be32_to_cpu(from->sb_blocksize);
	to->sb_dblocks = be64_to_cpu(from->sb_dblocks);
	to->sb_rblocks = be64_to_cpu(from->sb_rblocks);
	to->sb_rextents = be64_to_cpu(from->sb_rextents);
	memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid));
	to->sb_logstart = be64_to_cpu(from->sb_logstart);
	to->sb_rootino = be64_to_cpu(from->sb_rootino);
	to->sb_rbmino = be64_to_cpu(from->sb_rbmino);
	to->sb_rsumino = be64_to_cpu(from->sb_rsumino);
	to->sb_rextsize = be32_to_cpu(from->sb_rextsize);
	to->sb_agblocks = be32_to_cpu(from->sb_agblocks);
	to->sb_agcount = be32_to_cpu(from->sb_agcount);
	to->sb_rbmblocks = be32_to_cpu(from->sb_rbmblocks);
	to->sb_logblocks = be32_to_cpu(from->sb_logblocks);
	to->sb_versionnum = be16_to_cpu(from->sb_versionnum);
	to->sb_sectsize = be16_to_cpu(from->sb_sectsize);
	to->sb_inodesize = be16_to_cpu(from->sb_inodesize);
	to->sb_inopblock = be16_to_cpu(from->sb_inopblock);
	memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname));
	to->sb_blocklog = from->sb_blocklog;
	to->sb_sectlog = from->sb_sectlog;
	to->sb_inodelog = from->sb_inodelog;
	to->sb_inopblog = from->sb_inopblog;
	to->sb_agblklog = from->sb_agblklog;
	to->sb_rextslog = from->sb_rextslog;
	to->sb_inprogress = from->sb_inprogress;
	to->sb_imax_pct = from->sb_imax_pct;
	to->sb_icount = be64_to_cpu(from->sb_icount);
	to->sb_ifree = be64_to_cpu(from->sb_ifree);
	to->sb_fdblocks = be64_to_cpu(from->sb_fdblocks);
	to->sb_frextents = be64_to_cpu(from->sb_frextents);
	to->sb_uquotino = be64_to_cpu(from->sb_uquotino);
	to->sb_gquotino = be64_to_cpu(from->sb_gquotino);
	to->sb_qflags = be16_to_cpu(from->sb_qflags);
	to->sb_flags = from->sb_flags;
	to->sb_shared_vn = from->sb_shared_vn;
	to->sb_inoalignmt = be32_to_cpu(from->sb_inoalignmt);
	to->sb_unit = be32_to_cpu(from->sb_unit);
	to->sb_width = be32_to_cpu(from->sb_width);
	to->sb_dirblklog = from->sb_dirblklog;
	to->sb_logsectlog = from->sb_logsectlog;
	to->sb_logsectsize = be16_to_cpu(from->sb_logsectsize);
	to->sb_logsunit = be32_to_cpu(from->sb_logsunit);
	to->sb_features2 = be32_to_cpu(from->sb_features2);
	to->sb_bad_features2 = be32_to_cpu(from->sb_bad_features2);
}
561
562
563
564
565
566
/*
 * Copy in-core superblock fields into the on-disk (big-endian) buffer.
 *
 * @fields is a bitmask of XFS_SB_* field bits; only the fields whose bits
 * are set are written.  Each field's byte offset and size come from the
 * xfs_sb_info table (size = next entry's offset - this entry's offset).
 * type == 1 fields (uuid, fname) and single-byte fields are copied
 * verbatim; 2/4/8-byte fields are endian-converted.
 */
void
xfs_sb_to_disk(
	xfs_dsb_t	*to,
	xfs_sb_t	*from,
	__int64_t	fields)
{
	xfs_caddr_t	to_ptr = (xfs_caddr_t)to;
	xfs_caddr_t	from_ptr = (xfs_caddr_t)from;
	xfs_sb_field_t	f;
	int		first;
	int		size;

	ASSERT(fields);
	if (!fields)
		return;

	while (fields) {
		/* handle the lowest set field bit each time around */
		f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
		first = xfs_sb_info[f].offset;
		size = xfs_sb_info[f + 1].offset - first;

		ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1);

		if (size == 1 || xfs_sb_info[f].type == 1) {
			memcpy(to_ptr + first, from_ptr + first, size);
		} else {
			switch (size) {
			case 2:
				*(__be16 *)(to_ptr + first) =
					cpu_to_be16(*(__u16 *)(from_ptr + first));
				break;
			case 4:
				*(__be32 *)(to_ptr + first) =
					cpu_to_be32(*(__u32 *)(from_ptr + first));
				break;
			case 8:
				*(__be64 *)(to_ptr + first) =
					cpu_to_be64(*(__u64 *)(from_ptr + first));
				break;
			default:
				ASSERT(0);
			}
		}

		fields &= ~(1LL << f);
	}
}
614
/*
 * Common superblock buffer verification: convert the on-disk superblock
 * to in-core form and run the full sanity checks.  Any validation error
 * is latched into the buffer via xfs_buf_ioerror() for the caller of the
 * buffer I/O to pick up.
 */
static void
xfs_sb_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	struct xfs_sb	sb;
	int		error;

	xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp));

	/*
	 * Only check the in progress field for the primary superblock as
	 * mkfs.xfs doesn't clear it from secondary superblocks.
	 */
	error = xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR);
	if (error)
		xfs_buf_ioerror(bp, error);
}
633
/* Buffer read-completion verifier for the superblock (see xfs_sb_buf_ops). */
static void
xfs_sb_read_verify(
	struct xfs_buf	*bp)
{
	xfs_sb_verify(bp);
}
640
641
642
643
644
645
646
647static void
648xfs_sb_quiet_read_verify(
649 struct xfs_buf *bp)
650{
651 struct xfs_sb sb;
652
653 xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp));
654
655 if (sb.sb_magicnum == XFS_SB_MAGIC) {
656
657 xfs_sb_read_verify(bp);
658 return;
659 }
660
661 xfs_buf_ioerror(bp, EWRONGFS);
662}
663
/* Buffer write-side verifier for the superblock (see xfs_sb_buf_ops). */
static void
xfs_sb_write_verify(
	struct xfs_buf	*bp)
{
	xfs_sb_verify(bp);
}
670
/* Standard superblock buffer verifier ops. */
const struct xfs_buf_ops xfs_sb_buf_ops = {
	.verify_read = xfs_sb_read_verify,
	.verify_write = xfs_sb_write_verify,
};

/* Quiet ops used while probing a device that may not be XFS. */
static const struct xfs_buf_ops xfs_sb_quiet_buf_ops = {
	.verify_read = xfs_sb_quiet_read_verify,
	.verify_write = xfs_sb_write_verify,
};
680
681
682
683
684
685
/*
 * xfs_readsb
 *
 * Does the initial read of the superblock into mp->m_sb and pins the
 * buffer as mp->m_sb_bp.  Because the filesystem's true sector size is
 * not known until the superblock has been read, the read is first done
 * with the device's reported sector size and retried with the
 * filesystem's sector size if that turns out to be larger.
 */
int
xfs_readsb(xfs_mount_t *mp, int flags)
{
	unsigned int	sector_size;
	xfs_buf_t	*bp;
	int		error;
	int		loud = !(flags & XFS_MFSI_QUIET);

	ASSERT(mp->m_sb_bp == NULL);
	ASSERT(mp->m_ddev_targp != NULL);

	/*
	 * Allocate a (locked) buffer to hold the superblock.
	 * This will be kept around at all times to optimize
	 * access to the superblock.
	 */
	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);

reread:
	/* quiet mount probes use the verifier that suppresses warnings */
	bp = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
				   BTOBB(sector_size), 0,
				   loud ? &xfs_sb_buf_ops
				        : &xfs_sb_quiet_buf_ops);
	if (!bp) {
		if (loud)
			xfs_warn(mp, "SB buffer read failed");
		return EIO;
	}
	if (bp->b_error) {
		error = bp->b_error;
		if (loud)
			xfs_warn(mp, "SB validate failed");
		goto release_buf;
	}

	/*
	 * Initialize the mount structure from the superblock.
	 */
	xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));

	/*
	 * We must be able to do sector-sized and sector-aligned IO.
	 */
	if (sector_size > mp->m_sb.sb_sectsize) {
		if (loud)
			xfs_warn(mp, "device supports %u byte sectors (not %u)",
				sector_size, mp->m_sb.sb_sectsize);
		error = ENOSYS;
		goto release_buf;
	}

	/*
	 * If device sector size is smaller than the superblock size,
	 * re-read the superblock so the buffer is correctly sized.
	 */
	if (sector_size < mp->m_sb.sb_sectsize) {
		xfs_buf_relse(bp);
		sector_size = mp->m_sb.sb_sectsize;
		goto reread;
	}

	/* Initialize per-cpu counters from the freshly read superblock. */
	xfs_icsb_reinit_counters(mp);

	mp->m_sb_bp = bp;
	xfs_buf_unlock(bp);
	return 0;

release_buf:
	xfs_buf_relse(bp);
	return error;
}
758
759
760
761
762
763
764
765
766
/*
 * xfs_mount_common
 *
 * Derive the in-core mount geometry fields (shift counts, masks, btree
 * record limits) from the validated superblock.  All values here are
 * pure functions of *sbp; no I/O is done.
 */
STATIC void
xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
{
	mp->m_agfrotor = mp->m_agirotor = 0;
	spin_lock_init(&mp->m_agirotor_lock);
	mp->m_maxagi = mp->m_sb.sb_agcount;
	mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
	mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
	mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
	mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
	mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
	mp->m_blockmask = sbp->sb_blocksize - 1;
	mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
	mp->m_blockwmask = mp->m_blockwsize - 1;

	/* max/min records per block for each btree type (leaf/node) */
	mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1);
	mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0);
	mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2;
	mp->m_alloc_mnr[1] = mp->m_alloc_mxr[1] / 2;

	mp->m_inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
	mp->m_inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
	mp->m_inobt_mnr[0] = mp->m_inobt_mxr[0] / 2;
	mp->m_inobt_mnr[1] = mp->m_inobt_mxr[1] / 2;

	mp->m_bmap_dmxr[0] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 1);
	mp->m_bmap_dmxr[1] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 0);
	mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2;
	mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2;

	mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
	mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
					sbp->sb_inopblock);
	mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
}
802
803
804
805
806
807
808
809
810
/*
 * xfs_initialize_perag_data
 *
 * Recompute the superblock's summary counters (icount, ifree, fdblocks)
 * from the per-AG headers.  Needed after an unclean unmount on a
 * lazy-sb-count filesystem, where the on-disk summary counters may be
 * stale.  Returns 0 on success or an error from reading the AG headers.
 */
STATIC int
xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
{
	xfs_agnumber_t	index;
	xfs_perag_t	*pag;
	xfs_sb_t	*sbp = &mp->m_sb;
	uint64_t	ifree = 0;
	uint64_t	ialloc = 0;
	uint64_t	bfree = 0;
	uint64_t	bfreelst = 0;
	uint64_t	btree = 0;
	int		error;

	for (index = 0; index < agcount; index++) {
		/*
		 * read the agf, then the agi. This gets us
		 * all the information we need and populates the
		 * per-ag structures for us.
		 */
		error = xfs_alloc_pagf_init(mp, NULL, index, 0);
		if (error)
			return error;

		error = xfs_ialloc_pagi_init(mp, NULL, index);
		if (error)
			return error;
		pag = xfs_perag_get(mp, index);
		ifree += pag->pagi_freecount;
		ialloc += pag->pagi_count;
		bfree += pag->pagf_freeblks;
		bfreelst += pag->pagf_flcount;
		btree += pag->pagf_btreeblks;
		xfs_perag_put(pag);
	}
	/*
	 * Overwrite incore superblock counters with just-read data
	 */
	spin_lock(&mp->m_sb_lock);
	sbp->sb_ifree = ifree;
	sbp->sb_icount = ialloc;
	sbp->sb_fdblocks = bfree + bfreelst + btree;
	spin_unlock(&mp->m_sb_lock);

	/* Fixup the per-cpu counters as well. */
	xfs_icsb_reinit_counters(mp);

	return 0;
}
859
860
861
862
/*
 * Update alignment values based on mount options and superblock values.
 *
 * If the administrator supplied sunit/swidth mount options (mp->m_dalign
 * nonzero, in BBs at entry), validate them against the filesystem
 * geometry, convert them to filesystem blocks, and push them into the
 * superblock (flagging the fields for a later sb write).  Otherwise,
 * inherit the values recorded in the superblock.  With XFS_MOUNT_RETERR,
 * invalid values fail the mount with EINVAL; without it, alignment is
 * quietly turned off instead.
 */
STATIC int
xfs_update_alignment(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);

	if (mp->m_dalign) {
		/*
		 * If stripe unit and stripe width are not multiples
		 * of the fs blocksize turn off alignment.
		 */
		if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
		    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
			if (mp->m_flags & XFS_MOUNT_RETERR) {
				xfs_warn(mp, "alignment check failed: "
					 "(sunit/swidth vs. blocksize)");
				return XFS_ERROR(EINVAL);
			}
			mp->m_dalign = mp->m_swidth = 0;
		} else {
			/*
			 * Convert the stripe unit and width to FSBs.
			 */
			mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
			if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
				if (mp->m_flags & XFS_MOUNT_RETERR) {
					xfs_warn(mp, "alignment check failed: "
						 "(sunit/swidth vs. ag size)");
					return XFS_ERROR(EINVAL);
				}
				xfs_warn(mp,
		"stripe alignment turned off: sunit(%d)/swidth(%d) "
		"incompatible with agsize(%d)",
					mp->m_dalign, mp->m_swidth,
					sbp->sb_agblocks);

				mp->m_dalign = 0;
				mp->m_swidth = 0;
			} else if (mp->m_dalign) {
				mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
			} else {
				/* sunit rounded down to zero FSBs */
				if (mp->m_flags & XFS_MOUNT_RETERR) {
					xfs_warn(mp, "alignment check failed: "
						"sunit(%d) less than bsize(%d)",
						mp->m_dalign,
						mp->m_blockmask +1);
					return XFS_ERROR(EINVAL);
				}
				mp->m_swidth = 0;
			}
		}

		/*
		 * Update superblock with new values
		 * and log changes
		 */
		if (xfs_sb_version_hasdalign(sbp)) {
			if (sbp->sb_unit != mp->m_dalign) {
				sbp->sb_unit = mp->m_dalign;
				mp->m_update_flags |= XFS_SB_UNIT;
			}
			if (sbp->sb_width != mp->m_swidth) {
				sbp->sb_width = mp->m_swidth;
				mp->m_update_flags |= XFS_SB_WIDTH;
			}
		}
	} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
		    xfs_sb_version_hasdalign(&mp->m_sb)) {
			/* no mount options: take alignment from the sb */
			mp->m_dalign = sbp->sb_unit;
			mp->m_swidth = sbp->sb_width;
	}

	return 0;
}
936
937
938
939
940STATIC void
941xfs_set_maxicount(xfs_mount_t *mp)
942{
943 xfs_sb_t *sbp = &(mp->m_sb);
944 __uint64_t icount;
945
946 if (sbp->sb_imax_pct) {
947
948
949
950
951 icount = sbp->sb_dblocks * sbp->sb_imax_pct;
952 do_div(icount, 100);
953 do_div(icount, mp->m_ialloc_blks);
954 mp->m_maxicount = (icount * mp->m_ialloc_blks) <<
955 sbp->sb_inopblog;
956 } else {
957 mp->m_maxicount = 0;
958 }
959}
960
961
962
963
964
965
966
967STATIC void
968xfs_set_rw_sizes(xfs_mount_t *mp)
969{
970 xfs_sb_t *sbp = &(mp->m_sb);
971 int readio_log, writeio_log;
972
973 if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
974 if (mp->m_flags & XFS_MOUNT_WSYNC) {
975 readio_log = XFS_WSYNC_READIO_LOG;
976 writeio_log = XFS_WSYNC_WRITEIO_LOG;
977 } else {
978 readio_log = XFS_READIO_LOG_LARGE;
979 writeio_log = XFS_WRITEIO_LOG_LARGE;
980 }
981 } else {
982 readio_log = mp->m_readio_log;
983 writeio_log = mp->m_writeio_log;
984 }
985
986 if (sbp->sb_blocklog > readio_log) {
987 mp->m_readio_log = sbp->sb_blocklog;
988 } else {
989 mp->m_readio_log = readio_log;
990 }
991 mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
992 if (sbp->sb_blocklog > writeio_log) {
993 mp->m_writeio_log = sbp->sb_blocklog;
994 } else {
995 mp->m_writeio_log = writeio_log;
996 }
997 mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
998}
999
1000
1001
1002
1003void
1004xfs_set_low_space_thresholds(
1005 struct xfs_mount *mp)
1006{
1007 int i;
1008
1009 for (i = 0; i < XFS_LOWSP_MAX; i++) {
1010 __uint64_t space = mp->m_sb.sb_dblocks;
1011
1012 do_div(space, 100);
1013 mp->m_low_space[i] = space * (i + 1);
1014 }
1015}
1016
1017
1018
1019
1020
1021STATIC void
1022xfs_set_inoalignment(xfs_mount_t *mp)
1023{
1024 if (xfs_sb_version_hasalign(&mp->m_sb) &&
1025 mp->m_sb.sb_inoalignmt >=
1026 XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
1027 mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
1028 else
1029 mp->m_inoalign_mask = 0;
1030
1031
1032
1033
1034 if (mp->m_dalign && mp->m_inoalign_mask &&
1035 !(mp->m_dalign & mp->m_inoalign_mask))
1036 mp->m_sinoalign = mp->m_dalign;
1037 else
1038 mp->m_sinoalign = 0;
1039}
1040
1041
1042
1043
/*
 * Check that the data (and log if separate) devices are large enough for
 * the filesystem: verify the block counts don't overflow the daddr space,
 * then probe-read the last sector of each device.  Returns 0 on success,
 * EFBIG on a size mismatch, or EIO if the probe reads fail.
 */
STATIC int
xfs_check_sizes(xfs_mount_t *mp)
{
	xfs_buf_t	*bp;
	xfs_daddr_t	d;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
		xfs_warn(mp, "filesystem size mismatch detected");
		return XFS_ERROR(EFBIG);
	}
	/* read the last sector of the data device to prove it exists */
	bp = xfs_buf_read_uncached(mp->m_ddev_targp,
					d - XFS_FSS_TO_BB(mp, 1),
					XFS_FSS_TO_BB(mp, 1), 0, NULL);
	if (!bp) {
		xfs_warn(mp, "last sector read failed");
		return EIO;
	}
	xfs_buf_relse(bp);

	if (mp->m_logdev_targp != mp->m_ddev_targp) {
		d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
		if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
			xfs_warn(mp, "log size mismatch detected");
			return XFS_ERROR(EFBIG);
		}
		/* same probe for an external log device */
		bp = xfs_buf_read_uncached(mp->m_logdev_targp,
					d - XFS_FSB_TO_BB(mp, 1),
					XFS_FSB_TO_BB(mp, 1), 0, NULL);
		if (!bp) {
			xfs_warn(mp, "log device read failed");
			return EIO;
		}
		xfs_buf_relse(bp);
	}
	return 0;
}
1081
1082
1083
1084
/*
 * Clear the quota flags in the in-core and on-disk superblock, used when
 * mounting without quota support but the superblock says quota accounting
 * was active.  Returns 0 if nothing needed doing (or the fs is read-only,
 * in which case only the in-core flags are cleared), otherwise the result
 * of logging the superblock change.
 */
int
xfs_mount_reset_sbqflags(
	struct xfs_mount	*mp)
{
	int			error;
	struct xfs_trans	*tp;

	mp->m_qflags = 0;

	/*
	 * It is OK to look at sb_qflags here in mount path,
	 * without m_sb_lock.
	 */
	if (mp->m_sb.sb_qflags == 0)
		return 0;
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = 0;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * If the fs is readonly, let the incore superblock run
	 * with quotas off but don't flush the update out to disk
	 */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
	error = xfs_trans_reserve(tp, 0, XFS_QM_SBCHANGE_LOG_RES(mp),
				  0, 0, XFS_DEFAULT_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		xfs_alert(mp, "%s: Superblock update failed!", __func__);
		return error;
	}

	xfs_mod_sb(tp, XFS_SB_QFLAGS);
	return xfs_trans_commit(tp, 0);
}
1123
1124__uint64_t
1125xfs_default_resblks(xfs_mount_t *mp)
1126{
1127 __uint64_t resblks;
1128
1129
1130
1131
1132
1133
1134
1135
1136 resblks = mp->m_sb.sb_dblocks;
1137 do_div(resblks, 20);
1138 resblks = min_t(__uint64_t, resblks, 8192);
1139 return resblks;
1140}
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152int
1153xfs_mountfs(
1154 xfs_mount_t *mp)
1155{
1156 xfs_sb_t *sbp = &(mp->m_sb);
1157 xfs_inode_t *rip;
1158 __uint64_t resblks;
1159 uint quotamount = 0;
1160 uint quotaflags = 0;
1161 int error = 0;
1162
1163 xfs_mount_common(mp, sbp);
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181 if (xfs_sb_has_mismatched_features2(sbp)) {
1182 xfs_warn(mp, "correcting sb_features alignment problem");
1183 sbp->sb_features2 |= sbp->sb_bad_features2;
1184 sbp->sb_bad_features2 = sbp->sb_features2;
1185 mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2;
1186
1187
1188
1189
1190
1191 if (xfs_sb_version_hasattr2(&mp->m_sb) &&
1192 !(mp->m_flags & XFS_MOUNT_NOATTR2))
1193 mp->m_flags |= XFS_MOUNT_ATTR2;
1194 }
1195
1196 if (xfs_sb_version_hasattr2(&mp->m_sb) &&
1197 (mp->m_flags & XFS_MOUNT_NOATTR2)) {
1198 xfs_sb_version_removeattr2(&mp->m_sb);
1199 mp->m_update_flags |= XFS_SB_FEATURES2;
1200
1201
1202 if (!sbp->sb_features2)
1203 mp->m_update_flags |= XFS_SB_VERSIONNUM;
1204 }
1205
1206
1207
1208
1209
1210
1211
1212 error = xfs_update_alignment(mp);
1213 if (error)
1214 goto out;
1215
1216 xfs_alloc_compute_maxlevels(mp);
1217 xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
1218 xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
1219 xfs_ialloc_compute_maxlevels(mp);
1220
1221 xfs_set_maxicount(mp);
1222
1223 error = xfs_uuid_mount(mp);
1224 if (error)
1225 goto out;
1226
1227
1228
1229
1230 xfs_set_rw_sizes(mp);
1231
1232
1233 xfs_set_low_space_thresholds(mp);
1234
1235
1236
1237
1238
1239
1240 mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
1241
1242
1243
1244
1245 xfs_set_inoalignment(mp);
1246
1247
1248
1249
1250 error = xfs_check_sizes(mp);
1251 if (error)
1252 goto out_remove_uuid;
1253
1254
1255
1256
1257 error = xfs_rtmount_init(mp);
1258 if (error) {
1259 xfs_warn(mp, "RT mount failed");
1260 goto out_remove_uuid;
1261 }
1262
1263
1264
1265
1266
1267 uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);
1268
1269 mp->m_dmevmask = 0;
1270
1271 xfs_dir_mount(mp);
1272
1273
1274
1275
1276 mp->m_attr_magicpct = (mp->m_sb.sb_blocksize * 37) / 100;
1277
1278
1279
1280
1281 xfs_trans_init(mp);
1282
1283
1284
1285
1286 spin_lock_init(&mp->m_perag_lock);
1287 INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1288 error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
1289 if (error) {
1290 xfs_warn(mp, "Failed per-ag init: %d", error);
1291 goto out_remove_uuid;
1292 }
1293
1294 if (!sbp->sb_logblocks) {
1295 xfs_warn(mp, "no log defined");
1296 XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp);
1297 error = XFS_ERROR(EFSCORRUPTED);
1298 goto out_free_perag;
1299 }
1300
1301
1302
1303
1304 error = xfs_log_mount(mp, mp->m_logdev_targp,
1305 XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
1306 XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
1307 if (error) {
1308 xfs_warn(mp, "log mount failed");
1309 goto out_fail_wait;
1310 }
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331 if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
1332 !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
1333 !mp->m_sb.sb_inprogress) {
1334 error = xfs_initialize_perag_data(mp, sbp->sb_agcount);
1335 if (error)
1336 goto out_fail_wait;
1337 }
1338
1339
1340
1341
1342
1343 error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip);
1344 if (error) {
1345 xfs_warn(mp, "failed to read root inode");
1346 goto out_log_dealloc;
1347 }
1348
1349 ASSERT(rip != NULL);
1350
1351 if (unlikely(!S_ISDIR(rip->i_d.di_mode))) {
1352 xfs_warn(mp, "corrupted root inode %llu: not a directory",
1353 (unsigned long long)rip->i_ino);
1354 xfs_iunlock(rip, XFS_ILOCK_EXCL);
1355 XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
1356 mp);
1357 error = XFS_ERROR(EFSCORRUPTED);
1358 goto out_rele_rip;
1359 }
1360 mp->m_rootip = rip;
1361
1362 xfs_iunlock(rip, XFS_ILOCK_EXCL);
1363
1364
1365
1366
1367 error = xfs_rtmount_inodes(mp);
1368 if (error) {
1369
1370
1371
1372 xfs_warn(mp, "failed to read RT inodes");
1373 goto out_rele_rip;
1374 }
1375
1376
1377
1378
1379
1380
1381 if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
1382 error = xfs_mount_log_sb(mp, mp->m_update_flags);
1383 if (error) {
1384 xfs_warn(mp, "failed to write sb changes");
1385 goto out_rtunmount;
1386 }
1387 }
1388
1389
1390
1391
1392 if (XFS_IS_QUOTA_RUNNING(mp)) {
1393 error = xfs_qm_newmount(mp, "amount, "aflags);
1394 if (error)
1395 goto out_rtunmount;
1396 } else {
1397 ASSERT(!XFS_IS_QUOTA_ON(mp));
1398
1399
1400
1401
1402
1403
1404 if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
1405 xfs_notice(mp, "resetting quota flags");
1406 error = xfs_mount_reset_sbqflags(mp);
1407 if (error)
1408 return error;
1409 }
1410 }
1411
1412
1413
1414
1415
1416
1417 error = xfs_log_mount_finish(mp);
1418 if (error) {
1419 xfs_warn(mp, "log mount finish failed");
1420 goto out_rtunmount;
1421 }
1422
1423
1424
1425
1426 if (quotamount) {
1427 ASSERT(mp->m_qflags == 0);
1428 mp->m_qflags = quotaflags;
1429
1430 xfs_qm_mount_quotas(mp);
1431 }
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444 if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
1445 resblks = xfs_default_resblks(mp);
1446 error = xfs_reserve_blocks(mp, &resblks, NULL);
1447 if (error)
1448 xfs_warn(mp,
1449 "Unable to allocate reserve blocks. Continuing without reserve pool.");
1450 }
1451
1452 return 0;
1453
1454 out_rtunmount:
1455 xfs_rtunmount_inodes(mp);
1456 out_rele_rip:
1457 IRELE(rip);
1458 out_log_dealloc:
1459 xfs_log_unmount(mp);
1460 out_fail_wait:
1461 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
1462 xfs_wait_buftarg(mp->m_logdev_targp);
1463 xfs_wait_buftarg(mp->m_ddev_targp);
1464 out_free_perag:
1465 xfs_free_perag(mp);
1466 out_remove_uuid:
1467 xfs_uuid_unmount(mp);
1468 out:
1469 return error;
1470}
1471
1472
1473
1474
1475
/*
 * xfs_unmountfs
 *
 * Tear down a mounted filesystem: flush out inodes, dquots and the
 * superblock, unmount the log and free the incore structures.  The
 * ordering of the steps below matters; see the comments on each.
 */
void
xfs_unmountfs(
	struct xfs_mount	*mp)
{
	__uint64_t		resblks;
	int			error;

	/* Stop the background EOF-blocks scanner before unwinding state. */
	cancel_delayed_work_sync(&mp->m_eofblocks_work);

	xfs_qm_unmount_quotas(mp);
	xfs_rtunmount_inodes(mp);
	IRELE(mp->m_rootip);

	/*
	 * Force the log to disk before reclaiming inodes.  Stale inodes on
	 * a freed cluster buffer hold their flush locks until the
	 * transaction pinning the buffer hits the disk and the callbacks
	 * run; without a log force first, inode reclaim below could wait
	 * on those flush locks forever.
	 */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/*
	 * Flush all pending changes from the AIL.
	 */
	xfs_ail_push_all_sync(mp->m_ail);

	/*
	 * And reclaim all inodes.  At this point there should be no dirty
	 * inodes and none should be pinned or locked, but use synchronous
	 * reclaim just to be sure.  Stop background inode reclaim here as
	 * well.
	 */
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	xfs_reclaim_inodes(mp, SYNC_WAIT);

	xfs_qm_unmount(mp);

	/*
	 * Unreserve any blocks we have so that when we unmount we don't
	 * account the reserved free space as used.  This is only really
	 * necessary for lazy superblock counting because it trusts the
	 * incore superblock counters to be absolutely correct on clean
	 * unmount.
	 *
	 * For non-lazy counter filesystems this doesn't matter because we
	 * only ever apply deltas to the superblock and hence the incore
	 * value does not matter.
	 */
	resblks = 0;
	error = xfs_reserve_blocks(mp, &resblks, NULL);
	if (error)
		xfs_warn(mp, "Unable to free reserved block pool. "
				"Freespace may not be correct on next mount.");

	/* Write the final, accurate counters to the on-disk superblock. */
	error = xfs_log_sbcount(mp);
	if (error)
		xfs_warn(mp, "Unable to update superblock counters. "
				"Freespace may not be correct on next mount.");

	xfs_log_unmount(mp);
	xfs_uuid_unmount(mp);

#if defined(DEBUG)
	xfs_errortag_clearall(mp, 0);
#endif
	xfs_free_perag(mp);
}
1550
1551int
1552xfs_fs_writable(xfs_mount_t *mp)
1553{
1554 return !(mp->m_super->s_writers.frozen || XFS_FORCED_SHUTDOWN(mp) ||
1555 (mp->m_flags & XFS_MOUNT_RDONLY));
1556}
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
/*
 * Write the accurate in-core superblock counters (icount/ifree/fdblocks)
 * to the on-disk superblock via a synchronous transaction.  Only needed
 * for lazy superblock counting; otherwise the counters are kept up to
 * date by per-transaction deltas.  Returns 0 or a transaction error.
 */
int
xfs_log_sbcount(xfs_mount_t *mp)
{
	xfs_trans_t	*tp;
	int		error;

	/* Nothing to do on frozen, shut-down or read-only filesystems. */
	if (!xfs_fs_writable(mp))
		return 0;

	/* Fold any per-cpu counters back into mp->m_sb first. */
	xfs_icsb_sync_counters(mp, 0);

	/*
	 * We don't need to do this if we are updating the superblock
	 * counters on every modification.
	 */
	if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
		return 0;

	tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP);
	error = xfs_trans_reserve(tp, 0, XFS_SB_LOG_RES(mp), 0, 0,
				  XFS_DEFAULT_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS);
	/* Synchronous so the counters are on disk when we return. */
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp, 0);
	return error;
}
1598
1599
1600
1601
1602
1603
1604
1605
/*
 * xfs_mod_sb() copies the fields indicated by the 'fields' bitmask from
 * the in-core superblock into the superblock buffer and logs only the
 * byte range actually modified.  It does not provide the higher level of
 * locking needed to protect the in-core superblock from concurrent
 * access; callers must arrange that themselves.
 */
void
xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
{
	xfs_buf_t	*bp;
	int		first;
	int		last;
	xfs_mount_t	*mp;
	xfs_sb_field_t	f;

	ASSERT(fields);
	if (!fields)
		return;
	mp = tp->t_mountp;
	bp = xfs_trans_getsb(tp, mp, 0);
	/* Defensive initial values; both are recomputed below. */
	first = sizeof(xfs_sb_t);
	last = 0;

	/* Translate the requested in-core fields to disk format. */
	xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields);

	/*
	 * Compute the byte range to log: the highest set field bit ends the
	 * range (its end is the next field's offset minus one) and the
	 * lowest set field bit starts it.  xfs_sb_info[] maps field index
	 * to its byte offset in the on-disk superblock.
	 */
	f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
	last = xfs_sb_info[f + 1].offset - 1;

	f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
	first = xfs_sb_info[f].offset;

	xfs_trans_log_buf(tp, bp, first, last);
}
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
/*
 * xfs_mod_incore_sb_unlocked() is a utility routine used to apply a delta
 * to a specified field in the in-core superblock.  Simply switch on the
 * field indicated and apply the delta to that field.  Fields are not
 * allowed to dip below zero, so if the delta would do this, do not apply
 * it and return EINVAL.
 *
 * The caller must hold m_sb_lock.  'rsvd' permits XFS_SBS_FDBLOCKS
 * allocations to dip into the reserved block pool.
 */
STATIC int
xfs_mod_incore_sb_unlocked(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int64_t		delta,
	int		rsvd)
{
	int		scounter;	/* counter for 32 bit fields */
	long long	lcounter;	/* long counter for 64 bit fields */
	long long	res_used, rem;

	/*
	 * With the exception of fdblocks (which interacts with the
	 * reserved pool), every case follows the same pattern: add the
	 * delta, reject a negative result, store the new value.
	 */
	switch (field) {
	case XFS_SBS_ICOUNT:
		lcounter = (long long)mp->m_sb.sb_icount;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_icount = lcounter;
		return 0;
	case XFS_SBS_IFREE:
		lcounter = (long long)mp->m_sb.sb_ifree;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_ifree = lcounter;
		return 0;
	case XFS_SBS_FDBLOCKS:
		/* Work on the count excluding the set-aside blocks. */
		lcounter = (long long)
			mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);

		if (delta > 0) {		/* Putting blocks back */
			/* Refill the reserved pool before the free count. */
			if (res_used > delta) {
				mp->m_resblks_avail += delta;
			} else {
				rem = delta - res_used;
				mp->m_resblks_avail = mp->m_resblks;
				lcounter += rem;
			}
		} else {			/* Taking blocks away */
			lcounter += delta;
			if (lcounter >= 0) {
				mp->m_sb.sb_fdblocks = lcounter +
							XFS_ALLOC_SET_ASIDE(mp);
				return 0;
			}

			/*
			 * We are out of blocks; use any available reserved
			 * blocks if we are allowed to.
			 */
			if (!rsvd)
				return XFS_ERROR(ENOSPC);

			lcounter = (long long)mp->m_resblks_avail + delta;
			if (lcounter >= 0) {
				mp->m_resblks_avail = lcounter;
				return 0;
			}
			printk_once(KERN_WARNING
				"Filesystem \"%s\": reserve blocks depleted! "
				"Consider increasing reserve pool size.",
				mp->m_fsname);
			return XFS_ERROR(ENOSPC);
		}

		mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
		return 0;
	case XFS_SBS_FREXTENTS:
		lcounter = (long long)mp->m_sb.sb_frextents;
		lcounter += delta;
		if (lcounter < 0) {
			return XFS_ERROR(ENOSPC);
		}
		mp->m_sb.sb_frextents = lcounter;
		return 0;
	case XFS_SBS_DBLOCKS:
		lcounter = (long long)mp->m_sb.sb_dblocks;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_dblocks = lcounter;
		return 0;
	case XFS_SBS_AGCOUNT:
		scounter = mp->m_sb.sb_agcount;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_agcount = scounter;
		return 0;
	case XFS_SBS_IMAX_PCT:
		scounter = mp->m_sb.sb_imax_pct;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_imax_pct = scounter;
		return 0;
	case XFS_SBS_REXTSIZE:
		scounter = mp->m_sb.sb_rextsize;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rextsize = scounter;
		return 0;
	case XFS_SBS_RBMBLOCKS:
		scounter = mp->m_sb.sb_rbmblocks;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rbmblocks = scounter;
		return 0;
	case XFS_SBS_RBLOCKS:
		lcounter = (long long)mp->m_sb.sb_rblocks;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rblocks = lcounter;
		return 0;
	case XFS_SBS_REXTENTS:
		lcounter = (long long)mp->m_sb.sb_rextents;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rextents = lcounter;
		return 0;
	case XFS_SBS_REXTSLOG:
		scounter = mp->m_sb.sb_rextslog;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rextslog = scounter;
		return 0;
	default:
		ASSERT(0);
		return XFS_ERROR(EINVAL);
	}
}
1812
1813
1814
1815
1816
1817
1818
1819int
1820xfs_mod_incore_sb(
1821 struct xfs_mount *mp,
1822 xfs_sb_field_t field,
1823 int64_t delta,
1824 int rsvd)
1825{
1826 int status;
1827
1828#ifdef HAVE_PERCPU_SB
1829 ASSERT(field < XFS_SBS_ICOUNT || field > XFS_SBS_FDBLOCKS);
1830#endif
1831 spin_lock(&mp->m_sb_lock);
1832 status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
1833 spin_unlock(&mp->m_sb_lock);
1834
1835 return status;
1836}
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850int
1851xfs_mod_incore_sb_batch(
1852 struct xfs_mount *mp,
1853 xfs_mod_sb_t *msb,
1854 uint nmsb,
1855 int rsvd)
1856{
1857 xfs_mod_sb_t *msbp;
1858 int error = 0;
1859
1860
1861
1862
1863
1864
1865
1866 spin_lock(&mp->m_sb_lock);
1867 for (msbp = msb; msbp < (msb + nmsb); msbp++) {
1868 ASSERT(msbp->msb_field < XFS_SBS_ICOUNT ||
1869 msbp->msb_field > XFS_SBS_FDBLOCKS);
1870
1871 error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
1872 msbp->msb_delta, rsvd);
1873 if (error)
1874 goto unwind;
1875 }
1876 spin_unlock(&mp->m_sb_lock);
1877 return 0;
1878
1879unwind:
1880 while (--msbp >= msb) {
1881 error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
1882 -msbp->msb_delta, rsvd);
1883 ASSERT(error == 0);
1884 }
1885 spin_unlock(&mp->m_sb_lock);
1886 return error;
1887}
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898struct xfs_buf *
1899xfs_getsb(
1900 struct xfs_mount *mp,
1901 int flags)
1902{
1903 struct xfs_buf *bp = mp->m_sb_bp;
1904
1905 if (!xfs_buf_trylock(bp)) {
1906 if (flags & XBF_TRYLOCK)
1907 return NULL;
1908 xfs_buf_lock(bp);
1909 }
1910
1911 xfs_buf_hold(bp);
1912 ASSERT(XFS_BUF_ISDONE(bp));
1913 return bp;
1914}
1915
1916
1917
1918
1919void
1920xfs_freesb(
1921 struct xfs_mount *mp)
1922{
1923 struct xfs_buf *bp = mp->m_sb_bp;
1924
1925 xfs_buf_lock(bp);
1926 mp->m_sb_bp = NULL;
1927 xfs_buf_relse(bp);
1928}
1929
1930
1931
1932
1933
1934
/*
 * Log the indicated superblock fields in their own transaction.  Used at
 * mount time to persist changes such as stripe unit/width, uuid and
 * feature-flag fixups.  Returns 0 or a transaction error.
 */
int
xfs_mount_log_sb(
	xfs_mount_t	*mp,
	__int64_t	fields)
{
	xfs_trans_t	*tp;
	int		error;

	/* Only the mount-time-updatable fields are expected here. */
	ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID |
			 XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2 |
			 XFS_SB_VERSIONNUM));

	tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
	error = xfs_trans_reserve(tp, 0, XFS_SB_LOG_RES(mp), 0, 0,
				  XFS_DEFAULT_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}
	xfs_mod_sb(tp, fields);
	error = xfs_trans_commit(tp, 0);
	return error;
}
1958
1959
1960
1961
1962
1963int
1964xfs_dev_is_read_only(
1965 struct xfs_mount *mp,
1966 char *message)
1967{
1968 if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
1969 xfs_readonly_buftarg(mp->m_logdev_targp) ||
1970 (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
1971 xfs_notice(mp, "%s required on read-only device.", message);
1972 xfs_notice(mp, "write access unavailable, cannot proceed.");
1973 return EROFS;
1974 }
1975 return 0;
1976}
1977
1978#ifdef HAVE_PERCPU_SB
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031#ifdef CONFIG_HOTPLUG_CPU
2032
2033
2034
2035
2036
2037
2038
2039
/*
 * CPU hotplug notifier callback.  The notifier block is embedded in the
 * xfs_mount_t, so container_of() recovers the mount this event applies
 * to; the counters for the affected cpu are then initialised, folded
 * back into the global superblock, or rebalanced as appropriate.
 */
STATIC int
xfs_icsb_cpu_notify(
	struct notifier_block *nfb,
	unsigned long action,
	void *hcpu)
{
	xfs_icsb_cnts_t *cntp;
	xfs_mount_t	*mp;

	mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier);
	cntp = (xfs_icsb_cnts_t *)
			per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu);
	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		/* Easy case: zero the new cpu's counters; the rebalance
		 * at CPU_ONLINE does everything else for us. */
		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		xfs_icsb_lock(mp);
		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
		xfs_icsb_unlock(mp);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* Disable all the counters, then fold the dead cpu's
		 * count into the total on the global superblock and
		 * re-enable the counters. */
		xfs_icsb_lock(mp);
		spin_lock(&mp->m_sb_lock);
		xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
		xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
		xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);

		mp->m_sb.sb_icount += cntp->icsb_icount;
		mp->m_sb.sb_ifree += cntp->icsb_ifree;
		mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;

		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));

		xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0);
		xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0);
		xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0);
		spin_unlock(&mp->m_sb_lock);
		xfs_icsb_unlock(mp);
		break;
	}

	return NOTIFY_OK;
}
2094#endif
2095
/*
 * Allocate and initialise the per-cpu superblock counters for a mount,
 * and register the CPU hotplug notifier that keeps them coherent across
 * cpu online/offline events.  Returns 0 or -ENOMEM.
 */
int
xfs_icsb_init_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t);
	if (mp->m_sb_cnts == NULL)
		return -ENOMEM;

#ifdef CONFIG_HOTPLUG_CPU
	mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
	mp->m_icsb_notifier.priority = 0;
	register_hotcpu_notifier(&mp->m_icsb_notifier);
#endif

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
	}

	mutex_init(&mp->m_icsb_mutex);

	/*
	 * Start with all counters disabled so that the initial balance
	 * (done once the superblock has been read) kicks us off correctly.
	 */
	mp->m_icsb_counters = -1;
	return 0;
}
2127
2128void
2129xfs_icsb_reinit_counters(
2130 xfs_mount_t *mp)
2131{
2132 xfs_icsb_lock(mp);
2133
2134
2135
2136
2137 mp->m_icsb_counters = -1;
2138 xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
2139 xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
2140 xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
2141 xfs_icsb_unlock(mp);
2142}
2143
2144void
2145xfs_icsb_destroy_counters(
2146 xfs_mount_t *mp)
2147{
2148 if (mp->m_sb_cnts) {
2149 unregister_hotcpu_notifier(&mp->m_icsb_notifier);
2150 free_percpu(mp->m_sb_cnts);
2151 }
2152 mutex_destroy(&mp->m_icsb_mutex);
2153}
2154
2155STATIC void
2156xfs_icsb_lock_cntr(
2157 xfs_icsb_cnts_t *icsbp)
2158{
2159 while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) {
2160 ndelay(1000);
2161 }
2162}
2163
/* Release the lock bit taken by xfs_icsb_lock_cntr(). */
STATIC void
xfs_icsb_unlock_cntr(
	xfs_icsb_cnts_t	*icsbp)
{
	clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags);
}
2170
2171
2172STATIC void
2173xfs_icsb_lock_all_counters(
2174 xfs_mount_t *mp)
2175{
2176 xfs_icsb_cnts_t *cntp;
2177 int i;
2178
2179 for_each_online_cpu(i) {
2180 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2181 xfs_icsb_lock_cntr(cntp);
2182 }
2183}
2184
2185STATIC void
2186xfs_icsb_unlock_all_counters(
2187 xfs_mount_t *mp)
2188{
2189 xfs_icsb_cnts_t *cntp;
2190 int i;
2191
2192 for_each_online_cpu(i) {
2193 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2194 xfs_icsb_unlock_cntr(cntp);
2195 }
2196}
2197
2198STATIC void
2199xfs_icsb_count(
2200 xfs_mount_t *mp,
2201 xfs_icsb_cnts_t *cnt,
2202 int flags)
2203{
2204 xfs_icsb_cnts_t *cntp;
2205 int i;
2206
2207 memset(cnt, 0, sizeof(xfs_icsb_cnts_t));
2208
2209 if (!(flags & XFS_ICSB_LAZY_COUNT))
2210 xfs_icsb_lock_all_counters(mp);
2211
2212 for_each_online_cpu(i) {
2213 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2214 cnt->icsb_icount += cntp->icsb_icount;
2215 cnt->icsb_ifree += cntp->icsb_ifree;
2216 cnt->icsb_fdblocks += cntp->icsb_fdblocks;
2217 }
2218
2219 if (!(flags & XFS_ICSB_LAZY_COUNT))
2220 xfs_icsb_unlock_all_counters(mp);
2221}
2222
/*
 * Return non-zero when the given counter is running in global (disabled)
 * mode rather than distributed per-cpu mode.  Disabled state is tracked
 * as a bit per field in mp->m_icsb_counters.
 */
STATIC int
xfs_icsb_counter_disabled(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field)
{
	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
	return test_bit(field, &mp->m_icsb_counters);
}
2231
/*
 * Switch a counter from distributed per-cpu mode to global mode: set its
 * disabled bit and fold the per-cpu values back into mp->m_sb so the
 * global count is authoritative.
 */
STATIC void
xfs_icsb_disable_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field)
{
	xfs_icsb_cnts_t	cnt;

	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));

	/*
	 * If we are already disabled, then there is nothing to do here.
	 * We check before locking all the counters to avoid the expensive
	 * lock operation when being called in the slow path and the
	 * counter is already disabled.  This is safe because the only
	 * time we set or clear this state is under the m_icsb_mutex.
	 */
	if (xfs_icsb_counter_disabled(mp, field))
		return;

	xfs_icsb_lock_all_counters(mp);
	if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
		/* We won the race to disable: fold the per-cpu values into
		 * the global superblock counter. */
		xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT);
		switch(field) {
		case XFS_SBS_ICOUNT:
			mp->m_sb.sb_icount = cnt.icsb_icount;
			break;
		case XFS_SBS_IFREE:
			mp->m_sb.sb_ifree = cnt.icsb_ifree;
			break;
		case XFS_SBS_FDBLOCKS:
			mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
			break;
		default:
			BUG();
		}
	}

	xfs_icsb_unlock_all_counters(mp);
}
2274
/*
 * Switch a counter back to distributed per-cpu mode: give each online
 * cpu an equal share ('count'), with the division remainder ('resid')
 * added to the first cpu only, then clear the counter's disabled bit.
 */
STATIC void
xfs_icsb_enable_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	uint64_t	count,
	uint64_t	resid)
{
	xfs_icsb_cnts_t	*cntp;
	int		i;

	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));

	xfs_icsb_lock_all_counters(mp);
	for_each_online_cpu(i) {
		cntp = per_cpu_ptr(mp->m_sb_cnts, i);
		switch (field) {
		case XFS_SBS_ICOUNT:
			cntp->icsb_icount = count + resid;
			break;
		case XFS_SBS_IFREE:
			cntp->icsb_ifree = count + resid;
			break;
		case XFS_SBS_FDBLOCKS:
			cntp->icsb_fdblocks = count + resid;
			break;
		default:
			BUG();
			break;
		}
		/* Only the first cpu receives the remainder. */
		resid = 0;
	}
	clear_bit(field, &mp->m_icsb_counters);
	xfs_icsb_unlock_all_counters(mp);
}
2309
2310void
2311xfs_icsb_sync_counters_locked(
2312 xfs_mount_t *mp,
2313 int flags)
2314{
2315 xfs_icsb_cnts_t cnt;
2316
2317 xfs_icsb_count(mp, &cnt, flags);
2318
2319 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
2320 mp->m_sb.sb_icount = cnt.icsb_icount;
2321 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
2322 mp->m_sb.sb_ifree = cnt.icsb_ifree;
2323 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
2324 mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
2325}
2326
2327
2328
2329
/*
 * Accurate update of per-cpu counters to incore superblock: locked
 * wrapper around xfs_icsb_sync_counters_locked().
 */
void
xfs_icsb_sync_counters(
	xfs_mount_t	*mp,
	int		flags)
{
	spin_lock(&mp->m_sb_lock);
	xfs_icsb_sync_counters_locked(mp, flags);
	spin_unlock(&mp->m_sb_lock);
}
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
/*
 * Minimum per-cpu share required before a counter is re-enabled in
 * distributed mode.  If the per-cpu share would be smaller than this,
 * the counter stays in global (disabled) mode to avoid constant
 * rebalancing near empty.
 */
#define XFS_ICSB_INO_CNTR_REENABLE	(uint64_t)64
#define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
		(uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
/*
 * Rebalance a counter: fold it into the global superblock (disabling
 * per-cpu mode), then, if each cpu's share meets the re-enable
 * threshold, redistribute it evenly across the online cpus.  Note the
 * early returns deliberately leave the counter disabled when the share
 * is too small.  Caller must hold m_sb_lock and the m_icsb_mutex.
 */
STATIC void
xfs_icsb_balance_counter_locked(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int		min_per_cpu)
{
	uint64_t	count, resid;
	int		weight = num_online_cpus();
	uint64_t	min = (uint64_t)min_per_cpu;

	/* disable counter and sync counter */
	xfs_icsb_disable_counter(mp, field);

	/* update counters  - compute the per-cpu share and remainder */
	switch (field) {
	case XFS_SBS_ICOUNT:
		count = mp->m_sb.sb_icount;
		resid = do_div(count, weight);
		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
			return;
		break;
	case XFS_SBS_IFREE:
		count = mp->m_sb.sb_ifree;
		resid = do_div(count, weight);
		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
			return;
		break;
	case XFS_SBS_FDBLOCKS:
		count = mp->m_sb.sb_fdblocks;
		resid = do_div(count, weight);
		if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
			return;
		break;
	default:
		BUG();
		count = resid = 0;	/* quiet, gcc */
		break;
	}

	xfs_icsb_enable_counter(mp, field, count, resid);
}
2400
/* Locked wrapper around xfs_icsb_balance_counter_locked(). */
STATIC void
xfs_icsb_balance_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t  fields,
	int		min_per_cpu)
{
	spin_lock(&mp->m_sb_lock);
	xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu);
	spin_unlock(&mp->m_sb_lock);
}
2411
/*
 * Apply a delta to one of the distributed (per-cpu) superblock counters.
 *
 * Fast path: modify only this cpu's counter under its per-counter lock.
 * If the counter is disabled we fall to the slow path and modify the
 * global superblock value directly; if this cpu's share would go
 * negative we rebalance the counter across cpus and retry.
 */
int
xfs_icsb_modify_counters(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int64_t		delta,
	int		rsvd)
{
	xfs_icsb_cnts_t	*icsbp;
	long long	lcounter;	/* long counter for 64 bit fields */
	int		ret = 0;

	might_sleep();
again:
	preempt_disable();
	icsbp = this_cpu_ptr(mp->m_sb_cnts);

	/*
	 * if the counter is disabled, go to slow path
	 */
	if (unlikely(xfs_icsb_counter_disabled(mp, field)))
		goto slow_path;
	xfs_icsb_lock_cntr(icsbp);
	/* re-check under the per-cpu lock: disable may have raced in */
	if (unlikely(xfs_icsb_counter_disabled(mp, field))) {
		xfs_icsb_unlock_cntr(icsbp);
		goto slow_path;
	}

	switch (field) {
	case XFS_SBS_ICOUNT:
		lcounter = icsbp->icsb_icount;
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto balance_counter;
		icsbp->icsb_icount = lcounter;
		break;

	case XFS_SBS_IFREE:
		lcounter = icsbp->icsb_ifree;
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto balance_counter;
		icsbp->icsb_ifree = lcounter;
		break;

	case XFS_SBS_FDBLOCKS:
		BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);

		lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto balance_counter;
		icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
		break;
	default:
		BUG();
		break;
	}
	xfs_icsb_unlock_cntr(icsbp);
	preempt_enable();
	return 0;

slow_path:
	preempt_enable();

	/*
	 * serialise with a mutex so we don't burn lots of cpu on
	 * the superblock lock. We still need to hold the superblock
	 * lock, however, when we modify the global structures.
	 */
	xfs_icsb_lock(mp);

	/*
	 * Now running atomically.
	 *
	 * If the counter is enabled, someone has beaten us to rebalancing.
	 * Drop the lock and try again in the fast path.
	 */
	if (!(xfs_icsb_counter_disabled(mp, field))) {
		xfs_icsb_unlock(mp);
		goto again;
	}

	/*
	 * The counter is currently disabled.  Because we are running
	 * atomically here, we know a rebalance cannot be in progress.
	 * Hence we can go straight to operating on the global superblock.
	 * We do not call xfs_mod_incore_sb() here even though we need the
	 * m_sb_lock: doing so would re-enter this function and deadlock.
	 * Instead we take m_sb_lock ourselves and call the unlocked
	 * variant, which operates directly on the global counters.
	 */
	spin_lock(&mp->m_sb_lock);
	ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Now that we've modified the global superblock, we may be able
	 * to re-enable the distributed counters, so rebalance before
	 * dropping the mutex.
	 */
	if (ret != ENOSPC)
		xfs_icsb_balance_counter(mp, field, 0);
	xfs_icsb_unlock(mp);
	return ret;

balance_counter:
	xfs_icsb_unlock_cntr(icsbp);
	preempt_enable();

	/*
	 * We may have multiple threads here if multiple per-cpu counters
	 * run dry at the same time.  This will mean we can do more
	 * balances than strictly necessary, but it is not the common
	 * slowpath case.
	 */
	xfs_icsb_lock(mp);

	/*
	 * Rebalance with 'delta' as the minimum per-cpu share so that
	 * the retry (fast path or slow path) will not need another
	 * balance operation.
	 */
	xfs_icsb_balance_counter(mp, field, delta);
	xfs_icsb_unlock(mp);
	goto again;
}
2544
2545#endif
2546