#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_utils.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_cksum.h"
#include "xfs_buf_item.h"

#ifdef HAVE_PERCPU_SB
STATIC void	xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
						int);
STATIC void	xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t,
						int);
STATIC void	xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
#else

#define xfs_icsb_balance_counter(mp, a, b)		do { } while (0)
#define xfs_icsb_balance_counter_locked(mp, a, b)	do { } while (0)
#endif

static const struct {
	short offset;
	short type;	/* 0 = integer, 1 = binary */
} xfs_sb_info[] = {
	{ offsetof(xfs_sb_t, sb_magicnum), 0 },
	{ offsetof(xfs_sb_t, sb_blocksize), 0 },
	{ offsetof(xfs_sb_t, sb_dblocks), 0 },
	{ offsetof(xfs_sb_t, sb_rblocks), 0 },
	{ offsetof(xfs_sb_t, sb_rextents), 0 },
	{ offsetof(xfs_sb_t, sb_uuid), 1 },
	{ offsetof(xfs_sb_t, sb_logstart), 0 },
	{ offsetof(xfs_sb_t, sb_rootino), 0 },
	{ offsetof(xfs_sb_t, sb_rbmino), 0 },
	{ offsetof(xfs_sb_t, sb_rsumino), 0 },
	{ offsetof(xfs_sb_t, sb_rextsize), 0 },
	{ offsetof(xfs_sb_t, sb_agblocks), 0 },
	{ offsetof(xfs_sb_t, sb_agcount), 0 },
	{ offsetof(xfs_sb_t, sb_rbmblocks), 0 },
	{ offsetof(xfs_sb_t, sb_logblocks), 0 },
	{ offsetof(xfs_sb_t, sb_versionnum), 0 },
	{ offsetof(xfs_sb_t, sb_sectsize), 0 },
	{ offsetof(xfs_sb_t, sb_inodesize), 0 },
	{ offsetof(xfs_sb_t, sb_inopblock), 0 },
	{ offsetof(xfs_sb_t, sb_fname[0]), 1 },
	{ offsetof(xfs_sb_t, sb_blocklog), 0 },
	{ offsetof(xfs_sb_t, sb_sectlog), 0 },
	{ offsetof(xfs_sb_t, sb_inodelog), 0 },
	{ offsetof(xfs_sb_t, sb_inopblog), 0 },
	{ offsetof(xfs_sb_t, sb_agblklog), 0 },
	{ offsetof(xfs_sb_t, sb_rextslog), 0 },
	{ offsetof(xfs_sb_t, sb_inprogress), 0 },
	{ offsetof(xfs_sb_t, sb_imax_pct), 0 },
	{ offsetof(xfs_sb_t, sb_icount), 0 },
	{ offsetof(xfs_sb_t, sb_ifree), 0 },
	{ offsetof(xfs_sb_t, sb_fdblocks), 0 },
	{ offsetof(xfs_sb_t, sb_frextents), 0 },
	{ offsetof(xfs_sb_t, sb_uquotino), 0 },
	{ offsetof(xfs_sb_t, sb_gquotino), 0 },
	{ offsetof(xfs_sb_t, sb_qflags), 0 },
	{ offsetof(xfs_sb_t, sb_flags), 0 },
	{ offsetof(xfs_sb_t, sb_shared_vn), 0 },
	{ offsetof(xfs_sb_t, sb_inoalignmt), 0 },
	{ offsetof(xfs_sb_t, sb_unit), 0 },
	{ offsetof(xfs_sb_t, sb_width), 0 },
	{ offsetof(xfs_sb_t, sb_dirblklog), 0 },
	{ offsetof(xfs_sb_t, sb_logsectlog), 0 },
	{ offsetof(xfs_sb_t, sb_logsectsize), 0 },
	{ offsetof(xfs_sb_t, sb_logsunit), 0 },
	{ offsetof(xfs_sb_t, sb_features2), 0 },
	{ offsetof(xfs_sb_t, sb_bad_features2), 0 },
	{ offsetof(xfs_sb_t, sb_features_compat), 0 },
	{ offsetof(xfs_sb_t, sb_features_ro_compat), 0 },
	{ offsetof(xfs_sb_t, sb_features_incompat), 0 },
	{ offsetof(xfs_sb_t, sb_features_log_incompat), 0 },
	{ offsetof(xfs_sb_t, sb_crc), 0 },
	{ offsetof(xfs_sb_t, sb_pad), 0 },
	{ offsetof(xfs_sb_t, sb_pquotino), 0 },
	{ offsetof(xfs_sb_t, sb_lsn), 0 },
	{ sizeof(xfs_sb_t), 0 }
};

static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;

/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a UUID which is already mounted.
 */
STATIC int
xfs_uuid_mount(
	struct xfs_mount	*mp)
{
137 uuid_t *uuid = &mp->m_sb.sb_uuid;
138 int hole, i;
139
140 if (mp->m_flags & XFS_MOUNT_NOUUID)
141 return 0;
142
143 if (uuid_is_nil(uuid)) {
144 xfs_warn(mp, "Filesystem has nil UUID - can't mount");
145 return XFS_ERROR(EINVAL);
146 }
147
148 mutex_lock(&xfs_uuid_table_mutex);
149 for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
150 if (uuid_is_nil(&xfs_uuid_table[i])) {
151 hole = i;
152 continue;
153 }
154 if (uuid_equal(uuid, &xfs_uuid_table[i]))
155 goto out_duplicate;
156 }
157
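	/*
	 * No duplicate was found.  If there was no free slot either, grow
	 * the table by one entry before recording this filesystem's UUID.
	 */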
158 if (hole < 0) {
159 xfs_uuid_table = kmem_realloc(xfs_uuid_table,
160 (xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
161 xfs_uuid_table_size * sizeof(*xfs_uuid_table),
162 KM_SLEEP);
163 hole = xfs_uuid_table_size++;
164 }
165 xfs_uuid_table[hole] = *uuid;
166 mutex_unlock(&xfs_uuid_table_mutex);
167
168 return 0;
169
170 out_duplicate:
171 mutex_unlock(&xfs_uuid_table_mutex);
172 xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
173 return XFS_ERROR(EINVAL);
174}
175
176STATIC void
177xfs_uuid_unmount(
178 struct xfs_mount *mp)
179{
180 uuid_t *uuid = &mp->m_sb.sb_uuid;
181 int i;
182
183 if (mp->m_flags & XFS_MOUNT_NOUUID)
184 return;
185
186 mutex_lock(&xfs_uuid_table_mutex);
187 for (i = 0; i < xfs_uuid_table_size; i++) {
188 if (uuid_is_nil(&xfs_uuid_table[i]))
189 continue;
190 if (!uuid_equal(uuid, &xfs_uuid_table[i]))
191 continue;
192 memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
193 break;
194 }
195 ASSERT(i < xfs_uuid_table_size);
196 mutex_unlock(&xfs_uuid_table_mutex);
197}
198

/*
 * Reference counting access wrappers to the perag structures.
 * Because we never free per-ag structures, the only thing we
 * have to protect against changes is the tree structure itself.
 */
struct xfs_perag *
xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
{
208 struct xfs_perag *pag;
209 int ref = 0;
210
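	/*
	 * The lookup is done under rcu_read_lock() so no per-ag lock is
	 * needed; taking a reference on pag_ref pins the structure for the
	 * caller once it has been found in the radix tree.
	 */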
211 rcu_read_lock();
212 pag = radix_tree_lookup(&mp->m_perag_tree, agno);
213 if (pag) {
214 ASSERT(atomic_read(&pag->pag_ref) >= 0);
215 ref = atomic_inc_return(&pag->pag_ref);
216 }
217 rcu_read_unlock();
218 trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
219 return pag;
220}

/*
 * search from @first to find the next perag with the given tag set.
 */
struct xfs_perag *
xfs_perag_get_tag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		first,
	int			tag)
{
231 struct xfs_perag *pag;
232 int found;
233 int ref;
234
235 rcu_read_lock();
236 found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
237 (void **)&pag, first, 1, tag);
238 if (found <= 0) {
239 rcu_read_unlock();
240 return NULL;
241 }
242 ref = atomic_inc_return(&pag->pag_ref);
243 rcu_read_unlock();
244 trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_);
245 return pag;
246}
247
248void
249xfs_perag_put(struct xfs_perag *pag)
250{
251 int ref;
252
253 ASSERT(atomic_read(&pag->pag_ref) > 0);
254 ref = atomic_dec_return(&pag->pag_ref);
255 trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
256}
257
258STATIC void
259__xfs_free_perag(
260 struct rcu_head *head)
261{
262 struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
263
264 ASSERT(atomic_read(&pag->pag_ref) == 0);
265 kmem_free(pag);
266}

/*
 * Free up the per-ag resources associated with the mount structure.
 */
STATIC void
xfs_free_perag(
	xfs_mount_t	*mp)
{
275 xfs_agnumber_t agno;
276 struct xfs_perag *pag;
277
278 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
279 spin_lock(&mp->m_perag_lock);
280 pag = radix_tree_delete(&mp->m_perag_tree, agno);
281 spin_unlock(&mp->m_perag_lock);
282 ASSERT(pag);
283 ASSERT(atomic_read(&pag->pag_ref) == 0);
284 call_rcu(&pag->rcu_head, __xfs_free_perag);
285 }
286}

/*
 * Check size of device based on the (data/realtime) block count.
 * Note: this check is used by the growfs code as well as mount.
 */
int
xfs_sb_validate_fsb_count(
	xfs_sb_t	*sbp,
	__uint64_t	nblocks)
{
	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
	ASSERT(sbp->sb_blocklog >= BBSHIFT);

#if XFS_BIG_BLKNOS	/* Limited by ULONG_MAX of page cache index */
	if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
		return EFBIG;
#else			/* Limited by UINT_MAX of sectors */
	if (nblocks << (sbp->sb_blocklog - BBSHIFT) > UINT_MAX)
		return EFBIG;
#endif
	return 0;
}

/*
 * Check the validity of the SB found.
 */
STATIC int
xfs_mount_validate_sb(
	xfs_mount_t	*mp,
	xfs_sb_t	*sbp,
	bool		check_inprogress,
	bool		check_version)
{
320
321
322
323
324
325
326
327
328 if (sbp->sb_magicnum != XFS_SB_MAGIC) {
329 xfs_warn(mp, "bad magic number");
330 return XFS_ERROR(EWRONGFS);
331 }
332
333
334 if (!xfs_sb_good_version(sbp)) {
335 xfs_warn(mp, "bad version");
336 return XFS_ERROR(EWRONGFS);
337 }
338
339
340
341
342
343
344 if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
345 xfs_alert(mp,
346"Version 5 superblock detected. This kernel has EXPERIMENTAL support enabled!\n"
347"Use of these features in this kernel is at your own risk!");
348
349 if (xfs_sb_has_compat_feature(sbp,
350 XFS_SB_FEAT_COMPAT_UNKNOWN)) {
351 xfs_warn(mp,
352"Superblock has unknown compatible features (0x%x) enabled.\n"
353"Using a more recent kernel is recommended.",
354 (sbp->sb_features_compat &
355 XFS_SB_FEAT_COMPAT_UNKNOWN));
356 }
357
358 if (xfs_sb_has_ro_compat_feature(sbp,
359 XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
360 xfs_alert(mp,
361"Superblock has unknown read-only compatible features (0x%x) enabled.",
362 (sbp->sb_features_ro_compat &
363 XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
364 if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
365 xfs_warn(mp,
366"Attempted to mount read-only compatible filesystem read-write.\n"
367"Filesystem can only be safely mounted read only.");
368 return XFS_ERROR(EINVAL);
369 }
370 }
371 if (xfs_sb_has_incompat_feature(sbp,
372 XFS_SB_FEAT_INCOMPAT_UNKNOWN)) {
373 xfs_warn(mp,
374"Superblock has unknown incompatible features (0x%x) enabled.\n"
375"Filesystem can not be safely mounted by this kernel.",
376 (sbp->sb_features_incompat &
377 XFS_SB_FEAT_INCOMPAT_UNKNOWN));
378 return XFS_ERROR(EINVAL);
379 }
380 }
381
382 if (unlikely(
383 sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) {
384 xfs_warn(mp,
385 "filesystem is marked as having an external log; "
386 "specify logdev on the mount command line.");
387 return XFS_ERROR(EINVAL);
388 }
389
390 if (unlikely(
391 sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) {
392 xfs_warn(mp,
393 "filesystem is marked as having an internal log; "
394 "do not specify logdev on the mount command line.");
395 return XFS_ERROR(EINVAL);
396 }
397
398
399
400
401
402 if (unlikely(
403 sbp->sb_agcount <= 0 ||
404 sbp->sb_sectsize < XFS_MIN_SECTORSIZE ||
405 sbp->sb_sectsize > XFS_MAX_SECTORSIZE ||
406 sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG ||
407 sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG ||
408 sbp->sb_sectsize != (1 << sbp->sb_sectlog) ||
409 sbp->sb_blocksize < XFS_MIN_BLOCKSIZE ||
410 sbp->sb_blocksize > XFS_MAX_BLOCKSIZE ||
411 sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG ||
412 sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
413 sbp->sb_blocksize != (1 << sbp->sb_blocklog) ||
414 sbp->sb_inodesize < XFS_DINODE_MIN_SIZE ||
415 sbp->sb_inodesize > XFS_DINODE_MAX_SIZE ||
416 sbp->sb_inodelog < XFS_DINODE_MIN_LOG ||
417 sbp->sb_inodelog > XFS_DINODE_MAX_LOG ||
418 sbp->sb_inodesize != (1 << sbp->sb_inodelog) ||
419 (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog) ||
420 (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) ||
421 (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) ||
422 (sbp->sb_imax_pct > 100 ) ||
423 sbp->sb_dblocks == 0 ||
424 sbp->sb_dblocks > XFS_MAX_DBLOCKS(sbp) ||
425 sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp))) {
426 XFS_CORRUPTION_ERROR("SB sanity check failed",
427 XFS_ERRLEVEL_LOW, mp, sbp);
428 return XFS_ERROR(EFSCORRUPTED);
429 }
430
431
432
433
434 if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
435 xfs_warn(mp,
436 "File system with blocksize %d bytes. "
437 "Only pagesize (%ld) or less will currently work.",
438 sbp->sb_blocksize, PAGE_SIZE);
439 return XFS_ERROR(ENOSYS);
440 }
441
442
443
444
445 switch (sbp->sb_inodesize) {
446 case 256:
447 case 512:
448 case 1024:
449 case 2048:
450 break;
451 default:
452 xfs_warn(mp, "inode size of %d bytes not supported",
453 sbp->sb_inodesize);
454 return XFS_ERROR(ENOSYS);
455 }
456
457 if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) ||
458 xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
459 xfs_warn(mp,
460 "file system too large to be mounted on this system.");
461 return XFS_ERROR(EFBIG);
462 }
463
464 if (check_inprogress && sbp->sb_inprogress) {
465 xfs_warn(mp, "Offline file system operation in progress!");
466 return XFS_ERROR(EFSCORRUPTED);
467 }
468
469
470
471
472 if (unlikely(!xfs_sb_version_hasdirv2(sbp))) {
473 xfs_warn(mp, "file system using version 1 directory format");
474 return XFS_ERROR(ENOSYS);
475 }
476
477 return 0;
478}
479
480int
481xfs_initialize_perag(
482 xfs_mount_t *mp,
483 xfs_agnumber_t agcount,
484 xfs_agnumber_t *maxagi)
485{
486 xfs_agnumber_t index;
487 xfs_agnumber_t first_initialised = 0;
488 xfs_perag_t *pag;
489 xfs_agino_t agino;
490 xfs_ino_t ino;
491 xfs_sb_t *sbp = &mp->m_sb;
492 int error = -ENOMEM;

	/*
	 * Walk the current per-ag tree so we don't try to initialise AGs
	 * that already exist (growfs case). Allocate and insert all the
	 * AGs we don't find ready for initialisation.
	 */
499 for (index = 0; index < agcount; index++) {
500 pag = xfs_perag_get(mp, index);
501 if (pag) {
502 xfs_perag_put(pag);
503 continue;
504 }
505 if (!first_initialised)
506 first_initialised = index;
507
508 pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
509 if (!pag)
510 goto out_unwind;
511 pag->pag_agno = index;
512 pag->pag_mount = mp;
513 spin_lock_init(&pag->pag_ici_lock);
514 mutex_init(&pag->pag_ici_reclaim_lock);
515 INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
516 spin_lock_init(&pag->pag_buf_lock);
517 pag->pag_buf_tree = RB_ROOT;
518
519 if (radix_tree_preload(GFP_NOFS))
520 goto out_unwind;
521
522 spin_lock(&mp->m_perag_lock);
523 if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
524 BUG();
525 spin_unlock(&mp->m_perag_lock);
526 radix_tree_preload_end();
527 error = -EEXIST;
528 goto out_unwind;
529 }
530 spin_unlock(&mp->m_perag_lock);
531 radix_tree_preload_end();
532 }
533
534
535
536
537
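	/*
	 * Work out the highest inode number this geometry can produce and
	 * use it to decide whether inode allocation must be confined to
	 * 32-bit inode numbers.
	 */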
538 agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
539 ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
540
541 if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
542 mp->m_flags |= XFS_MOUNT_32BITINODES;
543 else
544 mp->m_flags &= ~XFS_MOUNT_32BITINODES;
545
546 if (mp->m_flags & XFS_MOUNT_32BITINODES)
547 index = xfs_set_inode32(mp);
548 else
549 index = xfs_set_inode64(mp);
550
551 if (maxagi)
552 *maxagi = index;
553 return 0;
554
555out_unwind:
556 kmem_free(pag);
557 for (; index > first_initialised; index--) {
558 pag = radix_tree_delete(&mp->m_perag_tree, index);
559 kmem_free(pag);
560 }
561 return error;
562}

/*
 * Copy an on-disk superblock (big-endian) into the in-core, CPU-endian
 * form, field by field.
 */
void
xfs_sb_from_disk(
	struct xfs_sb	*to,
	xfs_dsb_t	*from)
{
569 to->sb_magicnum = be32_to_cpu(from->sb_magicnum);
570 to->sb_blocksize = be32_to_cpu(from->sb_blocksize);
571 to->sb_dblocks = be64_to_cpu(from->sb_dblocks);
572 to->sb_rblocks = be64_to_cpu(from->sb_rblocks);
573 to->sb_rextents = be64_to_cpu(from->sb_rextents);
574 memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid));
575 to->sb_logstart = be64_to_cpu(from->sb_logstart);
576 to->sb_rootino = be64_to_cpu(from->sb_rootino);
577 to->sb_rbmino = be64_to_cpu(from->sb_rbmino);
578 to->sb_rsumino = be64_to_cpu(from->sb_rsumino);
579 to->sb_rextsize = be32_to_cpu(from->sb_rextsize);
580 to->sb_agblocks = be32_to_cpu(from->sb_agblocks);
581 to->sb_agcount = be32_to_cpu(from->sb_agcount);
582 to->sb_rbmblocks = be32_to_cpu(from->sb_rbmblocks);
583 to->sb_logblocks = be32_to_cpu(from->sb_logblocks);
584 to->sb_versionnum = be16_to_cpu(from->sb_versionnum);
585 to->sb_sectsize = be16_to_cpu(from->sb_sectsize);
586 to->sb_inodesize = be16_to_cpu(from->sb_inodesize);
587 to->sb_inopblock = be16_to_cpu(from->sb_inopblock);
588 memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname));
589 to->sb_blocklog = from->sb_blocklog;
590 to->sb_sectlog = from->sb_sectlog;
591 to->sb_inodelog = from->sb_inodelog;
592 to->sb_inopblog = from->sb_inopblog;
593 to->sb_agblklog = from->sb_agblklog;
594 to->sb_rextslog = from->sb_rextslog;
595 to->sb_inprogress = from->sb_inprogress;
596 to->sb_imax_pct = from->sb_imax_pct;
597 to->sb_icount = be64_to_cpu(from->sb_icount);
598 to->sb_ifree = be64_to_cpu(from->sb_ifree);
599 to->sb_fdblocks = be64_to_cpu(from->sb_fdblocks);
600 to->sb_frextents = be64_to_cpu(from->sb_frextents);
601 to->sb_uquotino = be64_to_cpu(from->sb_uquotino);
602 to->sb_gquotino = be64_to_cpu(from->sb_gquotino);
603 to->sb_qflags = be16_to_cpu(from->sb_qflags);
604 to->sb_flags = from->sb_flags;
605 to->sb_shared_vn = from->sb_shared_vn;
606 to->sb_inoalignmt = be32_to_cpu(from->sb_inoalignmt);
607 to->sb_unit = be32_to_cpu(from->sb_unit);
608 to->sb_width = be32_to_cpu(from->sb_width);
609 to->sb_dirblklog = from->sb_dirblklog;
610 to->sb_logsectlog = from->sb_logsectlog;
611 to->sb_logsectsize = be16_to_cpu(from->sb_logsectsize);
612 to->sb_logsunit = be32_to_cpu(from->sb_logsunit);
613 to->sb_features2 = be32_to_cpu(from->sb_features2);
614 to->sb_bad_features2 = be32_to_cpu(from->sb_bad_features2);
615 to->sb_features_compat = be32_to_cpu(from->sb_features_compat);
616 to->sb_features_ro_compat = be32_to_cpu(from->sb_features_ro_compat);
617 to->sb_features_incompat = be32_to_cpu(from->sb_features_incompat);
618 to->sb_features_log_incompat =
619 be32_to_cpu(from->sb_features_log_incompat);
620 to->sb_pad = 0;
621 to->sb_pquotino = be64_to_cpu(from->sb_pquotino);
622 to->sb_lsn = be64_to_cpu(from->sb_lsn);
623}

/*
 * Copy in core superblock to ondisk one.
 *
 * The fields argument is mask of superblock fields that need to be copied in
 * superblock.
 */
void
xfs_sb_to_disk(
	xfs_dsb_t	*to,
	xfs_sb_t	*from,
	__int64_t	fields)
{
636 xfs_caddr_t to_ptr = (xfs_caddr_t)to;
637 xfs_caddr_t from_ptr = (xfs_caddr_t)from;
638 xfs_sb_field_t f;
639 int first;
640 int size;
641
642 ASSERT(fields);
643 if (!fields)
644 return;
645
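	/*
	 * Walk the set bits in the field mask.  Each field's size is the
	 * distance to the next entry in xfs_sb_info[]; byte-array fields
	 * (type == 1) are copied verbatim, integer fields are converted to
	 * big-endian on the way out to disk.
	 */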
646 while (fields) {
647 f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
648 first = xfs_sb_info[f].offset;
649 size = xfs_sb_info[f + 1].offset - first;
650
651 ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1);
652
653 if (size == 1 || xfs_sb_info[f].type == 1) {
654 memcpy(to_ptr + first, from_ptr + first, size);
655 } else {
656 switch (size) {
657 case 2:
658 *(__be16 *)(to_ptr + first) =
659 cpu_to_be16(*(__u16 *)(from_ptr + first));
660 break;
661 case 4:
662 *(__be32 *)(to_ptr + first) =
663 cpu_to_be32(*(__u32 *)(from_ptr + first));
664 break;
665 case 8:
666 *(__be64 *)(to_ptr + first) =
667 cpu_to_be64(*(__u64 *)(from_ptr + first));
668 break;
669 default:
670 ASSERT(0);
671 }
672 }
673
674 fields &= ~(1LL << f);
675 }
676}
677
678static int
679xfs_sb_verify(
680 struct xfs_buf *bp,
681 bool check_version)
682{
683 struct xfs_mount *mp = bp->b_target->bt_mount;
684 struct xfs_sb sb;
685
686 xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp));
687
688
689
690
691
692 return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR,
693 check_version);
694}

/*
 * If the superblock has the CRC feature bit set or the CRC field is non-null,
 * check that the CRC is valid.  We check the CRC field is non-null because a
 * single bit error could clear the feature bit and unknowingly corrupt
 * the CRC validation.
 */
static void
xfs_sb_read_verify(
	struct xfs_buf	*bp)
{
707 struct xfs_mount *mp = bp->b_target->bt_mount;
708 struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp);
709 int error;
710
711
712
713
714
715 if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC) &&
716 (((be16_to_cpu(dsb->sb_versionnum) & XFS_SB_VERSION_NUMBITS) ==
717 XFS_SB_VERSION_5) ||
718 dsb->sb_crc != 0)) {
719
720 if (!xfs_verify_cksum(bp->b_addr, be16_to_cpu(dsb->sb_sectsize),
721 offsetof(struct xfs_sb, sb_crc))) {
722 error = EFSCORRUPTED;
723 goto out_error;
724 }
725 }
726 error = xfs_sb_verify(bp, true);
727
728out_error:
729 if (error) {
730 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
731 xfs_buf_ioerror(bp, error);
732 }
733}
734
735
736
737
738
739
740
741static void
742xfs_sb_quiet_read_verify(
743 struct xfs_buf *bp)
744{
745 struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp);
746
747
748 if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC)) {
749
750 xfs_sb_read_verify(bp);
751 return;
752 }
753
754 xfs_buf_ioerror(bp, EWRONGFS);
755}
756
757static void
758xfs_sb_write_verify(
759 struct xfs_buf *bp)
760{
761 struct xfs_mount *mp = bp->b_target->bt_mount;
762 struct xfs_buf_log_item *bip = bp->b_fspriv;
763 int error;
764
765 error = xfs_sb_verify(bp, false);
766 if (error) {
767 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
768 xfs_buf_ioerror(bp, error);
769 return;
770 }
771
772 if (!xfs_sb_version_hascrc(&mp->m_sb))
773 return;
774
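	/*
	 * For CRC-enabled superblocks, stamp the LSN of this write from the
	 * buffer log item (if the buffer is being logged) and then recompute
	 * the checksum over the buffer contents.
	 */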
775 if (bip)
776 XFS_BUF_TO_SBP(bp)->sb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
777
778 xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
779 offsetof(struct xfs_sb, sb_crc));
780}
781
782const struct xfs_buf_ops xfs_sb_buf_ops = {
783 .verify_read = xfs_sb_read_verify,
784 .verify_write = xfs_sb_write_verify,
785};
786
787static const struct xfs_buf_ops xfs_sb_quiet_buf_ops = {
788 .verify_read = xfs_sb_quiet_read_verify,
789 .verify_write = xfs_sb_write_verify,
790};

/*
 * xfs_readsb
 *
 * Does the initial read of the superblock.
 */
int
xfs_readsb(xfs_mount_t *mp, int flags)
{
800 unsigned int sector_size;
801 struct xfs_buf *bp;
802 struct xfs_sb *sbp = &mp->m_sb;
803 int error;
804 int loud = !(flags & XFS_MFSI_QUIET);
805
806 ASSERT(mp->m_sb_bp == NULL);
807 ASSERT(mp->m_ddev_targp != NULL);
808
809
810
811
812
813
814 sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
815
816reread:
817 bp = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
818 BTOBB(sector_size), 0,
819 loud ? &xfs_sb_buf_ops
820 : &xfs_sb_quiet_buf_ops);
821 if (!bp) {
822 if (loud)
823 xfs_warn(mp, "SB buffer read failed");
824 return EIO;
825 }
826 if (bp->b_error) {
827 error = bp->b_error;
828 if (loud)
829 xfs_warn(mp, "SB validate failed with error %d.", error);
830 goto release_buf;
831 }
832
833
834
835
836 xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));
837
838
839
840
841 if (sector_size > sbp->sb_sectsize) {
842 if (loud)
843 xfs_warn(mp, "device supports %u byte sectors (not %u)",
844 sector_size, sbp->sb_sectsize);
845 error = ENOSYS;
846 goto release_buf;
847 }
848
849
850
851
852
853 if (sector_size < sbp->sb_sectsize) {
854 xfs_buf_relse(bp);
855 sector_size = sbp->sb_sectsize;
856 goto reread;
857 }
858
859
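	/* Initialize per-cpu superblock counters from the just-read values */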
860 xfs_icsb_reinit_counters(mp);
861
862
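	/* no need to be quiet anymore, so reset the buf ops */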
863 bp->b_ops = &xfs_sb_buf_ops;
864
865 mp->m_sb_bp = bp;
866 xfs_buf_unlock(bp);
867 return 0;
868
869release_buf:
870 xfs_buf_relse(bp);
871 return error;
872}

/*
 * xfs_mount_common
 *
 * Mount initialization code establishing various mount
 * fields from the superblock associated with the given
 * mount structure.
 */
STATIC void
xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
{
885 mp->m_agfrotor = mp->m_agirotor = 0;
886 spin_lock_init(&mp->m_agirotor_lock);
887 mp->m_maxagi = mp->m_sb.sb_agcount;
888 mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
889 mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
890 mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
891 mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
892 mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
893 mp->m_blockmask = sbp->sb_blocksize - 1;
894 mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
895 mp->m_blockwmask = mp->m_blockwsize - 1;
896
897 mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1);
898 mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0);
899 mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2;
900 mp->m_alloc_mnr[1] = mp->m_alloc_mxr[1] / 2;
901
902 mp->m_inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
903 mp->m_inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
904 mp->m_inobt_mnr[0] = mp->m_inobt_mxr[0] / 2;
905 mp->m_inobt_mnr[1] = mp->m_inobt_mxr[1] / 2;
906
907 mp->m_bmap_dmxr[0] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 1);
908 mp->m_bmap_dmxr[1] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 0);
909 mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2;
910 mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2;
911
912 mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
913 mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
914 sbp->sb_inopblock);
915 mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
916}

/*
 * xfs_initialize_perag_data
 *
 * Read in each per-ag structure so we can count up the number of
 * allocated inodes, free inodes and used filesystem blocks as this
 * information is no longer persistent in the superblock. Once we have
 * this information, write it into the in-core superblock structure.
 */
STATIC int
xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
{
929 xfs_agnumber_t index;
930 xfs_perag_t *pag;
931 xfs_sb_t *sbp = &mp->m_sb;
932 uint64_t ifree = 0;
933 uint64_t ialloc = 0;
934 uint64_t bfree = 0;
935 uint64_t bfreelst = 0;
936 uint64_t btree = 0;
937 int error;
938
939 for (index = 0; index < agcount; index++) {
940
941
942
943
944
945 error = xfs_alloc_pagf_init(mp, NULL, index, 0);
946 if (error)
947 return error;
948
949 error = xfs_ialloc_pagi_init(mp, NULL, index);
950 if (error)
951 return error;
952 pag = xfs_perag_get(mp, index);
953 ifree += pag->pagi_freecount;
954 ialloc += pag->pagi_count;
955 bfree += pag->pagf_freeblks;
956 bfreelst += pag->pagf_flcount;
957 btree += pag->pagf_btreeblks;
958 xfs_perag_put(pag);
959 }
960
961
962
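	/*
	 * Overwrite the incore superblock counters with the just-read
	 * per-AG totals.
	 */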
963 spin_lock(&mp->m_sb_lock);
964 sbp->sb_ifree = ifree;
965 sbp->sb_icount = ialloc;
966 sbp->sb_fdblocks = bfree + bfreelst + btree;
967 spin_unlock(&mp->m_sb_lock);
968
969
970 xfs_icsb_reinit_counters(mp);
971
972 return 0;
973}

/*
 * Update alignment values based on mount options and sb values
 */
STATIC int
xfs_update_alignment(xfs_mount_t *mp)
{
981 xfs_sb_t *sbp = &(mp->m_sb);
982
983 if (mp->m_dalign) {
984
985
986
987
988 if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
989 (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
990 if (mp->m_flags & XFS_MOUNT_RETERR) {
991 xfs_warn(mp, "alignment check failed: "
992 "(sunit/swidth vs. blocksize)");
993 return XFS_ERROR(EINVAL);
994 }
995 mp->m_dalign = mp->m_swidth = 0;
996 } else {
997
998
999
1000 mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
1001 if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
1002 if (mp->m_flags & XFS_MOUNT_RETERR) {
1003 xfs_warn(mp, "alignment check failed: "
1004 "(sunit/swidth vs. ag size)");
1005 return XFS_ERROR(EINVAL);
1006 }
1007 xfs_warn(mp,
1008 "stripe alignment turned off: sunit(%d)/swidth(%d) "
1009 "incompatible with agsize(%d)",
1010 mp->m_dalign, mp->m_swidth,
1011 sbp->sb_agblocks);
1012
1013 mp->m_dalign = 0;
1014 mp->m_swidth = 0;
1015 } else if (mp->m_dalign) {
1016 mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
1017 } else {
1018 if (mp->m_flags & XFS_MOUNT_RETERR) {
1019 xfs_warn(mp, "alignment check failed: "
1020 "sunit(%d) less than bsize(%d)",
1021 mp->m_dalign,
1022 mp->m_blockmask +1);
1023 return XFS_ERROR(EINVAL);
1024 }
1025 mp->m_swidth = 0;
1026 }
1027 }
1028
1029
1030
1031
1032
1033 if (xfs_sb_version_hasdalign(sbp)) {
1034 if (sbp->sb_unit != mp->m_dalign) {
1035 sbp->sb_unit = mp->m_dalign;
1036 mp->m_update_flags |= XFS_SB_UNIT;
1037 }
1038 if (sbp->sb_width != mp->m_swidth) {
1039 sbp->sb_width = mp->m_swidth;
1040 mp->m_update_flags |= XFS_SB_WIDTH;
1041 }
1042 }
1043 } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
1044 xfs_sb_version_hasdalign(&mp->m_sb)) {
1045 mp->m_dalign = sbp->sb_unit;
1046 mp->m_swidth = sbp->sb_width;
1047 }
1048
1049 return 0;
1050}

/*
 * Set the maximum inode count for this filesystem
 */
STATIC void
xfs_set_maxicount(xfs_mount_t *mp)
{
1058 xfs_sb_t *sbp = &(mp->m_sb);
1059 __uint64_t icount;
1060
1061 if (sbp->sb_imax_pct) {
1062
1063
1064
1065
1066 icount = sbp->sb_dblocks * sbp->sb_imax_pct;
1067 do_div(icount, 100);
1068 do_div(icount, mp->m_ialloc_blks);
1069 mp->m_maxicount = (icount * mp->m_ialloc_blks) <<
1070 sbp->sb_inopblog;
1071 } else {
1072 mp->m_maxicount = 0;
1073 }
1074}

/*
 * Set the default minimum read and write sizes unless
 * already specified in a mount option.
 * We use smaller I/O sizes when the file system
 * is being used for NFS service (wsync mount option).
 */
STATIC void
xfs_set_rw_sizes(xfs_mount_t *mp)
{
1085 xfs_sb_t *sbp = &(mp->m_sb);
1086 int readio_log, writeio_log;
1087
1088 if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
1089 if (mp->m_flags & XFS_MOUNT_WSYNC) {
1090 readio_log = XFS_WSYNC_READIO_LOG;
1091 writeio_log = XFS_WSYNC_WRITEIO_LOG;
1092 } else {
1093 readio_log = XFS_READIO_LOG_LARGE;
1094 writeio_log = XFS_WRITEIO_LOG_LARGE;
1095 }
1096 } else {
1097 readio_log = mp->m_readio_log;
1098 writeio_log = mp->m_writeio_log;
1099 }
1100
1101 if (sbp->sb_blocklog > readio_log) {
1102 mp->m_readio_log = sbp->sb_blocklog;
1103 } else {
1104 mp->m_readio_log = readio_log;
1105 }
1106 mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
1107 if (sbp->sb_blocklog > writeio_log) {
1108 mp->m_writeio_log = sbp->sb_blocklog;
1109 } else {
1110 mp->m_writeio_log = writeio_log;
1111 }
1112 mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
1113}

/*
 * precalculate the low space thresholds for dynamic speculative preallocation.
 */
void
xfs_set_low_space_thresholds(
	struct xfs_mount	*mp)
{
1122 int i;
1123
1124 for (i = 0; i < XFS_LOWSP_MAX; i++) {
1125 __uint64_t space = mp->m_sb.sb_dblocks;
1126
1127 do_div(space, 100);
1128 mp->m_low_space[i] = space * (i + 1);
1129 }
1130}

/*
 * Set whether we're using inode alignment.
 */
STATIC void
xfs_set_inoalignment(xfs_mount_t *mp)
{
1139 if (xfs_sb_version_hasalign(&mp->m_sb) &&
1140 mp->m_sb.sb_inoalignmt >=
1141 XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
1142 mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
1143 else
1144 mp->m_inoalign_mask = 0;
1145
1146
1147
1148
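	/*
	 * If we are using stripe alignment, check whether
	 * the stripe unit is a multiple of the inode alignment
	 */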
1149 if (mp->m_dalign && mp->m_inoalign_mask &&
1150 !(mp->m_dalign & mp->m_inoalign_mask))
1151 mp->m_sinoalign = mp->m_dalign;
1152 else
1153 mp->m_sinoalign = 0;
1154}

/*
 * Check that the data (and log if separate) is an ok size.
 */
STATIC int
xfs_check_sizes(xfs_mount_t *mp)
{
1162 xfs_buf_t *bp;
1163 xfs_daddr_t d;
1164
1165 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
1166 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
1167 xfs_warn(mp, "filesystem size mismatch detected");
1168 return XFS_ERROR(EFBIG);
1169 }
1170 bp = xfs_buf_read_uncached(mp->m_ddev_targp,
1171 d - XFS_FSS_TO_BB(mp, 1),
1172 XFS_FSS_TO_BB(mp, 1), 0, NULL);
1173 if (!bp) {
1174 xfs_warn(mp, "last sector read failed");
1175 return EIO;
1176 }
1177 xfs_buf_relse(bp);
1178
1179 if (mp->m_logdev_targp != mp->m_ddev_targp) {
1180 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
1181 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
1182 xfs_warn(mp, "log size mismatch detected");
1183 return XFS_ERROR(EFBIG);
1184 }
1185 bp = xfs_buf_read_uncached(mp->m_logdev_targp,
1186 d - XFS_FSB_TO_BB(mp, 1),
1187 XFS_FSB_TO_BB(mp, 1), 0, NULL);
1188 if (!bp) {
1189 xfs_warn(mp, "log device read failed");
1190 return EIO;
1191 }
1192 xfs_buf_relse(bp);
1193 }
1194 return 0;
1195}

/*
 * Clear the quotaflags in memory and in the superblock.
 */
int
xfs_mount_reset_sbqflags(
	struct xfs_mount	*mp)
{
1204 int error;
1205 struct xfs_trans *tp;
1206
1207 mp->m_qflags = 0;
1208
1209
1210
1211
1212
1213 if (mp->m_sb.sb_qflags == 0)
1214 return 0;
1215 spin_lock(&mp->m_sb_lock);
1216 mp->m_sb.sb_qflags = 0;
1217 spin_unlock(&mp->m_sb_lock);
1218
1219
1220
1221
1222
1223 if (mp->m_flags & XFS_MOUNT_RDONLY)
1224 return 0;
1225
1226 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
1227 error = xfs_trans_reserve(tp, 0, XFS_QM_SBCHANGE_LOG_RES(mp),
1228 0, 0, XFS_DEFAULT_LOG_COUNT);
1229 if (error) {
1230 xfs_trans_cancel(tp, 0);
1231 xfs_alert(mp, "%s: Superblock update failed!", __func__);
1232 return error;
1233 }
1234
1235 xfs_mod_sb(tp, XFS_SB_QFLAGS);
1236 return xfs_trans_commit(tp, 0);
1237}
1238
1239__uint64_t
1240xfs_default_resblks(xfs_mount_t *mp)
1241{
1242 __uint64_t resblks;

	/*
	 * We default to 5% or 8192 fsbs of space reserved, whichever is
	 * smaller.  This is intended to cover concurrent allocation
	 * transactions when we initially hit enospc. These each require a 4
	 * block reservation. Hence by default we cover roughly 2000 concurrent
	 * allocation reservations.
	 */
1251 resblks = mp->m_sb.sb_dblocks;
1252 do_div(resblks, 20);
1253 resblks = min_t(__uint64_t, resblks, 8192);
1254 return resblks;
1255}

/*
 * This function does the following on an initial mount of a file system:
 *	- reads the superblock from disk and init the mount struct
 *	- if we're a 32-bit kernel, do a size check on the superblock
 *		so we don't mount terabyte filesystems
 *	- init mount struct realtime fields
 *	- allocate inode hash table for fs
 *	- init directory manager
 *	- perform recovery and init the log manager
 */
int
xfs_mountfs(
	xfs_mount_t	*mp)
{
1271 xfs_sb_t *sbp = &(mp->m_sb);
1272 xfs_inode_t *rip;
1273 __uint64_t resblks;
1274 uint quotamount = 0;
1275 uint quotaflags = 0;
1276 int error = 0;
1277
1278 xfs_mount_common(mp, sbp);
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296 if (xfs_sb_has_mismatched_features2(sbp)) {
1297 xfs_warn(mp, "correcting sb_features alignment problem");
1298 sbp->sb_features2 |= sbp->sb_bad_features2;
1299 sbp->sb_bad_features2 = sbp->sb_features2;
1300 mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2;
1301
1302
1303
1304
1305
1306 if (xfs_sb_version_hasattr2(&mp->m_sb) &&
1307 !(mp->m_flags & XFS_MOUNT_NOATTR2))
1308 mp->m_flags |= XFS_MOUNT_ATTR2;
1309 }
1310
1311 if (xfs_sb_version_hasattr2(&mp->m_sb) &&
1312 (mp->m_flags & XFS_MOUNT_NOATTR2)) {
1313 xfs_sb_version_removeattr2(&mp->m_sb);
1314 mp->m_update_flags |= XFS_SB_FEATURES2;
1315
1316
1317 if (!sbp->sb_features2)
1318 mp->m_update_flags |= XFS_SB_VERSIONNUM;
1319 }
1320
1321
1322
1323
1324
1325
1326
1327 error = xfs_update_alignment(mp);
1328 if (error)
1329 goto out;
1330
1331 xfs_alloc_compute_maxlevels(mp);
1332 xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
1333 xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
1334 xfs_ialloc_compute_maxlevels(mp);
1335
1336 xfs_set_maxicount(mp);
1337
1338 error = xfs_uuid_mount(mp);
1339 if (error)
1340 goto out;
1341
1342
1343
1344
1345 xfs_set_rw_sizes(mp);
1346
1347
1348 xfs_set_low_space_thresholds(mp);
1349
1350
1351
1352
1353
1354
1355 mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
1356
1357
1358
1359
1360 xfs_set_inoalignment(mp);
1361
1362
1363
1364
1365 error = xfs_check_sizes(mp);
1366 if (error)
1367 goto out_remove_uuid;
1368
1369
1370
1371
1372 error = xfs_rtmount_init(mp);
1373 if (error) {
1374 xfs_warn(mp, "RT mount failed");
1375 goto out_remove_uuid;
1376 }
1377
1378
1379
1380
1381
1382 uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);
1383
1384 mp->m_dmevmask = 0;
1385
1386 xfs_dir_mount(mp);
1387
1388
1389
1390
1391 mp->m_attr_magicpct = (mp->m_sb.sb_blocksize * 37) / 100;
1392
1393
1394
1395
1396 xfs_trans_init(mp);
1397
1398
1399
1400
1401 spin_lock_init(&mp->m_perag_lock);
1402 INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1403 error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
1404 if (error) {
1405 xfs_warn(mp, "Failed per-ag init: %d", error);
1406 goto out_remove_uuid;
1407 }
1408
1409 if (!sbp->sb_logblocks) {
1410 xfs_warn(mp, "no log defined");
1411 XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp);
1412 error = XFS_ERROR(EFSCORRUPTED);
1413 goto out_free_perag;
1414 }
1415
1416
1417
1418
1419 error = xfs_log_mount(mp, mp->m_logdev_targp,
1420 XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
1421 XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
1422 if (error) {
1423 xfs_warn(mp, "log mount failed");
1424 goto out_fail_wait;
1425 }
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
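	/*
	 * If the filesystem uses lazy superblock counters and the previous
	 * unmount was not clean, the on-disk counters may be out of date, so
	 * recompute them from the per-AG headers before going any further.
	 */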
1446 if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
1447 !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
1448 !mp->m_sb.sb_inprogress) {
1449 error = xfs_initialize_perag_data(mp, sbp->sb_agcount);
1450 if (error)
1451 goto out_fail_wait;
1452 }
1453
1454
1455
1456
1457
1458 error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip);
1459 if (error) {
1460 xfs_warn(mp, "failed to read root inode");
1461 goto out_log_dealloc;
1462 }
1463
1464 ASSERT(rip != NULL);
1465
1466 if (unlikely(!S_ISDIR(rip->i_d.di_mode))) {
1467 xfs_warn(mp, "corrupted root inode %llu: not a directory",
1468 (unsigned long long)rip->i_ino);
1469 xfs_iunlock(rip, XFS_ILOCK_EXCL);
1470 XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
1471 mp);
1472 error = XFS_ERROR(EFSCORRUPTED);
1473 goto out_rele_rip;
1474 }
1475 mp->m_rootip = rip;
1476
1477 xfs_iunlock(rip, XFS_ILOCK_EXCL);
1478
1479
1480
1481
1482 error = xfs_rtmount_inodes(mp);
1483 if (error) {
1484
1485
1486
1487 xfs_warn(mp, "failed to read RT inodes");
1488 goto out_rele_rip;
1489 }
1490
1491
1492
1493
1494
1495
1496 if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
1497 error = xfs_mount_log_sb(mp, mp->m_update_flags);
1498 if (error) {
1499 xfs_warn(mp, "failed to write sb changes");
1500 goto out_rtunmount;
1501 }
1502 }
1503
1504
1505
1506
1507 if (XFS_IS_QUOTA_RUNNING(mp)) {
1508 error = xfs_qm_newmount(mp, "amount, "aflags);
1509 if (error)
1510 goto out_rtunmount;
1511 } else {
1512 ASSERT(!XFS_IS_QUOTA_ON(mp));
1513
1514
1515
1516
1517
1518
1519 if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
1520 xfs_notice(mp, "resetting quota flags");
1521 error = xfs_mount_reset_sbqflags(mp);
1522 if (error)
1523 return error;
1524 }
1525 }
1526
1527
1528
1529
1530
1531
1532 error = xfs_log_mount_finish(mp);
1533 if (error) {
1534 xfs_warn(mp, "log mount finish failed");
1535 goto out_rtunmount;
1536 }
1537
1538
1539
1540
1541 if (quotamount) {
1542 ASSERT(mp->m_qflags == 0);
1543 mp->m_qflags = quotaflags;
1544
1545 xfs_qm_mount_quotas(mp);
1546 }
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559 if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
1560 resblks = xfs_default_resblks(mp);
1561 error = xfs_reserve_blocks(mp, &resblks, NULL);
1562 if (error)
1563 xfs_warn(mp,
1564 "Unable to allocate reserve blocks. Continuing without reserve pool.");
1565 }
1566
1567 return 0;
1568
1569 out_rtunmount:
1570 xfs_rtunmount_inodes(mp);
1571 out_rele_rip:
1572 IRELE(rip);
1573 out_log_dealloc:
1574 xfs_log_unmount(mp);
1575 out_fail_wait:
1576 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
1577 xfs_wait_buftarg(mp->m_logdev_targp);
1578 xfs_wait_buftarg(mp->m_ddev_targp);
1579 out_free_perag:
1580 xfs_free_perag(mp);
1581 out_remove_uuid:
1582 xfs_uuid_unmount(mp);
1583 out:
1584 return error;
1585}

/*
 * This flushes out the inodes, dquots and the superblock, unmounts the
 * log and makes sure that incore structures are freed.
 */
void
xfs_unmountfs(
	struct xfs_mount	*mp)
{
1595 __uint64_t resblks;
1596 int error;
1597
1598 cancel_delayed_work_sync(&mp->m_eofblocks_work);
1599
1600 xfs_qm_unmount_quotas(mp);
1601 xfs_rtunmount_inodes(mp);
1602 IRELE(mp->m_rootip);
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614 xfs_log_force(mp, XFS_LOG_SYNC);
1615
1616
1617
1618
1619 xfs_ail_push_all_sync(mp->m_ail);
1620
1621
1622
1623
1624
1625
1626
1627 cancel_delayed_work_sync(&mp->m_reclaim_work);
1628 xfs_reclaim_inodes(mp, SYNC_WAIT);
1629
1630 xfs_qm_unmount(mp);
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646 resblks = 0;
1647 error = xfs_reserve_blocks(mp, &resblks, NULL);
1648 if (error)
1649 xfs_warn(mp, "Unable to free reserved block pool. "
1650 "Freespace may not be correct on next mount.");
1651
1652 error = xfs_log_sbcount(mp);
1653 if (error)
1654 xfs_warn(mp, "Unable to update superblock counters. "
1655 "Freespace may not be correct on next mount.");
1656
1657 xfs_log_unmount(mp);
1658 xfs_uuid_unmount(mp);
1659
1660#if defined(DEBUG)
1661 xfs_errortag_clearall(mp, 0);
1662#endif
1663 xfs_free_perag(mp);
1664}
1665
1666int
1667xfs_fs_writable(xfs_mount_t *mp)
1668{
1669 return !(mp->m_super->s_writers.frozen || XFS_FORCED_SHUTDOWN(mp) ||
1670 (mp->m_flags & XFS_MOUNT_RDONLY));
1671}

/*
 * xfs_log_sbcount
 *
 * Sync the superblock counters to disk.
 *
 * Note this code can be called during the process of freezing, so
 * we may need to use the transaction allocator which does not
 * block when the transaction subsystem is in its frozen state.
 */
int
xfs_log_sbcount(xfs_mount_t *mp)
{
1685 xfs_trans_t *tp;
1686 int error;
1687
1688 if (!xfs_fs_writable(mp))
1689 return 0;
1690
1691 xfs_icsb_sync_counters(mp, 0);
1692
1693
1694
1695
1696
1697 if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
1698 return 0;
1699
1700 tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP);
1701 error = xfs_trans_reserve(tp, 0, XFS_SB_LOG_RES(mp), 0, 0,
1702 XFS_DEFAULT_LOG_COUNT);
1703 if (error) {
1704 xfs_trans_cancel(tp, 0);
1705 return error;
1706 }
1707
1708 xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS);
1709 xfs_trans_set_sync(tp);
1710 error = xfs_trans_commit(tp, 0);
1711 return error;
1712}

/*
 * xfs_mod_sb() can be used to copy arbitrary changes to the
 * in-core superblock into the superblock buffer to be logged.
 * It does not provide the higher level of locking that is
 * needed to protect the in-core superblock from concurrent
 * access.
 */
void
xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
{
1724 xfs_buf_t *bp;
1725 int first;
1726 int last;
1727 xfs_mount_t *mp;
1728 xfs_sb_field_t f;
1729
1730 ASSERT(fields);
1731 if (!fields)
1732 return;
1733 mp = tp->t_mountp;
1734 bp = xfs_trans_getsb(tp, mp, 0);
1735 first = sizeof(xfs_sb_t);
1736 last = 0;
1737
1738
1739
1740 xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields);
1741
1742
1743 f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
1744 ASSERT((1LL << f) & XFS_SB_MOD_BITS);
1745 last = xfs_sb_info[f + 1].offset - 1;
1746
1747 f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
1748 ASSERT((1LL << f) & XFS_SB_MOD_BITS);
1749 first = xfs_sb_info[f].offset;
1750
1751 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
1752 xfs_trans_log_buf(tp, bp, first, last);
1753}

/*
 * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
 * a delta to a specified field in the in-core superblock.  Simply
 * switch on the field indicated and apply the delta to that field.
 * Fields are not allowed to dip below zero, so if the delta would
 * do this do not apply it and return EINVAL.
 *
 * The m_sb_lock must be held when this routine is called.
 */
STATIC int
xfs_mod_incore_sb_unlocked(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int64_t		delta,
	int		rsvd)
{
1772 int scounter;
1773 long long lcounter;
1774 long long res_used, rem;
1775
1776
1777
1778
1779
1780
1781
1782 switch (field) {
1783 case XFS_SBS_ICOUNT:
1784 lcounter = (long long)mp->m_sb.sb_icount;
1785 lcounter += delta;
1786 if (lcounter < 0) {
1787 ASSERT(0);
1788 return XFS_ERROR(EINVAL);
1789 }
1790 mp->m_sb.sb_icount = lcounter;
1791 return 0;
1792 case XFS_SBS_IFREE:
1793 lcounter = (long long)mp->m_sb.sb_ifree;
1794 lcounter += delta;
1795 if (lcounter < 0) {
1796 ASSERT(0);
1797 return XFS_ERROR(EINVAL);
1798 }
1799 mp->m_sb.sb_ifree = lcounter;
1800 return 0;
1801 case XFS_SBS_FDBLOCKS:
1802 lcounter = (long long)
1803 mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
1804 res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
1805
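		/*
		 * A positive delta refills the reserved block pool first and
		 * only the remainder is returned to the global free space
		 * count.  A negative delta that would take the count below
		 * zero may dip into the reserve pool, but only when rsvd is
		 * set; otherwise ENOSPC is returned.
		 */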
1806 if (delta > 0) {
1807 if (res_used > delta) {
1808 mp->m_resblks_avail += delta;
1809 } else {
1810 rem = delta - res_used;
1811 mp->m_resblks_avail = mp->m_resblks;
1812 lcounter += rem;
1813 }
1814 } else {
1815 lcounter += delta;
1816 if (lcounter >= 0) {
1817 mp->m_sb.sb_fdblocks = lcounter +
1818 XFS_ALLOC_SET_ASIDE(mp);
1819 return 0;
1820 }
1821
1822
1823
1824
1825
1826 if (!rsvd)
1827 return XFS_ERROR(ENOSPC);
1828
1829 lcounter = (long long)mp->m_resblks_avail + delta;
1830 if (lcounter >= 0) {
1831 mp->m_resblks_avail = lcounter;
1832 return 0;
1833 }
1834 printk_once(KERN_WARNING
1835 "Filesystem \"%s\": reserve blocks depleted! "
1836 "Consider increasing reserve pool size.",
1837 mp->m_fsname);
1838 return XFS_ERROR(ENOSPC);
1839 }
1840
1841 mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
1842 return 0;
1843 case XFS_SBS_FREXTENTS:
1844 lcounter = (long long)mp->m_sb.sb_frextents;
1845 lcounter += delta;
1846 if (lcounter < 0) {
1847 return XFS_ERROR(ENOSPC);
1848 }
1849 mp->m_sb.sb_frextents = lcounter;
1850 return 0;
1851 case XFS_SBS_DBLOCKS:
1852 lcounter = (long long)mp->m_sb.sb_dblocks;
1853 lcounter += delta;
1854 if (lcounter < 0) {
1855 ASSERT(0);
1856 return XFS_ERROR(EINVAL);
1857 }
1858 mp->m_sb.sb_dblocks = lcounter;
1859 return 0;
1860 case XFS_SBS_AGCOUNT:
1861 scounter = mp->m_sb.sb_agcount;
1862 scounter += delta;
1863 if (scounter < 0) {
1864 ASSERT(0);
1865 return XFS_ERROR(EINVAL);
1866 }
1867 mp->m_sb.sb_agcount = scounter;
1868 return 0;
1869 case XFS_SBS_IMAX_PCT:
1870 scounter = mp->m_sb.sb_imax_pct;
1871 scounter += delta;
1872 if (scounter < 0) {
1873 ASSERT(0);
1874 return XFS_ERROR(EINVAL);
1875 }
1876 mp->m_sb.sb_imax_pct = scounter;
1877 return 0;
1878 case XFS_SBS_REXTSIZE:
1879 scounter = mp->m_sb.sb_rextsize;
1880 scounter += delta;
1881 if (scounter < 0) {
1882 ASSERT(0);
1883 return XFS_ERROR(EINVAL);
1884 }
1885 mp->m_sb.sb_rextsize = scounter;
1886 return 0;
1887 case XFS_SBS_RBMBLOCKS:
1888 scounter = mp->m_sb.sb_rbmblocks;
1889 scounter += delta;
1890 if (scounter < 0) {
1891 ASSERT(0);
1892 return XFS_ERROR(EINVAL);
1893 }
1894 mp->m_sb.sb_rbmblocks = scounter;
1895 return 0;
1896 case XFS_SBS_RBLOCKS:
1897 lcounter = (long long)mp->m_sb.sb_rblocks;
1898 lcounter += delta;
1899 if (lcounter < 0) {
1900 ASSERT(0);
1901 return XFS_ERROR(EINVAL);
1902 }
1903 mp->m_sb.sb_rblocks = lcounter;
1904 return 0;
1905 case XFS_SBS_REXTENTS:
1906 lcounter = (long long)mp->m_sb.sb_rextents;
1907 lcounter += delta;
1908 if (lcounter < 0) {
1909 ASSERT(0);
1910 return XFS_ERROR(EINVAL);
1911 }
1912 mp->m_sb.sb_rextents = lcounter;
1913 return 0;
1914 case XFS_SBS_REXTSLOG:
1915 scounter = mp->m_sb.sb_rextslog;
1916 scounter += delta;
1917 if (scounter < 0) {
1918 ASSERT(0);
1919 return XFS_ERROR(EINVAL);
1920 }
1921 mp->m_sb.sb_rextslog = scounter;
1922 return 0;
1923 default:
1924 ASSERT(0);
1925 return XFS_ERROR(EINVAL);
1926 }
1927}

/*
 * xfs_mod_incore_sb() is used to change a field in the in-core
 * superblock structure by the specified delta.  This modification
 * is protected with the m_sb_lock.  Just use the xfs_mod_incore_sb_unlocked()
 * routine to do the work.
 */
int
xfs_mod_incore_sb(
	struct xfs_mount	*mp,
	xfs_sb_field_t		field,
	int64_t			delta,
	int			rsvd)
{
1942 int status;
1943
1944#ifdef HAVE_PERCPU_SB
1945 ASSERT(field < XFS_SBS_ICOUNT || field > XFS_SBS_FDBLOCKS);
1946#endif
1947 spin_lock(&mp->m_sb_lock);
1948 status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
1949 spin_unlock(&mp->m_sb_lock);
1950
1951 return status;
1952}

/*
 * Change more than one field in the in-core superblock structure at a time.
 *
 * The fields and changes to those fields are specified in the array of
 * xfs_mod_sb structures passed in.  Either all of the specified deltas
 * will be applied or none of them will.  If any modified field dips below 0,
 * then all modifications will be backed out and EINVAL will be returned.
 *
 * Note that this function may not be used for the superblock values that
 * are tracked with the in-memory per-cpu counters - a direct call to
 * xfs_icsb_modify_counters is required for these.
 */
int
xfs_mod_incore_sb_batch(
	struct xfs_mount	*mp,
	xfs_mod_sb_t		*msb,
	uint			nmsb,
	int			rsvd)
{
1973 xfs_mod_sb_t *msbp;
1974 int error = 0;
1975
1976
1977
1978
1979
1980
1981
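	/*
	 * Loop through the array of mod structures and apply each
	 * individually.  If any fail, we trigger the unwind to back out all
	 * the applied changes.
	 */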
1982 spin_lock(&mp->m_sb_lock);
1983 for (msbp = msb; msbp < (msb + nmsb); msbp++) {
1984 ASSERT(msbp->msb_field < XFS_SBS_ICOUNT ||
1985 msbp->msb_field > XFS_SBS_FDBLOCKS);
1986
1987 error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
1988 msbp->msb_delta, rsvd);
1989 if (error)
1990 goto unwind;
1991 }
1992 spin_unlock(&mp->m_sb_lock);
1993 return 0;
1994
1995unwind:
1996 while (--msbp >= msb) {
1997 error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
1998 -msbp->msb_delta, rsvd);
1999 ASSERT(error == 0);
2000 }
2001 spin_unlock(&mp->m_sb_lock);
2002 return error;
2003}

/*
 * xfs_getsb() is called to obtain the buffer for the superblock.
 * The buffer is returned locked and read in from disk.
 * The buffer should be released with a call to xfs_buf_relse().
 *
 * If the flags parameter is BUF_TRYLOCK, then we'll only return
 * the superblock buffer if it can be locked without sleeping.
 * If it can't then we'll return NULL.
 */
struct xfs_buf *
xfs_getsb(
	struct xfs_mount	*mp,
	int			flags)
{
2019 struct xfs_buf *bp = mp->m_sb_bp;
2020
2021 if (!xfs_buf_trylock(bp)) {
2022 if (flags & XBF_TRYLOCK)
2023 return NULL;
2024 xfs_buf_lock(bp);
2025 }
2026
2027 xfs_buf_hold(bp);
2028 ASSERT(XFS_BUF_ISDONE(bp));
2029 return bp;
2030}

/*
 * Used to free the superblock along various error paths.
 */
void
xfs_freesb(
	struct xfs_mount	*mp)
{
2039 struct xfs_buf *bp = mp->m_sb_bp;
2040
2041 xfs_buf_lock(bp);
2042 mp->m_sb_bp = NULL;
2043 xfs_buf_relse(bp);
2044}

/*
 * Used to log changes to the superblock unit and width fields which could
 * be altered by the mount options, as well as any potential sb_features2
 * fixup.
 */
int
xfs_mount_log_sb(
	xfs_mount_t	*mp,
	__int64_t	fields)
{
2056 xfs_trans_t *tp;
2057 int error;
2058
2059 ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID |
2060 XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2 |
2061 XFS_SB_VERSIONNUM));
2062
2063 tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
2064 error = xfs_trans_reserve(tp, 0, XFS_SB_LOG_RES(mp), 0, 0,
2065 XFS_DEFAULT_LOG_COUNT);
2066 if (error) {
2067 xfs_trans_cancel(tp, 0);
2068 return error;
2069 }
2070 xfs_mod_sb(tp, fields);
2071 error = xfs_trans_commit(tp, 0);
2072 return error;
2073}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
	struct xfs_mount	*mp,
	char			*message)
{
2084 if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
2085 xfs_readonly_buftarg(mp->m_logdev_targp) ||
2086 (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
2087 xfs_notice(mp, "%s required on read-only device.", message);
2088 xfs_notice(mp, "write access unavailable, cannot proceed.");
2089 return EROFS;
2090 }
2091 return 0;
2092}

#ifdef HAVE_PERCPU_SB
/*
 * Per-cpu incore superblock counters.
 *
 * The contended superblock counters (icount, ifree, fdblocks) are
 * distributed across per-cpu structures so the common modification path
 * avoids taking the global m_sb_lock.  Because the incore superblock is
 * also used for ENOSPC checking, each counter can be disabled and drained
 * back into mp->m_sb when the remaining resource is too small to be spread
 * usefully across CPUs, and re-enabled (rebalanced) once there is enough
 * free resource again.
 *
 * When a counter is disabled, modifications fall back to the slow path,
 * xfs_mod_incore_sb_unlocked(), under m_sb_lock.  The m_icsb_mutex
 * serialises counter enable/disable/rebalance operations.
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * hot-plug CPU notifier support.
 *
 * We need a notifier per filesystem as we need to be able to identify
 * the filesystem to balance the counters out. This is achieved by
 * having a notifier block embedded in the xfs_mount_t and doing pointer
 * magic to get the mount pointer from the notifier block address.
 */
STATIC int
xfs_icsb_cpu_notify(
	struct notifier_block	*nfb,
	unsigned long		action,
	void			*hcpu)
{
2162 xfs_icsb_cnts_t *cntp;
2163 xfs_mount_t *mp;
2164
2165 mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier);
2166 cntp = (xfs_icsb_cnts_t *)
2167 per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu);
2168 switch (action) {
2169 case CPU_UP_PREPARE:
2170 case CPU_UP_PREPARE_FROZEN:
2171
2172
2173 memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
2174 break;
2175 case CPU_ONLINE:
2176 case CPU_ONLINE_FROZEN:
2177 xfs_icsb_lock(mp);
2178 xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
2179 xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
2180 xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
2181 xfs_icsb_unlock(mp);
2182 break;
2183 case CPU_DEAD:
2184 case CPU_DEAD_FROZEN:
2185
2186
2187
2188 xfs_icsb_lock(mp);
2189 spin_lock(&mp->m_sb_lock);
2190 xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
2191 xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
2192 xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);
2193
2194 mp->m_sb.sb_icount += cntp->icsb_icount;
2195 mp->m_sb.sb_ifree += cntp->icsb_ifree;
2196 mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;
2197
2198 memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
2199
2200 xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0);
2201 xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0);
2202 xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0);
2203 spin_unlock(&mp->m_sb_lock);
2204 xfs_icsb_unlock(mp);
2205 break;
2206 }
2207
2208 return NOTIFY_OK;
2209}
2210#endif
2211
2212int
2213xfs_icsb_init_counters(
2214 xfs_mount_t *mp)
2215{
2216 xfs_icsb_cnts_t *cntp;
2217 int i;
2218
2219 mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t);
2220 if (mp->m_sb_cnts == NULL)
2221 return -ENOMEM;
2222
2223#ifdef CONFIG_HOTPLUG_CPU
2224 mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
2225 mp->m_icsb_notifier.priority = 0;
2226 register_hotcpu_notifier(&mp->m_icsb_notifier);
2227#endif
2228
2229 for_each_online_cpu(i) {
2230 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2231 memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
2232 }
2233
2234 mutex_init(&mp->m_icsb_mutex);
2235
2236
2237
2238
2239
2240 mp->m_icsb_counters = -1;
2241 return 0;
2242}
2243
2244void
2245xfs_icsb_reinit_counters(
2246 xfs_mount_t *mp)
2247{
2248 xfs_icsb_lock(mp);
2249
2250
2251
2252
2253 mp->m_icsb_counters = -1;
2254 xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
2255 xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
2256 xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
2257 xfs_icsb_unlock(mp);
2258}
2259
2260void
2261xfs_icsb_destroy_counters(
2262 xfs_mount_t *mp)
2263{
2264 if (mp->m_sb_cnts) {
2265 unregister_hotcpu_notifier(&mp->m_icsb_notifier);
2266 free_percpu(mp->m_sb_cnts);
2267 }
2268 mutex_destroy(&mp->m_icsb_mutex);
2269}
2270
2271STATIC void
2272xfs_icsb_lock_cntr(
2273 xfs_icsb_cnts_t *icsbp)
2274{
2275 while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) {
2276 ndelay(1000);
2277 }
2278}
2279
2280STATIC void
2281xfs_icsb_unlock_cntr(
2282 xfs_icsb_cnts_t *icsbp)
2283{
2284 clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags);
2285}
2286
2287
2288STATIC void
2289xfs_icsb_lock_all_counters(
2290 xfs_mount_t *mp)
2291{
2292 xfs_icsb_cnts_t *cntp;
2293 int i;
2294
2295 for_each_online_cpu(i) {
2296 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2297 xfs_icsb_lock_cntr(cntp);
2298 }
2299}
2300
2301STATIC void
2302xfs_icsb_unlock_all_counters(
2303 xfs_mount_t *mp)
2304{
2305 xfs_icsb_cnts_t *cntp;
2306 int i;
2307
2308 for_each_online_cpu(i) {
2309 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2310 xfs_icsb_unlock_cntr(cntp);
2311 }
2312}
2313
2314STATIC void
2315xfs_icsb_count(
2316 xfs_mount_t *mp,
2317 xfs_icsb_cnts_t *cnt,
2318 int flags)
2319{
2320 xfs_icsb_cnts_t *cntp;
2321 int i;
2322
2323 memset(cnt, 0, sizeof(xfs_icsb_cnts_t));
2324
2325 if (!(flags & XFS_ICSB_LAZY_COUNT))
2326 xfs_icsb_lock_all_counters(mp);
2327
2328 for_each_online_cpu(i) {
2329 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2330 cnt->icsb_icount += cntp->icsb_icount;
2331 cnt->icsb_ifree += cntp->icsb_ifree;
2332 cnt->icsb_fdblocks += cntp->icsb_fdblocks;
2333 }
2334
2335 if (!(flags & XFS_ICSB_LAZY_COUNT))
2336 xfs_icsb_unlock_all_counters(mp);
2337}
2338
2339STATIC int
2340xfs_icsb_counter_disabled(
2341 xfs_mount_t *mp,
2342 xfs_sb_field_t field)
2343{
2344 ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
2345 return test_bit(field, &mp->m_icsb_counters);
2346}
2347
2348STATIC void
2349xfs_icsb_disable_counter(
2350 xfs_mount_t *mp,
2351 xfs_sb_field_t field)
2352{
2353 xfs_icsb_cnts_t cnt;
2354
2355 ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365 if (xfs_icsb_counter_disabled(mp, field))
2366 return;
2367
2368 xfs_icsb_lock_all_counters(mp);
2369 if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
2370
2371
2372 xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT);
2373 switch(field) {
2374 case XFS_SBS_ICOUNT:
2375 mp->m_sb.sb_icount = cnt.icsb_icount;
2376 break;
2377 case XFS_SBS_IFREE:
2378 mp->m_sb.sb_ifree = cnt.icsb_ifree;
2379 break;
2380 case XFS_SBS_FDBLOCKS:
2381 mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
2382 break;
2383 default:
2384 BUG();
2385 }
2386 }
2387
2388 xfs_icsb_unlock_all_counters(mp);
2389}
2390
2391STATIC void
2392xfs_icsb_enable_counter(
2393 xfs_mount_t *mp,
2394 xfs_sb_field_t field,
2395 uint64_t count,
2396 uint64_t resid)
2397{
2398 xfs_icsb_cnts_t *cntp;
2399 int i;
2400
2401 ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
2402
2403 xfs_icsb_lock_all_counters(mp);
2404 for_each_online_cpu(i) {
2405 cntp = per_cpu_ptr(mp->m_sb_cnts, i);
2406 switch (field) {
2407 case XFS_SBS_ICOUNT:
2408 cntp->icsb_icount = count + resid;
2409 break;
2410 case XFS_SBS_IFREE:
2411 cntp->icsb_ifree = count + resid;
2412 break;
2413 case XFS_SBS_FDBLOCKS:
2414 cntp->icsb_fdblocks = count + resid;
2415 break;
2416 default:
2417 BUG();
2418 break;
2419 }
2420 resid = 0;
2421 }
2422 clear_bit(field, &mp->m_icsb_counters);
2423 xfs_icsb_unlock_all_counters(mp);
2424}
2425
2426void
2427xfs_icsb_sync_counters_locked(
2428 xfs_mount_t *mp,
2429 int flags)
2430{
2431 xfs_icsb_cnts_t cnt;
2432
2433 xfs_icsb_count(mp, &cnt, flags);
2434
2435 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
2436 mp->m_sb.sb_icount = cnt.icsb_icount;
2437 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
2438 mp->m_sb.sb_ifree = cnt.icsb_ifree;
2439 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
2440 mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
2441}
2442
2443
2444
2445
2446void
2447xfs_icsb_sync_counters(
2448 xfs_mount_t *mp,
2449 int flags)
2450{
2451 spin_lock(&mp->m_sb_lock);
2452 xfs_icsb_sync_counters_locked(mp, flags);
2453 spin_unlock(&mp->m_sb_lock);
2454}

/*
 * Balance and enable/disable counters.
 *
 * We attempt to re-enable a counter only when the per-cpu share of the
 * remaining resource is above a minimum threshold; otherwise the counter
 * stays disabled and the global value in mp->m_sb remains authoritative.
 * The thresholds below give the smallest global count for which a counter
 * will be re-enabled.
 */
#define XFS_ICSB_INO_CNTR_REENABLE	(uint64_t)64
#define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
		(uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
2475STATIC void
2476xfs_icsb_balance_counter_locked(
2477 xfs_mount_t *mp,
2478 xfs_sb_field_t field,
2479 int min_per_cpu)
2480{
2481 uint64_t count, resid;
2482 int weight = num_online_cpus();
2483 uint64_t min = (uint64_t)min_per_cpu;
2484
2485
2486 xfs_icsb_disable_counter(mp, field);
2487
2488
2489 switch (field) {
2490 case XFS_SBS_ICOUNT:
2491 count = mp->m_sb.sb_icount;
2492 resid = do_div(count, weight);
2493 if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
2494 return;
2495 break;
2496 case XFS_SBS_IFREE:
2497 count = mp->m_sb.sb_ifree;
2498 resid = do_div(count, weight);
2499 if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
2500 return;
2501 break;
2502 case XFS_SBS_FDBLOCKS:
2503 count = mp->m_sb.sb_fdblocks;
2504 resid = do_div(count, weight);
2505 if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
2506 return;
2507 break;
2508 default:
2509 BUG();
2510 count = resid = 0;
2511 break;
2512 }
2513
2514 xfs_icsb_enable_counter(mp, field, count, resid);
2515}
2516
2517STATIC void
2518xfs_icsb_balance_counter(
2519 xfs_mount_t *mp,
2520 xfs_sb_field_t fields,
2521 int min_per_cpu)
2522{
2523 spin_lock(&mp->m_sb_lock);
2524 xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu);
2525 spin_unlock(&mp->m_sb_lock);
2526}
2527
2528int
2529xfs_icsb_modify_counters(
2530 xfs_mount_t *mp,
2531 xfs_sb_field_t field,
2532 int64_t delta,
2533 int rsvd)
2534{
2535 xfs_icsb_cnts_t *icsbp;
2536 long long lcounter;
2537 int ret = 0;
2538
2539 might_sleep();
2540again:
2541 preempt_disable();
2542 icsbp = this_cpu_ptr(mp->m_sb_cnts);
2543
2544
2545
2546
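	/*
	 * If the counter is disabled, fall back to the slow path immediately;
	 * otherwise re-check under the per-cpu counter lock to close the race
	 * with a concurrent disable.
	 */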
2547 if (unlikely(xfs_icsb_counter_disabled(mp, field)))
2548 goto slow_path;
2549 xfs_icsb_lock_cntr(icsbp);
2550 if (unlikely(xfs_icsb_counter_disabled(mp, field))) {
2551 xfs_icsb_unlock_cntr(icsbp);
2552 goto slow_path;
2553 }
2554
2555 switch (field) {
2556 case XFS_SBS_ICOUNT:
2557 lcounter = icsbp->icsb_icount;
2558 lcounter += delta;
2559 if (unlikely(lcounter < 0))
2560 goto balance_counter;
2561 icsbp->icsb_icount = lcounter;
2562 break;
2563
2564 case XFS_SBS_IFREE:
2565 lcounter = icsbp->icsb_ifree;
2566 lcounter += delta;
2567 if (unlikely(lcounter < 0))
2568 goto balance_counter;
2569 icsbp->icsb_ifree = lcounter;
2570 break;
2571
2572 case XFS_SBS_FDBLOCKS:
2573 BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);
2574
2575 lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
2576 lcounter += delta;
2577 if (unlikely(lcounter < 0))
2578 goto balance_counter;
2579 icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
2580 break;
2581 default:
2582 BUG();
2583 break;
2584 }
2585 xfs_icsb_unlock_cntr(icsbp);
2586 preempt_enable();
2587 return 0;
2588
2589slow_path:
2590 preempt_enable();
2591
2592
2593
2594
2595
2596
2597 xfs_icsb_lock(mp);
2598
2599
2600
2601
2602
2603
2604
2605 if (!(xfs_icsb_counter_disabled(mp, field))) {
2606 xfs_icsb_unlock(mp);
2607 goto again;
2608 }
2609
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621 spin_lock(&mp->m_sb_lock);
2622 ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
2623 spin_unlock(&mp->m_sb_lock);
2624
2625
2626
2627
2628
2629
2630
2631 if (ret != ENOSPC)
2632 xfs_icsb_balance_counter(mp, field, 0);
2633 xfs_icsb_unlock(mp);
2634 return ret;
2635
2636balance_counter:
2637 xfs_icsb_unlock_cntr(icsbp);
2638 preempt_enable();
2639
2640
2641
2642
2643
2644
2645
2646 xfs_icsb_lock(mp);
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656 xfs_icsb_balance_counter(mp, field, delta);
2657 xfs_icsb_unlock(mp);
2658 goto again;
2659}
2660
2661#endif
2662