/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_dinode.h"

/*
 * Return true if the inode is one of the filesystem's private metadata
 * inodes: the realtime bitmap/summary inodes, or the quota inodes.
 * These are never reported through bulkstat.
 */
STATIC int
xfs_internal_inum(
	xfs_mount_t	*mp,
	xfs_ino_t	ino)
{
	return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
		(xfs_sb_version_hasquota(&mp->m_sb) &&
		 xfs_is_quota_inode(&mp->m_sb, ino)));
}

/*
 * Return stat information for one inode.
 * Return 0 if ok, else errno.
 */
int
xfs_bulkstat_one_int(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	xfs_ino_t		ino,		/* inode to get data for */
	void __user		*buffer,	/* buffer to place output in */
	int			ubsize,		/* size of buffer */
	bulkstat_one_fmt_pf	formatter,	/* formatter, copy to user */
	int			*ubused,	/* bytes used by me */
	int			*stat)		/* BULKSTAT_RV_... */
{
	struct xfs_icdinode	*dic;		/* dinode core info pointer */
	struct xfs_inode	*ip;		/* incore inode pointer */
	struct xfs_bstat	*buf;		/* return buffer */
	int			error = 0;	/* error value */

	*stat = BULKSTAT_RV_NOTHING;

	if (!buffer || xfs_internal_inum(mp, ino))
		return XFS_ERROR(EINVAL);

	buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
	if (!buf)
		return XFS_ERROR(ENOMEM);

	error = xfs_iget(mp, NULL, ino,
			 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
			 XFS_ILOCK_SHARED, &ip);
	if (error) {
		*stat = BULKSTAT_RV_NOTHING;
		goto out_free;
	}

	ASSERT(ip != NULL);
	ASSERT(ip->i_imap.im_blkno != 0);

	dic = &ip->i_d;

	/*
	 * The dinode core returned by xfs_iget needs no further
	 * conversion; copy it straight into the stat buffer.
	 */
	buf->bs_nlink = dic->di_nlink;
	buf->bs_projid_lo = dic->di_projid_lo;
	buf->bs_projid_hi = dic->di_projid_hi;
	buf->bs_ino = ino;
	buf->bs_mode = dic->di_mode;
	buf->bs_uid = dic->di_uid;
	buf->bs_gid = dic->di_gid;
	buf->bs_size = dic->di_size;
	buf->bs_atime.tv_sec = dic->di_atime.t_sec;
	buf->bs_atime.tv_nsec = dic->di_atime.t_nsec;
	buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
	buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
	buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
	buf->bs_ctime.tv_nsec = dic->di_ctime.t_nsec;
	buf->bs_xflags = xfs_ip2xflags(ip);
	buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
	buf->bs_extents = dic->di_nextents;
	buf->bs_gen = dic->di_gen;
	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
	buf->bs_dmevmask = dic->di_dmevmask;
	buf->bs_dmstate = dic->di_dmstate;
	buf->bs_aextents = dic->di_anextents;
	buf->bs_forkoff = XFS_IFORK_BOFF(ip);

	switch (dic->di_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = ip->i_df.if_u2.if_rdev;
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_UUID:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
		break;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);

	error = formatter(buffer, ubsize, ubused, buf);
	if (!error)
		*stat = BULKSTAT_RV_DIDONE;

 out_free:
	kmem_free(buf);
	return error;
}
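
/*
 * Note on the ordering above: the ILOCK is dropped and the inode reference
 * released before the formatter runs, because the formatters used with this
 * function copy into a user buffer and may fault; no XFS locks can be held
 * across that copy.
 */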

/*
 * Copy one bulkstat result to the user's buffer.
 * Return 0 on success or a positive error.
 */
STATIC int
xfs_bulkstat_one_fmt(
	void			__user *ubuffer,
	int			ubsize,
	int			*ubused,
	const xfs_bstat_t	*buffer)
{
	if (ubsize < sizeof(*buffer))
		return XFS_ERROR(ENOMEM);
	if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
		return XFS_ERROR(EFAULT);
	if (ubused)
		*ubused = sizeof(*buffer);
	return 0;
}

int
xfs_bulkstat_one(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* buffer to place output in */
	int		ubsize,		/* size of buffer */
	int		*ubused,	/* bytes used by me */
	int		*stat)		/* BULKSTAT_RV_... */
{
	return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
				    xfs_bulkstat_one_fmt, ubused, stat);
}

#define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)
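
/*
 * XFS_BULKSTAT_UBLEFT is true while at least one more stat structure still
 * fits in the user buffer; it picks up statstruct_size from the scope of
 * the function it is used in.  Illustrative arithmetic only: with a 16 KiB
 * buffer (ubleft = 16384) and 128-byte records, the loops below can emit
 * 16384 / 128 = 128 records before the predicate goes false.
 */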

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 */
int					/* error status */
xfs_bulkstat(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_ino_t		*lastinop, /* last inode returned */
	int			*ubcountp, /* size of buffer/count returned */
	bulkstat_one_pf		formatter, /* func that'd fill a single buf */
	size_t			statstruct_size, /* sizeof struct filling */
	char			__user *ubuffer, /* buffer with inode stats */
	int			*done)	/* 1 if there are more stats to get */
{
	xfs_agblock_t		agbno = 0; /* allocation group block number */
	xfs_buf_t		*agbp;	/* agi header buffer */
	xfs_agi_t		*agi;	/* agi header data */
	xfs_agino_t		agino;	/* inode # in allocation group */
	xfs_agnumber_t		agno;	/* allocation group number */
	int			chunkidx; /* current index into inode chunk */
	int			clustidx; /* current index into inode cluster */
	xfs_btree_cur_t		*cur;	/* btree cursor for ialloc btree */
	int			end_of_ag; /* set if we've seen the ag end */
	int			error;	/* error code */
	int			fmterror; /* bulkstat formatter result */
	int			i;	/* loop index */
	int			icount;	/* count of inodes good in irbuf */
	size_t			irbsize; /* size of irec buffer in bytes */
	xfs_ino_t		ino;	/* inode number (filesystem) */
	xfs_inobt_rec_incore_t	*irbp;	/* current irec buffer pointer */
	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
	xfs_inobt_rec_incore_t	*irbufend; /* end of good irec buffer entries */
	xfs_ino_t		lastino; /* last inode number returned */
	int			blks_per_cluster; /* # of blocks per cluster */
	int			inodes_per_cluster; /* # of inodes per cluster */
	int			nirbuf;	/* size of irbuf */
	int			rval;	/* return value error code */
	int			tmp;	/* result value from btree calls */
	int			ubcount; /* size of user's buffer */
	int			ubleft;	/* bytes left in user's buffer */
	char			__user *ubufp;	/* pointer into user's buffer */
	int			ubelem;	/* spaces used in user's buffer */
	int			ubused;	/* bytes used by formatter */

	/*
	 * Validate the count pointer before anything is stored through it,
	 * then get the last inode value and see if there's nothing to do.
	 */
	if (!ubcountp || *ubcountp <= 0)
		return EINVAL;

	ino = (xfs_ino_t)*lastinop;
	lastino = ino;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	if (agno >= mp->m_sb.sb_agcount ||
	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
		*done = 1;
		*ubcountp = 0;
		return 0;
	}

	ubcount = *ubcountp;
	ubleft = ubcount * statstruct_size;	/* bytes left in user buffer */
	*ubcountp = ubelem = 0;
	*done = 0;
	fmterror = 0;
	ubufp = ubuffer;
	blks_per_cluster = xfs_icluster_size_fsb(mp);
	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
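	/*
	 * Worked example (illustrative values, not a guarantee): with 4 KiB
	 * filesystem blocks and 256-byte inodes there are 16 inodes per
	 * block, so sb_inopblog = 4; with an 8 KiB inode cluster,
	 * blks_per_cluster = 2 and inodes_per_cluster = 2 << 4 = 32, i.e.
	 * each readahead issued below covers 32 inodes in two fs blocks.
	 */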
	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
	if (!irbuf)
		return ENOMEM;

	nirbuf = irbsize / sizeof(*irbuf);

	/*
	 * Loop over the allocation groups, starting from the last
	 * inode returned; 0 means start of the allocation group.
	 */
	rval = 0;
	while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
		cond_resched();
		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
		if (error) {
			/*
			 * Skip this allocation group and go to the next one.
			 */
			agno++;
			agino = 0;
			continue;
		}
		agi = XFS_BUF_TO_AGI(agbp);
		/*
		 * Allocate and initialize a btree cursor for ialloc btree.
		 */
		cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
					    XFS_BTNUM_INO);
		irbp = irbuf;
		irbufend = irbuf + nirbuf;
		end_of_ag = 0;
		/*
		 * If we're returning in the middle of an allocation group,
		 * we need to get the remainder of the chunk we're in.
		 */
		if (agino > 0) {
			xfs_inobt_rec_incore_t r;

			/*
			 * Lookup the inode chunk that this inode lives in.
			 */
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE,
						 &tmp);
			if (!error &&		/* no I/O error */
			    tmp &&		/* lookup succeeded */
			    /* got the record, should always work */
			    !(error = xfs_inobt_get_rec(cur, &r, &i)) &&
			    i == 1 &&
			    /* this is the right chunk */
			    agino < r.ir_startino + XFS_INODES_PER_CHUNK &&
			    /* lastino was not last in chunk */
			    (chunkidx = agino - r.ir_startino + 1) <
				    XFS_INODES_PER_CHUNK &&
			    /* there are some left allocated */
			    xfs_inobt_maskn(chunkidx,
				    XFS_INODES_PER_CHUNK - chunkidx) &
				    ~r.ir_free) {
				/*
				 * Grab the chunk record.  Mark all the
				 * uninteresting inodes (because they're
				 * before our start point) free.
				 */
				for (i = 0; i < chunkidx; i++) {
					if (XFS_INOBT_MASK(i) & ~r.ir_free)
						r.ir_freecount++;
				}
				r.ir_free |= xfs_inobt_maskn(0, chunkidx);
				irbp->ir_startino = r.ir_startino;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				agino = r.ir_startino + XFS_INODES_PER_CHUNK;
				icount = XFS_INODES_PER_CHUNK - r.ir_freecount;
			} else {
				/*
				 * If any of those tests failed, bump the
				 * inode number (just in case).
				 */
				agino++;
				icount = 0;
			}
			/*
			 * In any case, increment to the next record.
			 */
			if (!error)
				error = xfs_btree_increment(cur, 0, &tmp);
		} else {
			/*
			 * Start of ag.  Lookup the first inode chunk.
			 */
			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp);
			icount = 0;
		}
		/*
		 * Loop through inode btree records in this ag,
		 * until we run out of inodes or space in the buffer.
		 */
		while (irbp < irbufend && icount < ubcount) {
			xfs_inobt_rec_incore_t r;

			/*
			 * Loop as long as we're unable to read the
			 * inode btree.
			 */
			while (error) {
				agino += XFS_INODES_PER_CHUNK;
				if (XFS_AGINO_TO_AGBNO(mp, agino) >=
						be32_to_cpu(agi->agi_length))
					break;
				error = xfs_inobt_lookup(cur, agino,
							 XFS_LOOKUP_GE, &tmp);
				cond_resched();
			}
			/*
			 * If we ran off the end of the ag either with an
			 * error or the normal way, set end and stop
			 * collecting.
			 */
			if (error) {
				end_of_ag = 1;
				break;
			}

			error = xfs_inobt_get_rec(cur, &r, &i);
			if (error || i == 0) {
				end_of_ag = 1;
				break;
			}

			/*
			 * If this chunk has any allocated inodes, save it.
			 * Also start read-ahead now for this chunk.
			 */
			if (r.ir_freecount < XFS_INODES_PER_CHUNK) {
				struct blk_plug plug;
				/*
				 * Loop over all clusters in the next chunk.
				 * Do a readahead if there are any allocated
				 * inodes in that cluster.
				 */
				blk_start_plug(&plug);
				agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino);
				for (chunkidx = 0;
				     chunkidx < XFS_INODES_PER_CHUNK;
				     chunkidx += inodes_per_cluster,
				     agbno += blks_per_cluster) {
					if (xfs_inobt_maskn(chunkidx,
					    inodes_per_cluster) & ~r.ir_free)
						xfs_btree_reada_bufs(mp, agno,
							agbno, blks_per_cluster,
							&xfs_inode_buf_ops);
				}
				blk_finish_plug(&plug);
				irbp->ir_startino = r.ir_startino;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				icount += XFS_INODES_PER_CHUNK - r.ir_freecount;
			}
			/*
			 * Set agino to after this chunk and bump the cursor.
			 */
			agino = r.ir_startino + XFS_INODES_PER_CHUNK;
			error = xfs_btree_increment(cur, 0, &tmp);
			cond_resched();
		}
		/*
		 * Drop the btree buffers and the agi buffer.
		 * We can't hold any of the locks these represent
		 * when calling iget.
		 */
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
		xfs_buf_relse(agbp);
		/*
		 * Now format all the good inodes into the user's buffer.
		 */
		irbufend = irbp;
		for (irbp = irbuf;
		     irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
			/*
			 * Now process this chunk of inodes.
			 */
			for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
			     XFS_BULKSTAT_UBLEFT(ubleft) &&
				irbp->ir_freecount < XFS_INODES_PER_CHUNK;
			     chunkidx++, clustidx++, agino++) {
				ASSERT(chunkidx < XFS_INODES_PER_CHUNK);

				ino = XFS_AGINO_TO_INO(mp, agno, agino);
				/*
				 * Skip if this inode is free.
				 */
				if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
					lastino = ino;
					continue;
				}
				/*
				 * Count used inodes as free so we can tell
				 * when the chunk is used up.
				 */
				irbp->ir_freecount++;

				/*
				 * Get the inode and fill in a single buffer.
				 */
				ubused = statstruct_size;
				error = formatter(mp, ino, ubufp, ubleft,
						  &ubused, &fmterror);
				if (fmterror == BULKSTAT_RV_NOTHING) {
					if (error && error != ENOENT &&
							error != EINVAL) {
						ubleft = 0;
						rval = error;
						break;
					}
					lastino = ino;
					continue;
				}
				if (fmterror == BULKSTAT_RV_GIVEUP) {
					ubleft = 0;
					ASSERT(error);
					rval = error;
					break;
				}
				if (ubufp)
					ubufp += ubused;
				ubleft -= ubused;
				ubelem++;
				lastino = ino;
			}

			cond_resched();
		}
		/*
		 * Set up for the next loop iteration.
		 */
		if (XFS_BULKSTAT_UBLEFT(ubleft)) {
			if (end_of_ag) {
				agno++;
				agino = 0;
			} else
				agino = XFS_INO_TO_AGINO(mp, lastino);
		} else
			break;
	}
	/*
	 * Done, we're either out of filesystem or space to put the data.
	 */
	kmem_free(irbuf);
	*ubcountp = ubelem;
	/*
	 * Found some inodes, return them now and return the error next time.
	 */
	if (ubelem)
		rval = 0;
	if (agno >= mp->m_sb.sb_agcount) {
		/*
		 * If we ran out of filesystem, mark lastino as off
		 * the end of the filesystem, so the next call
		 * will return immediately.
		 */
		*lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0);
		*done = 1;
	} else
		*lastinop = (xfs_ino_t)lastino;

	return rval;
}
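
/*
 * Illustrative (hypothetical) caller, showing how an ioctl-layer function
 * might drive xfs_bulkstat; the real entry point lives in xfs_ioctl.c and
 * the variable names below are not defined in this file:
 *
 *	xfs_ino_t	lastino = 0;
 *	int		count, done = 0;
 *
 *	do {
 *		count = 128;	// in: room for 128 records; out: # filled
 *		error = xfs_bulkstat(mp, &lastino, &count,
 *				     xfs_bulkstat_one, sizeof(xfs_bstat_t),
 *				     ubuffer, &done);
 *		ubuffer += count * sizeof(xfs_bstat_t);
 *	} while (!error && !done && count > 0);
 */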

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 * Special case for non-sequential one inode bulkstat.
 */
int						/* error status */
xfs_bulkstat_single(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_ino_t		*lastinop, /* inode to return */
	char			__user *buffer, /* buffer with inode stats */
	int			*done)	/* 1 if there are more stats to get */
{
	int			count;	/* count value for bulkstat call */
	int			error;	/* return value */
	xfs_ino_t		ino;	/* filesystem inode number */
	int			res;	/* result from bulkstat_one */

	/*
	 * Note that requesting valid inode numbers which are not allocated
	 * to inodes will most likely cause xfs_imap_to_bp to generate
	 * warning messages about bad magic numbers.  This is ok.  The fact
	 * that the inode isn't actually an inode is handled by the error
	 * check below.  Done this way to make the usual case faster at the
	 * expense of the error case.
	 */
	ino = *lastinop;
	error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t),
				 NULL, &res);
	if (error) {
		/*
		 * Special case way failed, do it the "long" way
		 * to see if that works.
		 */
		(*lastinop)--;
		count = 1;
		if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one,
				 sizeof(xfs_bstat_t), buffer, done))
			return error;
		if (count == 0 || (xfs_ino_t)*lastinop != ino)
			return error == EFSCORRUPTED ?
			       XFS_ERROR(EINVAL) : error;
		else
			return 0;
	}
	*done = 0;
	return 0;
}

/*
 * Copy a batch of inode group records out to userspace.
 */
int
xfs_inumbers_fmt(
	void			__user *ubuffer, /* buffer to write to */
	const xfs_inogrp_t	*buffer,	/* buffer to read from */
	long			count,		/* # of elements to read */
	long			*written)	/* # of bytes written */
{
	if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
		return -EFAULT;
	*written = count * sizeof(*buffer);
	return 0;
}

/*
 * Return inode number table for the filesystem.
 */
int					/* error status */
xfs_inumbers(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	*lastino,	/* last inode returned */
	int		*count,		/* size of buffer/count returned */
	void		__user *ubuffer, /* buffer with inode descriptions */
	inumbers_fmt_pf	formatter)
{
	xfs_buf_t	*agbp;
	xfs_agino_t	agino;
	xfs_agnumber_t	agno;
	int		bcount;
	xfs_inogrp_t	*buffer;
	int		bufidx;
	xfs_btree_cur_t	*cur;
	int		error;
	xfs_inobt_rec_incore_t r;
	int		i;
	xfs_ino_t	ino;
	int		left;
	int		tmp;

	ino = (xfs_ino_t)*lastino;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	left = *count;
	*count = 0;
	bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
	error = bufidx = 0;
	cur = NULL;
	agbp = NULL;
	while (left > 0 && agno < mp->m_sb.sb_agcount) {
		if (agbp == NULL) {
			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
			if (error) {
				/*
				 * If we can't read the AGI of this ag,
				 * then just skip to the next one.
				 */
				ASSERT(cur == NULL);
				agbp = NULL;
				agno++;
				agino = 0;
				continue;
			}
			cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
						    XFS_BTNUM_INO);
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
						 &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * Move up the last inode in the current
				 * chunk.  The lookup_ge will always get
				 * us the first inode in the next chunk.
				 */
				agino += XFS_INODES_PER_CHUNK - 1;
				continue;
			}
		}
		error = xfs_inobt_get_rec(cur, &r, &i);
		if (error || i == 0) {
			xfs_buf_relse(agbp);
			agbp = NULL;
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			cur = NULL;
			agno++;
			agino = 0;
			continue;
		}
		agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
		buffer[bufidx].xi_startino =
			XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
		buffer[bufidx].xi_alloccount =
			XFS_INODES_PER_CHUNK - r.ir_freecount;
		buffer[bufidx].xi_allocmask = ~r.ir_free;
		bufidx++;
		left--;
		if (bufidx == bcount) {
			long written;
			if (formatter(ubuffer, buffer, bufidx, &written)) {
				error = XFS_ERROR(EFAULT);
				break;
			}
			ubuffer += written;
			*count += bufidx;
			bufidx = 0;
		}
		if (left) {
			error = xfs_btree_increment(cur, 0, &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * The agino value has already been bumped.
				 * Just try to skip up to it.
				 */
				agino += XFS_INODES_PER_CHUNK;
				continue;
			}
		}
	}
	if (!error) {
		if (bufidx) {
			long written;
			if (formatter(ubuffer, buffer, bufidx, &written))
				error = XFS_ERROR(EFAULT);
			else
				*count += bufidx;
		}
		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
	}
	kmem_free(buffer);
	if (cur)
		xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
					   XFS_BTREE_NOERROR));
	if (agbp)
		xfs_buf_relse(agbp);
	return error;
}
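
/*
 * Illustrative (hypothetical) usage of xfs_inumbers, e.g. from an
 * XFS_IOC_FSINUMBERS-style caller; the names below are not defined here.
 * *count is the buffer capacity in records on entry and the number of
 * inode-group records written on return, so a zero result ends the walk:
 *
 *	xfs_ino_t	lastino = 0;
 *	int		count;
 *
 *	do {
 *		count = 64;
 *		error = xfs_inumbers(mp, &lastino, &count, ubuffer,
 *				     xfs_inumbers_fmt);
 *		ubuffer += count * sizeof(xfs_inogrp_t);
 *	} while (!error && count > 0);
 */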