1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_format.h"
21#include "xfs_bit.h"
22#include "xfs_log.h"
23#include "xfs_trans.h"
24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_alloc.h"
27#include "xfs_quota.h"
28#include "xfs_mount.h"
29#include "xfs_bmap_btree.h"
30#include "xfs_inode.h"
31#include "xfs_bmap.h"
32#include "xfs_bmap_util.h"
33#include "xfs_rtalloc.h"
34#include "xfs_error.h"
35#include "xfs_itable.h"
36#include "xfs_attr.h"
37#include "xfs_buf_item.h"
38#include "xfs_trans_space.h"
39#include "xfs_trans_priv.h"
40#include "xfs_qm.h"
41#include "xfs_cksum.h"
42#include "xfs_trace.h"
43
44
45
46
47
48
49
50
51
52
53
54
55
56
#ifdef DEBUG
/*
 * Error-injection knobs: when xfs_do_dqerror is set, every
 * xfs_dqerror_mod'th dqget request against xfs_dqerror_target fails
 * with EIO (see xfs_qm_dqget).
 */
xfs_buftarg_t *xfs_dqerror_target;
int xfs_do_dqerror;
int xfs_dqreq_num;
int xfs_dqerror_mod = 33;
#endif

/* Slab caches: per-transaction dquot accounting and in-core dquots. */
struct kmem_zone *xfs_qm_dqtrxzone;
static struct kmem_zone *xfs_qm_dqzone;

/*
 * Separate lockdep classes for group/project dquot locks so that
 * user/group/project q_qlock nesting does not trip lockdep.
 */
static struct lock_class_key xfs_dquot_group_class;
static struct lock_class_key xfs_dquot_project_class;
69
70
71
72
/*
 * Free an in-core dquot.  The dquot must hold no references and must
 * already have been removed from the LRU list.
 */
void
xfs_qm_dqdestroy(
	xfs_dquot_t		*dqp)
{
	ASSERT(list_empty(&dqp->q_lru));

	mutex_destroy(&dqp->q_qlock);
	kmem_zone_free(xfs_qm_dqzone, dqp);

	XFS_STATS_DEC(xs_qm_dquot);
}
84
85
86
87
88
89
90void
91xfs_qm_adjust_dqlimits(
92 struct xfs_mount *mp,
93 struct xfs_dquot *dq)
94{
95 struct xfs_quotainfo *q = mp->m_quotainfo;
96 struct xfs_disk_dquot *d = &dq->q_core;
97 int prealloc = 0;
98
99 ASSERT(d->d_id);
100
101 if (q->qi_bsoftlimit && !d->d_blk_softlimit) {
102 d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
103 prealloc = 1;
104 }
105 if (q->qi_bhardlimit && !d->d_blk_hardlimit) {
106 d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
107 prealloc = 1;
108 }
109 if (q->qi_isoftlimit && !d->d_ino_softlimit)
110 d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
111 if (q->qi_ihardlimit && !d->d_ino_hardlimit)
112 d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
113 if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
114 d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
115 if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
116 d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);
117
118 if (prealloc)
119 xfs_dquot_set_prealloc_limits(dq);
120}
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135void
136xfs_qm_adjust_dqtimers(
137 xfs_mount_t *mp,
138 xfs_disk_dquot_t *d)
139{
140 ASSERT(d->d_id);
141
142#ifdef DEBUG
143 if (d->d_blk_hardlimit)
144 ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
145 be64_to_cpu(d->d_blk_hardlimit));
146 if (d->d_ino_hardlimit)
147 ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
148 be64_to_cpu(d->d_ino_hardlimit));
149 if (d->d_rtb_hardlimit)
150 ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
151 be64_to_cpu(d->d_rtb_hardlimit));
152#endif
153
154 if (!d->d_btimer) {
155 if ((d->d_blk_softlimit &&
156 (be64_to_cpu(d->d_bcount) >
157 be64_to_cpu(d->d_blk_softlimit))) ||
158 (d->d_blk_hardlimit &&
159 (be64_to_cpu(d->d_bcount) >
160 be64_to_cpu(d->d_blk_hardlimit)))) {
161 d->d_btimer = cpu_to_be32(get_seconds() +
162 mp->m_quotainfo->qi_btimelimit);
163 } else {
164 d->d_bwarns = 0;
165 }
166 } else {
167 if ((!d->d_blk_softlimit ||
168 (be64_to_cpu(d->d_bcount) <=
169 be64_to_cpu(d->d_blk_softlimit))) &&
170 (!d->d_blk_hardlimit ||
171 (be64_to_cpu(d->d_bcount) <=
172 be64_to_cpu(d->d_blk_hardlimit)))) {
173 d->d_btimer = 0;
174 }
175 }
176
177 if (!d->d_itimer) {
178 if ((d->d_ino_softlimit &&
179 (be64_to_cpu(d->d_icount) >
180 be64_to_cpu(d->d_ino_softlimit))) ||
181 (d->d_ino_hardlimit &&
182 (be64_to_cpu(d->d_icount) >
183 be64_to_cpu(d->d_ino_hardlimit)))) {
184 d->d_itimer = cpu_to_be32(get_seconds() +
185 mp->m_quotainfo->qi_itimelimit);
186 } else {
187 d->d_iwarns = 0;
188 }
189 } else {
190 if ((!d->d_ino_softlimit ||
191 (be64_to_cpu(d->d_icount) <=
192 be64_to_cpu(d->d_ino_softlimit))) &&
193 (!d->d_ino_hardlimit ||
194 (be64_to_cpu(d->d_icount) <=
195 be64_to_cpu(d->d_ino_hardlimit)))) {
196 d->d_itimer = 0;
197 }
198 }
199
200 if (!d->d_rtbtimer) {
201 if ((d->d_rtb_softlimit &&
202 (be64_to_cpu(d->d_rtbcount) >
203 be64_to_cpu(d->d_rtb_softlimit))) ||
204 (d->d_rtb_hardlimit &&
205 (be64_to_cpu(d->d_rtbcount) >
206 be64_to_cpu(d->d_rtb_hardlimit)))) {
207 d->d_rtbtimer = cpu_to_be32(get_seconds() +
208 mp->m_quotainfo->qi_rtbtimelimit);
209 } else {
210 d->d_rtbwarns = 0;
211 }
212 } else {
213 if ((!d->d_rtb_softlimit ||
214 (be64_to_cpu(d->d_rtbcount) <=
215 be64_to_cpu(d->d_rtb_softlimit))) &&
216 (!d->d_rtb_hardlimit ||
217 (be64_to_cpu(d->d_rtbcount) <=
218 be64_to_cpu(d->d_rtb_hardlimit)))) {
219 d->d_rtbtimer = 0;
220 }
221 }
222}
223
224
225
226
/*
 * Initialize a freshly allocated buffer full of on-disk dquots: stamp
 * every dquot in the chunk with its magic/version/id/type, CRC it on
 * v5 (CRC-enabled) filesystems, and log the whole buffer as dquot
 * metadata of the appropriate flavour.
 */
STATIC void
xfs_qm_init_dquot_blk(
	xfs_trans_t		*tp,
	xfs_mount_t		*mp,
	xfs_dqid_t		id,
	uint			type,
	xfs_buf_t		*bp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	xfs_dqblk_t		*d;
	int			curid, i;

	ASSERT(tp);
	ASSERT(xfs_buf_islocked(bp));

	d = bp->b_addr;

	/*
	 * ID of the first dquot in the block - id's are zero based.
	 */
	curid = id - (id % q->qi_dqperchunk);
	ASSERT(curid >= 0);
	memset(d, 0, BBTOB(q->qi_dqchunklen));
	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
		d->dd_diskdq.d_id = cpu_to_be32(curid);
		d->dd_diskdq.d_flags = type;
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			/* v5 filesystems carry a per-dquot UUID and CRC */
			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}

	/* Tell log recovery which flavour of dquot buffer this is. */
	xfs_trans_dquot_buf(tp, bp,
			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
			     XFS_BLF_GDQUOT_BUF)));
	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}
268
269
270
271
272
273
274void
275xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
276{
277 __uint64_t space;
278
279 dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
280 dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
281 if (!dqp->q_prealloc_lo_wmark) {
282 dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
283 do_div(dqp->q_prealloc_lo_wmark, 100);
284 dqp->q_prealloc_lo_wmark *= 95;
285 }
286
287 space = dqp->q_prealloc_hi_wmark;
288
289 do_div(space, 100);
290 dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
291 dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
292 dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
293}
294
295STATIC bool
296xfs_dquot_buf_verify_crc(
297 struct xfs_mount *mp,
298 struct xfs_buf *bp)
299{
300 struct xfs_dqblk *d = (struct xfs_dqblk *)bp->b_addr;
301 int ndquots;
302 int i;
303
304 if (!xfs_sb_version_hascrc(&mp->m_sb))
305 return true;
306
307
308
309
310
311
312 if (mp->m_quotainfo)
313 ndquots = mp->m_quotainfo->qi_dqperchunk;
314 else
315 ndquots = xfs_qm_calc_dquots_per_chunk(mp,
316 XFS_BB_TO_FSB(mp, bp->b_length));
317
318 for (i = 0; i < ndquots; i++, d++) {
319 if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
320 XFS_DQUOT_CRC_OFF))
321 return false;
322 if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
323 return false;
324 }
325 return true;
326}
327
328STATIC bool
329xfs_dquot_buf_verify(
330 struct xfs_mount *mp,
331 struct xfs_buf *bp)
332{
333 struct xfs_dqblk *d = (struct xfs_dqblk *)bp->b_addr;
334 xfs_dqid_t id = 0;
335 int ndquots;
336 int i;
337
338
339
340
341
342
343 if (mp->m_quotainfo)
344 ndquots = mp->m_quotainfo->qi_dqperchunk;
345 else
346 ndquots = xfs_qm_calc_dquots_per_chunk(mp, bp->b_length);
347
348
349
350
351
352
353
354
355 for (i = 0; i < ndquots; i++) {
356 struct xfs_disk_dquot *ddq;
357 int error;
358
359 ddq = &d[i].dd_diskdq;
360
361 if (i == 0)
362 id = be32_to_cpu(ddq->d_id);
363
364 error = xfs_qm_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
365 "xfs_dquot_buf_verify");
366 if (error)
367 return false;
368 }
369 return true;
370}
371
372static void
373xfs_dquot_buf_read_verify(
374 struct xfs_buf *bp)
375{
376 struct xfs_mount *mp = bp->b_target->bt_mount;
377
378 if (!xfs_dquot_buf_verify_crc(mp, bp) || !xfs_dquot_buf_verify(mp, bp)) {
379 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
380 xfs_buf_ioerror(bp, EFSCORRUPTED);
381 }
382}
383
384
385
386
387
388
389void
390xfs_dquot_buf_write_verify(
391 struct xfs_buf *bp)
392{
393 struct xfs_mount *mp = bp->b_target->bt_mount;
394
395 if (!xfs_dquot_buf_verify(mp, bp)) {
396 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
397 xfs_buf_ioerror(bp, EFSCORRUPTED);
398 return;
399 }
400}
401
/* Verifier callbacks attached to all on-disk dquot buffers. */
const struct xfs_buf_ops xfs_dquot_buf_ops = {
	.verify_read = xfs_dquot_buf_read_verify,
	.verify_write = xfs_dquot_buf_write_verify,
};
406
407
408
409
410
/*
 * Allocate a block of on-disk dquots in the quota file and return the
 * locked, initialised buffer for the chunk containing @dqp's id.
 *
 * xfs_bmap_finish() may commit and replace the transaction, in which
 * case *tpp points at the new transaction on return.  Returns 0 with
 * the buffer in *O_bpp, or a positive errno on failure.
 */
STATIC int
xfs_qm_dqalloc(
	xfs_trans_t	**tpp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	xfs_inode_t	*quotip,
	xfs_fileoff_t	offset_fsb,
	xfs_buf_t	**O_bpp)
{
	xfs_fsblock_t	firstblock;
	xfs_bmap_free_t flist;
	xfs_bmbt_irec_t map;
	int		nmaps, error, committed;
	xfs_buf_t	*bp;
	xfs_trans_t	*tp = *tpp;

	ASSERT(tp != NULL);

	trace_xfs_dqalloc(dqp);

	/*
	 * Initialize the bmap freelist prior to calling bmapi code.
	 */
	xfs_bmap_init(&flist, &firstblock);
	xfs_ilock(quotip, XFS_ILOCK_EXCL);

	/*
	 * Return if this type of quotas is turned off while we didn't
	 * have an inode lock.
	 */
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
		return (ESRCH);
	}

	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
	nmaps = 1;
	error = xfs_bmapi_write(tp, quotip, offset_fsb,
				XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
				&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
				&map, &nmaps, &flist);
	if (error)
		goto error0;
	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later.
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			       dqp->q_blkno,
			       mp->m_quotainfo->qi_dqchunklen,
			       0);

	error = xfs_buf_geterror(bp);
	if (error)
		goto error1;
	bp->b_ops = &xfs_dquot_buf_ops;

	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);

	/*
	 * xfs_bmap_finish() may commit the current transaction and
	 * start a second transaction if the freelist is not empty.
	 *
	 * Since we still want to modify this buffer, we need to
	 * ensure that the buffer is not released on commit of
	 * the first transaction and ensure the buffer is added to the
	 * second transaction.
	 *
	 * If there is only one transaction then don't stop the buffer
	 * from being released when it commits later on.
	 */
	xfs_trans_bhold(tp, bp);

	if ((error = xfs_bmap_finish(tpp, &flist, &committed))) {
		goto error1;
	}

	/* Transaction was committed? If so, rejoin the held buffer to it. */
	if (committed) {
		tp = *tpp;
		xfs_trans_bjoin(tp, bp);
	} else {
		xfs_trans_bhold_release(tp, bp);
	}

	*O_bpp = bp;
	return 0;

error1:
	xfs_bmap_cancel(&flist);
error0:
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);

	return (error);
}
/*
 * Re-read a dquot buffer without a read verifier and repair every dquot
 * in it via xfs_qm_dqcheck(XFS_QMOPT_DQREPAIR).  Used after a verifier
 * failure when the caller passed XFS_QMOPT_DQREPAIR.  Returns the
 * locked buffer in *bpp on success.
 */
STATIC int
xfs_qm_dqrepair(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp,
	xfs_dqid_t		firstid,
	struct xfs_buf		**bpp)
{
	int			error;
	struct xfs_disk_dquot	*ddq;
	struct xfs_dqblk	*d;
	int			i;

	/*
	 * Read the buffer without verification so we get the corrupted
	 * buffer returned to us. Make sure we verify it on write, though.
	 */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen,
				   0, bpp, NULL);

	if (error) {
		ASSERT(*bpp == NULL);
		return XFS_ERROR(error);
	}
	(*bpp)->b_ops = &xfs_dquot_buf_ops;

	ASSERT(xfs_buf_islocked(*bpp));
	d = (struct xfs_dqblk *)(*bpp)->b_addr;

	/* Do the actual repair of dquots in this buffer */
	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
		ddq = &d[i].dd_diskdq;
		error = xfs_qm_dqcheck(mp, ddq, firstid + i,
				       dqp->dq_flags & XFS_DQ_ALLTYPES,
				       XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
		if (error) {
			/* repair failed, we're screwed */
			xfs_trans_brelse(tp, *bpp);
			return XFS_ERROR(EIO);
		}
	}

	return 0;
}
562
563
564
565
566
567
/*
 * Maps a dquot to the buffer containing its on-disk version.
 * This returns a ptr to the buffer containing the on-disk dquot
 * in the bpp param, and a ptr to the on-disk dquot within that buffer.
 * Allocates the chunk if it is a hole and XFS_QMOPT_DQALLOC is set.
 */
STATIC int
xfs_qm_dqtobp(
	xfs_trans_t		**tpp,
	xfs_dquot_t		*dqp,
	xfs_disk_dquot_t	**O_ddpp,
	xfs_buf_t		**O_bpp,
	uint			flags)
{
	struct xfs_bmbt_irec	map;
	int			nmaps = 1, error;
	struct xfs_buf		*bp;
	struct xfs_inode	*quotip = xfs_dq_to_quota_inode(dqp);
	struct xfs_mount	*mp = dqp->q_mount;
	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
	struct xfs_trans	*tp = (tpp ? *tpp : NULL);

	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;

	xfs_ilock(quotip, XFS_ILOCK_SHARED);
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		/*
		 * Return if this type of quotas is turned off while we
		 * didn't have the quota inode lock.
		 */
		xfs_iunlock(quotip, XFS_ILOCK_SHARED);
		return ESRCH;
	}

	/*
	 * Find the block map; no allocations yet.
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);

	xfs_iunlock(quotip, XFS_ILOCK_SHARED);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount == 1);

	/*
	 * Offset of dquot in the (fixed sized) dquot chunk.
	 */
	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
		sizeof(xfs_dqblk_t);

	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	if (map.br_startblock == HOLESTARTBLOCK) {
		/*
		 * We don't allocate unless we're asked to.
		 */
		if (!(flags & XFS_QMOPT_DQALLOC))
			return ENOENT;

		/* dqalloc may roll the transaction; pick up the new one */
		ASSERT(tp);
		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
				       dqp->q_fileoffset, &bp);
		if (error)
			return error;
		tp = *tpp;
	} else {
		trace_xfs_dqtobp_read(dqp);

		/*
		 * Store the blkno etc so that we don't have to do the
		 * mapping all the time.
		 */
		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
					   dqp->q_blkno,
					   mp->m_quotainfo->qi_dqchunklen,
					   0, &bp, &xfs_dquot_buf_ops);

		/* Optionally try to salvage a buffer the verifier rejected. */
		if (error == EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
			xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
						mp->m_quotainfo->qi_dqperchunk;
			ASSERT(bp == NULL);
			error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
		}

		if (error) {
			ASSERT(bp == NULL);
			return XFS_ERROR(error);
		}
	}

	ASSERT(xfs_buf_islocked(bp));
	*O_bpp = bp;
	*O_ddpp = bp->b_addr + dqp->q_bufoffset;

	return (0);
}
662
663
664
665
666
667
668
669
670int
671xfs_qm_dqread(
672 struct xfs_mount *mp,
673 xfs_dqid_t id,
674 uint type,
675 uint flags,
676 struct xfs_dquot **O_dqpp)
677{
678 struct xfs_dquot *dqp;
679 struct xfs_disk_dquot *ddqp;
680 struct xfs_buf *bp;
681 struct xfs_trans *tp = NULL;
682 int error;
683 int cancelflags = 0;
684
685
686 dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);
687
688 dqp->dq_flags = type;
689 dqp->q_core.d_id = cpu_to_be32(id);
690 dqp->q_mount = mp;
691 INIT_LIST_HEAD(&dqp->q_lru);
692 mutex_init(&dqp->q_qlock);
693 init_waitqueue_head(&dqp->q_pinwait);
694
695
696
697
698
699
700 init_completion(&dqp->q_flush);
701 complete(&dqp->q_flush);
702
703
704
705
706
707 switch (type) {
708 case XFS_DQ_USER:
709
710 break;
711 case XFS_DQ_GROUP:
712 lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
713 break;
714 case XFS_DQ_PROJ:
715 lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
716 break;
717 default:
718 ASSERT(0);
719 break;
720 }
721
722 XFS_STATS_INC(xs_qm_dquot);
723
724 trace_xfs_dqread(dqp);
725
726 if (flags & XFS_QMOPT_DQALLOC) {
727 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
728 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_attrsetm,
729 XFS_QM_DQALLOC_SPACE_RES(mp), 0);
730 if (error)
731 goto error1;
732 cancelflags = XFS_TRANS_RELEASE_LOG_RES;
733 }
734
735
736
737
738
739 error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
740 if (error) {
741
742
743
744
745
746 trace_xfs_dqread_fail(dqp);
747 cancelflags |= XFS_TRANS_ABORT;
748 goto error1;
749 }
750
751
752 memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
753 xfs_qm_dquot_logitem_init(dqp);
754
755
756
757
758
759 dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
760 dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
761 dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);
762
763
764 xfs_dquot_set_prealloc_limits(dqp);
765
766
767 xfs_buf_set_ref(bp, XFS_DQUOT_REF);
768
769
770
771
772
773
774
775
776
777
778
779
780
781 ASSERT(xfs_buf_islocked(bp));
782 xfs_trans_brelse(tp, bp);
783
784 if (tp) {
785 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
786 if (error)
787 goto error0;
788 }
789
790 *O_dqpp = dqp;
791 return error;
792
793error1:
794 if (tp)
795 xfs_trans_cancel(tp, cancelflags);
796error0:
797 xfs_qm_dqdestroy(dqp);
798 *O_dqpp = NULL;
799 return error;
800}
801
802
803
804
805
806
807
808
809
/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT/PDQUOT),
 * return a locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes
 * precedence: if the id changes while we don't hold the ilock inside
 * this function, the new dquot is returned, not necessarily the one
 * requested in the id argument.
 */
int
xfs_qm_dqget(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,	  /* locked inode (optional) */
	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
		return (ESRCH);
	}

#ifdef DEBUG
	/* Error-injection hook: periodically fail dqget on the chosen target. */
	if (xfs_do_dqerror) {
		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
			xfs_debug(mp, "Returning error in dqget");
			return (EIO);
		}
	}

	ASSERT(type == XFS_DQ_USER ||
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(xfs_inode_dquot(ip, type) == NULL);
	}
#endif

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (dqp) {
		xfs_dqlock(dqp);
		/* Dquot being torn down by reclaim: back off and retry. */
		if (dqp->dq_flags & XFS_DQ_FREEING) {
			xfs_dqunlock(dqp);
			mutex_unlock(&qi->qi_tree_lock);
			trace_xfs_dqget_freeing(dqp);
			delay(1);
			goto restart;
		}

		dqp->q_nrefs++;
		mutex_unlock(&qi->qi_tree_lock);

		trace_xfs_dqget_hit(dqp);
		XFS_STATS_INC(xs_qm_dqcachehits);
		*O_dqpp = dqp;
		return 0;
	}
	mutex_unlock(&qi->qi_tree_lock);
	XFS_STATS_INC(xs_qm_dqcachemisses);

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the
	 * inode lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqread(mp, id, type, flags, &dqp);

	if (ip)
		xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (error)
		return error;

	if (ip) {
		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (xfs_this_quota_on(mp, type)) {
			struct xfs_dquot	*dqp1;

			dqp1 = xfs_inode_dquot(ip, type);
			if (dqp1) {
				/* Someone beat us to it: use theirs. */
				xfs_qm_dqdestroy(dqp);
				dqp = dqp1;
				xfs_dqlock(dqp);
				goto dqret;
			}
		} else {
			/* inode stays locked on return */
			xfs_qm_dqdestroy(dqp);
			return XFS_ERROR(ESRCH);
		}
	}

	mutex_lock(&qi->qi_tree_lock);
	error = -radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		WARN_ON(error != EEXIST);

		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(xs_qm_dquot_dups);
		goto restart;
	}

	/*
	 * We return a locked dquot to the caller, with a reference taken.
	 */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

 dqret:
	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return (0);
}
944
945
/*
 * Drop the last reference to a dquot: put it on the LRU for reclaim and
 * release the group/project dquot hints it may be holding.  Called with
 * the dquot locked; drops the lock before returning.
 */
STATIC void
xfs_qm_dqput_final(
	struct xfs_dquot	*dqp)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	struct xfs_dquot	*gdqp;
	struct xfs_dquot	*pdqp;

	trace_xfs_dqput_free(dqp);

	if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
		XFS_STATS_INC(xs_qm_dquot_unused);

	/*
	 * If we just added a udquot to the freelist, then we want to release
	 * the gdquot/pdquot reference that it (probably) has. Otherwise it'll
	 * keep the gdquot/pdquot from getting reclaimed.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}

	pdqp = dqp->q_pdquot;
	if (pdqp) {
		xfs_dqlock(pdqp);
		dqp->q_pdquot = NULL;
	}
	xfs_dqunlock(dqp);

	/*
	 * If we had a group/project quota hint, release it now.
	 * (The hints are dropped only after this dquot's own lock is
	 * released, to respect the lock ordering.)
	 */
	if (gdqp)
		xfs_qm_dqput(gdqp);
	if (pdqp)
		xfs_qm_dqput(pdqp);
}
985
986
987
988
989
990
991
992void
993xfs_qm_dqput(
994 struct xfs_dquot *dqp)
995{
996 ASSERT(dqp->q_nrefs > 0);
997 ASSERT(XFS_DQ_IS_LOCKED(dqp));
998
999 trace_xfs_dqput(dqp);
1000
1001 if (--dqp->q_nrefs > 0)
1002 xfs_dqunlock(dqp);
1003 else
1004 xfs_qm_dqput_final(dqp);
1005}
1006
1007
1008
1009
1010
/*
 * Release a dquot reference.  The dquot must not be locked on entry;
 * NULL is tolerated and ignored.
 */
void
xfs_qm_dqrele(
	xfs_dquot_t	*dqp)
{
	if (!dqp)
		return;

	trace_xfs_dqrele(dqp);

	xfs_dqlock(dqp);
	/*
	 * We don't care to flush it if the dquot is dirty here.
	 * That will create stutters that we want to avoid.
	 * Instead we do a delayed write when we try to reclaim
	 * a dirty dquot.
	 */
	xfs_qm_dqput(dqp);
}
1029
1030
1031
1032
1033
1034
1035
1036
/*
 * This is the dquot flushing I/O completion routine.  It is called
 * from interrupt level when the buffer containing the dquot is flushed
 * to disk.  It is responsible for removing the dquot logitem from the
 * AIL if it has not been re-logged, and unlocking the dquot's flush
 * lock.  This behavior is very similar to that of inodes.
 */
STATIC void
xfs_qm_dqflush_done(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
	xfs_dquot_t		*dqp = qip->qli_dquot;
	struct xfs_ail		*ailp = lip->li_ailp;

	/*
	 * We only want to pull the item from the AIL if its location in
	 * the log is unchanged since we started the flush.  Thus, we only
	 * bother if the dquot's lsn has not changed.  Check the lsn once
	 * outside the lock (cheap), then recheck under the lock before
	 * removing the dquot from the AIL.
	 */
	if ((lip->li_flags & XFS_LI_IN_AIL) &&
	    lip->li_lsn == qip->qli_flush_lsn) {

		/* xfs_trans_ail_delete() drops the AIL lock. */
		spin_lock(&ailp->xa_lock);
		if (lip->li_lsn == qip->qli_flush_lsn)
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
		else
			spin_unlock(&ailp->xa_lock);
	}

	/*
	 * Release the dq's flush lock since we're done with it.
	 */
	xfs_dqfunlock(dqp);
}
1070
1071
1072
1073
1074
1075
1076
1077
1078
/*
 * Write a modified dquot to disk.
 * The dquot must be locked and the flush lock too taken by caller.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller in
 * the interim.  Dquot is still locked on return.  This behavior is
 * identical to that of inodes.  On success, the locked buffer queued
 * for write is returned in *bpp.
 */
int
xfs_qm_dqflush(
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_buf		*bp;
	struct xfs_disk_dquot	*ddqp;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	trace_xfs_dqflush(dqp);

	*bpp = NULL;

	xfs_qm_dqunpin_wait(dqp);

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this dquot
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
		dqp->dq_flags &= ~XFS_DQ_DIRTY;

		spin_lock(&mp->m_ail->xa_lock);
		if (lip->li_flags & XFS_LI_IN_AIL)
			xfs_trans_ail_delete(mp->m_ail, lip,
					     SHUTDOWN_CORRUPT_INCORE);
		else
			spin_unlock(&mp->m_ail->xa_lock);
		error = XFS_ERROR(EIO);
		goto out_unlock;
	}

	/*
	 * Get the buffer containing the on-disk dquot.
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen, 0, &bp, NULL);
	if (error)
		goto out_unlock;

	/*
	 * Calculate the location of the dquot inside the buffer.
	 */
	ddqp = bp->b_addr + dqp->q_bufoffset;

	/*
	 * A simple sanity check in case we got a corrupted dquot.
	 */
	error = xfs_qm_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
			       XFS_QMOPT_DOWARN, "dqflush (incore copy)");
	if (error) {
		xfs_buf_relse(bp);
		xfs_dqfunlock(dqp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return XFS_ERROR(EIO);
	}

	/* This is the only portion of data that needs to persist */
	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));

	/*
	 * Clear the dirty field and remember the flush lsn for later use.
	 */
	dqp->dq_flags &= ~XFS_DQ_DIRTY;

	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
					&dqp->q_logitem.qli_item.li_lsn);

	/*
	 * Copy the lsn into the on-disk dquot now while we have the in-memory
	 * dquot here.  This can't be done later in the write verifier as we
	 * can't get access to the log item at that point in time.
	 *
	 * We also calculate the CRC here so that the on-disk dquot in the
	 * buffer always has a valid CRC, ensuring there is no possibility
	 * of a dquot without an up-to-date CRC getting to disk.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;

		dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	/*
	 * Attach an iodone routine so that we can remove this dquot from the
	 * AIL and release the flush lock once the dquot is synced to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
			      &dqp->q_logitem.qli_item);

	/*
	 * If the buffer is pinned then push on the log so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp)) {
		trace_xfs_dqflush_force(dqp);
		xfs_log_force(mp, 0);
	}

	trace_xfs_dqflush_done(dqp);
	*bpp = bp;
	return 0;

out_unlock:
	xfs_dqfunlock(dqp);
	return XFS_ERROR(EIO);
}
1197
1198
1199
1200
1201
1202
1203
1204void
1205xfs_dqlock2(
1206 xfs_dquot_t *d1,
1207 xfs_dquot_t *d2)
1208{
1209 if (d1 && d2) {
1210 ASSERT(d1 != d2);
1211 if (be32_to_cpu(d1->q_core.d_id) >
1212 be32_to_cpu(d2->q_core.d_id)) {
1213 mutex_lock(&d2->q_qlock);
1214 mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
1215 } else {
1216 mutex_lock(&d1->q_qlock);
1217 mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
1218 }
1219 } else if (d1) {
1220 mutex_lock(&d1->q_qlock);
1221 } else if (d2) {
1222 mutex_lock(&d2->q_qlock);
1223 }
1224}
1225
1226int __init
1227xfs_qm_init(void)
1228{
1229 xfs_qm_dqzone =
1230 kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
1231 if (!xfs_qm_dqzone)
1232 goto out;
1233
1234 xfs_qm_dqtrxzone =
1235 kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
1236 if (!xfs_qm_dqtrxzone)
1237 goto out_free_dqzone;
1238
1239 return 0;
1240
1241out_free_dqzone:
1242 kmem_zone_destroy(xfs_qm_dqzone);
1243out:
1244 return -ENOMEM;
1245}
1246
/* Tear down the dquot slab caches, in reverse order of creation. */
void
xfs_qm_exit(void)
{
	kmem_zone_destroy(xfs_qm_dqtrxzone);
	kmem_zone_destroy(xfs_qm_dqzone);
}
1253