/*
 * Quota trie support - operations on the on-disk quota tree file format.
 */
6#include <linux/errno.h>
7#include <linux/fs.h>
8#include <linux/mount.h>
9#include <linux/dqblk_v2.h>
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/slab.h>
14#include <linux/quotaops.h>
15
16#include <asm/byteorder.h>
17
18#include "quota_tree.h"
19
20MODULE_AUTHOR("Jan Kara");
21MODULE_DESCRIPTION("Quota trie support");
22MODULE_LICENSE("GPL");
23
24#define __QUOTA_QT_PARANOIA
25
26static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
27{
28 unsigned int epb = info->dqi_usable_bs >> 2;
29
30 depth = info->dqi_qtree_depth - depth - 1;
31 while (depth--)
32 id /= epb;
33 return id % epb;
34}
35
36static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
37{
38 qid_t id = from_kqid(&init_user_ns, qid);
39
40 return __get_index(info, id, depth);
41}
42
43
44static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
45{
46 return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
47 / info->dqi_entry_size;
48}
49
50static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
51{
52 struct super_block *sb = info->dqi_sb;
53
54 memset(buf, 0, info->dqi_usable_bs);
55 return sb->s_op->quota_read(sb, info->dqi_type, buf,
56 info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
57}
58
59static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
60{
61 struct super_block *sb = info->dqi_sb;
62 ssize_t ret;
63
64 ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
65 info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
66 if (ret != info->dqi_usable_bs) {
67 quota_error(sb, "dquota write failed");
68 if (ret >= 0)
69 ret = -EIO;
70 }
71 return ret;
72}
73
74
75static int get_free_dqblk(struct qtree_mem_dqinfo *info)
76{
77 char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
78 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
79 int ret, blk;
80
81 if (!buf)
82 return -ENOMEM;
83 if (info->dqi_free_blk) {
84 blk = info->dqi_free_blk;
85 ret = read_blk(info, blk, buf);
86 if (ret < 0)
87 goto out_buf;
88 info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
89 }
90 else {
91 memset(buf, 0, info->dqi_usable_bs);
92
93 ret = write_blk(info, info->dqi_blocks, buf);
94 if (ret < 0)
95 goto out_buf;
96 blk = info->dqi_blocks++;
97 }
98 mark_info_dirty(info->dqi_sb, info->dqi_type);
99 ret = blk;
100out_buf:
101 kfree(buf);
102 return ret;
103}
104
105
106static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
107{
108 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
109 int err;
110
111 dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
112 dh->dqdh_prev_free = cpu_to_le32(0);
113 dh->dqdh_entries = cpu_to_le16(0);
114 err = write_blk(info, blk, buf);
115 if (err < 0)
116 return err;
117 info->dqi_free_blk = blk;
118 mark_info_dirty(info->dqi_sb, info->dqi_type);
119 return 0;
120}
121
122
123static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
124 uint blk)
125{
126 char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
127 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
128 uint nextblk = le32_to_cpu(dh->dqdh_next_free);
129 uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
130 int err;
131
132 if (!tmpbuf)
133 return -ENOMEM;
134 if (nextblk) {
135 err = read_blk(info, nextblk, tmpbuf);
136 if (err < 0)
137 goto out_buf;
138 ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
139 dh->dqdh_prev_free;
140 err = write_blk(info, nextblk, tmpbuf);
141 if (err < 0)
142 goto out_buf;
143 }
144 if (prevblk) {
145 err = read_blk(info, prevblk, tmpbuf);
146 if (err < 0)
147 goto out_buf;
148 ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
149 dh->dqdh_next_free;
150 err = write_blk(info, prevblk, tmpbuf);
151 if (err < 0)
152 goto out_buf;
153 } else {
154 info->dqi_free_entry = nextblk;
155 mark_info_dirty(info->dqi_sb, info->dqi_type);
156 }
157 kfree(tmpbuf);
158 dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
159
160 if (write_blk(info, blk, buf) < 0)
161 quota_error(info->dqi_sb, "Can't write block (%u) "
162 "with free entries", blk);
163 return 0;
164out_buf:
165 kfree(tmpbuf);
166 return err;
167}
168
169
170static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
171 uint blk)
172{
173 char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
174 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
175 int err;
176
177 if (!tmpbuf)
178 return -ENOMEM;
179 dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
180 dh->dqdh_prev_free = cpu_to_le32(0);
181 err = write_blk(info, blk, buf);
182 if (err < 0)
183 goto out_buf;
184 if (info->dqi_free_entry) {
185 err = read_blk(info, info->dqi_free_entry, tmpbuf);
186 if (err < 0)
187 goto out_buf;
188 ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
189 cpu_to_le32(blk);
190 err = write_blk(info, info->dqi_free_entry, tmpbuf);
191 if (err < 0)
192 goto out_buf;
193 }
194 kfree(tmpbuf);
195 info->dqi_free_entry = blk;
196 mark_info_dirty(info->dqi_sb, info->dqi_type);
197 return 0;
198out_buf:
199 kfree(tmpbuf);
200 return err;
201}
202
203
204int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
205{
206 int i;
207
208 for (i = 0; i < info->dqi_entry_size; i++)
209 if (disk[i])
210 return 0;
211 return 1;
212}
213EXPORT_SYMBOL(qtree_entry_unused);
214
215
/*
 * Find a free entry slot in some data block (allocating a new data block
 * if necessary) and reserve it for 'dquot'.  On success returns the data
 * block number and sets dquot->dq_off to the file offset of the slot;
 * on failure returns 0 and stores a negative errno in *err.
 */
static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
			struct dquot *dquot, int *err)
{
	uint blk, i;
	struct qt_disk_dqdbheader *dh;
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	char *ddquot;

	*err = 0;
	if (!buf) {
		*err = -ENOMEM;
		return 0;
	}
	dh = (struct qt_disk_dqdbheader *)buf;
	if (info->dqi_free_entry) {
		/* A data block with a free slot is already known - use it */
		blk = info->dqi_free_entry;
		*err = read_blk(info, blk, buf);
		if (*err < 0)
			goto out_buf;
	} else {
		/* No block with free entries - allocate a fresh data block */
		blk = get_free_dqblk(info);
		if ((int)blk < 0) {	/* get_free_dqblk() returned -errno */
			*err = blk;
			kfree(buf);
			return 0;
		}
		memset(buf, 0, info->dqi_usable_bs);

		/* New all-zero block becomes the head of the free-entry list */
		info->dqi_free_entry = blk;
		mark_info_dirty(dquot->dq_sb, dquot->dq_id.type);
	}

	/* Will the block be full after this entry? Unhook it from the list. */
	if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
		*err = remove_free_dqentry(info, buf, blk);
		if (*err < 0) {
			quota_error(dquot->dq_sb, "Can't remove block (%u) "
				"from entry free list", blk);
			goto out_buf;
		}
	}
	le16_add_cpu(&dh->dqdh_entries, 1);
	/* Find the first unused (all-zero) entry slot in the block */
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (qtree_entry_unused(info, ddquot))
			break;
		ddquot += info->dqi_entry_size;
	}
#ifdef __QUOTA_QT_PARANOIA
	if (i == qtree_dqstr_in_blk(info)) {
		quota_error(dquot->dq_sb, "Data block full but it shouldn't");
		*err = -EIO;
		goto out_buf;
	}
#endif
	*err = write_blk(info, blk, buf);
	if (*err < 0) {
		quota_error(dquot->dq_sb, "Can't write quota data block %u",
			blk);
		goto out_buf;
	}
	/* Remember the file offset of the reserved slot */
	dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) +
			sizeof(struct qt_disk_dqdbheader) +
			i * info->dqi_entry_size;
	kfree(buf);
	return blk;
out_buf:
	kfree(buf);
	return 0;
}
287
288
/*
 * Insert a reference to dquot's data entry into the quota tree, one
 * recursion level per tree level.  *treeblk is the tree block to work in
 * (0 = allocate a new one; the allocated number is stored back).
 * Returns the data block number produced by find_free_dqentry() at the
 * leaf level, or a negative errno.
 */
static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			uint *treeblk, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	int ret = 0, newson = 0, newact = 0;
	__le32 *ref;
	uint newblk;

	if (!buf)
		return -ENOMEM;
	if (!*treeblk) {
		/* No tree block at this level yet - allocate and zero one */
		ret = get_free_dqblk(info);
		if (ret < 0)
			goto out_buf;
		*treeblk = ret;
		memset(buf, 0, info->dqi_usable_bs);
		newact = 1;	/* we allocated it - free it on failure below */
	} else {
		ret = read_blk(info, *treeblk, buf);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't read tree quota "
				"block %u", *treeblk);
			goto out_buf;
		}
	}
	ref = (__le32 *)buf;
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (!newblk)
		newson = 1;	/* reference is empty - fill it in below */
	if (depth == info->dqi_qtree_depth - 1) {
#ifdef __QUOTA_QT_PARANOIA
		if (newblk) {
			quota_error(dquot->dq_sb, "Inserting already present "
				"quota entry (block %u)",
				le32_to_cpu(ref[get_index(info,
					dquot->dq_id, depth)]));
			ret = -EIO;
			goto out_buf;
		}
#endif
		/* Leaf level - reserve an entry slot in a data block */
		newblk = find_free_dqentry(info, dquot, &ret);
	} else {
		/* Interior level - recurse one level down */
		ret = do_insert_tree(info, dquot, &newblk, depth+1);
	}
	if (newson && ret >= 0) {
		/* Publish the new child block in this tree block */
		ref[get_index(info, dquot->dq_id, depth)] =
							cpu_to_le32(newblk);
		ret = write_blk(info, *treeblk, buf);
	} else if (newact && ret < 0) {
		/* Insertion failed - give back the block we allocated */
		put_free_dqblk(info, buf, *treeblk);
	}
out_buf:
	kfree(buf);
	return ret;
}
344
345
346static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
347 struct dquot *dquot)
348{
349 int tmp = QT_TREEOFF;
350
351#ifdef __QUOTA_QT_PARANOIA
352 if (info->dqi_blocks <= QT_TREEOFF) {
353 quota_error(dquot->dq_sb, "Quota tree root isn't allocated!");
354 return -EIO;
355 }
356#endif
357 return do_insert_tree(info, dquot, &tmp, 0);
358}
359
360
361
362
363
/*
 * Write dquot to disk.  If the structure has no on-disk location yet
 * (dq_off == 0), space is first allocated in the quota tree.
 * Returns 0 on success, negative errno on failure.
 */
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	ssize_t ret;
	char *ddquot = kmalloc(info->dqi_entry_size, GFP_NOFS);

	if (!ddquot)
		return -ENOMEM;

	/* NOTE(review): dq_off presumably serialized by the caller - confirm */
	if (!dquot->dq_off) {
		ret = dq_insert_tree(info, dquot);
		if (ret < 0) {
			quota_error(sb, "Error %zd occurred while creating "
				    "quota", ret);
			kfree(ddquot);
			return ret;
		}
	}
	/* Snapshot in-memory usage/limits under the dquot's spinlock */
	spin_lock(&dquot->dq_dqb_lock);
	info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
	spin_unlock(&dquot->dq_dqb_lock);
	ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
				    dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		quota_error(sb, "dquota write failed");
		if (ret >= 0)	/* short write - treat as out of space */
			ret = -ENOSPC;
	} else {
		ret = 0;
	}
	dqstats_inc(DQST_WRITES);
	kfree(ddquot);

	return ret;
}
EXPORT_SYMBOL(qtree_write_dquot);
402
403
404static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
405 uint blk)
406{
407 struct qt_disk_dqdbheader *dh;
408 char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
409 int ret = 0;
410
411 if (!buf)
412 return -ENOMEM;
413 if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
414 quota_error(dquot->dq_sb, "Quota structure has offset to "
415 "other block (%u) than it should (%u)", blk,
416 (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
417 goto out_buf;
418 }
419 ret = read_blk(info, blk, buf);
420 if (ret < 0) {
421 quota_error(dquot->dq_sb, "Can't read quota data block %u",
422 blk);
423 goto out_buf;
424 }
425 dh = (struct qt_disk_dqdbheader *)buf;
426 le16_add_cpu(&dh->dqdh_entries, -1);
427 if (!le16_to_cpu(dh->dqdh_entries)) {
428 ret = remove_free_dqentry(info, buf, blk);
429 if (ret >= 0)
430 ret = put_free_dqblk(info, buf, blk);
431 if (ret < 0) {
432 quota_error(dquot->dq_sb, "Can't move quota data block "
433 "(%u) to free list", blk);
434 goto out_buf;
435 }
436 } else {
437 memset(buf +
438 (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
439 0, info->dqi_entry_size);
440 if (le16_to_cpu(dh->dqdh_entries) ==
441 qtree_dqstr_in_blk(info) - 1) {
442
443 ret = insert_free_dqentry(info, buf, blk);
444 if (ret < 0) {
445 quota_error(dquot->dq_sb, "Can't insert quota "
446 "data block (%u) to free entry list", blk);
447 goto out_buf;
448 }
449 } else {
450 ret = write_blk(info, blk, buf);
451 if (ret < 0) {
452 quota_error(dquot->dq_sb, "Can't write quota "
453 "data block %u", blk);
454 goto out_buf;
455 }
456 }
457 }
458 dquot->dq_off = 0;
459out_buf:
460 kfree(buf);
461 return ret;
462}
463
464
/*
 * Remove the reference to dquot's entry from the tree, recursing down to
 * the leaf level.  Frees tree blocks that become completely empty and
 * clears *blk when the block at this level was freed (so the caller can
 * drop its reference).  Returns 0 on success, negative errno on failure.
 */
static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			uint *blk, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	int ret = 0;
	uint newblk;
	__le32 *ref = (__le32 *)buf;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, *blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota data block %u",
			*blk);
		goto out_buf;
	}
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (depth == info->dqi_qtree_depth - 1) {
		/* Leaf level - free the dquot entry itself */
		ret = free_dqentry(info, dquot, newblk);
		newblk = 0;	/* force clearing of our reference below */
	} else {
		ret = remove_tree(info, dquot, &newblk, depth+1);
	}
	if (ret >= 0 && !newblk) {
		int i;
		ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
		/* Scan for any remaining non-zero reference in this block */
		for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
			;
		/* Block empty? Free it - but never the root block */
		if (i == (info->dqi_usable_bs >> 2)
		    && *blk != QT_TREEOFF) {
			put_free_dqblk(info, buf, *blk);
			*blk = 0;
		} else {
			ret = write_blk(info, *blk, buf);
			if (ret < 0)
				quota_error(dquot->dq_sb,
					    "Can't write quota tree block %u",
					    *blk);
		}
	}
out_buf:
	kfree(buf);
	return ret;
}
511
512
513int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
514{
515 uint tmp = QT_TREEOFF;
516
517 if (!dquot->dq_off)
518 return 0;
519 return remove_tree(info, dquot, &tmp, 0);
520}
521EXPORT_SYMBOL(qtree_delete_dquot);
522
523
524static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
525 struct dquot *dquot, uint blk)
526{
527 char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
528 loff_t ret = 0;
529 int i;
530 char *ddquot;
531
532 if (!buf)
533 return -ENOMEM;
534 ret = read_blk(info, blk, buf);
535 if (ret < 0) {
536 quota_error(dquot->dq_sb, "Can't read quota tree "
537 "block %u", blk);
538 goto out_buf;
539 }
540 ddquot = buf + sizeof(struct qt_disk_dqdbheader);
541 for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
542 if (info->dqi_ops->is_id(ddquot, dquot))
543 break;
544 ddquot += info->dqi_entry_size;
545 }
546 if (i == qtree_dqstr_in_blk(info)) {
547 quota_error(dquot->dq_sb,
548 "Quota for id %u referenced but not present",
549 from_kqid(&init_user_ns, dquot->dq_id));
550 ret = -EIO;
551 goto out_buf;
552 } else {
553 ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct
554 qt_disk_dqdbheader) + i * info->dqi_entry_size;
555 }
556out_buf:
557 kfree(buf);
558 return ret;
559}
560
561
562static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
563 struct dquot *dquot, uint blk, int depth)
564{
565 char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
566 loff_t ret = 0;
567 __le32 *ref = (__le32 *)buf;
568
569 if (!buf)
570 return -ENOMEM;
571 ret = read_blk(info, blk, buf);
572 if (ret < 0) {
573 quota_error(dquot->dq_sb, "Can't read quota tree block %u",
574 blk);
575 goto out_buf;
576 }
577 ret = 0;
578 blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
579 if (!blk)
580 goto out_buf;
581 if (depth < info->dqi_qtree_depth - 1)
582 ret = find_tree_dqentry(info, dquot, blk, depth+1);
583 else
584 ret = find_block_dqentry(info, dquot, blk);
585out_buf:
586 kfree(buf);
587 return ret;
588}
589
590
591static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
592 struct dquot *dquot)
593{
594 return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
595}
596
/*
 * Read dquot from disk.  Looks up the on-disk location via the quota
 * tree when dq_off is not cached yet; a missing entry is not an error -
 * the dquot is marked DQ_FAKE_B with zeroed in-memory usage instead.
 * Returns 0 on success (including the not-present case), negative errno
 * on failure.
 */
int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	loff_t offset;
	char *ddquot;
	int ret = 0;

#ifdef __QUOTA_QT_PARANOIA
	/* Invalidated quota? */
	if (!sb_dqopt(dquot->dq_sb)->files[type]) {
		quota_error(sb, "Quota invalidated while reading!");
		return -EIO;
	}
#endif
	/* Do we already know the entry's offset in the quota file? */
	if (!dquot->dq_off) {
		offset = find_dqentry(info, dquot);
		if (offset <= 0) {	/* not present, or lookup failed */
			if (offset < 0)
				quota_error(sb,"Can't read quota structure "
					    "for id %u",
					    from_kqid(&init_user_ns,
						      dquot->dq_id));
			dquot->dq_off = 0;
			set_bit(DQ_FAKE_B, &dquot->dq_flags);
			memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
			ret = offset;	/* 0 if absent, -errno on failure */
			goto out;
		}
		dquot->dq_off = offset;
	}
	ddquot = kmalloc(info->dqi_entry_size, GFP_NOFS);
	if (!ddquot)
		return -ENOMEM;
	ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
				   dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		if (ret >= 0)	/* short read - treat as I/O error */
			ret = -EIO;
		quota_error(sb, "Error while reading quota structure for id %u",
			    from_kqid(&init_user_ns, dquot->dq_id));
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
		memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
		kfree(ddquot);
		goto out;
	}
	/* Convert on-disk entry to in-memory form under the dquot spinlock */
	spin_lock(&dquot->dq_dqb_lock);
	info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
	/* An entry with all limits zero is a "fake" (placeholder) dquot */
	if (!dquot->dq_dqb.dqb_bhardlimit &&
	    !dquot->dq_dqb.dqb_bsoftlimit &&
	    !dquot->dq_dqb.dqb_ihardlimit &&
	    !dquot->dq_dqb.dqb_isoftlimit)
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dquot->dq_dqb_lock);
	kfree(ddquot);
out:
	dqstats_inc(DQST_READS);
	return ret;
}
EXPORT_SYMBOL(qtree_read_dquot);
658
659
660
661int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
662{
663 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
664 !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
665 return qtree_delete_dquot(info, dquot);
666 return 0;
667}
668EXPORT_SYMBOL(qtree_release_dquot);
669
/*
 * Find the next in-use id at or after *id within the subtree rooted at
 * tree block 'blk' (tree level 'depth').  On success returns 0 with *id
 * updated to the found id; returns -ENOENT when the subtree holds no
 * id >= *id, or another negative errno on I/O failure.
 */
static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id,
			unsigned int blk, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	__le32 *ref = (__le32 *)buf;
	ssize_t ret;
	unsigned int epb = info->dqi_usable_bs >> 2;	/* refs per block */
	unsigned int level_inc = 1;	/* id span of one ref at this depth */
	int i;

	if (!buf)
		return -ENOMEM;

	/* A single reference at this level covers epb^(levels below) ids */
	for (i = depth; i < info->dqi_qtree_depth - 1; i++)
		level_inc *= epb;

	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(info->dqi_sb,
			    "Can't read quota tree block %u", blk);
		goto out_buf;
	}
	for (i = __get_index(info, *id, depth); i < epb; i++) {
		if (ref[i] == cpu_to_le32(0)) {
			/* Empty subtree - skip the whole id range it covers */
			*id += level_inc;
			continue;
		}
		if (depth == info->dqi_qtree_depth - 1) {
			/* Non-zero leaf reference - *id is in use */
			ret = 0;
			goto out_buf;
		}
		ret = find_next_id(info, id, le32_to_cpu(ref[i]), depth + 1);
		if (ret != -ENOENT)
			break;
	}
	if (i == epb) {
		/* Ran off the end of this block without finding an id */
		ret = -ENOENT;
		goto out_buf;
	}
out_buf:
	kfree(buf);
	return ret;
}
713
714int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid)
715{
716 qid_t id = from_kqid(&init_user_ns, *qid);
717 int ret;
718
719 ret = find_next_id(info, &id, QT_TREEOFF, 0);
720 if (ret < 0)
721 return ret;
722 *qid = make_kqid(&init_user_ns, qid->type, id);
723 return 0;
724}
725EXPORT_SYMBOL(qtree_get_next_id);
726