/*
 *	vfsv0 quota IO operations on file
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/dqblk_v2.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/quotaops.h>

#include <asm/byteorder.h>

#include "quota_tree.h"

MODULE_AUTHOR("Jan Kara");
MODULE_DESCRIPTION("Quota trie support");
MODULE_LICENSE("GPL");

#define __QUOTA_QT_PARANOIA

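/*
 * Return the index of the reference for @qid within the tree block at the
 * given @depth (each block holds dqi_usable_bs >> 2 references).
 */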
static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
{
	unsigned int epb = info->dqi_usable_bs >> 2;
	qid_t id = from_kqid(&init_user_ns, qid);

	depth = info->dqi_qtree_depth - depth - 1;
	while (depth--)
		id /= epb;
	return id % epb;
}

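/* Number of quota entries that fit into one data block */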
static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
{
	return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
	       / info->dqi_entry_size;
}

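/* Allocate a buffer for one quota block; warn when allocation fails */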
static char *getdqbuf(size_t size)
{
	char *buf = kmalloc(size, GFP_NOFS);

	if (!buf)
		printk(KERN_WARNING
		       "VFS: Not enough memory for quota buffers.\n");
	return buf;
}

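/* Read one block of the quota file into @buf; the buffer is zeroed first so
 * a short read leaves the tail cleared. */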
static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
	struct super_block *sb = info->dqi_sb;

	memset(buf, 0, info->dqi_usable_bs);
	return sb->s_op->quota_read(sb, info->dqi_type, buf,
			info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
}

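/* Write one block of the quota file; a short write is reported and turned
 * into -EIO. */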
static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
	struct super_block *sb = info->dqi_sb;
	ssize_t ret;

	ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
			info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
	if (ret != info->dqi_usable_bs) {
		quota_error(sb, "dquota write failed");
		if (ret >= 0)
			ret = -EIO;
	}
	return ret;
}

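/* Remove an empty block from the free-block list and return its number, or
 * grow the quota file by one block if the list is empty. */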
static int get_free_dqblk(struct qtree_mem_dqinfo *info)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int ret, blk;

	if (!buf)
		return -ENOMEM;
	if (info->dqi_free_blk) {
		blk = info->dqi_free_blk;
		ret = read_blk(info, blk, buf);
		if (ret < 0)
			goto out_buf;
		info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
	} else {
		memset(buf, 0, info->dqi_usable_bs);

		ret = write_blk(info, info->dqi_blocks, buf);
		if (ret < 0)
			goto out_buf;
		blk = info->dqi_blocks++;
	}
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	ret = blk;
out_buf:
	kfree(buf);
	return ret;
}

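/* Insert an empty block at the head of the free-block list */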
static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
{
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int err;

	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
	dh->dqdh_prev_free = cpu_to_le32(0);
	dh->dqdh_entries = cpu_to_le16(0);
	err = write_blk(info, blk, buf);
	if (err < 0)
		return err;
	info->dqi_free_blk = blk;
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	return 0;
}

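/* Remove the given block from the list of blocks with free entries */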
static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
			       uint blk)
{
	char *tmpbuf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	uint nextblk = le32_to_cpu(dh->dqdh_next_free);
	uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
	int err;

	if (!tmpbuf)
		return -ENOMEM;
	if (nextblk) {
		err = read_blk(info, nextblk, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
							dh->dqdh_prev_free;
		err = write_blk(info, nextblk, tmpbuf);
		if (err < 0)
			goto out_buf;
	}
	if (prevblk) {
		err = read_blk(info, prevblk, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
							dh->dqdh_next_free;
		err = write_blk(info, prevblk, tmpbuf);
		if (err < 0)
			goto out_buf;
	} else {
		info->dqi_free_entry = nextblk;
		mark_info_dirty(info->dqi_sb, info->dqi_type);
	}
	kfree(tmpbuf);
	dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
	/* No matter whether the write succeeds, the block is out of the list */
	if (write_blk(info, blk, buf) < 0)
		quota_error(info->dqi_sb, "Can't write block (%u) "
			    "with free entries", blk);
	return 0;
out_buf:
	kfree(tmpbuf);
	return err;
}

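/* Insert the given block at the head of the list of blocks with free entries */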
static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
			       uint blk)
{
	char *tmpbuf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int err;

	if (!tmpbuf)
		return -ENOMEM;
	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
	dh->dqdh_prev_free = cpu_to_le32(0);
	err = write_blk(info, blk, buf);
	if (err < 0)
		goto out_buf;
	if (info->dqi_free_entry) {
		err = read_blk(info, info->dqi_free_entry, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
							cpu_to_le32(blk);
		err = write_blk(info, info->dqi_free_entry, tmpbuf);
		if (err < 0)
			goto out_buf;
	}
	kfree(tmpbuf);
	info->dqi_free_entry = blk;
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	return 0;
out_buf:
	kfree(tmpbuf);
	return err;
}

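/* Is the entry in the block free (i.e. all bytes zero)? */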
int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
{
	int i;

	for (i = 0; i < info->dqi_entry_size; i++)
		if (disk[i])
			return 0;
	return 1;
}
EXPORT_SYMBOL(qtree_entry_unused);

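/* Find space for the dquot entry: pick a data block with a free slot, set
 * dquot->dq_off and return the block number (0 on failure, *err set). */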
static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
			      struct dquot *dquot, int *err)
{
	uint blk, i;
	struct qt_disk_dqdbheader *dh;
	char *buf = getdqbuf(info->dqi_usable_bs);
	char *ddquot;

	*err = 0;
	if (!buf) {
		*err = -ENOMEM;
		return 0;
	}
	dh = (struct qt_disk_dqdbheader *)buf;
	if (info->dqi_free_entry) {
		blk = info->dqi_free_entry;
		*err = read_blk(info, blk, buf);
		if (*err < 0)
			goto out_buf;
	} else {
		blk = get_free_dqblk(info);
		if ((int)blk < 0) {
			*err = blk;
			kfree(buf);
			return 0;
		}
		memset(buf, 0, info->dqi_usable_bs);
		/* This is enough as the block is already zeroed and the entry
		 * list is empty... */
		info->dqi_free_entry = blk;
		mark_info_dirty(dquot->dq_sb, dquot->dq_id.type);
	}
	/* Block will be full? */
	if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
		*err = remove_free_dqentry(info, buf, blk);
		if (*err < 0) {
			quota_error(dquot->dq_sb, "Can't remove block (%u) "
				    "from entry free list", blk);
			goto out_buf;
		}
	}
	le16_add_cpu(&dh->dqdh_entries, 1);
	/* Find free structure in block */
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (qtree_entry_unused(info, ddquot))
			break;
		ddquot += info->dqi_entry_size;
	}
#ifdef __QUOTA_QT_PARANOIA
	if (i == qtree_dqstr_in_blk(info)) {
		quota_error(dquot->dq_sb, "Data block full but it shouldn't");
		*err = -EIO;
		goto out_buf;
	}
#endif
	*err = write_blk(info, blk, buf);
	if (*err < 0) {
		quota_error(dquot->dq_sb, "Can't write quota data block %u",
			    blk);
		goto out_buf;
	}
	dquot->dq_off = (blk << info->dqi_blocksize_bits) +
			sizeof(struct qt_disk_dqdbheader) +
			i * info->dqi_entry_size;
	kfree(buf);
	return blk;
out_buf:
	kfree(buf);
	return 0;
}

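/* Insert a reference to the dquot into the tree, allocating tree and data
 * blocks as needed while descending one level per recursive call. */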
static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			  uint *treeblk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	int ret = 0, newson = 0, newact = 0;
	__le32 *ref;
	uint newblk;

	if (!buf)
		return -ENOMEM;
	if (!*treeblk) {
		ret = get_free_dqblk(info);
		if (ret < 0)
			goto out_buf;
		*treeblk = ret;
		memset(buf, 0, info->dqi_usable_bs);
		newact = 1;
	} else {
		ret = read_blk(info, *treeblk, buf);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't read tree quota "
				    "block %u", *treeblk);
			goto out_buf;
		}
	}
	ref = (__le32 *)buf;
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (!newblk)
		newson = 1;
	if (depth == info->dqi_qtree_depth - 1) {
#ifdef __QUOTA_QT_PARANOIA
		if (newblk) {
			quota_error(dquot->dq_sb, "Inserting already present "
				    "quota entry (block %u)",
				    le32_to_cpu(ref[get_index(info,
						dquot->dq_id, depth)]));
			ret = -EIO;
			goto out_buf;
		}
#endif
		newblk = find_free_dqentry(info, dquot, &ret);
	} else {
		ret = do_insert_tree(info, dquot, &newblk, depth + 1);
	}
	if (newson && ret >= 0) {
		ref[get_index(info, dquot->dq_id, depth)] =
							cpu_to_le32(newblk);
		ret = write_blk(info, *treeblk, buf);
	} else if (newact && ret < 0) {
		put_free_dqblk(info, buf, *treeblk);
	}
out_buf:
	kfree(buf);
	return ret;
}

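/* Wrapper for inserting a quota structure into the tree, starting at the root */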
static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
				 struct dquot *dquot)
{
	uint tmp = QT_TREEOFF;

#ifdef __QUOTA_QT_PARANOIA
	if (info->dqi_blocks <= QT_TREEOFF) {
		quota_error(dquot->dq_sb, "Quota tree root isn't allocated!");
		return -EIO;
	}
#endif
	return do_insert_tree(info, dquot, &tmp, 0);
}

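/* Write dquot to disk, allocating space in the tree first if the structure
 * has no on-disk location yet. */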
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	ssize_t ret;
	char *ddquot = getdqbuf(info->dqi_entry_size);

	if (!ddquot)
		return -ENOMEM;

	if (!dquot->dq_off) {
		ret = dq_insert_tree(info, dquot);
		if (ret < 0) {
			quota_error(sb, "Error %zd occurred while creating "
				    "quota", ret);
			kfree(ddquot);
			return ret;
		}
	}
	spin_lock(&dq_data_lock);
	info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
	spin_unlock(&dq_data_lock);
	ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
				    dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		quota_error(sb, "dquota write failed");
		if (ret >= 0)
			ret = -ENOSPC;
	} else {
		ret = 0;
	}
	dqstats_inc(DQST_WRITES);
	kfree(ddquot);

	return ret;
}
EXPORT_SYMBOL(qtree_write_dquot);

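/* Free the dquot entry in its data block and release the block when it
 * becomes empty. */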
static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			uint blk)
{
	struct qt_disk_dqdbheader *dh;
	char *buf = getdqbuf(info->dqi_usable_bs);
	int ret = 0;

	if (!buf)
		return -ENOMEM;
	if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
		quota_error(dquot->dq_sb, "Quota structure has offset to "
			    "other block (%u) than it should (%u)", blk,
			    (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
		ret = -EIO;
		goto out_buf;
	}
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota data block %u",
			    blk);
		goto out_buf;
	}
	dh = (struct qt_disk_dqdbheader *)buf;
	le16_add_cpu(&dh->dqdh_entries, -1);
	if (!le16_to_cpu(dh->dqdh_entries)) {	/* Block got free? */
		ret = remove_free_dqentry(info, buf, blk);
		if (ret >= 0)
			ret = put_free_dqblk(info, buf, blk);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't move quota data block "
				    "(%u) to free list", blk);
			goto out_buf;
		}
	} else {
		memset(buf +
		       (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
		       0, info->dqi_entry_size);
		if (le16_to_cpu(dh->dqdh_entries) ==
		    qtree_dqstr_in_blk(info) - 1) {
			/* Insert will write block itself */
			ret = insert_free_dqentry(info, buf, blk);
			if (ret < 0) {
				quota_error(dquot->dq_sb, "Can't insert quota "
					    "data block (%u) to free entry list",
					    blk);
				goto out_buf;
			}
		} else {
			ret = write_blk(info, blk, buf);
			if (ret < 0) {
				quota_error(dquot->dq_sb, "Can't write quota "
					    "data block %u", blk);
				goto out_buf;
			}
		}
	}
	dquot->dq_off = 0;
out_buf:
	kfree(buf);
	return ret;
}

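/* Remove the reference to the dquot from the tree, freeing tree blocks that
 * become empty on the way back up. */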
static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
		       uint *blk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	int ret = 0;
	uint newblk;
	__le32 *ref = (__le32 *)buf;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, *blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota data block %u",
			    *blk);
		goto out_buf;
	}
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (depth == info->dqi_qtree_depth - 1) {
		ret = free_dqentry(info, dquot, newblk);
		newblk = 0;
	} else {
		ret = remove_tree(info, dquot, &newblk, depth + 1);
	}
	if (ret >= 0 && !newblk) {
		int i;

		ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
		/* Block got empty? */
		for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
			;
		/* Don't put the root block into the free block list */
		if (i == (info->dqi_usable_bs >> 2)
		    && *blk != QT_TREEOFF) {
			put_free_dqblk(info, buf, *blk);
			*blk = 0;
		} else {
			ret = write_blk(info, *blk, buf);
			if (ret < 0)
				quota_error(dquot->dq_sb,
					    "Can't write quota tree block %u",
					    *blk);
		}
	}
out_buf:
	kfree(buf);
	return ret;
}

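/* Delete dquot from the tree; nothing to do if it has no on-disk location */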
int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	uint tmp = QT_TREEOFF;

	if (!dquot->dq_off)
		return 0;
	return remove_tree(info, dquot, &tmp, 0);
}
EXPORT_SYMBOL(qtree_delete_dquot);

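/* Find the entry for the given dquot in a data block and return its offset
 * in the quota file. */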
static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
				 struct dquot *dquot, uint blk)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	loff_t ret = 0;
	int i;
	char *ddquot;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota tree "
			    "block %u", blk);
		goto out_buf;
	}
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (info->dqi_ops->is_id(ddquot, dquot))
			break;
		ddquot += info->dqi_entry_size;
	}
	if (i == qtree_dqstr_in_blk(info)) {
		quota_error(dquot->dq_sb,
			    "Quota for id %u referenced but not present",
			    from_kqid(&init_user_ns, dquot->dq_id));
		ret = -EIO;
		goto out_buf;
	} else {
		ret = (blk << info->dqi_blocksize_bits) +
		      sizeof(struct qt_disk_dqdbheader) +
		      i * info->dqi_entry_size;
	}
out_buf:
	kfree(buf);
	return ret;
}

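/* Find the entry for the given id by walking the tree from @blk at @depth */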
static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
				struct dquot *dquot, uint blk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	loff_t ret = 0;
	__le32 *ref = (__le32 *)buf;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota tree block %u",
			    blk);
		goto out_buf;
	}
	ret = 0;
	blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (!blk)
		goto out_buf;
	if (depth < info->dqi_qtree_depth - 1)
		ret = find_tree_dqentry(info, dquot, blk, depth + 1);
	else
		ret = find_block_dqentry(info, dquot, blk);
out_buf:
	kfree(buf);
	return ret;
}

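/* Find the on-disk entry for the given dquot - wrapper starting at the root */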
static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
				  struct dquot *dquot)
{
	return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
}

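/* Read dquot from disk; a missing entry or all-zero limits mark it as fake */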
int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	loff_t offset;
	char *ddquot;
	int ret = 0;

#ifdef __QUOTA_QT_PARANOIA
	/* Invalidated quota? */
	if (!sb_dqopt(dquot->dq_sb)->files[type]) {
		quota_error(sb, "Quota invalidated while reading!");
		return -EIO;
	}
#endif
	/* Do we know the offset of the dquot entry in the quota file? */
	if (!dquot->dq_off) {
		offset = find_dqentry(info, dquot);
		if (offset <= 0) {	/* Entry not present? */
			if (offset < 0)
				quota_error(sb, "Can't read quota structure "
					    "for id %u",
					    from_kqid(&init_user_ns,
						      dquot->dq_id));
			dquot->dq_off = 0;
			set_bit(DQ_FAKE_B, &dquot->dq_flags);
			memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
			ret = offset;
			goto out;
		}
		dquot->dq_off = offset;
	}
	ddquot = getdqbuf(info->dqi_entry_size);
	if (!ddquot)
		return -ENOMEM;
	ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
				   dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		if (ret >= 0)
			ret = -EIO;
		quota_error(sb, "Error while reading quota structure for id %u",
			    from_kqid(&init_user_ns, dquot->dq_id));
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
		memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
		kfree(ddquot);
		goto out;
	}
	spin_lock(&dq_data_lock);
	info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
	if (!dquot->dq_dqb.dqb_bhardlimit &&
	    !dquot->dq_dqb.dqb_bsoftlimit &&
	    !dquot->dq_dqb.dqb_ihardlimit &&
	    !dquot->dq_dqb.dqb_isoftlimit)
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dq_data_lock);
	kfree(ddquot);
out:
	dqstats_inc(DQST_READS);
	return ret;
}
EXPORT_SYMBOL(qtree_read_dquot);

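/* Check whether the dquot should be deleted on release: only fake dquots
 * with no used space and no used inodes are removed from the tree. */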
int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
	    !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
		return qtree_delete_dquot(info, dquot);
	return 0;
}
EXPORT_SYMBOL(qtree_release_dquot);