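/*
 * compress.c - NTFS kernel compressed attribute handling. Part of the
 * Linux-NTFS project.
 */
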
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>

#include "attrib.h"
#include "inode.h"
#include "debug.h"
#include "ntfs.h"

/**
 * ntfs_compression_constants - enum of constants used in the compression code
 */
typedef enum {
	/* Token types and access mask. */
	NTFS_SYMBOL_TOKEN	= 0,
	NTFS_PHRASE_TOKEN	= 1,
	NTFS_TOKEN_MASK		= 1,

	/* Compression sub-block constants. */
	NTFS_SB_SIZE_MASK	= 0x0fff,
	NTFS_SB_SIZE		= 0x1000,
	NTFS_SB_IS_COMPRESSED	= 0x8000,

	/*
	 * The maximum compression block size is by definition 16 times the
	 * cluster size, with the maximum supported cluster size being 4kiB.
	 * Thus the maximum compression buffer size is 64kiB, so we use this
	 * when initializing the compression buffer.
	 */
	NTFS_MAX_CB_SIZE	= 64 * 1024,
} ntfs_compression_constants;

/*
 * The single buffer used by the decompression engine, protected by the
 * ntfs_cb_lock spinlock below.
 */
static u8 *ntfs_compression_buffer = NULL;

/*
 * Spinlock which protects ntfs_compression_buffer.
 */
static DEFINE_SPINLOCK(ntfs_cb_lock);
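
/**
 * allocate_compression_buffers - allocate the decompression buffer
 *
 * Return 0 on success or -ENOMEM if the allocation failed.
 */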
int allocate_compression_buffers(void)
{
	BUG_ON(ntfs_compression_buffer);

	ntfs_compression_buffer = vmalloc(NTFS_MAX_CB_SIZE);
	if (!ntfs_compression_buffer)
		return -ENOMEM;
	return 0;
}
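
/**
 * free_compression_buffers - free the decompression buffer
 */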
void free_compression_buffers(void)
{
	BUG_ON(!ntfs_compression_buffer);
	vfree(ntfs_compression_buffer);
	ntfs_compression_buffer = NULL;
}
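
/**
 * zero_partial_compressed_page - zero out of bounds compressed page region
 */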
static void zero_partial_compressed_page(struct page *page,
		const s64 initialized_size)
{
	u8 *kp = page_address(page);
	unsigned int kp_ofs;

	ntfs_debug("Zeroing page region outside initialized size.");
	if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) {
		/* The whole page lies beyond the initialized size. */
		clear_page(kp);
		return;
	}
	kp_ofs = initialized_size & ~PAGE_CACHE_MASK;
	memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs);
	return;
}
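
/**
 * handle_bounds_compressed_page - test for and handle an out of bounds page
 *
 * If @page lies partially or fully beyond @initialized_size but within
 * @i_size, zero the region beyond the initialized size.
 */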
static inline void handle_bounds_compressed_page(struct page *page,
		const loff_t i_size, const s64 initialized_size)
{
	if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) &&
			(initialized_size < i_size))
		zero_partial_compressed_page(page, initialized_size);
	return;
}
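
/**
 * ntfs_decompress - decompress a compression block into the destination pages
 * @dest_pages:		destination array of pages
 * @dest_index:		current index into @dest_pages (IN/OUT)
 * @dest_ofs:		current offset within @dest_pages[*@dest_index] (IN/OUT)
 * @dest_max_index:	maximum index into @dest_pages (IN)
 * @dest_max_ofs:	maximum offset within @dest_pages[@dest_max_index] (IN)
 * @xpage:		the target page (-1 if none) (IN)
 * @xpage_done:		set to 1 if xpage was completed successfully (IN/OUT)
 * @cb_start:		compression block to decompress (IN)
 * @cb_size:		size of compression block @cb_start in bytes (IN)
 * @i_size:		file size when we started the read (IN)
 * @initialized_size:	initialized file size when we started the read (IN)
 *
 * The caller must hold the ntfs_cb_lock spinlock on entry as @cb_start points
 * into the shared compression buffer; ntfs_decompress() drops the lock once
 * it has finished with the buffer, so it must not sleep before then.
 *
 * Return 0 on success or -EOVERFLOW on error in the compressed stream.
 * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
 * completed during the decompression of the compression block.
 */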
static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
		int *dest_ofs, const int dest_max_index, const int dest_max_ofs,
		const int xpage, char *xpage_done, u8 *const cb_start,
		const u32 cb_size, const loff_t i_size,
		const s64 initialized_size)
{
	/*
	 * Pointers into the compressed data, i.e. the compression block (cb),
	 * and the therein contained sub-blocks (sb).
	 */
	u8 *cb_end = cb_start + cb_size; /* End of cb. */
	u8 *cb = cb_start;	/* Current position in cb. */
	u8 *cb_sb_start = cb;	/* Beginning of the current sb in the cb. */
	u8 *cb_sb_end;		/* End of current sb / beginning of next sb. */

	/* Variables for uncompressed data / destination. */
	struct page *dp;	/* Current destination page being worked on. */
	u8 *dp_addr;		/* Current pointer into dp. */
	u8 *dp_sb_start;	/* Start of current sub-block in dp. */
	u8 *dp_sb_end;		/* End of current sb in dp (dp_sb_start +
				   NTFS_SB_SIZE). */
	u16 do_sb_start;	/* @dest_ofs when starting this sub-block. */
	u16 do_sb_end;		/* @dest_ofs of end of this sb (do_sb_start +
				   NTFS_SB_SIZE). */

	/* Variables for tag and token parsing. */
	u8 tag;			/* Current tag. */
	int token;		/* Loop counter for the eight tokens in tag. */

	/* Need this because we can't sleep, so need two stages. */
	int completed_pages[dest_max_index - *dest_index + 1];
	int nr_completed_pages = 0;

	/* Default error code. */
	int err = -EOVERFLOW;

	ntfs_debug("Entering, cb_size = 0x%x.", cb_size);
do_next_sb:
	ntfs_debug("Beginning sub-block at offset = 0x%zx in the cb.",
			cb - cb_start);
	/*
	 * Have we reached the end of the compression block or the end of the
	 * decompressed data?  The latter case can happen for example if the
	 * current position in the compression block is one byte before its
	 * end so the first two checks do not detect it.
	 */
	if (cb == cb_end || !le16_to_cpup((le16*)cb) ||
			(*dest_index == dest_max_index &&
			*dest_ofs == dest_max_ofs)) {
		int i;

		ntfs_debug("Completed. Returning success (0).");
		err = 0;
return_error:
		/* We can sleep from now on, so we drop lock. */
		spin_unlock(&ntfs_cb_lock);
		/* Second stage: finalize completed pages. */
		if (nr_completed_pages > 0) {
			for (i = 0; i < nr_completed_pages; i++) {
				int di = completed_pages[i];

				dp = dest_pages[di];
				/*
				 * If we are outside the initialized size, zero
				 * the out of bounds page range.
				 */
				handle_bounds_compressed_page(dp, i_size,
						initialized_size);
				flush_dcache_page(dp);
				kunmap(dp);
				SetPageUptodate(dp);
				unlock_page(dp);
				if (di == xpage)
					*xpage_done = 1;
				else
					page_cache_release(dp);
				dest_pages[di] = NULL;
			}
		}
		return err;
	}

	/* Setup offsets for the current sub-block destination. */
	do_sb_start = *dest_ofs;
	do_sb_end = do_sb_start + NTFS_SB_SIZE;

	/* Check that we are still within allowed boundaries. */
	if (*dest_index == dest_max_index && do_sb_end > dest_max_ofs)
		goto return_overflow;

	/* Does the minimum size of a compressed sb overflow valid range? */
	if (cb + 6 > cb_end)
		goto return_overflow;

	/* Setup the current sub-block source pointers and validate range. */
	cb_sb_start = cb;
	cb_sb_end = cb_sb_start + (le16_to_cpup((le16*)cb) & NTFS_SB_SIZE_MASK)
			+ 3;
	if (cb_sb_end > cb_end)
		goto return_overflow;

	/* Get the current destination page. */
	dp = dest_pages[*dest_index];
	if (!dp) {
		/* No page present. Skip decompression of this sub-block. */
		cb = cb_sb_end;

		/* Advance destination position to next sub-block. */
		*dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_CACHE_MASK;
		if (!*dest_ofs && (++*dest_index > dest_max_index))
			goto return_overflow;
		goto do_next_sb;
	}

	/* We have a valid destination page. Setup the destination pointers. */
	dp_addr = (u8*)page_address(dp) + do_sb_start;

	/* Now, we are ready to process the current sub-block (sb). */
	if (!(le16_to_cpup((le16*)cb) & NTFS_SB_IS_COMPRESSED)) {
		ntfs_debug("Found uncompressed sub-block.");
		/* This sb is not compressed, just copy it into destination. */

		/* Advance source position to first data byte. */
		cb += 2;

		/* An uncompressed sb must be full size. */
		if (cb_sb_end - cb != NTFS_SB_SIZE)
			goto return_overflow;

		/* Copy the block and advance the source position. */
		memcpy(dp_addr, cb, NTFS_SB_SIZE);
		cb += NTFS_SB_SIZE;

		/* Advance destination position to next sub-block. */
		*dest_ofs += NTFS_SB_SIZE;
		if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) {
finalize_page:
			/*
			 * First stage: add current page index to array of
			 * completed pages.
			 */
			completed_pages[nr_completed_pages++] = *dest_index;
			if (++*dest_index > dest_max_index)
				goto return_overflow;
		}
		goto do_next_sb;
	}
	ntfs_debug("Found compressed sub-block.");
	/* This sb is compressed, decompress it into destination. */

	/* Setup destination pointers. */
	dp_sb_start = dp_addr;
	dp_sb_end = dp_sb_start + NTFS_SB_SIZE;

	/* Forward to the first tag in the sub-block. */
	cb += 2;
do_next_tag:
	if (cb == cb_sb_end) {
		/* Check if the decompressed sub-block was not full-length. */
		if (dp_addr < dp_sb_end) {
			int nr_bytes = do_sb_end - *dest_ofs;

			ntfs_debug("Filling incomplete sub-block with "
					"zeroes.");
			/* Zero remainder and update destination position. */
			memset(dp_addr, 0, nr_bytes);
			*dest_ofs += nr_bytes;
		}
		/* We have finished the current sub-block. */
		if (!(*dest_ofs &= ~PAGE_CACHE_MASK))
			goto finalize_page;
		goto do_next_sb;
	}

	/* Check we are still in range. */
	if (cb > cb_sb_end || dp_addr > dp_sb_end)
		goto return_overflow;

	/* Get the next tag and advance to first token. */
	tag = *cb++;

	/* Parse the eight tokens described by the tag. */
	for (token = 0; token < 8; token++, tag >>= 1) {
		u16 lg, pt, length, max_non_overlap;
		register u16 i;
		u8 *dp_back_addr;

		/* Check if we are done / still in range. */
		if (cb >= cb_sb_end || dp_addr > dp_sb_end)
			break;

		/* Determine token type and parse appropriately. */
		if ((tag & NTFS_TOKEN_MASK) == NTFS_SYMBOL_TOKEN) {
			/*
			 * We have a symbol token, copy the symbol across, and
			 * advance the source and destination positions.
			 */
			*dp_addr++ = *cb++;
			++*dest_ofs;

			/* Continue with the next token. */
			continue;
		}

		/*
		 * We have a phrase token. Make sure it is not the first tag in
		 * the sb as this is illegal and would confuse the code below.
		 */
		if (dp_addr == dp_sb_start)
			goto return_overflow;

		/*
		 * Determine the number of bytes to go back (p) and the number
		 * of bytes to copy (l). We use an optimized algorithm in which
		 * we first calculate log2(current destination position in sb),
		 * which allows determination of l and p in O(1) rather than
		 * O(n).
		 */
		lg = 0;
		for (i = *dest_ofs - do_sb_start - 1; i >= 0x10; i >>= 1)
			lg++;

		/* Get the phrase token. */
		pt = le16_to_cpup((le16*)cb);

		/*
		 * Calculate starting position of the byte sequence in the
		 * destination using the fact that p = (pt >> (12 - lg)) + 1
		 * and make sure we don't go too far back.
		 */
		dp_back_addr = dp_addr - (pt >> (12 - lg)) - 1;
		if (dp_back_addr < dp_sb_start)
			goto return_overflow;

		/* Now calculate the length of the byte sequence. */
		length = (pt & (0xfff >> lg)) + 3;

		/* Advance destination position and verify it is in range. */
		*dest_ofs += length;
		if (*dest_ofs > do_sb_end)
			goto return_overflow;

		/* The number of non-overlapping bytes. */
		max_non_overlap = dp_addr - dp_back_addr;

		if (length <= max_non_overlap) {
			/* The byte sequence doesn't overlap, just copy it. */
			memcpy(dp_addr, dp_back_addr, length);

			/* Advance destination pointer. */
			dp_addr += length;
		} else {
			/*
			 * The byte sequence does overlap, copy non-overlapping
			 * part and then do a slow byte by byte copy for the
			 * overlapping part. Also, advance the destination
			 * pointer.
			 */
			memcpy(dp_addr, dp_back_addr, max_non_overlap);
			dp_addr += max_non_overlap;
			dp_back_addr += max_non_overlap;
			length -= max_non_overlap;
			while (length--)
				*dp_addr++ = *dp_back_addr++;
		}

		/* Advance source position and continue with the next token. */
		cb += 2;
	}

	/* No tokens left in the current tag. Continue with the next tag. */
	goto do_next_tag;

return_overflow:
	ntfs_error(NULL, "Failed. Returning -EOVERFLOW.");
	goto return_error;
}
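
/**
 * ntfs_read_compressed_block - read a compressed attribute into the page cache
 * @page:	locked page in the compression block(s) we need to read
 *
 * When we are called the page has already been verified to be locked and the
 * attribute is known to be non-resident, not encrypted, but compressed.
 *
 * 1. Determine which compression block(s) @page is in.
 * 2. Get hold of all pages corresponding to this/these compression block(s).
 * 3. Read the (first) compression block.
 * 4. Decompress it into the corresponding pages.
 * 5. Release the completed pages; on success @page is uptodate and unlocked.
 * 6. If there are more compression blocks, go to 3.
 *
 * Warning: We have to be careful what we do about existing pages. They might
 * have been written to so that we would lose data if we were to just
 * overwrite them with the out-of-date uncompressed data.
 */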
int ntfs_read_compressed_block(struct page *page)
{
	loff_t i_size;
	s64 initialized_size;
	struct address_space *mapping = page->mapping;
	ntfs_inode *ni = NTFS_I(mapping->host);
	ntfs_volume *vol = ni->vol;
	struct super_block *sb = vol->sb;
	runlist_element *rl;
	unsigned long flags, block_size = sb->s_blocksize;
	unsigned char block_size_bits = sb->s_blocksize_bits;
	u8 *cb, *cb_pos, *cb_end;
	struct buffer_head **bhs;
	unsigned long offset, index = page->index;
	u32 cb_size = ni->itype.compressed.block_size;
	u64 cb_size_mask = cb_size - 1UL;
	VCN vcn;
	LCN lcn;
	/* The first wanted vcn (minimum alignment is PAGE_CACHE_SIZE). */
	VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >>
			vol->cluster_size_bits;
	/*
	 * The first vcn after the last wanted vcn (minimum alignment is again
	 * PAGE_CACHE_SIZE).
	 */
	VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1)
			& ~cb_size_mask) >> vol->cluster_size_bits;
	/* Number of compression blocks (cbs) in the wanted vcn range. */
	unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
			>> ni->itype.compressed.block_size_bits;
	/*
	 * Number of pages required to store the uncompressed data from all
	 * compression blocks (cbs) overlapping @page. Due to alignment
	 * guarantees of start_vcn and end_vcn, no need to round up here.
	 */
	unsigned int nr_pages = (end_vcn - start_vcn) <<
			vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
	unsigned int xpage, max_page, cur_page, cur_ofs, i;
	unsigned int cb_clusters, cb_max_ofs;
	int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
	struct page **pages;
	unsigned char xpage_done = 0;

	ntfs_debug("Entering, page->index = 0x%lx, cb_size = 0x%x, nr_pages = "
			"%i.", index, cb_size, nr_pages);
	/*
	 * Bad things happen if we get here for anything that is not an
	 * unnamed $DATA attribute.
	 */
	BUG_ON(ni->type != AT_DATA);
	BUG_ON(ni->name_len);

	pages = kmalloc(nr_pages * sizeof(struct page *), GFP_NOFS);

	/* Allocate memory to store the buffer heads we need. */
	bhs_size = cb_size / block_size * sizeof(struct buffer_head *);
	bhs = kmalloc(bhs_size, GFP_NOFS);

	if (unlikely(!pages || !bhs)) {
		kfree(bhs);
		kfree(pages);
		unlock_page(page);
		ntfs_error(vol->sb, "Failed to allocate internal buffers.");
		return -ENOMEM;
	}

	/*
	 * We have already been given one page, this is the one we must do.
	 * Once again, the alignment guarantees keep it simple.
	 */
	offset = start_vcn << vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
	xpage = index - offset;
	pages[xpage] = page;
	/*
	 * The remaining pages need to be allocated and inserted into the page
	 * cache, alignment guarantees keep all the below much simpler.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	i_size = i_size_read(VFS_I(ni));
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
			offset;
	/* Is the page fully outside i_size? (truncate in progress) */
	if (xpage >= max_page) {
		kfree(bhs);
		kfree(pages);
		zero_user(page, 0, PAGE_CACHE_SIZE);
		ntfs_debug("Compressed read outside i_size - truncated?");
		SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}
	if (nr_pages < max_page)
		max_page = nr_pages;
	for (i = 0; i < max_page; i++, offset++) {
		if (i != xpage)
			pages[i] = grab_cache_page_nowait(mapping, offset);
		page = pages[i];
		if (page) {
			/*
			 * We only (re)read the page if it isn't already read
			 * in and/or dirty or we would be losing data or at
			 * least wasting our time.
			 */
			if (!PageDirty(page) && (!PageUptodate(page) ||
					PageError(page))) {
				ClearPageError(page);
				kmap(page);
				continue;
			}
			unlock_page(page);
			page_cache_release(page);
			pages[i] = NULL;
		}
	}

	/*
	 * We have the runlist, and all the destination pages we need to fill.
	 * Now read the first compression block.
	 */
	cur_page = 0;
	cur_ofs = 0;
	cb_clusters = ni->itype.compressed.block_clusters;
do_next_cb:
	nr_cbs--;
	nr_bhs = 0;

	/* Read all cb buffer heads one cluster at a time. */
	rl = NULL;
	for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
			vcn++) {
		bool is_retry = false;

		if (!rl) {
lock_retry_remap:
			down_read(&ni->runlist.lock);
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target vcn. */
			while (rl->length && rl[1].vcn <= vcn)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
		} else
			lcn = LCN_RL_NOT_MAPPED;
		ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
				(unsigned long long)vcn,
				(unsigned long long)lcn);
		if (lcn < 0) {
			/*
			 * When we reach the first sparse cluster we have
			 * finished with the cb.
			 */
			if (lcn == LCN_HOLE)
				break;
			if (is_retry || lcn != LCN_RL_NOT_MAPPED)
				goto rl_err;
			is_retry = true;
			/*
			 * Attempt to map runlist, dropping lock for the
			 * duration.
			 */
			up_read(&ni->runlist.lock);
			if (!ntfs_map_runlist(ni, vcn))
				goto lock_retry_remap;
			goto map_rl_err;
		}
		block = lcn << vol->cluster_size_bits >> block_size_bits;
		/* Read the lcn from device in chunks of block_size bytes. */
		max_block = block + (vol->cluster_size >> block_size_bits);
		do {
			ntfs_debug("block = 0x%x.", block);
			if (unlikely(!(bhs[nr_bhs] = sb_getblk(sb, block))))
				goto getblk_err;
			nr_bhs++;
		} while (++block < max_block);
	}

	/* Release the lock if we took it. */
	if (rl)
		up_read(&ni->runlist.lock);

	/* Setup and initiate io on all buffer heads. */
	for (i = 0; i < nr_bhs; i++) {
		struct buffer_head *tbh = bhs[i];

		if (!trylock_buffer(tbh))
			continue;
		if (unlikely(buffer_uptodate(tbh))) {
			unlock_buffer(tbh);
			continue;
		}
		get_bh(tbh);
		tbh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, tbh);
	}

	/* Wait for io completion on all buffer heads. */
	for (i = 0; i < nr_bhs; i++) {
		struct buffer_head *tbh = bhs[i];

		if (buffer_uptodate(tbh))
			continue;
		wait_on_buffer(tbh);
		/*
		 * We need an optimization barrier here, otherwise we start
		 * hitting the below fixup code when accessing a loopback
		 * mounted ntfs partition. This indicates either there is a
		 * race condition in the loop driver or, more likely, gcc
		 * overoptimises the code without the barrier and it doesn't
		 * do the Right Thing(TM).
		 */
		barrier();
		if (unlikely(!buffer_uptodate(tbh))) {
			ntfs_warning(vol->sb, "Buffer is unlocked but not "
					"uptodate! Unplugging the disk queue "
					"and rescheduling.");
			get_bh(tbh);
			blk_run_address_space(mapping);
			schedule();
			put_bh(tbh);
			if (unlikely(!buffer_uptodate(tbh)))
				goto read_err;
			ntfs_warning(vol->sb, "Buffer is now uptodate. Good.");
		}
	}

	/*
	 * Get the compression buffer. We must not sleep any more from here on
	 * because we are holding the ntfs_cb_lock spinlock.
	 */
	spin_lock(&ntfs_cb_lock);
	cb = ntfs_compression_buffer;

	BUG_ON(!cb);

	cb_pos = cb;
	cb_end = cb + cb_size;

	/* Copy the buffer heads into the contiguous buffer. */
	for (i = 0; i < nr_bhs; i++) {
		memcpy(cb_pos, bhs[i]->b_data, block_size);
		cb_pos += block_size;
	}

	/* Just a precaution. */
	if (cb_pos + 2 <= cb + cb_size)
		*(u16*)cb_pos = 0;

	/* Reset cb_pos back to the beginning. */
	cb_pos = cb;

	/* We now have both source (if present) and destination. */
	ntfs_debug("Successfully read the compression block.");

	/* The last page and maximum offset within it for the current cb. */
	cb_max_page = (cur_page << PAGE_CACHE_SHIFT) + cur_ofs + cb_size;
	cb_max_ofs = cb_max_page & ~PAGE_CACHE_MASK;
	cb_max_page >>= PAGE_CACHE_SHIFT;

	/* Catch end of file inside a compression block. */
	if (cb_max_page > max_page)
		cb_max_page = max_page;

	/*
	 * A cb whose first cluster is sparse is entirely sparse; a cb with all
	 * clusters allocated was stored uncompressed; anything in between is
	 * compressed.
	 */
	if (vcn == start_vcn - cb_clusters) {
		/* Sparse cb, zero out the page range overlapping the cb. */
		ntfs_debug("Found sparse compression block.");
		/* We can sleep from now on, so we drop lock. */
		spin_unlock(&ntfs_cb_lock);
		if (cb_max_ofs)
			cb_max_page--;
		for (; cur_page < cb_max_page; cur_page++) {
			page = pages[cur_page];
			if (page) {
				/* Zero the whole page or just its tail. */
				if (likely(!cur_ofs))
					clear_page(page_address(page));
				else
					memset(page_address(page) + cur_ofs, 0,
							PAGE_CACHE_SIZE -
							cur_ofs);
				flush_dcache_page(page);
				kunmap(page);
				SetPageUptodate(page);
				unlock_page(page);
				if (cur_page == xpage)
					xpage_done = 1;
				else
					page_cache_release(page);
				pages[cur_page] = NULL;
			}
			cb_pos += PAGE_CACHE_SIZE - cur_ofs;
			cur_ofs = 0;
			if (cb_pos >= cb_end)
				break;
		}
		/* If we have a partial final page, deal with it now. */
		if (cb_max_ofs && cb_pos < cb_end) {
			page = pages[cur_page];
			if (page)
				memset(page_address(page) + cur_ofs, 0,
						cb_max_ofs - cur_ofs);
			/*
			 * The remainder of this page belongs to the next cb,
			 * so leave it locked and mapped for now.
			 */
			cur_ofs = cb_max_ofs;
		}
	} else if (vcn == start_vcn) {
		/* We can't sleep so we need two stages. */
		unsigned int cur2_page = cur_page;
		unsigned int cur_ofs2 = cur_ofs;
		u8 *cb_pos2 = cb_pos;

		ntfs_debug("Found uncompressed compression block.");
		/* Uncompressed cb, copy it to the destination pages. */

		/* First stage: copy data into destination pages. */
		if (cb_max_ofs)
			cb_max_page--;
		for (; cur_page < cb_max_page; cur_page++) {
			page = pages[cur_page];
			if (page)
				memcpy(page_address(page) + cur_ofs, cb_pos,
						PAGE_CACHE_SIZE - cur_ofs);
			cb_pos += PAGE_CACHE_SIZE - cur_ofs;
			cur_ofs = 0;
			if (cb_pos >= cb_end)
				break;
		}
		/* If we have a partial final page, deal with it now. */
		if (cb_max_ofs && cb_pos < cb_end) {
			page = pages[cur_page];
			if (page)
				memcpy(page_address(page) + cur_ofs, cb_pos,
						cb_max_ofs - cur_ofs);
			cb_pos += cb_max_ofs - cur_ofs;
			cur_ofs = cb_max_ofs;
		}
		/* We can sleep from now on, so we drop lock. */
		spin_unlock(&ntfs_cb_lock);
		/* Second stage: finalize pages. */
		for (; cur2_page < cb_max_page; cur2_page++) {
			page = pages[cur2_page];
			if (page) {
				/*
				 * If we are outside the initialized size, zero
				 * the out of bounds page range.
				 */
				handle_bounds_compressed_page(page, i_size,
						initialized_size);
				flush_dcache_page(page);
				kunmap(page);
				SetPageUptodate(page);
				unlock_page(page);
				if (cur2_page == xpage)
					xpage_done = 1;
				else
					page_cache_release(page);
				pages[cur2_page] = NULL;
			}
			cb_pos2 += PAGE_CACHE_SIZE - cur_ofs2;
			cur_ofs2 = 0;
			if (cb_pos2 >= cb_end)
				break;
		}
	} else {
		/* Compressed cb, decompress it into the destination pages. */
		unsigned int prev_cur_page = cur_page;

		ntfs_debug("Found compressed compression block.");
		err = ntfs_decompress(pages, &cur_page, &cur_ofs,
				cb_max_page, cb_max_ofs, xpage, &xpage_done,
				cb_pos, cb_size - (cb_pos - cb), i_size,
				initialized_size);
		/*
		 * We can sleep from now on, the lock was already dropped by
		 * ntfs_decompress().
		 */
		if (err) {
			ntfs_error(vol->sb, "ntfs_decompress() failed in inode "
					"0x%lx with error code %i. Skipping "
					"this compression block.",
					ni->mft_no, -err);
			/* Release the unfinished pages. */
			for (; prev_cur_page < cur_page; prev_cur_page++) {
				page = pages[prev_cur_page];
				if (page) {
					flush_dcache_page(page);
					kunmap(page);
					unlock_page(page);
					if (prev_cur_page != xpage)
						page_cache_release(page);
					pages[prev_cur_page] = NULL;
				}
			}
		}
	}

	/* Release the buffer heads. */
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);

	/* Do we have more work to do? */
	if (nr_cbs)
		goto do_next_cb;

	/* We no longer need the list of buffer heads. */
	kfree(bhs);

	/* Clean up if we have any pages left. Should never happen. */
	for (cur_page = 0; cur_page < max_page; cur_page++) {
		page = pages[cur_page];
		if (page) {
			ntfs_error(vol->sb, "Still have pages left! "
					"Terminating them with extreme "
					"prejudice. Inode 0x%lx, page index "
					"0x%lx.", ni->mft_no, page->index);
			flush_dcache_page(page);
			kunmap(page);
			unlock_page(page);
			if (cur_page != xpage)
				page_cache_release(page);
			pages[cur_page] = NULL;
		}
	}

	/* We no longer need the list of pages. */
	kfree(pages);

	/* If we have completed the requested page, we return success. */
	if (likely(xpage_done))
		return 0;

	ntfs_debug("Failed. Returning error code %s.", err == -EOVERFLOW ?
			"EOVERFLOW" : (!err ? "EIO" : "unknown error"));
	return err < 0 ? err : -EIO;

read_err:
	ntfs_error(vol->sb, "IO error while reading compressed data.");
	/* Release the buffer heads. */
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	goto err_out;

map_rl_err:
	ntfs_error(vol->sb, "ntfs_map_runlist() failed. Cannot read "
			"compression block.");
	goto err_out;

rl_err:
	up_read(&ni->runlist.lock);
	ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn() failed. Cannot read "
			"compression block.");
	goto err_out;

getblk_err:
	up_read(&ni->runlist.lock);
	ntfs_error(vol->sb, "getblk() failed. Cannot read compression block.");

err_out:
	kfree(bhs);
	for (i = cur_page; i < max_page; i++) {
		page = pages[i];
		if (page) {
			flush_dcache_page(page);
			kunmap(page);
			unlock_page(page);
			if (i != xpage)
				page_cache_release(page);
		}
	}
	kfree(pages);
	return -EIO;
}