1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/fs.h>
25#include <linux/buffer_head.h>
26#include <linux/blkdev.h>
27#include <linux/vmalloc.h>
28#include <linux/slab.h>
29
30#include "attrib.h"
31#include "inode.h"
32#include "debug.h"
33#include "ntfs.h"
34
35
36
37
/*
 * Constants used by the compressed-attribute code.  On-disk compressed data
 * is organized as a sequence of sub-blocks (sb) inside a compression block
 * (cb); each sb carries a 2-byte header whose low 12 bits are the size and
 * whose top bit flags whether the sb payload is compressed.
 */
typedef enum {
	/* Token types and access mask (low bit of each tag bit). */
	NTFS_SYMBOL_TOKEN = 0,		/* Literal byte follows. */
	NTFS_PHRASE_TOKEN = 1,		/* Back-reference (pt,length) follows. */
	NTFS_TOKEN_MASK = 1,		/* Mask to extract the token type. */

	/* Sub-block header fields. */
	NTFS_SB_SIZE_MASK = 0x0fff,	/* Size bits of the sb header. */
	NTFS_SB_SIZE = 0x1000,		/* Uncompressed sb size: 4096 bytes. */
	NTFS_SB_IS_COMPRESSED = 0x8000,	/* Set if the sb data is compressed. */

	/*
	 * Maximum supported size of a compression block; the single global
	 * decompression buffer is allocated at this size.
	 */
	NTFS_MAX_CB_SIZE = 64 * 1024,
} ntfs_compression_constants;
57
58
59
60
/*
 * The single, shared buffer into which a compression block is assembled
 * before decompression.  vmalloc'ed (NTFS_MAX_CB_SIZE bytes) by
 * allocate_compression_buffers() and released by free_compression_buffers().
 */
static u8 *ntfs_compression_buffer = NULL;

/*
 * Spinlock serializing all access to ntfs_compression_buffer.  Held (and
 * hence sleeping forbidden) for the whole assemble + decompress phase.
 */
static DEFINE_SPINLOCK(ntfs_cb_lock);
67
68
69
70
71
72
73
74
75int allocate_compression_buffers(void)
76{
77 BUG_ON(ntfs_compression_buffer);
78
79 ntfs_compression_buffer = vmalloc(NTFS_MAX_CB_SIZE);
80 if (!ntfs_compression_buffer)
81 return -ENOMEM;
82 return 0;
83}
84
85
86
87
88
89
90void free_compression_buffers(void)
91{
92 BUG_ON(!ntfs_compression_buffer);
93 vfree(ntfs_compression_buffer);
94 ntfs_compression_buffer = NULL;
95}
96
97
98
99
100static void zero_partial_compressed_page(struct page *page,
101 const s64 initialized_size)
102{
103 u8 *kp = page_address(page);
104 unsigned int kp_ofs;
105
106 ntfs_debug("Zeroing page region outside initialized size.");
107 if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) {
108
109
110
111
112 clear_page(kp);
113 return;
114 }
115 kp_ofs = initialized_size & ~PAGE_CACHE_MASK;
116 memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs);
117 return;
118}
119
120
121
122
123static inline void handle_bounds_compressed_page(struct page *page,
124 const loff_t i_size, const s64 initialized_size)
125{
126 if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) &&
127 (initialized_size < i_size))
128 zero_partial_compressed_page(page, initialized_size);
129 return;
130}
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
/**
 * ntfs_decompress - decompress a compression block into the destination pages
 * @dest_pages:		array of destination pages (entries may be NULL)
 * @dest_index:		in/out: current index into @dest_pages
 * @dest_ofs:		in/out: current byte offset within the current page
 * @dest_max_index:	maximum legal index into @dest_pages
 * @dest_max_ofs:	maximum legal offset within the @dest_max_index page
 * @xpage:		index of the page the VFS originally asked for
 * @xpage_done:		set to 1 if page @xpage was completed successfully
 * @cb_start:		start of the (remaining) compression block to process
 * @cb_size:		number of compressed bytes at @cb_start
 * @i_size:		file size, used to bound-zero completed pages
 * @initialized_size:	attribute initialized size, same purpose
 *
 * Walk the compression block one sub-block (sb) at a time, decompressing
 * (or memcpy-ing, for uncompressed sbs) into the pages of @dest_pages.
 * NULL page slots are skipped (the data is simply discarded).
 *
 * IMPORTANT: the caller holds ntfs_cb_lock on entry and this function
 * releases it (at the return_error label) before finalizing completed pages,
 * since kunmap()/unlock_page() may sleep.  Completed page indices are
 * therefore queued in completed_pages[] while the lock is held and only
 * finalized (bounds-zeroed, flushed, marked uptodate, unlocked, released)
 * after the unlock.
 *
 * Return 0 on success or -EOVERFLOW if the compressed data is corrupt (any
 * pointer would move outside its sb or the cb).  Even on error, pages
 * completed before the corruption was detected have been finalized.
 */
static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
		int *dest_ofs, const int dest_max_index, const int dest_max_ofs,
		const int xpage, char *xpage_done, u8 *const cb_start,
		const u32 cb_size, const loff_t i_size,
		const s64 initialized_size)
{
	/* Pointers into the compressed data, i.e. the compression block (cb). */
	u8 *cb_end = cb_start + cb_size;	/* End of cb. */
	u8 *cb = cb_start;			/* Current position in cb. */
	u8 *cb_sb_start = cb;			/* Start of current sub-block. */
	u8 *cb_sb_end;				/* End of current sb (exclusive). */

	/* Variables for uncompressed data / destination. */
	struct page *dp;	/* Destination page currently being worked on. */
	u8 *dp_addr;		/* Current position in dp. */
	u8 *dp_sb_start;	/* Start of current sub-block in dp. */
	u8 *dp_sb_end;		/* End of current sb in dp (exclusive). */

	u16 do_sb_start;	/* @dest_ofs at the start of the current sb. */
	u16 do_sb_end;		/* @dest_ofs at the end of the current sb. */

	/* Variables for tag and token parsing. */
	u8 tag;			/* Current tag byte. */
	int token;		/* Loop counter for the eight tokens in tag. */

	/* Pages completed while ntfs_cb_lock is held; finalized after unlock. */
	int completed_pages[dest_max_index - *dest_index + 1];
	int nr_completed_pages = 0;

	/* Default error code: corrupt data. */
	int err = -EOVERFLOW;

	ntfs_debug("Entering, cb_size = 0x%x.", cb_size);
do_next_sb:
	ntfs_debug("Beginning sub-block at offset = 0x%zx in the cb.",
			cb - cb_start);
	/*
	 * Have we reached the end of the cb (a zero sb header also terminates
	 * it) or filled the destination range?  Then we are done: release
	 * the compression buffer lock and finalize all queued pages.
	 */
	if (cb == cb_end || !le16_to_cpup((le16*)cb) ||
			(*dest_index == dest_max_index &&
			*dest_ofs == dest_max_ofs)) {
		int i;

		ntfs_debug("Completed. Returning success (0).");
		err = 0;
return_error:
		/* We can sleep from now on, so we drop the spinlock. */
		spin_unlock(&ntfs_cb_lock);
		/* Finalize the completed pages (may sleep). */
		if (nr_completed_pages > 0) {
			for (i = 0; i < nr_completed_pages; i++) {
				int di = completed_pages[i];

				dp = dest_pages[di];
				/*
				 * If the page lies past the initialized size,
				 * zero the region outside it.
				 */
				handle_bounds_compressed_page(dp, i_size,
						initialized_size);
				flush_dcache_page(dp);
				kunmap(dp);
				SetPageUptodate(dp);
				unlock_page(dp);
				if (di == xpage)
					*xpage_done = 1;
				else
					page_cache_release(dp);
				dest_pages[di] = NULL;
			}
		}
		return err;
	}

	/* Setup offsets for the current sub-block destination. */
	do_sb_start = *dest_ofs;
	do_sb_end = do_sb_start + NTFS_SB_SIZE;

	/* Check that we are still within allowed boundaries. */
	if (*dest_index == dest_max_index && do_sb_end > dest_max_ofs)
		goto return_overflow;

	/* Does the minimum size of an sb (header + at least one byte) fit? */
	if (cb + 6 > cb_end)
		goto return_overflow;

	/* Setup the current sub-block source pointers and validate range. */
	cb_sb_start = cb;
	cb_sb_end = cb_sb_start + (le16_to_cpup((le16*)cb) & NTFS_SB_SIZE_MASK)
			+ 3;
	if (cb_sb_end > cb_end)
		goto return_overflow;

	/* Get the current destination page. */
	dp = dest_pages[*dest_index];
	if (!dp) {
		/* No page: discard the sub-block and advance position. */
		cb = cb_sb_end;
		/* Advance destination position to next sub-block. */
		*dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_CACHE_MASK;
		if (!*dest_ofs && (++*dest_index > dest_max_index))
			goto return_overflow;
		goto do_next_sb;
	}

	/* We have a valid destination page.  Setup the destination pointer. */
	dp_addr = (u8*)page_address(dp) + do_sb_start;

	/* Now, we are ready to process the current sub-block. */
	if (!(le16_to_cpup((le16*)cb) & NTFS_SB_IS_COMPRESSED)) {
		ntfs_debug("Found uncompressed sub-block.");
		/* This sb is not compressed, just copy it into destination. */

		/* Advance source position to first data byte. */
		cb += 2;

		/* An uncompressed sb must be full size. */
		if (cb_sb_end - cb != NTFS_SB_SIZE)
			goto return_overflow;

		/* Copy the block and advance the source position. */
		memcpy(dp_addr, cb, NTFS_SB_SIZE);
		cb += NTFS_SB_SIZE;

		/* Advance destination position to next sub-block. */
		*dest_ofs += NTFS_SB_SIZE;
		if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) {
finalize_page:
			/*
			 * First stage: add current page index to array of
			 * completed pages.  (Finalization happens after the
			 * lock is dropped at return_error.)
			 */
			completed_pages[nr_completed_pages++] = *dest_index;
			if (++*dest_index > dest_max_index)
				goto return_overflow;
		}
		goto do_next_sb;
	}
	ntfs_debug("Found compressed sub-block.");
	/* This sb is compressed, decompress it into destination. */

	/* Setup destination pointers. */
	dp_sb_start = dp_addr;
	dp_sb_end = dp_sb_start + NTFS_SB_SIZE;

	/* Forward to the first tag in the sub-block. */
	cb += 2;
do_next_tag:
	if (cb == cb_sb_end) {
		/* Check if the decompressed sub-block was not full-length. */
		if (dp_addr < dp_sb_end) {
			int nr_bytes = do_sb_end - *dest_ofs;

			ntfs_debug("Filling incomplete sub-block with "
					"zeroes.");
			/* Zero remainder and update destination position. */
			memset(dp_addr, 0, nr_bytes);
			*dest_ofs += nr_bytes;
		}
		/* We have finished the current sub-block. */
		if (!(*dest_ofs &= ~PAGE_CACHE_MASK))
			goto finalize_page;
		goto do_next_sb;
	}

	/* Check we are still in range. */
	if (cb > cb_sb_end || dp_addr > dp_sb_end)
		goto return_overflow;

	/* Get the next tag and advance to first token. */
	tag = *cb++;

	/* Parse the eight tokens described by the tag. */
	for (token = 0; token < 8; token++, tag >>= 1) {
		u16 lg, pt, length, max_non_overlap;
		register u16 i;
		u8 *dp_back_addr;

		/* Check if we are done / still in range. */
		if (cb >= cb_sb_end || dp_addr > dp_sb_end)
			break;

		/* Determine token type and parse appropriately. */
		if ((tag & NTFS_TOKEN_MASK) == NTFS_SYMBOL_TOKEN) {
			/*
			 * We have a symbol token: copy the literal byte and
			 * advance both source and destination.
			 */
			*dp_addr++ = *cb++;
			++*dest_ofs;

			/* Continue with the next token. */
			continue;
		}

		/*
		 * We have a phrase token.  A back-reference at the very start
		 * of a sub-block has nothing to refer back to: corrupt data.
		 */
		if (dp_addr == dp_sb_start)
			goto return_overflow;

		/*
		 * Determine the number of bits (lg) by which the phrase
		 * token's 16 bits are split between back-pointer and length:
		 * the split depends on how far into the sb we already are.
		 */
		lg = 0;
		for (i = *dest_ofs - do_sb_start - 1; i >= 0x10; i >>= 1)
			lg++;

		/* Get the phrase token into i. */
		pt = le16_to_cpup((le16*)cb);

		/*
		 * Calculate starting position of the byte sequence in the
		 * destination using the back-pointer bits of pt, and
		 * validate it is inside the current sb.
		 */
		dp_back_addr = dp_addr - (pt >> (12 - lg)) - 1;
		if (dp_back_addr < dp_sb_start)
			goto return_overflow;

		/* Now calculate the length of the byte sequence. */
		length = (pt & (0xfff >> lg)) + 3;

		/* Verify destination is in range. */
		*dest_ofs += length;
		if (*dest_ofs > do_sb_end)
			goto return_overflow;

		/* The number of non-overlapping bytes. */
		max_non_overlap = dp_addr - dp_back_addr;

		if (length <= max_non_overlap) {
			/* The byte sequence doesn't overlap, just copy it. */
			memcpy(dp_addr, dp_back_addr, length);

			/* Advance destination pointer. */
			dp_addr += length;
		} else {
			/*
			 * The byte sequence does overlap: copy the
			 * non-overlapping part, then copy the overlapped part
			 * byte by byte (memcpy on overlapping regions is
			 * undefined, and the run must self-extend anyway).
			 */
			memcpy(dp_addr, dp_back_addr, max_non_overlap);
			dp_addr += max_non_overlap;
			dp_back_addr += max_non_overlap;
			length -= max_non_overlap;
			while (length--)
				*dp_addr++ = *dp_back_addr++;
		}

		/* Advance source position past the phrase token. */
		cb += 2;
	}

	/* No tokens left in the current tag.  Continue with the next tag. */
	goto do_next_tag;

return_overflow:
	ntfs_error(NULL, "Failed. Returning -EOVERFLOW.");
	goto return_error;
}
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
/**
 * ntfs_read_compressed_block - read a compressed page of an ntfs attribute
 * @page:	locked page in the compression block(s) we need to read
 *
 * Read and, if necessary, decompress all compression blocks (cbs)
 * overlapping @page of the compressed, unnamed $DATA attribute.  Because a
 * cb decompresses as a unit, we also try to fill every other page-cache
 * page covered by the same cbs (grabbed with grab_cache_page_nowait(); pages
 * we fail to grab, or that are already uptodate/dirty, are simply skipped).
 *
 * Three cases per cb: sparse (all-hole: zero-fill the pages), stored
 * uncompressed (straight copy out of the compression buffer), or compressed
 * (handed to ntfs_decompress()).
 *
 * Locking: the per-inode runlist rwsem is taken for reading while mapping
 * vcns; ntfs_cb_lock is held (no sleeping) from assembling the cb in the
 * global buffer until the data has been copied/decompressed out of it.
 *
 * Return 0 on success (page @page was filled and marked uptodate), negative
 * error code otherwise.
 */
int ntfs_read_compressed_block(struct page *page)
{
	loff_t i_size;
	s64 initialized_size;
	struct address_space *mapping = page->mapping;
	ntfs_inode *ni = NTFS_I(mapping->host);
	ntfs_volume *vol = ni->vol;
	struct super_block *sb = vol->sb;
	runlist_element *rl;
	unsigned long flags, block_size = sb->s_blocksize;
	unsigned char block_size_bits = sb->s_blocksize_bits;
	u8 *cb, *cb_pos, *cb_end;
	struct buffer_head **bhs;
	unsigned long offset, index = page->index;
	u32 cb_size = ni->itype.compressed.block_size;
	u64 cb_size_mask = cb_size - 1UL;
	VCN vcn;
	LCN lcn;
	/* The first wanted vcn (rounded down to a cb boundary). */
	VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >>
			vol->cluster_size_bits;
	/*
	 * The first vcn after the last wanted vcn (rounded up to a cb
	 * boundary).
	 */
	VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1)
			& ~cb_size_mask) >> vol->cluster_size_bits;
	/* Number of compression blocks (cbs) in the wanted vcn range. */
	unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
			>> ni->itype.compressed.block_size_bits;
	/*
	 * Number of pages required to store the uncompressed data from all
	 * compression blocks overlapping @page.  Due to the alignment
	 * guarantees of start_vcn and end_vcn, no rounding up is needed.
	 */
	unsigned int nr_pages = (end_vcn - start_vcn) <<
			vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
	unsigned int xpage, max_page, cur_page, cur_ofs, i;
	unsigned int cb_clusters, cb_max_ofs;
	int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
	struct page **pages;
	unsigned char xpage_done = 0;

	ntfs_debug("Entering, page->index = 0x%lx, cb_size = 0x%x, nr_pages = "
			"%i.", index, cb_size, nr_pages);
	/*
	 * Bad things happen if we get here for anything that is not an
	 * unnamed $DATA attribute.
	 */
	BUG_ON(ni->type != AT_DATA);
	BUG_ON(ni->name_len);

	pages = kmalloc(nr_pages * sizeof(struct page *), GFP_NOFS);

	/* Allocate memory to store the buffer heads we need. */
	bhs_size = cb_size / block_size * sizeof(struct buffer_head *);
	bhs = kmalloc(bhs_size, GFP_NOFS);

	/* Either allocation failing aborts the read. */
	if (unlikely(!pages || !bhs)) {
		kfree(bhs);
		kfree(pages);
		unlock_page(page);
		ntfs_error(vol->sb, "Failed to allocate internal buffers.");
		return -ENOMEM;
	}

	/*
	 * We have already been given one page, this is the one we must do.
	 * @offset is the file-page index of the first page of the cb range.
	 */
	offset = start_vcn << vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
	xpage = index - offset;
	pages[xpage] = page;
	/*
	 * Snapshot i_size and initialized_size under the size lock; used to
	 * bound the pages we fill and to zero past-EOF regions later.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	i_size = i_size_read(VFS_I(ni));
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
			offset;
	/* Is the requested page fully outside i_size? (truncate race) */
	if (xpage >= max_page) {
		kfree(bhs);
		kfree(pages);
		zero_user(page, 0, PAGE_CACHE_SIZE);
		ntfs_debug("Compressed read outside i_size - truncated?");
		SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}
	if (nr_pages < max_page)
		max_page = nr_pages;
	/* Grab and kmap all other pages overlapping the cb range. */
	for (i = 0; i < max_page; i++, offset++) {
		if (i != xpage)
			pages[i] = grab_cache_page_nowait(mapping, offset);
		page = pages[i];
		if (page) {
			/*
			 * We only (re)read the page if it isn't already read
			 * in and/or dirty or we would be losing data or at
			 * least wasting our time.
			 */
			if (!PageDirty(page) && (!PageUptodate(page) ||
					PageError(page))) {
				ClearPageError(page);
				kmap(page);
				continue;
			}
			/* Page not needed: drop it from our working set. */
			unlock_page(page);
			page_cache_release(page);
			pages[i] = NULL;
		}
	}

	/*
	 * We have all the destination pages we need to fill.  Now process the
	 * compression blocks one at a time.
	 */
	cur_page = 0;
	cur_ofs = 0;
	cb_clusters = ni->itype.compressed.block_clusters;
do_next_cb:
	nr_cbs--;
	nr_bhs = 0;

	/* Read all cb buffer heads one cluster at a time. */
	rl = NULL;
	for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
			vcn++) {
		bool is_retry = false;

		if (!rl) {
lock_retry_remap:
			down_read(&ni->runlist.lock);
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target vcn. */
			while (rl->length && rl[1].vcn <= vcn)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
		} else
			lcn = LCN_RL_NOT_MAPPED;
		ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
				(unsigned long long)vcn,
				(unsigned long long)lcn);
		if (lcn < 0) {
			/*
			 * When we reach the first sparse cluster we have
			 * finished with the cb (it is sparse from here on).
			 */
			if (lcn == LCN_HOLE)
				break;
			if (is_retry || lcn != LCN_RL_NOT_MAPPED)
				goto rl_err;
			is_retry = true;
			/*
			 * Attempt to map the runlist, dropping the runlist
			 * lock for the duration (ntfs_map_runlist() takes it
			 * itself); retry the lookup once on success.
			 */
			up_read(&ni->runlist.lock);
			if (!ntfs_map_runlist(ni, vcn))
				goto lock_retry_remap;
			goto map_rl_err;
		}
		block = lcn << vol->cluster_size_bits >> block_size_bits;
		/* Read the lcn from device in chunks of block_size bytes. */
		max_block = block + (vol->cluster_size >> block_size_bits);
		do {
			ntfs_debug("block = 0x%x.", block);
			if (unlikely(!(bhs[nr_bhs] = sb_getblk(sb, block))))
				goto getblk_err;
			nr_bhs++;
		} while (++block < max_block);
	}

	/* Release the lock if we took it. */
	if (rl)
		up_read(&ni->runlist.lock);

	/* Setup and initiate io on all buffer heads. */
	for (i = 0; i < nr_bhs; i++) {
		struct buffer_head *tbh = bhs[i];

		if (!trylock_buffer(tbh))
			continue;
		if (unlikely(buffer_uptodate(tbh))) {
			unlock_buffer(tbh);
			continue;
		}
		get_bh(tbh);
		tbh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, tbh);
	}

	/* Wait for io completion on all buffer heads. */
	for (i = 0; i < nr_bhs; i++) {
		struct buffer_head *tbh = bhs[i];

		if (buffer_uptodate(tbh))
			continue;
		wait_on_buffer(tbh);
		/*
		 * We need an optimization barrier here, otherwise we start
		 * hitting the below fixup code when accessing a loopback
		 * mounted ntfs partition.  This indicates either there is a
		 * race condition in the loop driver or, more likely, gcc
		 * over-optimizes the code without the barrier and it doesn't
		 * do the Right Thing(TM).
		 */
		barrier();
		if (unlikely(!buffer_uptodate(tbh))) {
			ntfs_warning(vol->sb, "Buffer is unlocked but not "
					"uptodate! Unplugging the disk queue "
					"and rescheduling.");
			get_bh(tbh);
			blk_run_address_space(mapping);
			schedule();
			put_bh(tbh);
			if (unlikely(!buffer_uptodate(tbh)))
				goto read_err;
			ntfs_warning(vol->sb, "Buffer is now uptodate. Good.");
		}
	}

	/*
	 * Get the compression buffer.  We must not sleep any more until we
	 * are finished with it (the lock is dropped per-branch below or by
	 * ntfs_decompress()).
	 */
	spin_lock(&ntfs_cb_lock);
	cb = ntfs_compression_buffer;

	BUG_ON(!cb);

	cb_pos = cb;
	cb_end = cb + cb_size;

	/* Copy the buffer heads into the contiguous buffer. */
	for (i = 0; i < nr_bhs; i++) {
		memcpy(cb_pos, bhs[i]->b_data, block_size);
		cb_pos += block_size;
	}

	/*
	 * Zero-terminate the partial cb (the terminator is read as an sb
	 * header by ntfs_decompress()) -- just a precaution.
	 */
	if (cb_pos + 2 <= cb + cb_size)
		*(u16*)cb_pos = 0;

	/* Reset cb_pos back to the beginning. */
	cb_pos = cb;

	/* We now have both source (if present) and destination. */
	ntfs_debug("Successfully read the compression block.");

	/* The last page and maximum offset within it for the current cb. */
	cb_max_page = (cur_page << PAGE_CACHE_SHIFT) + cur_ofs + cb_size;
	cb_max_ofs = cb_max_page & ~PAGE_CACHE_MASK;
	cb_max_page >>= PAGE_CACHE_SHIFT;

	/* Catch end of file inside a compression block. */
	if (cb_max_page > max_page)
		cb_max_page = max_page;

	if (vcn == start_vcn - cb_clusters) {
		/*
		 * Sparse cb (the per-cluster loop above broke on the first
		 * vcn, i.e. at LCN_HOLE): zero the overlapping page range.
		 */
		ntfs_debug("Found sparse compression block.");
		/* We can sleep from now on, so we drop lock. */
		spin_unlock(&ntfs_cb_lock);
		if (cb_max_ofs)
			cb_max_page--;
		for (; cur_page < cb_max_page; cur_page++) {
			page = pages[cur_page];
			if (page) {
				/*
				 * Zero the whole page, or just the tail from
				 * cur_ofs if we are mid-page.
				 */
				if (likely(!cur_ofs))
					clear_page(page_address(page));
				else
					memset(page_address(page) + cur_ofs, 0,
							PAGE_CACHE_SIZE -
							cur_ofs);
				flush_dcache_page(page);
				kunmap(page);
				SetPageUptodate(page);
				unlock_page(page);
				if (cur_page == xpage)
					xpage_done = 1;
				else
					page_cache_release(page);
				pages[cur_page] = NULL;
			}
			cb_pos += PAGE_CACHE_SIZE - cur_ofs;
			cur_ofs = 0;
			if (cb_pos >= cb_end)
				break;
		}
		/* If we have a partial final page, deal with it now. */
		if (cb_max_ofs && cb_pos < cb_end) {
			page = pages[cur_page];
			if (page)
				memset(page_address(page) + cur_ofs, 0,
						cb_max_ofs - cur_ofs);
			/*
			 * The page is left open: the next cb continues in it,
			 * so it is not finalized here (only cur_ofs moves).
			 */
			cur_ofs = cb_max_ofs;
		}
	} else if (vcn == start_vcn) {
		/*
		 * Cb stored uncompressed (no sparse cluster was seen before
		 * the full cb was read, and the first vcn equals the start of
		 * the next cb range).  Two stages because we cannot sleep
		 * under ntfs_cb_lock: copy under the lock, finalize after.
		 */
		unsigned int cur2_page = cur_page;
		unsigned int cur_ofs2 = cur_ofs;
		u8 *cb_pos2 = cb_pos;

		ntfs_debug("Found uncompressed compression block.");
		/*
		 * Stage one: copy the cb data out of the compression buffer
		 * into the destination pages (still holding ntfs_cb_lock).
		 * If cb_max_ofs is non-zero the last page is partial, so
		 * exclude it from the full-page loop and handle it after.
		 */
		if (cb_max_ofs)
			cb_max_page--;
		/* Copy the full page ranges. */
		for (; cur_page < cb_max_page; cur_page++) {
			page = pages[cur_page];
			if (page)
				memcpy(page_address(page) + cur_ofs, cb_pos,
						PAGE_CACHE_SIZE - cur_ofs);
			cb_pos += PAGE_CACHE_SIZE - cur_ofs;
			cur_ofs = 0;
			if (cb_pos >= cb_end)
				break;
		}
		/* Copy the partial final page, if any. */
		if (cb_max_ofs && cb_pos < cb_end) {
			page = pages[cur_page];
			if (page)
				memcpy(page_address(page) + cur_ofs, cb_pos,
						cb_max_ofs - cur_ofs);
			cb_pos += cb_max_ofs - cur_ofs;
			cur_ofs = cb_max_ofs;
		}
		/* We can sleep from now on, so drop lock. */
		spin_unlock(&ntfs_cb_lock);
		/* Stage two: finalize the completed (full) pages. */
		for (; cur2_page < cb_max_page; cur2_page++) {
			page = pages[cur2_page];
			if (page) {
				/*
				 * If the page lies past the initialized size,
				 * zero the region outside it.
				 */
				handle_bounds_compressed_page(page, i_size,
						initialized_size);
				flush_dcache_page(page);
				kunmap(page);
				SetPageUptodate(page);
				unlock_page(page);
				if (cur2_page == xpage)
					xpage_done = 1;
				else
					page_cache_release(page);
				pages[cur2_page] = NULL;
			}
			cb_pos2 += PAGE_CACHE_SIZE - cur_ofs2;
			cur_ofs2 = 0;
			if (cb_pos2 >= cb_end)
				break;
		}
	} else {
		/* Compressed cb: decompress it into the destination pages. */
		unsigned int prev_cur_page = cur_page;

		ntfs_debug("Found compressed compression block.");
		/* ntfs_decompress() drops ntfs_cb_lock itself. */
		err = ntfs_decompress(pages, &cur_page, &cur_ofs,
				cb_max_page, cb_max_ofs, xpage, &xpage_done,
				cb_pos, cb_size - (cb_pos - cb), i_size,
				initialized_size);
		/*
		 * On failure, skip this cb but continue with the next one:
		 * clean up the pages the failed decompression touched (they
		 * are left locked/kmapped and not uptodate by
		 * ntfs_decompress() on the error path).
		 */
		if (err) {
			ntfs_error(vol->sb, "ntfs_decompress() failed in inode "
					"0x%lx with error code %i. Skipping "
					"this compression block.",
					ni->mft_no, -err);
			/* Release the unfinished pages. */
			for (; prev_cur_page < cur_page; prev_cur_page++) {
				page = pages[prev_cur_page];
				if (page) {
					flush_dcache_page(page);
					kunmap(page);
					unlock_page(page);
					if (prev_cur_page != xpage)
						page_cache_release(page);
					pages[prev_cur_page] = NULL;
				}
			}
		}
	}

	/* Release the buffer heads. */
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);

	/* Do we have more work to do? */
	if (nr_cbs)
		goto do_next_cb;

	/* We no longer need the list of buffer heads. */
	kfree(bhs);

	/*
	 * Clean up any pages that were not finalized by the loops above --
	 * this should not normally happen, hence the error message.
	 */
	for (cur_page = 0; cur_page < max_page; cur_page++) {
		page = pages[cur_page];
		if (page) {
			ntfs_error(vol->sb, "Still have pages left! "
					"Terminating them with extreme "
					"prejudice. Inode 0x%lx, page index "
					"0x%lx.", ni->mft_no, page->index);
			flush_dcache_page(page);
			kunmap(page);
			unlock_page(page);
			if (cur_page != xpage)
				page_cache_release(page);
			pages[cur_page] = NULL;
		}
	}

	/* We no longer need the list of pages. */
	kfree(pages);

	/* If we have completed the requested page, we return success. */
	if (likely(xpage_done))
		return 0;

	ntfs_debug("Failed. Returning error code %s.", err == -EOVERFLOW ?
			"EOVERFLOW" : (!err ? "EIO" : "unknown error"));
	return err < 0 ? err : -EIO;

read_err:
	ntfs_error(vol->sb, "IO error while reading compressed data.");
	/* Release the buffer heads. */
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	goto err_out;

map_rl_err:
	ntfs_error(vol->sb, "ntfs_map_runlist() failed. Cannot read "
			"compression block.");
	goto err_out;

rl_err:
	up_read(&ni->runlist.lock);
	ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn() failed. Cannot read "
			"compression block.");
	goto err_out;

getblk_err:
	up_read(&ni->runlist.lock);
	ntfs_error(vol->sb, "getblk() failed. Cannot read compression block.");

err_out:
	/* Release everything still held: bhs list, remaining pages. */
	kfree(bhs);
	for (i = cur_page; i < max_page; i++) {
		page = pages[i];
		if (page) {
			flush_dcache_page(page);
			kunmap(page);
			unlock_page(page);
			if (i != xpage)
				page_cache_release(page);
		}
	}
	kfree(pages);
	return -EIO;
}
971