1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/fs.h>
25#include <linux/buffer_head.h>
26#include <linux/blkdev.h>
27#include <linux/vmalloc.h>
28#include <linux/slab.h>
29
30#include "attrib.h"
31#include "inode.h"
32#include "debug.h"
33#include "ntfs.h"
34
35
36
37
/*
 * ntfs_compression_constants - enum of constants used in the compression code
 */
typedef enum {
	/* Token types and access mask. */
	NTFS_SYMBOL_TOKEN	=	0,
	NTFS_PHRASE_TOKEN	=	1,
	NTFS_TOKEN_MASK		=	1,

	/* Compression sub-block constants. */
	NTFS_SB_SIZE_MASK	=	0x0fff,
	NTFS_SB_SIZE		=	0x1000,
	NTFS_SB_IS_COMPRESSED	=	0x8000,

	/*
	 * Size of the global decompression buffer allocated by
	 * allocate_compression_buffers().
	 * NOTE(review): presumably 16 clusters * 4kiB maximum cluster size
	 * for a compressed attribute — confirm against the mount-time
	 * constraints enforced elsewhere in the driver.
	 */
	NTFS_MAX_CB_SIZE	=	64 * 1024,
} ntfs_compression_constants;
57
58
59
60
/*
 * The global buffer into which a compression block is copied before being
 * decompressed.  Allocated by allocate_compression_buffers() and freed by
 * free_compression_buffers().
 */
static u8 *ntfs_compression_buffer;

/*
 * Spinlock serializing all access to ntfs_compression_buffer: only one
 * compression block can be decompressed at a time.
 */
static DEFINE_SPINLOCK(ntfs_cb_lock);
67
68
69
70
71
72
73
74
75int allocate_compression_buffers(void)
76{
77 BUG_ON(ntfs_compression_buffer);
78
79 ntfs_compression_buffer = vmalloc(NTFS_MAX_CB_SIZE);
80 if (!ntfs_compression_buffer)
81 return -ENOMEM;
82 return 0;
83}
84
85
86
87
88
89
90void free_compression_buffers(void)
91{
92 BUG_ON(!ntfs_compression_buffer);
93 vfree(ntfs_compression_buffer);
94 ntfs_compression_buffer = NULL;
95}
96
97
98
99
/**
 * zero_partial_compressed_page - zero the page region beyond initialized size
 * @page:		the mapped page to partially (or fully) zero
 * @initialized_size:	initialized size of the attribute being read
 *
 * Zero the region of the already-mapped @page that lies beyond
 * @initialized_size, so stale decompression-buffer contents are never
 * exposed to user space.
 */
static void zero_partial_compressed_page(struct page *page,
		const s64 initialized_size)
{
	u8 *kp = page_address(page);
	unsigned int kp_ofs;

	ntfs_debug("Zeroing page region outside initialized size.");
	if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) {
		/* The whole page starts beyond initialized_size: clear it. */
		clear_page(kp);
		return;
	}
	/* Zero from where initialized_size falls within the page to its end. */
	kp_ofs = initialized_size & ~PAGE_CACHE_MASK;
	memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs);
	return;
}
119
120
121
122
123static inline void handle_bounds_compressed_page(struct page *page,
124 const loff_t i_size, const s64 initialized_size)
125{
126 if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) &&
127 (initialized_size < i_size))
128 zero_partial_compressed_page(page, initialized_size);
129 return;
130}
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
/**
 * ntfs_decompress - decompress a compression block into the destination pages
 * @dest_pages:		destination array of pages
 * @dest_index:		current index into @dest_pages (IN/OUT)
 * @dest_ofs:		current offset within @dest_pages[@dest_index] (IN/OUT)
 * @dest_max_index:	maximum index into @dest_pages (IN)
 * @dest_max_ofs:	maximum offset within @dest_pages[@dest_max_index] (IN)
 * @xpage:		the target page; if completed, *@xpage_done is set (IN)
 * @xpage_done:		set to 1 if @xpage was completed successfully (IN/OUT)
 * @cb_start:		compression block to decompress (IN)
 * @cb_size:		size of compression block @cb_start in bytes (IN)
 * @i_size:		file size when the read began (IN)
 * @initialized_size:	initialized file size when the read began (IN)
 *
 * Called with ntfs_cb_lock held (the caller acquired it when it copied the
 * compression block into the global buffer); this function drops the lock
 * before touching the page cache in the completion path, because the pages
 * it writes are locked and kmapped by the caller and hence stable.
 *
 * Returns 0 on success or -EOVERFLOW if the compression block is corrupt
 * (i.e. a token or sub-block runs past its declared bounds).
 *
 * NOTE(review): completed_pages[] is a variable-length array sized from
 * the destination window; mainline later replaced such VLAs with
 * kmalloc_array() — worth confirming the window stays small here.
 */
static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
		int *dest_ofs, const int dest_max_index, const int dest_max_ofs,
		const int xpage, char *xpage_done, u8 *const cb_start,
		const u32 cb_size, const loff_t i_size,
		const s64 initialized_size)
{
	/*
	 * Pointers into the compressed data, i.e. the compression block (cb),
	 * and the current position within it.
	 */
	u8 *cb_end = cb_start + cb_size;	/* End of cb. */
	u8 *cb = cb_start;			/* Current position in cb. */
	u8 *cb_sb_start = cb;	/* Beginning of the current sb in the cb. */
	u8 *cb_sb_end;		/* End of current sb / beginning of next sb. */

	/* Variables for uncompressed data / destination. */
	struct page *dp;	/* Current destination page being worked on. */
	u8 *dp_addr;		/* Current pointer into dp. */
	u8 *dp_sb_start;	/* Start of current sub-block in dp. */
	u8 *dp_sb_end;		/* End of current sb in dp (dp_sb_start +
				   NTFS_SB_SIZE). */
	u16 do_sb_start;	/* @dest_ofs when starting this sub-block. */
	u16 do_sb_end;		/* @dest_ofs of the end of this sb (do_sb_start
				   + NTFS_SB_SIZE). */

	/* Variables for tag and token parsing. */
	u8 tag;			/* Current tag. */
	int token;		/* Loop counter for the eight tokens in tag. */

	/* Need this because we can't sleep, so need two stages. */
	int completed_pages[dest_max_index - *dest_index + 1];
	int nr_completed_pages = 0;

	/* Default error code. */
	int err = -EOVERFLOW;

	ntfs_debug("Entering, cb_size = 0x%x.", cb_size);
do_next_sb:
	ntfs_debug("Beginning sub-block at offset = 0x%zx in the cb.",
			cb - cb_start);
	/*
	 * Have we reached the end of the compression block or the end of the
	 * decompressed data?  The latter can happen for example if the current
	 * position in the compression block is one byte before its end so the
	 * first two checks do not detect it.
	 */
	if (cb == cb_end || !le16_to_cpup((le16*)cb) ||
			(*dest_index == dest_max_index &&
			*dest_ofs == dest_max_ofs)) {
		int i;

		ntfs_debug("Completed. Returning success (0).");
		err = 0;
return_error:
		/* We can sleep from now on, so we drop the spinlock. */
		spin_unlock(&ntfs_cb_lock);
		/* Second stage: finalize the completed pages. */
		if (nr_completed_pages > 0) {
			for (i = 0; i < nr_completed_pages; i++) {
				int di = completed_pages[i];

				dp = dest_pages[di];
				/*
				 * If we are outside the initialized size, zero
				 * the out-of-bounds page range.
				 */
				handle_bounds_compressed_page(dp, i_size,
						initialized_size);
				flush_dcache_page(dp);
				kunmap(dp);
				SetPageUptodate(dp);
				unlock_page(dp);
				if (di == xpage)
					*xpage_done = 1;
				else
					page_cache_release(dp);
				dest_pages[di] = NULL;
			}
		}
		return err;
	}

	/* Setup offsets for the current sub-block destination. */
	do_sb_start = *dest_ofs;
	do_sb_end = do_sb_start + NTFS_SB_SIZE;

	/* Check that we are still within allowed boundaries. */
	if (*dest_index == dest_max_index && do_sb_end > dest_max_ofs)
		goto return_overflow;

	/* Does the minimum size of a compressed sb overflow valid range? */
	if (cb + 6 > cb_end)
		goto return_overflow;

	/* Setup the current sub-block source pointers and validate range. */
	cb_sb_start = cb;
	cb_sb_end = cb_sb_start + (le16_to_cpup((le16*)cb) & NTFS_SB_SIZE_MASK)
			+ 3;
	if (cb_sb_end > cb_end)
		goto return_overflow;

	/* Get the current destination page. */
	dp = dest_pages[*dest_index];
	if (!dp) {
		/* No page present. Skip decompression of this sub-block. */
		cb = cb_sb_end;

		/* Advance destination position to next sub-block. */
		*dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_CACHE_MASK;
		if (!*dest_ofs && (++*dest_index > dest_max_index))
			goto return_overflow;
		goto do_next_sb;
	}

	/* We have a valid destination page. Setup the destination pointer. */
	dp_addr = (u8*)page_address(dp) + do_sb_start;

	/* Now, we are ready to process the current sub-block (sb). */
	if (!(le16_to_cpup((le16*)cb) & NTFS_SB_IS_COMPRESSED)) {
		ntfs_debug("Found uncompressed sub-block.");
		/* This sb is not compressed, just copy it into destination. */

		/* Advance source position to first data byte. */
		cb += 2;

		/* An uncompressed sb must be full size. */
		if (cb_sb_end - cb != NTFS_SB_SIZE)
			goto return_overflow;

		/* Copy the block and advance the source position. */
		memcpy(dp_addr, cb, NTFS_SB_SIZE);
		cb += NTFS_SB_SIZE;

		/* Advance destination position to next sub-block. */
		*dest_ofs += NTFS_SB_SIZE;
		if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) {
finalize_page:
			/*
			 * First stage: add current page index to array of
			 * completed pages and allocate the next page.
			 */
			completed_pages[nr_completed_pages++] = *dest_index;
			if (++*dest_index > dest_max_index)
				goto return_overflow;
		}
		goto do_next_sb;
	}
	ntfs_debug("Found compressed sub-block.");
	/* This sb is compressed, decompress it into destination. */

	/* Setup destination pointers. */
	dp_sb_start = dp_addr;
	dp_sb_end = dp_sb_start + NTFS_SB_SIZE;

	/* Forward to the first tag in the sub-block. */
	cb += 2;
do_next_tag:
	if (cb == cb_sb_end) {
		/* Check if the decompressed sub-block was not full-length. */
		if (dp_addr < dp_sb_end) {
			int nr_bytes = do_sb_end - *dest_ofs;

			ntfs_debug("Filling incomplete sub-block with "
					"zeroes.");
			/* Zero remainder and update destination position. */
			memset(dp_addr, 0, nr_bytes);
			*dest_ofs += nr_bytes;
		}
		/* We have finished the current sub-block. */
		if (!(*dest_ofs &= ~PAGE_CACHE_MASK))
			goto finalize_page;
		goto do_next_sb;
	}

	/* We have a tag pointer. Verify the tag pointer is still in range. */
	if (cb > cb_sb_end || dp_addr > dp_sb_end)
		goto return_overflow;

	/* Get the current tag and advance to first token. */
	tag = *cb++;

	/* Parse the eight tokens described by the tag. */
	for (token = 0; token < 8; token++, tag >>= 1) {
		u16 lg, pt, length, max_non_overlap;
		register u16 i;
		u8 *dp_back_addr;

		/* Check if we are done / still in range. */
		if (cb >= cb_sb_end || dp_addr > dp_sb_end)
			break;

		/* Determine token type and parse appropriately.*/
		if ((tag & NTFS_TOKEN_MASK) == NTFS_SYMBOL_TOKEN) {
			/*
			 * We have a symbol token, copy the symbol across, and
			 * advance the source and destination positions.
			 */
			*dp_addr++ = *cb++;
			++*dest_ofs;

			/* Continue with the next token. */
			continue;
		}

		/*
		 * We have a phrase token. Make sure it is not the first tag in
		 * the sb as this is illegal and would confuse the code below.
		 */
		if (dp_addr == dp_sb_start)
			goto return_overflow;

		/*
		 * Determine the number of bytes to go back (p) and the number
		 * of bytes to copy (l). We use an optimized algorithm in which
		 * we first calculate log2(current destination position in sb),
		 * which allows determination of l and p in O(1) rather than
		 * O(n). We just need an arch-optimized log2() function now.
		 */
		lg = 0;
		for (i = *dest_ofs - do_sb_start - 1; i >= 0x10; i >>= 1)
			lg++;

		/* Get the phrase token into i. */
		pt = le16_to_cpup((le16*)cb);

		/*
		 * Calculate starting position of the byte sequence in
		 * the destination using the fact that p = (pt >> (12 - lg)) + 1
		 * and make sure we don't go too far back.
		 */
		dp_back_addr = dp_addr - (pt >> (12 - lg)) - 1;
		if (dp_back_addr < dp_sb_start)
			goto return_overflow;

		/* Now calculate the length of the byte sequence. */
		length = (pt & (0xfff >> lg)) + 3;

		/* Advance destination position and verify it is in range. */
		*dest_ofs += length;
		if (*dest_ofs > do_sb_end)
			goto return_overflow;

		/* The number of non-overlapping bytes. */
		max_non_overlap = dp_addr - dp_back_addr;

		if (length <= max_non_overlap) {
			/* The byte sequence doesn't overlap, just copy it. */
			memcpy(dp_addr, dp_back_addr, length);

			/* Advance destination pointer. */
			dp_addr += length;
		} else {
			/*
			 * The byte sequence does overlap, copy non-overlapping
			 * part and then do a slow byte by byte copy for the
			 * overlapping part. Also, advance the destination
			 * pointer.
			 */
			memcpy(dp_addr, dp_back_addr, max_non_overlap);
			dp_addr += max_non_overlap;
			dp_back_addr += max_non_overlap;
			length -= max_non_overlap;
			while (length--)
				*dp_addr++ = *dp_back_addr++;
		}

		/* Advance source position and continue with the next token. */
		cb += 2;
	}

	/* No tokens left in the current tag. Continue with the next tag. */
	goto do_next_tag;

return_overflow:
	ntfs_error(NULL, "Failed. Returning -EOVERFLOW.");
	goto return_error;
}
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
/**
 * ntfs_read_compressed_block - read a compressed page of a compressed attribute
 * @page:	locked page in the compression block(s) we need to read
 *
 * Read @page of the compressed, non-resident $DATA attribute it belongs to.
 * All compression blocks (cbs) overlapping @page are read in and
 * decompressed, and every page in the page cache that overlaps those cbs
 * (and is not dirty/uptodate) is populated as a side effect, marked
 * uptodate and released.  @page itself is kept (the page cache holds its
 * reference) and unlocked on return.
 *
 * Returns 0 if @page was completed successfully, otherwise a negative
 * error code (-ENOMEM, -EOVERFLOW from decompression, or -EIO).
 */
int ntfs_read_compressed_block(struct page *page)
{
	loff_t i_size;
	s64 initialized_size;
	struct address_space *mapping = page->mapping;
	ntfs_inode *ni = NTFS_I(mapping->host);
	ntfs_volume *vol = ni->vol;
	struct super_block *sb = vol->sb;
	runlist_element *rl;
	unsigned long flags, block_size = sb->s_blocksize;
	unsigned char block_size_bits = sb->s_blocksize_bits;
	u8 *cb, *cb_pos, *cb_end;
	struct buffer_head **bhs;
	unsigned long offset, index = page->index;
	u32 cb_size = ni->itype.compressed.block_size;
	u64 cb_size_mask = cb_size - 1UL;
	VCN vcn;
	LCN lcn;
	/* The first wanted vcn (rounded down to a cb boundary). */
	VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >>
			vol->cluster_size_bits;
	/*
	 * The first vcn after the last wanted vcn (rounded up to the next cb
	 * boundary).
	 */
	VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1)
			& ~cb_size_mask) >> vol->cluster_size_bits;
	/* Number of compression blocks (cbs) in the wanted vcn range. */
	unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
			>> ni->itype.compressed.block_size_bits;
	/*
	 * Number of pages required to store the uncompressed data from all
	 * compression blocks (cbs) overlapping @page.
	 */
	unsigned int nr_pages = (end_vcn - start_vcn) <<
			vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
	unsigned int xpage, max_page, cur_page, cur_ofs, i;
	unsigned int cb_clusters, cb_max_ofs;
	int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
	struct page **pages;
	unsigned char xpage_done = 0;

	ntfs_debug("Entering, page->index = 0x%lx, cb_size = 0x%x, nr_pages = "
			"%i.", index, cb_size, nr_pages);
	/*
	 * Bad things happen if we get here for anything that is not an
	 * unnamed $DATA attribute.
	 */
	BUG_ON(ni->type != AT_DATA);
	BUG_ON(ni->name_len);

	pages = kmalloc(nr_pages * sizeof(struct page *), GFP_NOFS);

	/* Allocate memory to store the buffer heads we need. */
	bhs_size = cb_size / block_size * sizeof(struct buffer_head *);
	bhs = kmalloc(bhs_size, GFP_NOFS);

	if (unlikely(!pages || !bhs)) {
		kfree(bhs);
		kfree(pages);
		unlock_page(page);
		ntfs_error(vol->sb, "Failed to allocate internal buffers.");
		return -ENOMEM;
	}

	/*
	 * We have already been given one page, this is the one we must do.
	 * Once again, the alignment guarantees keep it simple.
	 */
	offset = start_vcn << vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
	xpage = index - offset;
	pages[xpage] = page;
	/*
	 * The remaining pages need to be allocated and inserted into the page
	 * cache, alignment guarantees keep all the below much simpler.  (-8
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	i_size = i_size_read(VFS_I(ni));
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
			offset;
	/* Is the page fully outside i_size? (truncate in progress) */
	if (xpage >= max_page) {
		kfree(bhs);
		kfree(pages);
		zero_user(page, 0, PAGE_CACHE_SIZE);
		ntfs_debug("Compressed read outside i_size - truncated?");
		SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}
	if (nr_pages < max_page)
		max_page = nr_pages;
	for (i = 0; i < max_page; i++, offset++) {
		if (i != xpage)
			pages[i] = grab_cache_page_nowait(mapping, offset);
		page = pages[i];
		if (page) {
			/*
			 * We only (re)read the page if it isn't already read
			 * in and/or dirty or we would be losing data or at
			 * least wasting our time.
			 */
			if (!PageDirty(page) && (!PageUptodate(page) ||
					PageError(page))) {
				ClearPageError(page);
				kmap(page);
				continue;
			}
			unlock_page(page);
			page_cache_release(page);
			pages[i] = NULL;
		}
	}

	/*
	 * We have the runlist, and all the destination pages we need to fill.
	 * Now read the first compression block.
	 */
	cur_page = 0;
	cur_ofs = 0;
	cb_clusters = ni->itype.compressed.block_clusters;
do_next_cb:
	nr_cbs--;
	nr_bhs = 0;

	/* Read all cb buffer heads one cluster at a time. */
	rl = NULL;
	for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
			vcn++) {
		bool is_retry = false;

		if (!rl) {
lock_retry_remap:
			down_read(&ni->runlist.lock);
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target vcn. */
			while (rl->length && rl[1].vcn <= vcn)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
		} else
			lcn = LCN_RL_NOT_MAPPED;
		ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
				(unsigned long long)vcn,
				(unsigned long long)lcn);
		if (lcn < 0) {
			/*
			 * When we reach the first sparse cluster we have
			 * finished with the cb.
			 */
			if (lcn == LCN_HOLE)
				break;
			if (is_retry || lcn != LCN_RL_NOT_MAPPED)
				goto rl_err;
			is_retry = true;
			/*
			 * Attempt to map runlist, dropping lock for the
			 * duration.
			 */
			up_read(&ni->runlist.lock);
			if (!ntfs_map_runlist(ni, vcn))
				goto lock_retry_remap;
			goto map_rl_err;
		}
		block = lcn << vol->cluster_size_bits >> block_size_bits;
		/* Read the lcn from device in chunks of block_size bytes. */
		max_block = block + (vol->cluster_size >> block_size_bits);
		do {
			ntfs_debug("block = 0x%x.", block);
			if (unlikely(!(bhs[nr_bhs] = sb_getblk(sb, block))))
				goto getblk_err;
			nr_bhs++;
		} while (++block < max_block);
	}

	/* Release the lock if we took it. */
	if (rl)
		up_read(&ni->runlist.lock);

	/* Setup and initiate io on all buffer heads. */
	for (i = 0; i < nr_bhs; i++) {
		struct buffer_head *tbh = bhs[i];

		if (!trylock_buffer(tbh))
			continue;
		if (unlikely(buffer_uptodate(tbh))) {
			unlock_buffer(tbh);
			continue;
		}
		get_bh(tbh);
		tbh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, tbh);
	}

	/* Wait for io completion on all buffer heads. */
	for (i = 0; i < nr_bhs; i++) {
		struct buffer_head *tbh = bhs[i];

		if (buffer_uptodate(tbh))
			continue;
		wait_on_buffer(tbh);
		/*
		 * We need an optimization barrier here, otherwise we start
		 * hitting the below fixup code when accessing a loopback
		 * mounted ntfs partition.  This indicates either there is a
		 * race condition in the loop driver or, more likely, gcc
		 * overoptimises the code without the barrier and it doesn't
		 * do the Right Thing(TM).
		 */
		barrier();
		if (unlikely(!buffer_uptodate(tbh))) {
			ntfs_warning(vol->sb, "Buffer is unlocked but not "
					"uptodate! Unplugging the disk queue "
					"and rescheduling.");
			get_bh(tbh);
			io_schedule();
			put_bh(tbh);
			if (unlikely(!buffer_uptodate(tbh)))
				goto read_err;
			ntfs_warning(vol->sb, "Buffer is now uptodate. Good.");
		}
	}

	/*
	 * Get the compression buffer.  We must not sleep any more until we
	 * are finished with it.
	 */
	spin_lock(&ntfs_cb_lock);
	cb = ntfs_compression_buffer;

	BUG_ON(!cb);

	cb_pos = cb;
	cb_end = cb + cb_size;

	/* Copy the buffer heads into the contiguous buffer. */
	for (i = 0; i < nr_bhs; i++) {
		memcpy(cb_pos, bhs[i]->b_data, block_size);
		cb_pos += block_size;
	}

	/* Just a precaution: mark the end of data in the compression block. */
	if (cb_pos + 2 <= cb + cb_size)
		*(u16*)cb_pos = 0;

	/* Reset cb_pos back to the beginning. */
	cb_pos = cb;

	/* We now have both source (if present) and destination. */
	ntfs_debug("Successfully read the compression block.");

	/* The last page and maximum offset within it for the current cb. */
	cb_max_page = (cur_page << PAGE_CACHE_SHIFT) + cur_ofs + cb_size;
	cb_max_ofs = cb_max_page & ~PAGE_CACHE_MASK;
	cb_max_page >>= PAGE_CACHE_SHIFT;

	/* Catch end of file inside a compression block. */
	if (cb_max_page > max_page)
		cb_max_page = max_page;

	if (vcn == start_vcn - cb_clusters) {
		/* Sparse cb, zero out page range overlapping the cb. */
		ntfs_debug("Found sparse compression block.");
		/* We can sleep from now on, so we drop lock. */
		spin_unlock(&ntfs_cb_lock);
		if (cb_max_ofs)
			cb_max_page--;
		for (; cur_page < cb_max_page; cur_page++) {
			page = pages[cur_page];
			if (page) {
				/*
				 * FIXME: Using clear_page() will become wrong
				 * when we get PAGE_CACHE_SIZE != PAGE_SIZE but
				 * for now there is no problem.
				 */
				if (likely(!cur_ofs))
					clear_page(page_address(page));
				else
					memset(page_address(page) + cur_ofs, 0,
							PAGE_CACHE_SIZE -
							cur_ofs);
				flush_dcache_page(page);
				kunmap(page);
				SetPageUptodate(page);
				unlock_page(page);
				if (cur_page == xpage)
					xpage_done = 1;
				else
					page_cache_release(page);
				pages[cur_page] = NULL;
			}
			cb_pos += PAGE_CACHE_SIZE - cur_ofs;
			cur_ofs = 0;
			if (cb_pos >= cb_end)
				break;
		}
		/* If we have a partial final page, deal with it now. */
		if (cb_max_ofs && cb_pos < cb_end) {
			page = pages[cur_page];
			if (page)
				memset(page_address(page) + cur_ofs, 0,
						cb_max_ofs - cur_ofs);
			/*
			 * No need to update cb_pos at this stage:
			 *	cb_pos += cb_max_ofs - cur_ofs;
			 */
			cur_ofs = cb_max_ofs;
		}
	} else if (vcn == start_vcn) {
		/* We can't sleep so we need two stages. */
		unsigned int cur2_page = cur_page;
		unsigned int cur_ofs2 = cur_ofs;
		u8 *cb_pos2 = cb_pos;

		ntfs_debug("Found uncompressed compression block.");
		/* Uncompressed cb, copy it to the destination pages. */
		/*
		 * TODO: As a big optimization, we could detect this case
		 * before we read all the pages and use block_read_full_page()
		 * on all full pages instead (we still have to treat partial
		 * pages especially but at least we are getting rid of the
		 * synchronous io for the majority of pages.
		 * Or if we choose not to do the read-ahead/-behind stuff, we
		 * could just return block_read_full_page(pages[xpage]) as long
		 * as PAGE_CACHE_SIZE <= cb_size.
		 */
		if (cb_max_ofs)
			cb_max_page--;
		/* First stage: copy data into destination pages. */
		for (; cur_page < cb_max_page; cur_page++) {
			page = pages[cur_page];
			if (page)
				memcpy(page_address(page) + cur_ofs, cb_pos,
						PAGE_CACHE_SIZE - cur_ofs);
			cb_pos += PAGE_CACHE_SIZE - cur_ofs;
			cur_ofs = 0;
			if (cb_pos >= cb_end)
				break;
		}
		/* If we have a partial final page, deal with it now. */
		if (cb_max_ofs && cb_pos < cb_end) {
			page = pages[cur_page];
			if (page)
				memcpy(page_address(page) + cur_ofs, cb_pos,
						cb_max_ofs - cur_ofs);
			cb_pos += cb_max_ofs - cur_ofs;
			cur_ofs = cb_max_ofs;
		}
		/* We can sleep from now on, so drop lock. */
		spin_unlock(&ntfs_cb_lock);
		/* Second stage: finalize pages. */
		for (; cur2_page < cb_max_page; cur2_page++) {
			page = pages[cur2_page];
			if (page) {
				/*
				 * If we are outside the initialized size, zero
				 * the out-of-bounds page range.
				 */
				handle_bounds_compressed_page(page, i_size,
						initialized_size);
				flush_dcache_page(page);
				kunmap(page);
				SetPageUptodate(page);
				unlock_page(page);
				if (cur2_page == xpage)
					xpage_done = 1;
				else
					page_cache_release(page);
				pages[cur2_page] = NULL;
			}
			cb_pos2 += PAGE_CACHE_SIZE - cur_ofs2;
			cur_ofs2 = 0;
			if (cb_pos2 >= cb_end)
				break;
		}
	} else {
		/* Compressed cb, decompress it into the destination page(s). */
		unsigned int prev_cur_page = cur_page;

		ntfs_debug("Found compressed compression block.");
		err = ntfs_decompress(pages, &cur_page, &cur_ofs,
				cb_max_page, cb_max_ofs, xpage, &xpage_done,
				cb_pos, cb_size - (cb_pos - cb), i_size,
				initialized_size);
		/*
		 * We can sleep from now on, lock already dropped by
		 * ntfs_decompress().
		 */
		if (err) {
			ntfs_error(vol->sb, "ntfs_decompress() failed in inode "
					"0x%lx with error code %i. Skipping "
					"this compression block.",
					ni->mft_no, -err);
			/* Release the unfinished pages. */
			for (; prev_cur_page < cur_page; prev_cur_page++) {
				page = pages[prev_cur_page];
				if (page) {
					flush_dcache_page(page);
					kunmap(page);
					unlock_page(page);
					if (prev_cur_page != xpage)
						page_cache_release(page);
					pages[prev_cur_page] = NULL;
				}
			}
		}
	}

	/* Release the buffer heads. */
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);

	/* Do we have more work to do? */
	if (nr_cbs)
		goto do_next_cb;

	/* We no longer need the list of buffer heads. */
	kfree(bhs);

	/* Clean up if we have any pages left. Should never happen. */
	for (cur_page = 0; cur_page < max_page; cur_page++) {
		page = pages[cur_page];
		if (page) {
			ntfs_error(vol->sb, "Still have pages left! "
					"Terminating them with extreme "
					"prejudice. Inode 0x%lx, page index "
					"0x%lx.", ni->mft_no, page->index);
			flush_dcache_page(page);
			kunmap(page);
			unlock_page(page);
			if (cur_page != xpage)
				page_cache_release(page);
			pages[cur_page] = NULL;
		}
	}

	/* We no longer need the list of pages. */
	kfree(pages);

	/* If we have completed the requested page, we return success. */
	if (likely(xpage_done))
		return 0;

	ntfs_debug("Failed. Returning error code %s.", err == -EOVERFLOW ?
			"EOVERFLOW" : (!err ? "EIO" : "unknown error"));
	return err < 0 ? err : -EIO;

read_err:
	ntfs_error(vol->sb, "IO error while reading compressed data.");
	/* Release the buffer heads. */
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	goto err_out;

map_rl_err:
	ntfs_error(vol->sb, "ntfs_map_runlist() failed. Cannot read "
			"compression block.");
	goto err_out;

rl_err:
	up_read(&ni->runlist.lock);
	ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn() failed. Cannot read "
			"compression block.");
	goto err_out;

getblk_err:
	up_read(&ni->runlist.lock);
	ntfs_error(vol->sb, "getblk() failed. Cannot read compression block.");

err_out:
	kfree(bhs);
	for (i = cur_page; i < max_page; i++) {
		page = pages[i];
		if (page) {
			flush_dcache_page(page);
			kunmap(page);
			unlock_page(page);
			if (i != xpage)
				page_cache_release(page);
		}
	}
	kfree(pages);
	return -EIO;
}
970