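/*
 * compress.c - NTFS kernel compressed attributes handling.
 * Part of the Linux-NTFS project.
 */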
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "attrib.h"
#include "inode.h"
#include "debug.h"
#include "ntfs.h"

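/*
 * ntfs_compression_constants - enum of constants used in the compression code
 */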
typedef enum {
	/* Token types and access mask. */
	NTFS_SYMBOL_TOKEN	= 0,	/* A literal byte follows. */
	NTFS_PHRASE_TOKEN	= 1,	/* A (length, offset) back-reference follows. */
	NTFS_TOKEN_MASK		= 1,	/* The token type is a single bit. */

	/* Compression sub-block constants. */
	NTFS_SB_SIZE_MASK	= 0x0fff,
	NTFS_SB_SIZE		= 0x1000,
	NTFS_SB_IS_COMPRESSED	= 0x8000,

	/* By definition, the maximum compression block size is 64kiB. */
	NTFS_MAX_CB_SIZE	= 64 * 1024,
} ntfs_compression_constants;

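/*
 * ntfs_compression_buffer - one buffer for the decompression engine
 */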
static u8 *ntfs_compression_buffer;

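/*
 * ntfs_cb_lock - spinlock which protects ntfs_compression_buffer
 */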
static DEFINE_SPINLOCK(ntfs_cb_lock);

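/**
 * allocate_compression_buffers - allocate the decompression buffer
 *
 * Allocates the single, global buffer into which compression blocks are read
 * before being decompressed.
 *
 * Return 0 on success or -ENOMEM if the allocation failed.
 */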
int allocate_compression_buffers(void)
{
	BUG_ON(ntfs_compression_buffer);

	ntfs_compression_buffer = vmalloc(NTFS_MAX_CB_SIZE);
	if (!ntfs_compression_buffer)
		return -ENOMEM;
	return 0;
}

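/**
 * free_compression_buffers - free the decompression buffer
 *
 * Frees the buffer allocated by allocate_compression_buffers().
 */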
void free_compression_buffers(void)
{
	BUG_ON(!ntfs_compression_buffer);
	vfree(ntfs_compression_buffer);
	ntfs_compression_buffer = NULL;
}

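/**
 * zero_partial_compressed_page - zero out of bounds compressed page region
 */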
static void zero_partial_compressed_page(struct page *page,
		const s64 initialized_size)
{
	u8 *kp = page_address(page);
	unsigned int kp_ofs;

	ntfs_debug("Zeroing page region outside initialized size.");
	if (((s64)page->index << PAGE_SHIFT) >= initialized_size) {
		clear_page(kp);
		return;
	}
	kp_ofs = initialized_size & ~PAGE_MASK;
	memset(kp + kp_ofs, 0, PAGE_SIZE - kp_ofs);
	return;
}

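/**
 * handle_bounds_compressed_page - test for and handle out of bounds page
 */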
static inline void handle_bounds_compressed_page(struct page *page,
		const loff_t i_size, const s64 initialized_size)
{
	if ((page->index >= (initialized_size >> PAGE_SHIFT)) &&
			(initialized_size < i_size))
		zero_partial_compressed_page(page, initialized_size);
	return;
}

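/**
 * ntfs_decompress - decompress a compression block into the destination pages
 * @dest_pages:		destination array of pages
 * @completed_pages:	scratch space for indices of completed pages
 * @dest_index:		current index into @dest_pages (IN/OUT)
 * @dest_ofs:		current offset within @dest_pages[@dest_index] (IN/OUT)
 * @dest_max_index:	maximum index into @dest_pages (IN)
 * @dest_max_ofs:	maximum offset within @dest_pages[@dest_max_index] (IN)
 * @xpage:		the target page (-1 if none) (IN)
 * @xpage_done:		set to 1 if xpage was completed successfully (IN/OUT)
 * @cb_start:		compression block to decompress (IN)
 * @cb_size:		size of compression block @cb_start in bytes (IN)
 * @i_size:		file size when we started the read (IN)
 * @initialized_size:	initialized file size when we started the read (IN)
 *
 * The caller must hold the ntfs_cb_lock spinlock; ntfs_decompress() drops it
 * once it has finished with the shared compression buffer, so this function
 * must not sleep while it is still accessing @cb_start.
 *
 * Return 0 on success or -EOVERFLOW if the compressed stream is corrupt.
 * Whether the target page @dest_pages[@xpage] was completed is reported via
 * @xpage_done.
 */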
static int ntfs_decompress(struct page *dest_pages[], int completed_pages[],
		int *dest_index, int *dest_ofs, const int dest_max_index,
		const int dest_max_ofs, const int xpage, char *xpage_done,
		u8 *const cb_start, const u32 cb_size, const loff_t i_size,
		const s64 initialized_size)
{
	/*
	 * Pointers into the compressed data, i.e. the compression block (cb),
	 * and the current position in the compression block being processed.
	 */
	u8 *cb_end = cb_start + cb_size;	/* End of cb. */
	u8 *cb = cb_start;			/* Current position in cb. */
	u8 *cb_sb_start = cb;			/* Beginning of the current sb in the cb. */
	u8 *cb_sb_end;				/* End of current sb / beginning of next sb. */

	/* Variables for uncompressed data / destination. */
	struct page *dp;	/* Current destination page being worked on. */
	u8 *dp_addr;		/* Current pointer into dp. */
	u8 *dp_sb_start;	/* Start of current sub-block in dp. */
	u8 *dp_sb_end;		/* End of current sub-block in dp. */

	u16 do_sb_start;	/* @dest_ofs when starting this sub-block. */
	u16 do_sb_end;		/* @dest_ofs of end of this sub-block. */

	/* Variables for tag and token parsing. */
	u8 tag;			/* Current tag. */
	int token;		/* Loop counter for the eight tokens in tag. */
	int nr_completed_pages = 0;

	/* Default error code. */
	int err = -EOVERFLOW;

	ntfs_debug("Entering, cb_size = 0x%x.", cb_size);
do_next_sb:
	ntfs_debug("Beginning sub-block at offset = 0x%zx in the cb.",
			cb - cb_start);
	/*
	 * Have we reached the end of the compression block or the end of the
	 * decompressed data? The latter can happen for example if the current
	 * position in the compression block is one byte before its end, so
	 * the first two checks alone do not detect it.
	 */
	if (cb == cb_end || !le16_to_cpup((le16*)cb) ||
			(*dest_index == dest_max_index &&
			*dest_ofs == dest_max_ofs)) {
		int i;

		ntfs_debug("Completed. Returning success (0).");
		err = 0;
return_error:
		/* We can sleep from now on, so we drop the lock. */
		spin_unlock(&ntfs_cb_lock);
		/* Second stage: finalize completed pages. */
		if (nr_completed_pages > 0) {
			for (i = 0; i < nr_completed_pages; i++) {
				int di = completed_pages[i];

				dp = dest_pages[di];
				/*
				 * If we are outside the initialized size, zero
				 * the out of bounds page range.
				 */
				handle_bounds_compressed_page(dp, i_size,
						initialized_size);
				flush_dcache_page(dp);
				kunmap(dp);
				SetPageUptodate(dp);
				unlock_page(dp);
				if (di == xpage)
					*xpage_done = 1;
				else
					put_page(dp);
				dest_pages[di] = NULL;
			}
		}
		return err;
	}

	/* Setup offsets for the current sub-block destination. */
	do_sb_start = *dest_ofs;
	do_sb_end = do_sb_start + NTFS_SB_SIZE;

	/* Check that we are still within allowed boundaries. */
	if (*dest_index == dest_max_index && do_sb_end > dest_max_ofs)
		goto return_overflow;

	/* Does the minimum size of a compressed sb overflow valid range? */
	if (cb + 6 > cb_end)
		goto return_overflow;

	/* Setup the current sub-block source pointers and validate range. */
	cb_sb_start = cb;
	cb_sb_end = cb_sb_start + (le16_to_cpup((le16*)cb) & NTFS_SB_SIZE_MASK)
			+ 3;
	if (cb_sb_end > cb_end)
		goto return_overflow;

	/* Get the current destination page. */
	dp = dest_pages[*dest_index];
	if (!dp) {
		/* No page present. Skip decompression of this sub-block. */
		cb = cb_sb_end;

		/* Advance destination position to next sub-block. */
		*dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_MASK;
		if (!*dest_ofs && (++*dest_index > dest_max_index))
			goto return_overflow;
		goto do_next_sb;
	}

	/* We have a valid destination page. Setup the destination pointers. */
	dp_addr = (u8*)page_address(dp) + do_sb_start;

	/* Now, we are ready to process the current sub-block (sb). */
	if (!(le16_to_cpup((le16*)cb) & NTFS_SB_IS_COMPRESSED)) {
		ntfs_debug("Found uncompressed sub-block.");
		/* This sb is not compressed, just copy it into destination. */

		/* Advance source position to first data byte. */
		cb += 2;

		/* An uncompressed sb must be full size. */
		if (cb_sb_end - cb != NTFS_SB_SIZE)
			goto return_overflow;

		/* Copy the block and advance the source position. */
		memcpy(dp_addr, cb, NTFS_SB_SIZE);
		cb += NTFS_SB_SIZE;

		/* Advance destination position to next sub-block. */
		*dest_ofs += NTFS_SB_SIZE;
		if (!(*dest_ofs &= ~PAGE_MASK)) {
finalize_page:
			/*
			 * First stage: add current page index to the array of
			 * completed pages and move to the next destination
			 * page.
			 */
			completed_pages[nr_completed_pages++] = *dest_index;
			if (++*dest_index > dest_max_index)
				goto return_overflow;
		}
		goto do_next_sb;
	}
	ntfs_debug("Found compressed sub-block.");
	/* This sb is compressed, decompress it into destination. */

	/* Setup destination pointers. */
	dp_sb_start = dp_addr;
	dp_sb_end = dp_sb_start + NTFS_SB_SIZE;

	/* Forward to the first tag in the sub-block. */
	cb += 2;
do_next_tag:
	if (cb == cb_sb_end) {
		/* Check if the decompressed sub-block was not full-length. */
		if (dp_addr < dp_sb_end) {
			int nr_bytes = do_sb_end - *dest_ofs;

			ntfs_debug("Filling incomplete sub-block with "
					"zeroes.");
			/* Zero remainder and update destination position. */
			memset(dp_addr, 0, nr_bytes);
			*dest_ofs += nr_bytes;
		}
		/* We have finished the current sub-block. */
		if (!(*dest_ofs &= ~PAGE_MASK))
			goto finalize_page;
		goto do_next_sb;
	}

	/* Check we are still in range. */
	if (cb > cb_sb_end || dp_addr > dp_sb_end)
		goto return_overflow;

	/* Get the next tag and advance to first token. */
	tag = *cb++;

	/* Parse the eight tokens described by the tag. */
	for (token = 0; token < 8; token++, tag >>= 1) {
		u16 lg, pt, length, max_non_overlap;
		register u16 i;
		u8 *dp_back_addr;

		/* Check if we are done / still in range. */
		if (cb >= cb_sb_end || dp_addr > dp_sb_end)
			break;

		/* Determine token type and parse appropriately. */
		if ((tag & NTFS_TOKEN_MASK) == NTFS_SYMBOL_TOKEN) {
			/*
			 * We have a symbol token, copy the symbol across, and
			 * advance the source and destination positions.
			 */
			*dp_addr++ = *cb++;
			++*dest_ofs;

			/* Continue with the next token. */
			continue;
		}

		/*
		 * We have a phrase token. Make sure it is not the first tag
		 * in the sb as this is illegal and would confuse the code
		 * below.
		 */
		if (dp_addr == dp_sb_start)
			goto return_overflow;

		/*
		 * Determine the number of bytes to go back (p) and the number
		 * of bytes to copy (l). We use an optimized algorithm in
		 * which we first calculate log2(current destination position
		 * in sb), which allows determination of l and p in O(1)
		 * rather than O(n).
		 */
		lg = 0;
		for (i = *dest_ofs - do_sb_start - 1; i >= 0x10; i >>= 1)
			lg++;

		/* Get the phrase token. */
		pt = le16_to_cpup((le16*)cb);

		/*
		 * Calculate the starting position of the byte sequence in the
		 * destination using the fact that p = (pt >> (12 - lg)) + 1
		 * and make sure we don't go too far back.
		 */
		dp_back_addr = dp_addr - (pt >> (12 - lg)) - 1;
		if (dp_back_addr < dp_sb_start)
			goto return_overflow;

		/* Now calculate the length of the byte sequence. */
		length = (pt & (0xfff >> lg)) + 3;

		/* Advance destination position and verify it is in range. */
		*dest_ofs += length;
		if (*dest_ofs > do_sb_end)
			goto return_overflow;

		/* The number of non-overlapping bytes. */
		max_non_overlap = dp_addr - dp_back_addr;

		if (length <= max_non_overlap) {
			/* The byte sequence doesn't overlap, just copy it. */
			memcpy(dp_addr, dp_back_addr, length);

			/* Advance destination pointer. */
			dp_addr += length;
		} else {
			/*
			 * The byte sequence does overlap, copy the
			 * non-overlapping part and then do a slow byte by
			 * byte copy for the overlapping part. Also, advance
			 * the destination pointer.
			 */
			memcpy(dp_addr, dp_back_addr, max_non_overlap);
			dp_addr += max_non_overlap;
			dp_back_addr += max_non_overlap;
			length -= max_non_overlap;
			while (length--)
				*dp_addr++ = *dp_back_addr++;
		}

		/* Advance source position and continue with the next token. */
		cb += 2;
	}

	/* No tokens left in the current tag. Continue with the next tag. */
	goto do_next_tag;

return_overflow:
	ntfs_error(NULL, "Failed. Returning -EOVERFLOW.");
	goto return_error;
}

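/**
 * ntfs_read_compressed_block - read a compressed block into the page cache
 * @page:	locked page in the compression block(s) we need to read
 *
 * When we are called the page has already been verified to be locked and the
 * attribute is known to be non-resident, not encrypted, but compressed.
 *
 * 1. Determine which compression block(s) @page is in.
 * 2. Get hold of all pages corresponding to this/these compression block(s).
 * 3. Read the (first) compression block.
 * 4. Decompress it into the corresponding pages.
 * 5. Throw uptodate pages away, set not uptodate pages uptodate.
 * 6. If there is a next compression block, go to 3.
 */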
int ntfs_read_compressed_block(struct page *page)
{
	loff_t i_size;
	s64 initialized_size;
	struct address_space *mapping = page->mapping;
	ntfs_inode *ni = NTFS_I(mapping->host);
	ntfs_volume *vol = ni->vol;
	struct super_block *sb = vol->sb;
	runlist_element *rl;
	unsigned long flags, block_size = sb->s_blocksize;
	unsigned char block_size_bits = sb->s_blocksize_bits;
	u8 *cb, *cb_pos, *cb_end;
	struct buffer_head **bhs;
	unsigned long offset, index = page->index;
	u32 cb_size = ni->itype.compressed.block_size;
	u64 cb_size_mask = cb_size - 1UL;
	VCN vcn;
	LCN lcn;
	/* The first wanted vcn (minimum alignment is PAGE_SIZE). */
	VCN start_vcn = (((s64)index << PAGE_SHIFT) & ~cb_size_mask) >>
			vol->cluster_size_bits;
	/*
	 * The first vcn after the last wanted vcn (minimum alignment is again
	 * PAGE_SIZE).
	 */
	VCN end_vcn = ((((s64)(index + 1UL) << PAGE_SHIFT) + cb_size - 1)
			& ~cb_size_mask) >> vol->cluster_size_bits;
	/* Number of compression blocks (cbs) in the wanted vcn range. */
	unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
			>> ni->itype.compressed.block_size_bits;
	/*
	 * Number of pages required to store the uncompressed data from all
	 * compression blocks (cbs) overlapping @page. Due to the alignment
	 * guarantees of start_vcn and end_vcn, no need to round up here.
	 */
	unsigned int nr_pages = (end_vcn - start_vcn) <<
			vol->cluster_size_bits >> PAGE_SHIFT;
	unsigned int xpage, max_page, cur_page, cur_ofs, i;
	unsigned int cb_clusters, cb_max_ofs;
	int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
	struct page **pages;
	int *completed_pages;
	unsigned char xpage_done = 0;

	ntfs_debug("Entering, page->index = 0x%lx, cb_size = 0x%x, nr_pages = "
			"%i.", index, cb_size, nr_pages);
	/*
	 * Bad things happen if we get here for anything that is not an
	 * unnamed $DATA attribute.
	 */
	BUG_ON(ni->type != AT_DATA);
	BUG_ON(ni->name_len);

	pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
	completed_pages = kmalloc_array(nr_pages + 1, sizeof(int), GFP_NOFS);

	/* Allocate memory to store the buffer heads we need. */
	bhs_size = cb_size / block_size * sizeof(struct buffer_head *);
	bhs = kmalloc(bhs_size, GFP_NOFS);

	if (unlikely(!pages || !bhs || !completed_pages)) {
		kfree(bhs);
		kfree(pages);
		kfree(completed_pages);
		unlock_page(page);
		ntfs_error(vol->sb, "Failed to allocate internal buffers.");
		return -ENOMEM;
	}

	/*
	 * We have already been given one page, this is the one we must do.
	 * Once again, the alignment guarantees keep it simple.
	 */
	offset = start_vcn << vol->cluster_size_bits >> PAGE_SHIFT;
	xpage = index - offset;
	pages[xpage] = page;
	/*
	 * Read i_size and the initialized size under the size lock so the two
	 * values are consistent with each other.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	i_size = i_size_read(VFS_I(ni));
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	max_page = ((i_size + PAGE_SIZE - 1) >> PAGE_SHIFT) -
			offset;
	/* Is the page fully outside i_size? (truncate in progress) */
	if (xpage >= max_page) {
		kfree(bhs);
		kfree(pages);
		kfree(completed_pages);
		zero_user(page, 0, PAGE_SIZE);
		ntfs_debug("Compressed read outside i_size - truncated?");
		SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}
	if (nr_pages < max_page)
		max_page = nr_pages;
	for (i = 0; i < max_page; i++, offset++) {
		if (i != xpage)
			pages[i] = grab_cache_page_nowait(mapping, offset);
		page = pages[i];
		if (page) {
			/*
			 * We only (re)read the page if it isn't already read
			 * in and/or dirty or we would be losing data or at
			 * least wasting our time.
			 */
			if (!PageDirty(page) && (!PageUptodate(page) ||
					PageError(page))) {
				ClearPageError(page);
				kmap(page);
				continue;
			}
			unlock_page(page);
			put_page(page);
			pages[i] = NULL;
		}
	}

	/*
	 * All the destination pages are ready; now read the first compression
	 * block.
	 */
	cur_page = 0;
	cur_ofs = 0;
	cb_clusters = ni->itype.compressed.block_clusters;
do_next_cb:
	nr_cbs--;
	nr_bhs = 0;

	/* Read all cb buffer heads one cluster at a time. */
	rl = NULL;
	for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
			vcn++) {
		bool is_retry = false;

		if (!rl) {
lock_retry_remap:
			down_read(&ni->runlist.lock);
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target vcn. */
			while (rl->length && rl[1].vcn <= vcn)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
		} else
			lcn = LCN_RL_NOT_MAPPED;
		ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
				(unsigned long long)vcn,
				(unsigned long long)lcn);
		if (lcn < 0) {
			/*
			 * When we reach the first sparse cluster we have
			 * finished with the cb.
			 */
			if (lcn == LCN_HOLE)
				break;
			if (is_retry || lcn != LCN_RL_NOT_MAPPED)
				goto rl_err;
			is_retry = true;
			/*
			 * Attempt to map the runlist, dropping the lock for
			 * the duration.
			 */
			up_read(&ni->runlist.lock);
			if (!ntfs_map_runlist(ni, vcn))
				goto lock_retry_remap;
			goto map_rl_err;
		}
		block = lcn << vol->cluster_size_bits >> block_size_bits;
		/* Read the lcn from device in chunks of block_size bytes. */
		max_block = block + (vol->cluster_size >> block_size_bits);
		do {
			ntfs_debug("block = 0x%x.", block);
			if (unlikely(!(bhs[nr_bhs] = sb_getblk(sb, block))))
				goto getblk_err;
			nr_bhs++;
		} while (++block < max_block);
	}

	/* Release the lock if we took it. */
	if (rl)
		up_read(&ni->runlist.lock);

	/* Setup and initiate io on all buffer heads. */
	for (i = 0; i < nr_bhs; i++) {
		struct buffer_head *tbh = bhs[i];

		if (!trylock_buffer(tbh))
			continue;
		if (unlikely(buffer_uptodate(tbh))) {
			unlock_buffer(tbh);
			continue;
		}
		get_bh(tbh);
		tbh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, 0, tbh);
	}

	/* Wait for io completion on all buffer heads. */
	for (i = 0; i < nr_bhs; i++) {
		struct buffer_head *tbh = bhs[i];

		if (buffer_uptodate(tbh))
			continue;
		wait_on_buffer(tbh);
		/*
		 * An optimization barrier is needed here, otherwise the below
		 * fixup code can be reached spuriously, e.g. when accessing a
		 * loopback mounted ntfs partition, presumably because the
		 * compiler reorders or over-optimizes the uptodate re-check
		 * without it.
		 */
		barrier();
		if (unlikely(!buffer_uptodate(tbh))) {
			ntfs_warning(vol->sb, "Buffer is unlocked but not "
					"uptodate! Unplugging the disk queue "
					"and rescheduling.");
			get_bh(tbh);
			io_schedule();
			put_bh(tbh);
			if (unlikely(!buffer_uptodate(tbh)))
				goto read_err;
			ntfs_warning(vol->sb, "Buffer is now uptodate. Good.");
		}
	}

	/*
	 * Get the compression buffer. We must not sleep any more
	 * until we are finished with it.
	 */
	spin_lock(&ntfs_cb_lock);
	cb = ntfs_compression_buffer;

	BUG_ON(!cb);

	cb_pos = cb;
	cb_end = cb + cb_size;

	/* Copy the buffer heads into the contiguous buffer. */
	for (i = 0; i < nr_bhs; i++) {
		memcpy(cb_pos, bhs[i]->b_data, block_size);
		cb_pos += block_size;
	}

	/* Just a precaution. */
	if (cb_pos + 2 <= cb + cb_size)
		*(u16*)cb_pos = 0;

	/* Reset cb_pos back to the beginning. */
	cb_pos = cb;

	/* We now have both source (if present) and destination. */
	ntfs_debug("Successfully read the compression block.");

	/* The last page and maximum offset within it for the current cb. */
	cb_max_page = (cur_page << PAGE_SHIFT) + cur_ofs + cb_size;
	cb_max_ofs = cb_max_page & ~PAGE_MASK;
	cb_max_page >>= PAGE_SHIFT;

	/* Catch end of file inside a compression block. */
	if (cb_max_page > max_page)
		cb_max_page = max_page;

	if (vcn == start_vcn - cb_clusters) {
		/* Sparse cb, zero out page range overlapping the cb. */
		ntfs_debug("Found sparse compression block.");
		/* We can sleep from now on, so we drop the lock. */
		spin_unlock(&ntfs_cb_lock);
		if (cb_max_ofs)
			cb_max_page--;
		for (; cur_page < cb_max_page; cur_page++) {
			page = pages[cur_page];
			if (page) {
				if (likely(!cur_ofs))
					clear_page(page_address(page));
				else
					memset(page_address(page) + cur_ofs, 0,
							PAGE_SIZE -
							cur_ofs);
				flush_dcache_page(page);
				kunmap(page);
				SetPageUptodate(page);
				unlock_page(page);
				if (cur_page == xpage)
					xpage_done = 1;
				else
					put_page(page);
				pages[cur_page] = NULL;
			}
			cb_pos += PAGE_SIZE - cur_ofs;
			cur_ofs = 0;
			if (cb_pos >= cb_end)
				break;
		}
		/* If we have a partial final page, deal with it now. */
		if (cb_max_ofs && cb_pos < cb_end) {
			page = pages[cur_page];
			if (page)
				memset(page_address(page) + cur_ofs, 0,
						cb_max_ofs - cur_ofs);
			/*
			 * No need to advance cb_pos here: it is reset when
			 * the next compression block is read in.
			 */
			cur_ofs = cb_max_ofs;
		}
	} else if (vcn == start_vcn) {
		/* We can't sleep so we need two stages. */
		unsigned int cur2_page = cur_page;
		unsigned int cur_ofs2 = cur_ofs;
		u8 *cb_pos2 = cb_pos;

		ntfs_debug("Found uncompressed compression block.");
		/* Uncompressed cb, copy it to the destination pages. */
		if (cb_max_ofs)
			cb_max_page--;
		/* First stage: copy data into destination pages. */
		for (; cur_page < cb_max_page; cur_page++) {
			page = pages[cur_page];
			if (page)
				memcpy(page_address(page) + cur_ofs, cb_pos,
						PAGE_SIZE - cur_ofs);
			cb_pos += PAGE_SIZE - cur_ofs;
			cur_ofs = 0;
			if (cb_pos >= cb_end)
				break;
		}
		/* If we have a partial final page, deal with it now. */
		if (cb_max_ofs && cb_pos < cb_end) {
			page = pages[cur_page];
			if (page)
				memcpy(page_address(page) + cur_ofs, cb_pos,
						cb_max_ofs - cur_ofs);
			cb_pos += cb_max_ofs - cur_ofs;
			cur_ofs = cb_max_ofs;
		}
		/* We can sleep from now on, so we drop the lock. */
		spin_unlock(&ntfs_cb_lock);
		/* Second stage: finalize pages. */
		for (; cur2_page < cb_max_page; cur2_page++) {
			page = pages[cur2_page];
			if (page) {
				/*
				 * If we are outside the initialized size, zero
				 * the out of bounds page range.
				 */
				handle_bounds_compressed_page(page, i_size,
						initialized_size);
				flush_dcache_page(page);
				kunmap(page);
				SetPageUptodate(page);
				unlock_page(page);
				if (cur2_page == xpage)
					xpage_done = 1;
				else
					put_page(page);
				pages[cur2_page] = NULL;
			}
			cb_pos2 += PAGE_SIZE - cur_ofs2;
			cur_ofs2 = 0;
			if (cb_pos2 >= cb_end)
				break;
		}
	} else {
		/* Compressed cb, decompress it into the destination pages. */
		unsigned int prev_cur_page = cur_page;

		ntfs_debug("Found compressed compression block.");
		err = ntfs_decompress(pages, completed_pages, &cur_page,
				&cur_ofs, cb_max_page, cb_max_ofs, xpage,
				&xpage_done, cb_pos, cb_size - (cb_pos - cb),
				i_size, initialized_size);
		/*
		 * We can sleep from now on, the lock was already dropped by
		 * ntfs_decompress().
		 */
		if (err) {
			ntfs_error(vol->sb, "ntfs_decompress() failed in inode "
					"0x%lx with error code %i. Skipping "
					"this compression block.",
					ni->mft_no, -err);
			/* Release the unfinished pages. */
			for (; prev_cur_page < cur_page; prev_cur_page++) {
				page = pages[prev_cur_page];
				if (page) {
					flush_dcache_page(page);
					kunmap(page);
					unlock_page(page);
					if (prev_cur_page != xpage)
						put_page(page);
					pages[prev_cur_page] = NULL;
				}
			}
		}
	}

	/* Release the buffer heads. */
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);

	/* Do we have more work to do? */
	if (nr_cbs)
		goto do_next_cb;

	/* We no longer need the list of buffer heads. */
	kfree(bhs);

	/* Clean up if we have any pages left. Should never happen. */
	for (cur_page = 0; cur_page < max_page; cur_page++) {
		page = pages[cur_page];
		if (page) {
			ntfs_error(vol->sb, "Still have pages left! "
					"Terminating them with extreme "
					"prejudice. Inode 0x%lx, page index "
					"0x%lx.", ni->mft_no, page->index);
			flush_dcache_page(page);
			kunmap(page);
			unlock_page(page);
			if (cur_page != xpage)
				put_page(page);
			pages[cur_page] = NULL;
		}
	}

	/* We no longer need the list of pages. */
	kfree(pages);
	kfree(completed_pages);

	/* If we have completed the requested page, we return success. */
	if (likely(xpage_done))
		return 0;

	ntfs_debug("Failed. Returning error code %s.", err == -EOVERFLOW ?
			"EOVERFLOW" : (!err ? "EIO" : "unknown error"));
	return err < 0 ? err : -EIO;

read_err:
	ntfs_error(vol->sb, "IO error while reading compressed data.");
	/* Release the buffer heads. */
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	goto err_out;

map_rl_err:
	ntfs_error(vol->sb, "ntfs_map_runlist() failed. Cannot read "
			"compression block.");
	goto err_out;

rl_err:
	up_read(&ni->runlist.lock);
	ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn() failed. Cannot read "
			"compression block.");
	goto err_out;

getblk_err:
	up_read(&ni->runlist.lock);
	ntfs_error(vol->sb, "getblk() failed. Cannot read compression block.");

err_out:
	kfree(bhs);
	for (i = cur_page; i < max_page; i++) {
		page = pages[i];
		if (page) {
			flush_dcache_page(page);
			kunmap(page);
			unlock_page(page);
			if (i != xpage)
				put_page(page);
		}
	}
	kfree(pages);
	kfree(completed_pages);
	return -EIO;
}