/*
 * compress.c - NTFS kernel compressed attribute handling.
 */
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "attrib.h"
#include "inode.h"
#include "debug.h"
#include "ntfs.h"
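/**
 * ntfs_compression_constants - enum of constants used in the compression code
 */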
typedef enum {
	/* Token types and access mask. */
	NTFS_SYMBOL_TOKEN	= 0,
	NTFS_PHRASE_TOKEN	= 1,
	NTFS_TOKEN_MASK		= 1,

	/* Compression sub-block constants. */
	NTFS_SB_SIZE_MASK	= 0x0fff,
	NTFS_SB_SIZE		= 0x1000,
	NTFS_SB_IS_COMPRESSED	= 0x8000,

	/*
	 * The maximum compression block size is by definition 16 * the
	 * cluster size, with the maximum supported cluster size being 4kiB.
	 * Thus the maximum compression buffer size is 64kiB, so we use this
	 * when initializing the compression buffer.
	 */
	NTFS_MAX_CB_SIZE	= 64 * 1024,
} ntfs_compression_constants;
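/*
 * ntfs_compression_buffer - one buffer for the decompression engine.
 */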
static u8 *ntfs_compression_buffer;
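/*
 * ntfs_cb_lock - spinlock which protects ntfs_compression_buffer.
 */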
static DEFINE_SPINLOCK(ntfs_cb_lock);
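/**
 * allocate_compression_buffers - allocate the decompression buffer
 *
 * Caller has to hold the ntfs_lock mutex.
 *
 * Return 0 on success or -ENOMEM if the allocation failed.
 */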
int allocate_compression_buffers(void)
{
	BUG_ON(ntfs_compression_buffer);

	ntfs_compression_buffer = vmalloc(NTFS_MAX_CB_SIZE);
	if (!ntfs_compression_buffer)
		return -ENOMEM;
	return 0;
}
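/**
 * free_compression_buffers - free the decompression buffer
 *
 * Caller has to hold the ntfs_lock mutex.
 */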
void free_compression_buffers(void)
{
	BUG_ON(!ntfs_compression_buffer);
	vfree(ntfs_compression_buffer);
	ntfs_compression_buffer = NULL;
}
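/**
 * zero_partial_compressed_page - zero out of bounds compressed page region
 */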
static void zero_partial_compressed_page(struct page *page,
		const s64 initialized_size)
{
	u8 *kp = page_address(page);
	unsigned int kp_ofs;

	ntfs_debug("Zeroing page region outside initialized size.");
	if (((s64)page->index << PAGE_SHIFT) >= initialized_size) {
		clear_page(kp);
		return;
	}
	kp_ofs = initialized_size & ~PAGE_MASK;
	memset(kp + kp_ofs, 0, PAGE_SIZE - kp_ofs);
	return;
}
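/**
 * handle_bounds_compressed_page - test for and handle an out of bounds page
 *
 * If the page lies beyond the initialized size but inside i_size, zero the
 * page region beyond the initialized size.
 */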
static inline void handle_bounds_compressed_page(struct page *page,
		const loff_t i_size, const s64 initialized_size)
{
	if ((page->index >= (initialized_size >> PAGE_SHIFT)) &&
			(initialized_size < i_size))
		zero_partial_compressed_page(page, initialized_size);
	return;
}
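/**
 * ntfs_decompress - decompress a compression block into an array of pages
 * @dest_pages:		destination array of pages
 * @completed_pages:	scratch space to record indices of completed pages
 * @dest_index:		current index into @dest_pages (IN/OUT)
 * @dest_ofs:		current offset within @dest_pages[@dest_index] (IN/OUT)
 * @dest_max_index:	maximum index into @dest_pages (IN)
 * @dest_max_ofs:	maximum offset within @dest_pages[@dest_max_index] (IN)
 * @xpage:		the target page (IN)
 * @xpage_done:		set to 1 if xpage was completed successfully (IN/OUT)
 * @cb_start:		compression block to decompress (IN)
 * @cb_size:		size of compression block @cb_start in bytes (IN)
 * @i_size:		file size when we started the read (IN)
 * @initialized_size:	initialized file size when we started the read (IN)
 *
 * This decompresses the compression block @cb_start into the array of
 * destination pages @dest_pages starting at index @dest_index into
 * @dest_pages and at offset @dest_ofs into page @dest_pages[@dest_index].
 *
 * The caller must hold ntfs_cb_lock on entry; ntfs_decompress() drops the
 * lock as soon as it has finished reading from the compression buffer, i.e.
 * before the (potentially sleeping) page finalization stage.
 *
 * When the page @dest_pages[@xpage] is completed, @xpage_done is set to 1;
 * otherwise it is left unmodified.
 *
 * Note: the code assumes PAGE_SIZE >= NTFS_SB_SIZE (4096 bytes), i.e. that a
 * sub-block never straddles a page boundary.
 *
 * Return 0 on success or -EOVERFLOW if the compressed data is corrupt.
 */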
static int ntfs_decompress(struct page *dest_pages[], int completed_pages[],
		int *dest_index, int *dest_ofs, const int dest_max_index,
		const int dest_max_ofs, const int xpage, char *xpage_done,
		u8 *const cb_start, const u32 cb_size, const loff_t i_size,
		const s64 initialized_size)
{
	/*
	 * Pointers into the compressed data, i.e. the compression block (cb),
	 * and the therein contained sub-blocks (sb).
	 */
	u8 *cb_end = cb_start + cb_size;	/* End of cb. */
	u8 *cb = cb_start;	/* Current position in cb. */
	u8 *cb_sb_start = cb;	/* Beginning of the current sb in the cb. */
	u8 *cb_sb_end;		/* End of current sb / beginning of next sb. */

	/* Variables for uncompressed data / destination. */
	struct page *dp;	/* Current destination page being worked on. */
	u8 *dp_addr;		/* Current pointer into dp. */
	u8 *dp_sb_start;	/* Start of current sub-block in dp. */
	u8 *dp_sb_end;		/* End of current sb in dp (dp_sb_start +
				   NTFS_SB_SIZE). */
	u16 do_sb_start;	/* @dest_ofs when starting this sub-block. */
	u16 do_sb_end;		/* @dest_ofs of end of this sb (do_sb_start +
				   NTFS_SB_SIZE). */

	/* Variables for tag and token parsing. */
	u8 tag;			/* Current tag. */
	int token;		/* Loop counter for the eight tokens in tag. */
	int nr_completed_pages = 0;

	/* Default error code. */
	int err = -EOVERFLOW;

	ntfs_debug("Entering, cb_size = 0x%x.", cb_size);
do_next_sb:
	ntfs_debug("Beginning sub-block at offset = 0x%zx in the cb.",
			cb - cb_start);
	/*
	 * Have we reached the end of the compression block (a zero sub-block
	 * header marks the end of the compressed data early) or the end of
	 * the decompressed data?
	 */
	if (cb == cb_end || !le16_to_cpup((le16*)cb) ||
			(*dest_index == dest_max_index &&
			*dest_ofs == dest_max_ofs)) {
		int i;

		ntfs_debug("Completed. Returning success (0).");
		err = 0;
return_error:
		/* We can sleep from now on, so we drop the lock. */
		spin_unlock(&ntfs_cb_lock);
		/* Second stage: finalize the completed pages. */
		if (nr_completed_pages > 0) {
			for (i = 0; i < nr_completed_pages; i++) {
				int di = completed_pages[i];

				dp = dest_pages[di];
				/*
				 * If we are outside the initialized size, zero
				 * the out of bounds page range.
				 */
				handle_bounds_compressed_page(dp, i_size,
						initialized_size);
				flush_dcache_page(dp);
				kunmap(dp);
				SetPageUptodate(dp);
				unlock_page(dp);
				if (di == xpage)
					*xpage_done = 1;
				else
					put_page(dp);
				dest_pages[di] = NULL;
			}
		}
		return err;
	}

	/* Setup offsets for the current sub-block destination. */
	do_sb_start = *dest_ofs;
	do_sb_end = do_sb_start + NTFS_SB_SIZE;

	/* Check that we are still within allowed boundaries. */
	if (*dest_index == dest_max_index && do_sb_end > dest_max_ofs)
		goto return_overflow;

	/* Does the minimum size of a compressed sb overflow valid range? */
	if (cb + 6 > cb_end)
		goto return_overflow;

	/* Setup the current sub-block source pointers and validate range. */
	cb_sb_start = cb;
	cb_sb_end = cb_sb_start + (le16_to_cpup((le16*)cb) & NTFS_SB_SIZE_MASK)
			+ 3;
	if (cb_sb_end > cb_end)
		goto return_overflow;

	/* Get the current destination page. */
	dp = dest_pages[*dest_index];
	if (!dp) {
		/* No page present. Skip decompression of this sub-block. */
		cb = cb_sb_end;

		/* Advance destination position to the next sub-block. */
		*dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_MASK;
		if (!*dest_ofs && (++*dest_index > dest_max_index))
			goto return_overflow;
		goto do_next_sb;
	}

	/* We have a valid destination page. Setup the destination pointers. */
	dp_addr = (u8*)page_address(dp) + do_sb_start;

	/* Now, we are ready to process the current sub-block (sb). */
	if (!(le16_to_cpup((le16*)cb) & NTFS_SB_IS_COMPRESSED)) {
		ntfs_debug("Found uncompressed sub-block.");
		/* This sb is not compressed, just copy it to destination. */

		/* Advance source position to the first data byte. */
		cb += 2;

		/* An uncompressed sb must be full size. */
		if (cb_sb_end - cb != NTFS_SB_SIZE)
			goto return_overflow;

		/* Copy the block and advance the source position. */
		memcpy(dp_addr, cb, NTFS_SB_SIZE);
		cb += NTFS_SB_SIZE;

		/* Advance destination position to the next sub-block. */
		*dest_ofs += NTFS_SB_SIZE;
		if (!(*dest_ofs &= ~PAGE_MASK)) {
finalize_page:
			/*
			 * First stage: add the current page index to the
			 * array of completed pages; finalization happens
			 * after the lock is dropped.
			 */
			completed_pages[nr_completed_pages++] = *dest_index;
			if (++*dest_index > dest_max_index)
				goto return_overflow;
		}
		goto do_next_sb;
	}
	ntfs_debug("Found compressed sub-block.");
	/* This sb is compressed, decompress it into the destination. */

	/* Setup the destination pointers. */
	dp_sb_start = dp_addr;
	dp_sb_end = dp_sb_start + NTFS_SB_SIZE;

	/* Forward to the first tag in the sub-block. */
	cb += 2;
do_next_tag:
	if (cb == cb_sb_end) {
		/* Check if the decompressed sub-block was not full-length. */
		if (dp_addr < dp_sb_end) {
			int nr_bytes = do_sb_end - *dest_ofs;

			ntfs_debug("Filling incomplete sub-block with "
					"zeroes.");
			/* Zero remainder and update destination position. */
			memset(dp_addr, 0, nr_bytes);
			*dest_ofs += nr_bytes;
		}
		/* We have finished the current sub-block. */
		if (!(*dest_ofs &= ~PAGE_MASK))
			goto finalize_page;
		goto do_next_sb;
	}

	/* Check we are still in range. */
	if (cb > cb_sb_end || dp_addr > dp_sb_end)
		goto return_overflow;

	/* Get the next tag and advance to the first token. */
	tag = *cb++;

	/* Parse the eight tokens described by the tag. */
	for (token = 0; token < 8; token++, tag >>= 1) {
		u16 lg, pt, length, max_non_overlap;
		register u16 i;
		u8 *dp_back_addr;

		/* Check if we are done / still in range. */
		if (cb >= cb_sb_end || dp_addr > dp_sb_end)
			break;

		/* Determine token type and parse appropriately. */
		if ((tag & NTFS_TOKEN_MASK) == NTFS_SYMBOL_TOKEN) {
			/*
			 * We have a symbol token, copy the symbol across, and
			 * advance the source and destination positions.
			 */
			*dp_addr++ = *cb++;
			++*dest_ofs;

			/* Continue with the next token. */
			continue;
		}

		/*
		 * We have a phrase token. Make sure it is not the first token
		 * in the sb as a phrase token needs previously decompressed
		 * data to copy from.
		 */
		if (dp_addr == dp_sb_start)
			goto return_overflow;

		/*
		 * Determine the number of bytes to go back (p) and the number
		 * of bytes to copy (l). The split of the 16-bit phrase token
		 * into back-pointer and length fields depends on the current
		 * position in the sub-block: as the position grows, more bits
		 * are used for the back-pointer and fewer for the length.
		 */
		lg = 0;
		for (i = *dest_ofs - do_sb_start - 1; i >= 0x10; i >>= 1)
			lg++;

		/* Get the phrase token. */
		pt = le16_to_cpup((le16*)cb);

		/*
		 * Calculate the starting position of the byte sequence in the
		 * destination using the fact that p = (pt >> (12 - lg)) + 1
		 * and make sure we don't go too far back.
		 */
		dp_back_addr = dp_addr - (pt >> (12 - lg)) - 1;
		if (dp_back_addr < dp_sb_start)
			goto return_overflow;

		/* Now calculate the length of the byte sequence. */
		length = (pt & (0xfff >> lg)) + 3;

		/* Advance destination position and verify it is in range. */
		*dest_ofs += length;
		if (*dest_ofs > do_sb_end)
			goto return_overflow;

		/* The number of non-overlapping bytes. */
		max_non_overlap = dp_addr - dp_back_addr;

		if (length <= max_non_overlap) {
			/* The byte sequence doesn't overlap, just copy it. */
			memcpy(dp_addr, dp_back_addr, length);

			/* Advance destination pointer. */
			dp_addr += length;
		} else {
			/*
			 * The byte sequence does overlap, copy the
			 * non-overlapping part and then do a slow byte by
			 * byte copy for the overlapping part. Also, advance
			 * the destination pointer.
			 */
			memcpy(dp_addr, dp_back_addr, max_non_overlap);
			dp_addr += max_non_overlap;
			dp_back_addr += max_non_overlap;
			length -= max_non_overlap;
			while (length--)
				*dp_addr++ = *dp_back_addr++;
		}

		/* Advance source position to the next token. */
		cb += 2;
	}

	/* No tokens left in the current tag. Continue with the next tag. */
	goto do_next_tag;

return_overflow:
	ntfs_error(NULL, "Failed. Returning -EOVERFLOW.");
	goto return_error;
}
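/**
 * ntfs_read_compressed_block - read a compressed block into the page cache
 * @page:	locked page in the compression block(s) we need to read
 *
 * When we are called the page has already been verified to be locked and the
 * attribute is known to be non-resident, not encrypted, but compressed.
 *
 * 1. Determine which compression block(s) @page is in.
 * 2. Get hold of all pages corresponding to this/these compression block(s),
 *    skipping any that are already uptodate or dirty, since overwriting
 *    those with stale uncompressed data would lose writes.
 * 3. Read the (first) compression block.
 * 4. Decompress it into the corresponding pages, mark them uptodate and
 *    unlock them.
 * 5. If there are more compression blocks to read, repeat from step 3.
 *
 * Return 0 if the requested page was completed successfully and -errno on
 * error.
 */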
int ntfs_read_compressed_block(struct page *page)
{
	loff_t i_size;
	s64 initialized_size;
	struct address_space *mapping = page->mapping;
	ntfs_inode *ni = NTFS_I(mapping->host);
	ntfs_volume *vol = ni->vol;
	struct super_block *sb = vol->sb;
	runlist_element *rl;
	unsigned long flags, block_size = sb->s_blocksize;
	unsigned char block_size_bits = sb->s_blocksize_bits;
	u8 *cb, *cb_pos, *cb_end;
	struct buffer_head **bhs;
	unsigned long offset, index = page->index;
	u32 cb_size = ni->itype.compressed.block_size;
	u64 cb_size_mask = cb_size - 1UL;
	VCN vcn;
	LCN lcn;
	/* The first wanted vcn (minimum alignment is PAGE_SIZE). */
	VCN start_vcn = (((s64)index << PAGE_SHIFT) & ~cb_size_mask) >>
			vol->cluster_size_bits;
	/*
	 * The first vcn after the last wanted vcn (maximum alignment is
	 * 64kiB, i.e. the maximum compression block size).
	 */
	VCN end_vcn = ((((s64)(index + 1UL) << PAGE_SHIFT) + cb_size - 1)
			& ~cb_size_mask) >> vol->cluster_size_bits;
	/* Number of compression blocks (cbs) in the wanted vcn range. */
	unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
			>> ni->itype.compressed.block_size_bits;
	/*
	 * Number of pages required to store the uncompressed data from all
	 * compression blocks (cbs) overlapping @page. Due to the alignment
	 * guarantees of start_vcn and end_vcn, no need to round up here.
	 */
	unsigned int nr_pages = (end_vcn - start_vcn) <<
			vol->cluster_size_bits >> PAGE_SHIFT;
	unsigned int xpage, max_page, cur_page, cur_ofs, i;
	unsigned int cb_clusters, cb_max_ofs;
	int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
	struct page **pages;
	int *completed_pages;
	unsigned char xpage_done = 0;

	ntfs_debug("Entering, page->index = 0x%lx, cb_size = 0x%x, nr_pages = "
			"%i.", index, cb_size, nr_pages);
	/*
	 * Bad things happen if we get here for anything that is not an
	 * unnamed $DATA attribute.
	 */
	BUG_ON(ni->type != AT_DATA);
	BUG_ON(ni->name_len);

	pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
	completed_pages = kmalloc_array(nr_pages + 1, sizeof(int), GFP_NOFS);

	/* Allocate memory to store the buffer heads we need. */
	bhs_size = cb_size / block_size * sizeof(struct buffer_head *);
	bhs = kmalloc(bhs_size, GFP_NOFS);

	if (unlikely(!pages || !bhs || !completed_pages)) {
		kfree(bhs);
		kfree(pages);
		kfree(completed_pages);
		unlock_page(page);
		ntfs_error(vol->sb, "Failed to allocate internal buffers.");
		return -ENOMEM;
	}

	/*
	 * We have already been given one page, this is the one we must do.
	 * Once again, the alignment guarantees keep it simple.
	 */
	offset = start_vcn << vol->cluster_size_bits >> PAGE_SHIFT;
	xpage = index - offset;
	pages[xpage] = page;
	/*
	 * Sample the file size and the initialized size under the size lock
	 * so the decompression code sees a consistent pair of values.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	i_size = i_size_read(VFS_I(ni));
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	max_page = ((i_size + PAGE_SIZE - 1) >> PAGE_SHIFT) -
			offset;
	/* Is the page fully outside i_size? (truncate in progress) */
	if (xpage >= max_page) {
		kfree(bhs);
		kfree(pages);
		kfree(completed_pages);
		zero_user(page, 0, PAGE_SIZE);
		ntfs_debug("Compressed read outside i_size - truncated?");
		SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}
	if (nr_pages < max_page)
		max_page = nr_pages;
	for (i = 0; i < max_page; i++, offset++) {
		if (i != xpage)
			pages[i] = grab_cache_page_nowait(mapping, offset);
		page = pages[i];
		if (page) {
			/*
			 * We only (re)read the page if it isn't already read
			 * in and/or dirty or we would be losing data or at
			 * least wasting our time.
			 */
			if (!PageDirty(page) && (!PageUptodate(page) ||
					PageError(page))) {
				ClearPageError(page);
				kmap(page);
				continue;
			}
			unlock_page(page);
			put_page(page);
			pages[i] = NULL;
		}
	}

	/*
	 * We have all the destination pages we need to fill. Now read the
	 * first compression block; we loop back to do_next_cb for each
	 * further one.
	 */
	cur_page = 0;
	cur_ofs = 0;
	cb_clusters = ni->itype.compressed.block_clusters;
do_next_cb:
	nr_cbs--;
	nr_bhs = 0;

	/* Read all cb buffer heads one cluster at a time. */
	rl = NULL;
	for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
			vcn++) {
		bool is_retry = false;

		if (!rl) {
lock_retry_remap:
			down_read(&ni->runlist.lock);
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to the element containing the target vcn. */
			while (rl->length && rl[1].vcn <= vcn)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
		} else
			lcn = LCN_RL_NOT_MAPPED;
		ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
				(unsigned long long)vcn,
				(unsigned long long)lcn);
		if (lcn < 0) {
			/*
			 * When we reach the first sparse cluster we have
			 * finished with the cb.
			 */
			if (lcn == LCN_HOLE)
				break;
			if (is_retry || lcn != LCN_RL_NOT_MAPPED)
				goto rl_err;
			is_retry = true;
			/*
			 * Attempt to map the runlist, dropping the lock for
			 * the duration.
			 */
			up_read(&ni->runlist.lock);
			if (!ntfs_map_runlist(ni, vcn))
				goto lock_retry_remap;
			goto map_rl_err;
		}
		block = lcn << vol->cluster_size_bits >> block_size_bits;
		/* Read the lcn from device in chunks of block_size bytes. */
		max_block = block + (vol->cluster_size >> block_size_bits);
		do {
			ntfs_debug("block = 0x%x.", block);
			if (unlikely(!(bhs[nr_bhs] = sb_getblk(sb, block))))
				goto getblk_err;
			nr_bhs++;
		} while (++block < max_block);
	}

	/* Release the lock if we took it. */
	if (rl)
		up_read(&ni->runlist.lock);

	/* Setup and initiate io on all buffer heads. */
	for (i = 0; i < nr_bhs; i++) {
		struct buffer_head *tbh = bhs[i];

		if (!trylock_buffer(tbh))
			continue;
		if (unlikely(buffer_uptodate(tbh))) {
			unlock_buffer(tbh);
			continue;
		}
		get_bh(tbh);
		tbh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, 0, tbh);
	}

	/* Wait for io completion on all buffer heads. */
	for (i = 0; i < nr_bhs; i++) {
		struct buffer_head *tbh = bhs[i];

		if (buffer_uptodate(tbh))
			continue;
		wait_on_buffer(tbh);
		/*
		 * We need an optimization barrier here, otherwise we start
		 * hitting the below fixup code when accessing a loopback
		 * mounted ntfs partition. This indicates either that there
		 * is a race condition in the loop driver or, more likely,
		 * gcc overoptimises the code without the barrier and it
		 * doesn't do the Right Thing(TM).
		 */
		barrier();
		if (unlikely(!buffer_uptodate(tbh))) {
			ntfs_warning(vol->sb, "Buffer is unlocked but not "
					"uptodate! Unplugging the disk queue "
					"and rescheduling.");
			get_bh(tbh);
			io_schedule();
			put_bh(tbh);
			if (unlikely(!buffer_uptodate(tbh)))
				goto read_err;
			ntfs_warning(vol->sb, "Buffer is now uptodate. Good.");
		}
	}

	/*
	 * Get the compression buffer. We must not sleep any more
	 * until we are finished with it.
	 */
	spin_lock(&ntfs_cb_lock);
	cb = ntfs_compression_buffer;

	BUG_ON(!cb);

	cb_pos = cb;
	cb_end = cb + cb_size;

	/* Copy the buffer heads into the contiguous buffer. */
	for (i = 0; i < nr_bhs; i++) {
		memcpy(cb_pos, bhs[i]->b_data, block_size);
		cb_pos += block_size;
	}

	/*
	 * Just a precaution: if the cb is not full, zero-terminate it so the
	 * decompressor stops at the end of the valid data.
	 */
	if (cb_pos + 2 <= cb + cb_size)
		*(u16*)cb_pos = 0;

	/* Reset cb_pos back to the beginning. */
	cb_pos = cb;

	/* We now have both the source (if present) and the destination. */
	ntfs_debug("Successfully read the compression block.");

	/* The last page and maximum offset within it for the current cb. */
	cb_max_page = (cur_page << PAGE_SHIFT) + cur_ofs + cb_size;
	cb_max_ofs = cb_max_page & ~PAGE_MASK;
	cb_max_page >>= PAGE_SHIFT;

	/* Catch end of file inside a compression block. */
	if (cb_max_page > max_page)
		cb_max_page = max_page;

	if (vcn == start_vcn - cb_clusters) {
		/*
		 * The very first cluster was sparse, i.e. the whole cb is a
		 * hole: zero out the page range overlapping the cb.
		 */
		ntfs_debug("Found sparse compression block.");
		/* We can sleep from now on, so we drop the lock. */
		spin_unlock(&ntfs_cb_lock);
		if (cb_max_ofs)
			cb_max_page--;
		for (; cur_page < cb_max_page; cur_page++) {
			page = pages[cur_page];
			if (page) {
				if (likely(!cur_ofs))
					clear_page(page_address(page));
				else
					memset(page_address(page) + cur_ofs, 0,
							PAGE_SIZE -
							cur_ofs);
				flush_dcache_page(page);
				kunmap(page);
				SetPageUptodate(page);
				unlock_page(page);
				if (cur_page == xpage)
					xpage_done = 1;
				else
					put_page(page);
				pages[cur_page] = NULL;
			}
			cb_pos += PAGE_SIZE - cur_ofs;
			cur_ofs = 0;
			if (cb_pos >= cb_end)
				break;
		}
		/* If we have a partial final page, deal with it now. */
		if (cb_max_ofs && cb_pos < cb_end) {
			page = pages[cur_page];
			if (page)
				memset(page_address(page) + cur_ofs, 0,
						cb_max_ofs - cur_ofs);
			/*
			 * No need to update the uptodate status or to unlock
			 * the page; that happens when the remainder of the
			 * page is dealt with (it is in a subsequent cb).
			 */
			cur_ofs = cb_max_ofs;
		}
	} else if (vcn == start_vcn) {
		/*
		 * All clusters of the cb are present, i.e. the data is stored
		 * uncompressed: copy it to the destination pages. We cannot
		 * sleep while holding ntfs_cb_lock, so we need two stages.
		 */
		unsigned int cur2_page = cur_page;
		unsigned int cur_ofs2 = cur_ofs;
		u8 *cb_pos2 = cb_pos;

		ntfs_debug("Found uncompressed compression block.");
		if (cb_max_ofs)
			cb_max_page--;
		/* First stage: copy the data into the destination pages. */
		for (; cur_page < cb_max_page; cur_page++) {
			page = pages[cur_page];
			if (page)
				memcpy(page_address(page) + cur_ofs, cb_pos,
						PAGE_SIZE - cur_ofs);
			cb_pos += PAGE_SIZE - cur_ofs;
			cur_ofs = 0;
			if (cb_pos >= cb_end)
				break;
		}
		/* If we have a partial final page, deal with it now. */
		if (cb_max_ofs && cb_pos < cb_end) {
			page = pages[cur_page];
			if (page)
				memcpy(page_address(page) + cur_ofs, cb_pos,
						cb_max_ofs - cur_ofs);
			cb_pos += cb_max_ofs - cur_ofs;
			cur_ofs = cb_max_ofs;
		}
		/* We can sleep from now on, so drop the lock. */
		spin_unlock(&ntfs_cb_lock);
		/* Second stage: finalize the pages. */
		for (; cur2_page < cb_max_page; cur2_page++) {
			page = pages[cur2_page];
			if (page) {
				/*
				 * If we are outside the initialized size, zero
				 * the out of bounds page range.
				 */
				handle_bounds_compressed_page(page, i_size,
						initialized_size);
				flush_dcache_page(page);
				kunmap(page);
				SetPageUptodate(page);
				unlock_page(page);
				if (cur2_page == xpage)
					xpage_done = 1;
				else
					put_page(page);
				pages[cur2_page] = NULL;
			}
			cb_pos2 += PAGE_SIZE - cur_ofs2;
			cur_ofs2 = 0;
			if (cb_pos2 >= cb_end)
				break;
		}
	} else {
		/*
		 * The cb ended early in a sparse cluster, i.e. the data is
		 * compressed: decompress it into the destination pages.
		 */
		unsigned int prev_cur_page = cur_page;

		ntfs_debug("Found compressed compression block.");
		err = ntfs_decompress(pages, completed_pages, &cur_page,
				&cur_ofs, cb_max_page, cb_max_ofs, xpage,
				&xpage_done, cb_pos, cb_size - (cb_pos - cb),
				i_size, initialized_size);
		/*
		 * We can sleep from now on, the lock was already dropped by
		 * ntfs_decompress().
		 */
		if (err) {
			ntfs_error(vol->sb, "ntfs_decompress() failed in inode "
					"0x%lx with error code %i. Skipping "
					"this compression block.",
					ni->mft_no, -err);
			/* Release the unfinished pages. */
			for (; prev_cur_page < cur_page; prev_cur_page++) {
				page = pages[prev_cur_page];
				if (page) {
					flush_dcache_page(page);
					kunmap(page);
					unlock_page(page);
					if (prev_cur_page != xpage)
						put_page(page);
					pages[prev_cur_page] = NULL;
				}
			}
		}
	}

	/* Release the buffer heads. */
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);

	/* Do we have more work to do? */
	if (nr_cbs)
		goto do_next_cb;

	/* We no longer need the list of buffer heads. */
	kfree(bhs);

	/* Clean up if we have any pages left. Should never happen. */
	for (cur_page = 0; cur_page < max_page; cur_page++) {
		page = pages[cur_page];
		if (page) {
			ntfs_error(vol->sb, "Still have pages left! "
					"Terminating them with extreme "
					"prejudice. Inode 0x%lx, page index "
					"0x%lx.", ni->mft_no, page->index);
			flush_dcache_page(page);
			kunmap(page);
			unlock_page(page);
			if (cur_page != xpage)
				put_page(page);
			pages[cur_page] = NULL;
		}
	}

	/* We no longer need the list of pages. */
	kfree(pages);
	kfree(completed_pages);

	/* If we have completed the requested page, we return success. */
	if (likely(xpage_done))
		return 0;

	ntfs_debug("Failed. Returning error code %s.", err == -EOVERFLOW ?
			"EOVERFLOW" : (!err ? "EIO" : "unknown error"));
	return err < 0 ? err : -EIO;

read_err:
	ntfs_error(vol->sb, "IO error while reading compressed data.");
	/* Release the buffer heads. */
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	goto err_out;

map_rl_err:
	ntfs_error(vol->sb, "ntfs_map_runlist() failed. Cannot read "
			"compression block.");
	goto err_out;

rl_err:
	up_read(&ni->runlist.lock);
	ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn() failed. Cannot read "
			"compression block.");
	goto err_out;

getblk_err:
	up_read(&ni->runlist.lock);
	ntfs_error(vol->sb, "getblk() failed. Cannot read compression block.");

err_out:
	kfree(bhs);
	for (i = cur_page; i < max_page; i++) {
		page = pages[i];
		if (page) {
			flush_dcache_page(page);
			kunmap(page);
			unlock_page(page);
			if (i != xpage)
				put_page(page);
		}
	}
	kfree(pages);
	kfree(completed_pages);
	return -EIO;
}