// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * compress.c - NTFS kernel compressed attributes handling.
 * Part of the Linux-NTFS project.
 *
 * Copyright (c) 2001-2004 Anton Altaparmakov
 * Copyright (c) 2002 Richard Russon
 */

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "attrib.h"
#include "inode.h"
#include "debug.h"
#include "ntfs.h"

/**
 * ntfs_compression_constants - enum of constants used in the compression code
 */
typedef enum {
	/* Token types and access mask. */
	NTFS_SYMBOL_TOKEN = 0,
	NTFS_PHRASE_TOKEN = 1,
	NTFS_TOKEN_MASK = 1,

	/* Compression sub-block constants. */
	NTFS_SB_SIZE_MASK = 0x0fff,
	NTFS_SB_SIZE = 0x1000,
	NTFS_SB_IS_COMPRESSED = 0x8000,

	/*
	 * The maximum compression block size is by definition 16 * the cluster
	 * size, with the maximum supported cluster size being 4kiB. Thus the
	 * maximum compression buffer size is 64kiB, so we use this when
	 * initializing the compression buffer.
	 */
	NTFS_MAX_CB_SIZE = 64 * 1024,
} ntfs_compression_constants;
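
/*
 * Note on the sub-block layout parsed below: each sub-block (sb) starts with
 * a little-endian u16 header. Bit 15 (NTFS_SB_IS_COMPRESSED) is set if the
 * sb is compressed and bits 0-11 (NTFS_SB_SIZE_MASK) store the total sb size
 * (header included) minus 3. For example, a full uncompressed sb (2 header
 * bytes + 0x1000 data bytes) stores 0x0fff in the size field, so the parser
 * computes cb_sb_end = cb_sb_start + 0x0fff + 3.
 */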

/*
 * ntfs_compression_buffer - one buffer for the decompression engine
 */
static u8 *ntfs_compression_buffer;

/*
 * ntfs_cb_lock - spinlock which protects ntfs_compression_buffer
 */
static DEFINE_SPINLOCK(ntfs_cb_lock);

/**
 * allocate_compression_buffers - allocate the decompression buffers
 *
 * Caller has to hold the ntfs_lock mutex.
 *
 * Return 0 on success or -ENOMEM if the allocations failed.
 */
int allocate_compression_buffers(void)
{
	BUG_ON(ntfs_compression_buffer);

	ntfs_compression_buffer = vmalloc(NTFS_MAX_CB_SIZE);
	if (!ntfs_compression_buffer)
		return -ENOMEM;
	return 0;
}

/**
 * free_compression_buffers - free the decompression buffers
 *
 * Caller has to hold the ntfs_lock mutex.
 */
void free_compression_buffers(void)
{
	BUG_ON(!ntfs_compression_buffer);
	vfree(ntfs_compression_buffer);
	ntfs_compression_buffer = NULL;
}

/**
 * zero_partial_compressed_page - zero out of bounds compressed page region
 */
static void zero_partial_compressed_page(struct page *page,
		const s64 initialized_size)
{
	u8 *kp = page_address(page);
	unsigned int kp_ofs;

	ntfs_debug("Zeroing page region outside initialized size.");
	if (((s64)page->index << PAGE_SHIFT) >= initialized_size) {
		clear_page(kp);
		return;
	}
	kp_ofs = initialized_size & ~PAGE_MASK;
	memset(kp + kp_ofs, 0, PAGE_SIZE - kp_ofs);
	return;
}
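
/*
 * Example (assuming 4kiB pages): a page with index 3 covers file bytes
 * 0x3000-0x3fff. With initialized_size = 0x3200, kp_ofs = 0x3200 & ~PAGE_MASK
 * = 0x200 and bytes 0x200-0xfff of the page are zeroed. Were initialized_size
 * 0x3000 or less, the whole page would be cleared instead.
 */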

/**
 * handle_bounds_compressed_page - test for & handle out of bounds compressed page
 */
static inline void handle_bounds_compressed_page(struct page *page,
		const loff_t i_size, const s64 initialized_size)
{
	if ((page->index >= (initialized_size >> PAGE_SHIFT)) &&
			(initialized_size < i_size))
		zero_partial_compressed_page(page, initialized_size);
	return;
}

/**
 * ntfs_decompress - decompress a compression block into an array of pages
 * @dest_pages: destination array of pages
 * @completed_pages: scratch space to track completed pages
 * @dest_index: current index into @dest_pages (IN/OUT)
 * @dest_ofs: current offset within @dest_pages[@dest_index] (IN/OUT)
 * @dest_max_index: maximum index into @dest_pages (IN)
 * @dest_max_ofs: maximum offset within @dest_pages[@dest_max_index] (IN)
 * @xpage: the target page (-1 if none) (IN)
 * @xpage_done: set to 1 if xpage was completed successfully (IN/OUT)
 * @cb_start: compression block to decompress (IN)
 * @cb_size: size of compression block @cb_start in bytes (IN)
 * @i_size: file size when we started the read (IN)
 * @initialized_size: initialized file size when we started the read (IN)
 *
 * The caller must have disabled preemption. ntfs_decompress() reenables it when
 * the critical section is finished.
 *
 * This decompresses the compression block @cb_start into the array of
 * destination pages @dest_pages starting at index @dest_index into @dest_pages
 * and at offset @dest_ofs into the page @dest_pages[@dest_index].
 *
 * When the page @dest_pages[@xpage] is completed, @xpage_done is set to 1.
 * If @xpage is -1 or @xpage has not been completed, @xpage_done is not
 * modified.
 *
 * @cb_start is a pointer to the compression block which needs decompressing
 * and @cb_size is the size of @cb_start in bytes (8-64kiB).
 *
 * Return 0 if success or -EOVERFLOW on error in the compressed stream.
 * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
 * completed during the decompression of the compression block (@cb_start).
 *
 * Warning: This function *REQUIRES* PAGE_SIZE >= 4096 or it will blow up
 * unpredictably! You have been warned!
 *
 * Note to hackers: This function may not sleep until it has finished accessing
 * the compression block @cb_start as it is a per-CPU buffer.
 */
static int ntfs_decompress(struct page *dest_pages[], int completed_pages[],
		int *dest_index, int *dest_ofs, const int dest_max_index,
		const int dest_max_ofs, const int xpage, char *xpage_done,
		u8 *const cb_start, const u32 cb_size, const loff_t i_size,
		const s64 initialized_size)
{
	/*
	 * Pointers into the compressed data, i.e. the compression block (cb),
	 * and the therein contained sub-blocks (sb).
	 */
	u8 *cb_end = cb_start + cb_size; /* End of cb. */
	u8 *cb = cb_start;	/* Current position in cb. */
	u8 *cb_sb_start;	/* Beginning of the current sb in the cb. */
	u8 *cb_sb_end;		/* End of current sb / beginning of next sb. */

	/* Variables for uncompressed data / destination. */
	struct page *dp;	/* Current destination page being worked on. */
	u8 *dp_addr;		/* Current pointer into dp. */
	u8 *dp_sb_start;	/* Start of current sub-block in dp. */
	u8 *dp_sb_end;		/* End of current sb in dp (dp_sb_start +
				   NTFS_SB_SIZE). */
	u16 do_sb_start;	/* @dest_ofs when starting this sub-block. */
	u16 do_sb_end;		/* @dest_ofs of end of this sb (do_sb_start +
				   NTFS_SB_SIZE). */

	/* Variables for tag and token parsing. */
	u8 tag;			/* Current tag. */
	int token;		/* Loop counter for the eight tokens in tag. */
	int nr_completed_pages = 0;

	/* Default error code. */
	int err = -EOVERFLOW;

	ntfs_debug("Entering, cb_size = 0x%x.", cb_size);
do_next_sb:
	ntfs_debug("Beginning sub-block at offset = 0x%zx in the cb.",
			cb - cb_start);
	/*
	 * Have we reached the end of the compression block or the end of the
	 * decompressed data? The latter can happen for example if the current
	 * position in the compression block is one byte before its end so the
	 * first two checks do not detect it.
	 */
	if (cb == cb_end || !le16_to_cpup((le16*)cb) ||
			(*dest_index == dest_max_index &&
			*dest_ofs == dest_max_ofs)) {
		int i;

		ntfs_debug("Completed. Returning success (0).");
		err = 0;
return_error:
		/* We can sleep from now on, so we drop lock. */
		spin_unlock(&ntfs_cb_lock);
		/* Second stage: finalize completed pages. */
		if (nr_completed_pages > 0) {
			for (i = 0; i < nr_completed_pages; i++) {
				int di = completed_pages[i];

				dp = dest_pages[di];
				/*
				 * If we are outside the initialized size, zero
				 * the out of bounds page range.
				 */
				handle_bounds_compressed_page(dp, i_size,
						initialized_size);
				flush_dcache_page(dp);
				kunmap(dp);
				SetPageUptodate(dp);
				unlock_page(dp);
				if (di == xpage)
					*xpage_done = 1;
				else
					put_page(dp);
				dest_pages[di] = NULL;
			}
		}
		return err;
	}

	/* Setup offsets for the current sub-block destination. */
	do_sb_start = *dest_ofs;
	do_sb_end = do_sb_start + NTFS_SB_SIZE;

	/* Check that we are still within allowed boundaries. */
	if (*dest_index == dest_max_index && do_sb_end > dest_max_ofs)
		goto return_overflow;

	/* Does the minimum size of a compressed sb overflow valid range? */
	if (cb + 6 > cb_end)
		goto return_overflow;

	/* Setup the current sub-block source pointers and validate range. */
	cb_sb_start = cb;
	cb_sb_end = cb_sb_start + (le16_to_cpup((le16*)cb) & NTFS_SB_SIZE_MASK)
			+ 3;
	if (cb_sb_end > cb_end)
		goto return_overflow;

	/* Get the current destination page. */
	dp = dest_pages[*dest_index];
	if (!dp) {
		/* No page present. Skip decompression of this sub-block. */
		cb = cb_sb_end;

		/* Advance destination position to next sub-block. */
		*dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_MASK;
		if (!*dest_ofs && (++*dest_index > dest_max_index))
			goto return_overflow;
		goto do_next_sb;
	}

	/* We have a valid destination page. Setup the destination pointers. */
	dp_addr = (u8*)page_address(dp) + do_sb_start;

	/* Now, we are ready to process the current sub-block (sb). */
	if (!(le16_to_cpup((le16*)cb) & NTFS_SB_IS_COMPRESSED)) {
		ntfs_debug("Found uncompressed sub-block.");
		/* This sb is not compressed, just copy it into destination. */

		/* Advance source position to first data byte. */
		cb += 2;

		/* An uncompressed sb must be full size. */
		if (cb_sb_end - cb != NTFS_SB_SIZE)
			goto return_overflow;

		/* Copy the block and advance the source position. */
		memcpy(dp_addr, cb, NTFS_SB_SIZE);
		cb += NTFS_SB_SIZE;

		/* Advance destination position to next sub-block. */
		*dest_ofs += NTFS_SB_SIZE;
		if (!(*dest_ofs &= ~PAGE_MASK)) {
finalize_page:
			/*
			 * First stage: add current page index to array of
			 * completed pages.
			 */
			completed_pages[nr_completed_pages++] = *dest_index;
			if (++*dest_index > dest_max_index)
				goto return_overflow;
		}
		goto do_next_sb;
	}
	ntfs_debug("Found compressed sub-block.");
	/* This sb is compressed, decompress it into destination. */

	/* Setup destination pointers. */
	dp_sb_start = dp_addr;
	dp_sb_end = dp_sb_start + NTFS_SB_SIZE;

	/* Forward to the first tag in the sub-block. */
	cb += 2;
do_next_tag:
	if (cb == cb_sb_end) {
		/* Check if the decompressed sub-block was not full-length. */
		if (dp_addr < dp_sb_end) {
			int nr_bytes = do_sb_end - *dest_ofs;

			ntfs_debug("Filling incomplete sub-block with "
					"zeroes.");
			/* Zero remainder and update destination position. */
			memset(dp_addr, 0, nr_bytes);
			*dest_ofs += nr_bytes;
		}
		/* We have finished the current sub-block. */
		if (!(*dest_ofs &= ~PAGE_MASK))
			goto finalize_page;
		goto do_next_sb;
	}

	/* Check we are still in range. */
	if (cb > cb_sb_end || dp_addr > dp_sb_end)
		goto return_overflow;

	/* Get the next tag and advance to first token. */
	tag = *cb++;

	/* Parse the eight tokens described by the tag. */
	for (token = 0; token < 8; token++, tag >>= 1) {
		u16 lg, pt, length, max_non_overlap;
		register u16 i;
		u8 *dp_back_addr;

		/* Check if we are done / still in range. */
		if (cb >= cb_sb_end || dp_addr > dp_sb_end)
			break;

		/* Determine token type and parse appropriately. */
		if ((tag & NTFS_TOKEN_MASK) == NTFS_SYMBOL_TOKEN) {
			/*
			 * We have a symbol token, copy the symbol across, and
			 * advance the source and destination positions.
			 */
			*dp_addr++ = *cb++;
			++*dest_ofs;

			/* Continue with the next token. */
			continue;
		}

		/*
		 * We have a phrase token. Make sure it is not the first tag in
		 * the sb as this is illegal and would confuse the code below.
		 */
		if (dp_addr == dp_sb_start)
			goto return_overflow;

		/*
		 * Determine the number of bytes to go back (p) and the number
		 * of bytes to copy (l). We use an optimized algorithm in which
		 * we first calculate log2(current destination position in sb),
		 * which allows determination of l and p in O(1) rather than
		 * O(n). We just need an arch-optimized log2() function now.
		 */
		lg = 0;
		for (i = *dest_ofs - do_sb_start - 1; i >= 0x10; i >>= 1)
			lg++;

		/* Get the phrase token into pt. */
		pt = le16_to_cpup((le16*)cb);

		/*
		 * Calculate starting position of the byte sequence in
		 * the destination using the fact that p = (pt >> (12 - lg)) + 1
		 * and make sure we don't go too far back.
		 */
		dp_back_addr = dp_addr - (pt >> (12 - lg)) - 1;
		if (dp_back_addr < dp_sb_start)
			goto return_overflow;

		/* Now calculate the length of the byte sequence. */
		length = (pt & (0xfff >> lg)) + 3;
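
		/*
		 * Example: if the sub-block offset is 0x100 when a phrase
		 * token is reached, i starts at 0xff and is shifted down to
		 * 0x0f in four steps, so lg = 4. A token pt = 0x2034 then
		 * gives a back-pointer p = (0x2034 >> 8) + 1 = 0x21 bytes and
		 * a length l = (0x2034 & 0xff) + 3 = 0x37 bytes.
		 */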

		/* Advance destination position and verify it is in range. */
		*dest_ofs += length;
		if (*dest_ofs > do_sb_end)
			goto return_overflow;

		/* The number of non-overlapping bytes. */
		max_non_overlap = dp_addr - dp_back_addr;

		if (length <= max_non_overlap) {
			/* The byte sequence doesn't overlap, just copy it. */
			memcpy(dp_addr, dp_back_addr, length);

			/* Advance destination pointer. */
			dp_addr += length;
		} else {
			/*
			 * The byte sequence does overlap, copy non-overlapping
			 * part and then do a slow byte by byte copy for the
			 * overlapping part. Also, advance the destination
			 * pointer.
			 */
			memcpy(dp_addr, dp_back_addr, max_non_overlap);
			dp_addr += max_non_overlap;
			dp_back_addr += max_non_overlap;
			length -= max_non_overlap;
			while (length--)
				*dp_addr++ = *dp_back_addr++;
		}

		/* Advance source position and continue with the next token. */
		cb += 2;
	}

	/* No tokens left in the current tag. Continue with the next tag. */
	goto do_next_tag;

return_overflow:
	ntfs_error(NULL, "Failed. Returning -EOVERFLOW.");
	goto return_error;
}

/**
 * ntfs_read_compressed_block - read a compressed block into the page cache
 * @page: locked page in the compression block(s) we need to read
 *
 * When we are called the page has already been verified to be locked and the
 * attribute is known to be non-resident, not encrypted, but compressed.
 *
 * 1. Determine which compression block(s) @page is in.
 * 2. Get hold of all pages corresponding to this/these compression block(s).
 * 3. Read the (first) compression block.
 * 4. Decompress it into the corresponding pages.
 * 5. Throw the compressed data away and proceed to 3. for the next compression
 *    block or return success if no more compression blocks left.
 *
 * Warning: We have to be careful what we do about existing pages. They might
 * have been written to so that we would lose data if we were to just overwrite
 * them with the out-of-date uncompressed data.
 *
 * FIXME: For PAGE_SIZE > cb_size we are not doing the Right Thing(TM) at
 * the end of the file I think. We need to detect this case and zero the out
 * of bounds remainder of the page in question and mark it as handled. At the
 * moment we would just return -EIO on such a page. This bug will only become
 * apparent if pages are above 8kiB and the NTFS volume only uses 512 byte
 * clusters so is probably not going to be seen by anyone. Still this should
 * be fixed. (AIA)
 *
 * FIXME: Again for PAGE_SIZE > cb_size we are screwing up both in
 * handling sparse and compressed cbs. (AIA)
 *
 * FIXME: At the moment we don't do any zeroing out in the case that
 * initialized_size is less than data_size. This should be safe because of the
 * nature of the compression algorithm used. Just in case we check and output
 * an error message in read inode if the two sizes are not equal for a
 * compressed file. (AIA)
 */
int ntfs_read_compressed_block(struct page *page)
{
	loff_t i_size;
	s64 initialized_size;
	struct address_space *mapping = page->mapping;
	ntfs_inode *ni = NTFS_I(mapping->host);
	ntfs_volume *vol = ni->vol;
	struct super_block *sb = vol->sb;
	runlist_element *rl;
	unsigned long flags, block_size = sb->s_blocksize;
	unsigned char block_size_bits = sb->s_blocksize_bits;
	u8 *cb, *cb_pos, *cb_end;
	struct buffer_head **bhs;
	unsigned long offset, index = page->index;
	u32 cb_size = ni->itype.compressed.block_size;
	u64 cb_size_mask = cb_size - 1UL;
	VCN vcn;
	LCN lcn;
	/* The first wanted vcn (minimum alignment is PAGE_SIZE). */
	VCN start_vcn = (((s64)index << PAGE_SHIFT) & ~cb_size_mask) >>
			vol->cluster_size_bits;
	/*
	 * The first vcn after the last wanted vcn (minimum alignment is again
	 * PAGE_SIZE).
	 */
	VCN end_vcn = ((((s64)(index + 1UL) << PAGE_SHIFT) + cb_size - 1)
			& ~cb_size_mask) >> vol->cluster_size_bits;
	/* Number of compression blocks (cbs) in the wanted vcn range. */
	unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
			>> ni->itype.compressed.block_size_bits;
	/*
	 * Number of pages required to store the uncompressed data from all
	 * compression blocks (cbs) overlapping @page. Due to alignment
	 * guarantees of start_vcn and end_vcn, no need to round up here.
	 */
	unsigned int nr_pages = (end_vcn - start_vcn) <<
			vol->cluster_size_bits >> PAGE_SHIFT;
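	/*
	 * Example (assuming 4kiB pages and clusters and a 64kiB cb): for page
	 * index 5, start_vcn = (0x5000 & ~0xffff) >> 12 = 0 and end_vcn =
	 * ((0x6000 + 0xffff) & ~0xffff) >> 12 = 16, so nr_cbs = 1 and
	 * nr_pages = 16, i.e. the single cb covering pages 0-15.
	 */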
	unsigned int xpage, max_page, cur_page, cur_ofs, i;
	unsigned int cb_clusters, cb_max_ofs;
	int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
	struct page **pages;
	int *completed_pages;
	unsigned char xpage_done = 0;

	ntfs_debug("Entering, page->index = 0x%lx, cb_size = 0x%x, nr_pages = "
			"%i.", index, cb_size, nr_pages);
	/*
	 * Bad things happen if we get here for anything that is not an
	 * unnamed $DATA attribute.
	 */
	BUG_ON(ni->type != AT_DATA);
	BUG_ON(ni->name_len);

	pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
	completed_pages = kmalloc_array(nr_pages + 1, sizeof(int), GFP_NOFS);

	/* Allocate memory to store the buffer heads we need. */
	bhs_size = cb_size / block_size * sizeof(struct buffer_head *);
	bhs = kmalloc(bhs_size, GFP_NOFS);

	if (unlikely(!pages || !bhs || !completed_pages)) {
		kfree(bhs);
		kfree(pages);
		kfree(completed_pages);
		unlock_page(page);
		ntfs_error(vol->sb, "Failed to allocate internal buffers.");
		return -ENOMEM;
	}

	/*
	 * We have already been given one page, this is the one we must do.
	 * Once again, the alignment guarantees keep it simple.
	 */
	offset = start_vcn << vol->cluster_size_bits >> PAGE_SHIFT;
	xpage = index - offset;
	pages[xpage] = page;
	/*
	 * The remaining pages need to be allocated and inserted into the page
	 * cache, alignment guarantees keep all the below much simpler. (-8
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	i_size = i_size_read(VFS_I(ni));
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	max_page = ((i_size + PAGE_SIZE - 1) >> PAGE_SHIFT) -
			offset;
	/* Is the page fully outside i_size? (truncate in progress) */
	if (xpage >= max_page) {
		kfree(bhs);
		kfree(pages);
		kfree(completed_pages);
		zero_user(page, 0, PAGE_SIZE);
		ntfs_debug("Compressed read outside i_size - truncated?");
		SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}
	if (nr_pages < max_page)
		max_page = nr_pages;
	for (i = 0; i < max_page; i++, offset++) {
		if (i != xpage)
			pages[i] = grab_cache_page_nowait(mapping, offset);
		page = pages[i];
		if (page) {
			/*
			 * We only (re)read the page if it isn't already read
			 * in and/or dirty or we would be losing data or at
			 * least wasting our time.
			 */
			if (!PageDirty(page) && (!PageUptodate(page) ||
					PageError(page))) {
				ClearPageError(page);
				kmap(page);
				continue;
			}
			unlock_page(page);
			put_page(page);
			pages[i] = NULL;
		}
	}

	/*
	 * We have the runlist, and all the destination pages we need to fill.
	 * Now read the first compression block.
	 */
	cur_page = 0;
	cur_ofs = 0;
	cb_clusters = ni->itype.compressed.block_clusters;
do_next_cb:
	nr_cbs--;
	nr_bhs = 0;

	/* Read all cb buffer heads one cluster at a time. */
	rl = NULL;
	for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
			vcn++) {
		bool is_retry = false;

		if (!rl) {
lock_retry_remap:
			down_read(&ni->runlist.lock);
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target vcn. */
			while (rl->length && rl[1].vcn <= vcn)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
		} else
			lcn = LCN_RL_NOT_MAPPED;
		ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
				(unsigned long long)vcn,
				(unsigned long long)lcn);
		if (lcn < 0) {
			/*
			 * When we reach the first sparse cluster we have
			 * finished with the cb.
			 */
			if (lcn == LCN_HOLE)
				break;
			if (is_retry || lcn != LCN_RL_NOT_MAPPED)
				goto rl_err;
			is_retry = true;
			/*
			 * Attempt to map runlist, dropping lock for the
			 * duration.
			 */
			up_read(&ni->runlist.lock);
			if (!ntfs_map_runlist(ni, vcn))
				goto lock_retry_remap;
			goto map_rl_err;
		}
		block = lcn << vol->cluster_size_bits >> block_size_bits;
		/* Read the lcn from device in chunks of block_size bytes. */
		max_block = block + (vol->cluster_size >> block_size_bits);
		do {
			ntfs_debug("block = 0x%x.", block);
			if (unlikely(!(bhs[nr_bhs] = sb_getblk(sb, block))))
				goto getblk_err;
			nr_bhs++;
		} while (++block < max_block);
	}

	/* Release the lock if we took it. */
	if (rl)
		up_read(&ni->runlist.lock);

	/* Setup and initiate io on all buffer heads. */
	for (i = 0; i < nr_bhs; i++) {
		struct buffer_head *tbh = bhs[i];

		if (!trylock_buffer(tbh))
			continue;
		if (unlikely(buffer_uptodate(tbh))) {
			unlock_buffer(tbh);
			continue;
		}
		get_bh(tbh);
		tbh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, tbh);
	}

	/* Wait for io completion on all buffer heads. */
	for (i = 0; i < nr_bhs; i++) {
		struct buffer_head *tbh = bhs[i];

		if (buffer_uptodate(tbh))
			continue;
		wait_on_buffer(tbh);
		/*
		 * We need an optimization barrier here, otherwise we start
		 * hitting the below fixup code when accessing a loopback
		 * mounted ntfs partition. This indicates either there is a
		 * race condition in the loop driver or, more likely, gcc
		 * overoptimises the code without the barrier and it doesn't
		 * do the Right Thing(TM).
		 */
		barrier();
		if (unlikely(!buffer_uptodate(tbh))) {
			ntfs_warning(vol->sb, "Buffer is unlocked but not "
					"uptodate! Unplugging the disk queue "
					"and rescheduling.");
			get_bh(tbh);
			io_schedule();
			put_bh(tbh);
			if (unlikely(!buffer_uptodate(tbh)))
				goto read_err;
			ntfs_warning(vol->sb, "Buffer is now uptodate. Good.");
		}
	}

	/*
	 * Get the compression buffer. We must not sleep any more
	 * until we are finished with it.
	 */
	spin_lock(&ntfs_cb_lock);
	cb = ntfs_compression_buffer;

	BUG_ON(!cb);

	cb_pos = cb;
	cb_end = cb + cb_size;

	/* Copy the buffer heads into the contiguous buffer. */
	for (i = 0; i < nr_bhs; i++) {
		memcpy(cb_pos, bhs[i]->b_data, block_size);
		cb_pos += block_size;
	}

	/* Just a precaution. */
	if (cb_pos + 2 <= cb + cb_size)
		*(u16*)cb_pos = 0;
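	/*
	 * Note: a zero u16 here acts as an end-of-data marker, since
	 * ntfs_decompress() treats a zero sub-block header as the end of the
	 * decompressed data (see the !le16_to_cpup() check there).
	 */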

	/* Reset cb_pos back to the beginning. */
	cb_pos = cb;

	/* We now have both source (if present) and destination. */
	ntfs_debug("Successfully read the compression block.");

	/* The last page and maximum offset within it for the current cb. */
	cb_max_page = (cur_page << PAGE_SHIFT) + cur_ofs + cb_size;
	cb_max_ofs = cb_max_page & ~PAGE_MASK;
	cb_max_page >>= PAGE_SHIFT;
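	/*
	 * Example (assuming 4kiB pages and an 8kiB cb): with cur_page = 2 and
	 * cur_ofs = 0x800, the end of the cb lies at byte 0x2000 + 0x800 +
	 * 0x2000 = 0x4800, so cb_max_page = 4 and cb_max_ofs = 0x800, i.e. the
	 * cb ends 0x800 bytes into pages[4].
	 */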

	/* Catch end of file inside a compression block. */
	if (cb_max_page > max_page)
		cb_max_page = max_page;

	if (vcn == start_vcn - cb_clusters) {
		/* Sparse cb, zero out page range overlapping the cb. */
		ntfs_debug("Found sparse compression block.");
		/* We can sleep from now on, so we drop lock. */
		spin_unlock(&ntfs_cb_lock);
		if (cb_max_ofs)
			cb_max_page--;
		for (; cur_page < cb_max_page; cur_page++) {
			page = pages[cur_page];
			if (page) {
				if (likely(!cur_ofs))
					clear_page(page_address(page));
				else
					memset(page_address(page) + cur_ofs, 0,
							PAGE_SIZE -
							cur_ofs);
				flush_dcache_page(page);
				kunmap(page);
				SetPageUptodate(page);
				unlock_page(page);
				if (cur_page == xpage)
					xpage_done = 1;
				else
					put_page(page);
				pages[cur_page] = NULL;
			}
			cb_pos += PAGE_SIZE - cur_ofs;
			cur_ofs = 0;
			if (cb_pos >= cb_end)
				break;
		}
		/* If we have a partial final page, deal with it now. */
		if (cb_max_ofs && cb_pos < cb_end) {
			page = pages[cur_page];
			if (page)
				memset(page_address(page) + cur_ofs, 0,
						cb_max_ofs - cur_ofs);
			/*
			 * No need to update cb_pos at this stage:
			 *	cb_pos += cb_max_ofs - cur_ofs;
			 */
			cur_ofs = cb_max_ofs;
		}
	} else if (vcn == start_vcn) {
		/* We can't sleep so we need two stages. */
		unsigned int cur2_page = cur_page;
		unsigned int cur_ofs2 = cur_ofs;
		u8 *cb_pos2 = cb_pos;

		ntfs_debug("Found uncompressed compression block.");
		/* Uncompressed cb, copy it to the destination pages. */
		/*
		 * TODO: As a big optimization, we could detect this case
		 * before we read all the pages and use block_read_full_folio()
		 * on all full pages instead (we still have to treat partial
		 * pages especially but at least we are getting rid of the
		 * synchronous io for the majority of pages).
		 * Or if we choose not to do the read-ahead/-behind stuff, we
		 * could just return block_read_full_folio(pages[xpage]) as
		 * long as PAGE_SIZE <= cb_size.
		 */
		if (cb_max_ofs)
			cb_max_page--;
		/* First stage: copy data into destination pages. */
		for (; cur_page < cb_max_page; cur_page++) {
			page = pages[cur_page];
			if (page)
				memcpy(page_address(page) + cur_ofs, cb_pos,
						PAGE_SIZE - cur_ofs);
			cb_pos += PAGE_SIZE - cur_ofs;
			cur_ofs = 0;
			if (cb_pos >= cb_end)
				break;
		}
		/* If we have a partial final page, deal with it now. */
		if (cb_max_ofs && cb_pos < cb_end) {
			page = pages[cur_page];
			if (page)
				memcpy(page_address(page) + cur_ofs, cb_pos,
						cb_max_ofs - cur_ofs);
			cb_pos += cb_max_ofs - cur_ofs;
			cur_ofs = cb_max_ofs;
		}
		/* We can sleep from now on, so drop lock. */
		spin_unlock(&ntfs_cb_lock);
		/* Second stage: finalize pages. */
		for (; cur2_page < cb_max_page; cur2_page++) {
			page = pages[cur2_page];
			if (page) {
				/*
				 * If we are outside the initialized size, zero
				 * the out of bounds page range.
				 */
				handle_bounds_compressed_page(page, i_size,
						initialized_size);
				flush_dcache_page(page);
				kunmap(page);
				SetPageUptodate(page);
				unlock_page(page);
				if (cur2_page == xpage)
					xpage_done = 1;
				else
					put_page(page);
				pages[cur2_page] = NULL;
			}
			cb_pos2 += PAGE_SIZE - cur_ofs2;
			cur_ofs2 = 0;
			if (cb_pos2 >= cb_end)
				break;
		}
	} else {
		/* Compressed cb, decompress it into the destination page(s). */
		unsigned int prev_cur_page = cur_page;

		ntfs_debug("Found compressed compression block.");
		err = ntfs_decompress(pages, completed_pages, &cur_page,
				&cur_ofs, cb_max_page, cb_max_ofs, xpage,
				&xpage_done, cb_pos, cb_size - (cb_pos - cb),
				i_size, initialized_size);
		/*
		 * We can sleep from now on, lock already dropped by
		 * ntfs_decompress().
		 */
		if (err) {
			ntfs_error(vol->sb, "ntfs_decompress() failed in inode "
					"0x%lx with error code %i. Skipping "
					"this compression block.",
					ni->mft_no, -err);
			/* Release the unfinished pages. */
			for (; prev_cur_page < cur_page; prev_cur_page++) {
				page = pages[prev_cur_page];
				if (page) {
					flush_dcache_page(page);
					kunmap(page);
					unlock_page(page);
					if (prev_cur_page != xpage)
						put_page(page);
					pages[prev_cur_page] = NULL;
				}
			}
		}
	}

	/* Release the buffer heads. */
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);

	/* Do we have more work to do? */
	if (nr_cbs)
		goto do_next_cb;

	/* We no longer need the list of buffer heads. */
	kfree(bhs);

	/* Clean up if we have any pages left. Should never happen. */
	for (cur_page = 0; cur_page < max_page; cur_page++) {
		page = pages[cur_page];
		if (page) {
			ntfs_error(vol->sb, "Still have pages left! "
					"Terminating them with extreme "
					"prejudice. Inode 0x%lx, page index "
					"0x%lx.", ni->mft_no, page->index);
			flush_dcache_page(page);
			kunmap(page);
			unlock_page(page);
			if (cur_page != xpage)
				put_page(page);
			pages[cur_page] = NULL;
		}
	}

	/* We no longer need the list of pages. */
	kfree(pages);
	kfree(completed_pages);

	/* If we have completed the requested page, we return success. */
	if (likely(xpage_done))
		return 0;

	ntfs_debug("Failed. Returning error code %s.", err == -EOVERFLOW ?
			"EOVERFLOW" : (!err ? "EIO" : "unknown error"));
	return err < 0 ? err : -EIO;

read_err:
	ntfs_error(vol->sb, "IO error while reading compressed data.");
	/* Release the buffer heads. */
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	goto err_out;

map_rl_err:
	ntfs_error(vol->sb, "ntfs_map_runlist() failed. Cannot read "
			"compression block.");
	goto err_out;

rl_err:
	up_read(&ni->runlist.lock);
	ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn() failed. Cannot read "
			"compression block.");
	goto err_out;

getblk_err:
	up_read(&ni->runlist.lock);
	ntfs_error(vol->sb, "getblk() failed. Cannot read compression block.");

err_out:
	kfree(bhs);
	for (i = cur_page; i < max_page; i++) {
		page = pages[i];
		if (page) {
			flush_dcache_page(page);
			kunmap(page);
			unlock_page(page);
			if (i != xpage)
				put_page(page);
		}
	}
	kfree(pages);
	kfree(completed_pages);
	return -EIO;
}