// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/psi.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include <linux/shrinker.h>
#include <crypto/hash.h>
#include "misc.h"
#include "ctree.h"
#include "fs.h"
#include "btrfs_inode.h"
#include "bio.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
#include "subpage.h"
#include "messages.h"
#include "super.h"

static struct bio_set btrfs_compressed_bioset;

static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

const char *btrfs_compress_type2str(enum btrfs_compression_type type)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
	case BTRFS_COMPRESS_LZO:
	case BTRFS_COMPRESS_ZSTD:
	case BTRFS_COMPRESS_NONE:
		return btrfs_compress_types[type];
	default:
		break;
	}

	return NULL;
}

static inline struct compressed_bio *to_compressed_bio(struct btrfs_bio *bbio)
{
	return container_of(bbio, struct compressed_bio, bbio);
}

static struct compressed_bio *alloc_compressed_bio(struct btrfs_inode *inode,
						   u64 start, blk_opf_t op,
						   btrfs_bio_end_io_t end_io)
{
	struct btrfs_bio *bbio;

	bbio = btrfs_bio(bio_alloc_bioset(NULL, BTRFS_MAX_COMPRESSED_PAGES, op,
					  GFP_NOFS, &btrfs_compressed_bioset));
	btrfs_bio_init(bbio, inode->root->fs_info, end_io, NULL);
	bbio->inode = inode;
	bbio->file_offset = start;
	return to_compressed_bio(bbio);
}
bool btrfs_compress_is_valid_type(const char *str, size_t len)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
		size_t comp_len = strlen(btrfs_compress_types[i]);

		if (len < comp_len)
			continue;

		if (!strncmp(btrfs_compress_types[i], str, comp_len))
			return true;
	}
	return false;
}
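
/*
 * Example: the prefix match above accepts a value with a level suffix,
 * e.g. btrfs_compress_is_valid_type("zlib:9", 6) returns true because
 * "zlib" is a prefix of the string, while a string shorter than every
 * known type name is rejected.
 */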

static int compression_compress_pages(int type, struct list_head *ws,
		struct address_space *mapping, u64 start, struct page **pages,
		unsigned long *out_pages, unsigned long *total_in,
		unsigned long *total_out)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
		return zlib_compress_pages(ws, mapping, start, pages,
					   out_pages, total_in, total_out);
	case BTRFS_COMPRESS_LZO:
		return lzo_compress_pages(ws, mapping, start, pages,
					  out_pages, total_in, total_out);
	case BTRFS_COMPRESS_ZSTD:
		return zstd_compress_pages(ws, mapping, start, pages,
					   out_pages, total_in, total_out);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can happen when compression races with a remount
		 * setting it to 'no compress', while the caller doesn't call
		 * inode_need_compress() to check if we really need to
		 * compress.
		 *
		 * Not a big deal, just need to inform the caller that we
		 * haven't allocated any pages yet.
		 */
		*out_pages = 0;
		return -E2BIG;
	}
}

static int compression_decompress_bio(struct list_head *ws,
				      struct compressed_bio *cb)
{
	switch (cb->compress_type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static int compression_decompress(int type, struct list_head *ws,
		const u8 *data_in, struct page *dest_page,
		unsigned long dest_pgoff, size_t srclen, size_t destlen)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
						dest_pgoff, srclen, destlen);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
						dest_pgoff, srclen, destlen);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
						dest_pgoff, srclen, destlen);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void btrfs_free_compressed_pages(struct compressed_bio *cb)
{
	for (unsigned int i = 0; i < cb->nr_pages; i++)
		btrfs_free_compr_page(cb->compressed_pages[i]);
	kfree(cb->compressed_pages);
}

static int btrfs_decompress_bio(struct compressed_bio *cb);

/*
 * Global cache of last unused pages for compression/decompression.
 */
static struct btrfs_compr_pool {
	struct shrinker *shrinker;
	spinlock_t lock;
	struct list_head list;
	int count;
	int thresh;
} compr_pool;

static unsigned long btrfs_compr_pool_count(struct shrinker *sh, struct shrink_control *sc)
{
	int ret;

	/*
	 * We must not read the values more than once if 'ret' gets expanded in
	 * the return statement so we don't accidentally return a negative
	 * number, even if the first condition finds it positive.
	 */
	ret = READ_ONCE(compr_pool.count) - READ_ONCE(compr_pool.thresh);

	return ret > 0 ? ret : 0;
}

static unsigned long btrfs_compr_pool_scan(struct shrinker *sh, struct shrink_control *sc)
{
	struct list_head remove;
	struct list_head *tmp, *next;
	int freed;

	if (compr_pool.count == 0)
		return SHRINK_STOP;

	INIT_LIST_HEAD(&remove);

	/* For now, just simply drain the whole list. */
	spin_lock(&compr_pool.lock);
	list_splice_init(&compr_pool.list, &remove);
	freed = compr_pool.count;
	compr_pool.count = 0;
	spin_unlock(&compr_pool.lock);

	list_for_each_safe(tmp, next, &remove) {
		struct page *page = list_entry(tmp, struct page, lru);

		ASSERT(page_ref_count(page) == 1);
		put_page(page);
	}

	return freed;
}

/*
 * Common page allocation wrappers for the compression code.
 */
struct page *btrfs_alloc_compr_page(void)
{
	struct page *page = NULL;

	spin_lock(&compr_pool.lock);
	if (compr_pool.count > 0) {
		page = list_first_entry(&compr_pool.list, struct page, lru);
		list_del_init(&page->lru);
		compr_pool.count--;
	}
	spin_unlock(&compr_pool.lock);

	if (page)
		return page;

	return alloc_page(GFP_NOFS);
}

void btrfs_free_compr_page(struct page *page)
{
	bool do_free = false;

	spin_lock(&compr_pool.lock);
	if (compr_pool.count > compr_pool.thresh) {
		do_free = true;
	} else {
		list_add(&page->lru, &compr_pool.list);
		compr_pool.count++;
	}
	spin_unlock(&compr_pool.lock);

	if (!do_free)
		return;

	ASSERT(page_ref_count(page) == 1);
	put_page(page);
}
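
/*
 * Typical round trip, e.g. from a (de)compression worker:
 *
 *	struct page *page = btrfs_alloc_compr_page();
 *	... use the page as scratch space ...
 *	btrfs_free_compr_page(page);
 *
 * The free side keeps up to compr_pool.thresh pages cached, so repeated
 * cycles reuse pages instead of hitting the page allocator every time.
 */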

static void end_bbio_compressed_read(struct btrfs_bio *bbio)
{
	struct compressed_bio *cb = to_compressed_bio(bbio);
	blk_status_t status = bbio->bio.bi_status;

	if (!status)
		status = errno_to_blk_status(btrfs_decompress_bio(cb));

	btrfs_free_compressed_pages(cb);
	btrfs_bio_end_io(cb->orig_bbio, status);
	bio_put(&bbio->bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(const struct compressed_bio *cb)
{
	struct inode *inode = &cb->bbio.inode->vfs_inode;
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct folio_batch fbatch;
	const int error = blk_status_to_errno(cb->bbio.bio.bi_status);
	int i;
	int ret;

	if (error)
		mapping_set_error(inode->i_mapping, error);

	folio_batch_init(&fbatch);
	while (index <= end_index) {
		ret = filemap_get_folios(inode->i_mapping, &index, end_index,
				&fbatch);

		if (ret == 0)
			return;

		for (i = 0; i < ret; i++) {
			struct folio *folio = fbatch.folios[i];

			btrfs_folio_clamp_clear_writeback(fs_info, folio,
							  cb->start, cb->len);
		}
		folio_batch_release(&fbatch);
	}
	/* the inode may be gone now */
}

static void btrfs_finish_compressed_write_work(struct work_struct *work)
{
	struct compressed_bio *cb =
		container_of(work, struct compressed_bio, write_end_work);

	btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len,
				    cb->bbio.bio.bi_status == BLK_STS_OK);

	if (cb->writeback)
		end_compressed_writeback(cb);
	/* Note, our inode could be gone now */

	btrfs_free_compressed_pages(cb);
	bio_put(&cb->bbio.bio);
}

/*
 * Do the cleanup once all the compressed pages hit the disk. This will clear
 * writeback on the file pages and free the compressed pages.
 *
 * This also calls the writeback end hooks for the file pages so that metadata
 * and checksums can be updated in the file.
 */
static void end_bbio_compressed_write(struct btrfs_bio *bbio)
{
	struct compressed_bio *cb = to_compressed_bio(bbio);
	struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;

	queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
}

static void btrfs_add_compressed_bio_pages(struct compressed_bio *cb)
{
	struct bio *bio = &cb->bbio.bio;
	u32 offset = 0;

	while (offset < cb->compressed_len) {
		u32 len = min_t(u32, cb->compressed_len - offset, PAGE_SIZE);

		/* Maximum compressed extent is smaller than bio size limit. */
		__bio_add_page(bio, cb->compressed_pages[offset >> PAGE_SHIFT],
			       len, 0);
		offset += len;
	}
}
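
/*
 * Example: for cb->compressed_len = 10KiB with 4KiB pages, the loop above
 * adds compressed_pages[0] and [1] with len = 4096 and
 * compressed_pages[2] with len = 2048, so the bio covers exactly the
 * compressed length.
 */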

/*
 * Worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
				   struct page **compressed_pages,
				   unsigned int nr_pages,
				   blk_opf_t write_flags,
				   bool writeback)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct compressed_bio *cb;

	ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize));
	ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize));

	cb = alloc_compressed_bio(inode, ordered->file_offset,
				  REQ_OP_WRITE | write_flags,
				  end_bbio_compressed_write);
	cb->start = ordered->file_offset;
	cb->len = ordered->num_bytes;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = ordered->disk_num_bytes;
	cb->writeback = writeback;
	INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
	cb->nr_pages = nr_pages;
	cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
	cb->bbio.ordered = ordered;
	btrfs_add_compressed_bio_pages(cb);

	btrfs_submit_bio(&cb->bbio, 0);
}

/*
 * Add extra pages in the same compressed file extent so that we don't need to
 * re-read the same extent again and again.
 *
 * NOTE: this won't work well for subpage, as for subpage read, we lock the
 * full page then submit bio for each compressed/regular extents.
 *
 * This means that if several sectors in the same page point to the same
 * on-disk compressed data, we will re-read the same extent many times and
 * this function can only help for the next page.
 */
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb,
				     int *memstall, unsigned long *pflags)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	unsigned long end_index;
	struct bio *orig_bio = &cb->orig_bbio->bio;
	u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	int sectors_missed = 0;

	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	/*
	 * For current subpage support, we only support 64K page size,
	 * which means maximum compressed extent size (128K) is just 2x page
	 * size.
	 * This makes readahead less effective, so here disable readahead for
	 * subpage for now, until full compressed write is supported.
	 */
	if (fs_info->sectorsize < PAGE_SIZE)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (cur < compressed_end) {
		u64 page_end;
		u64 pg_index = cur >> PAGE_SHIFT;
		u32 add_size;

		if (pg_index > end_index)
			break;

		page = xa_load(&mapping->i_pages, pg_index);
		if (page && !xa_is_value(page)) {
			sectors_missed += (PAGE_SIZE - offset_in_page(cur)) >>
					  fs_info->sectorsize_bits;

			/* Beyond threshold, no need to continue */
			if (sectors_missed > 4)
				break;

			/*
			 * Jump to next page start as we already have page for
			 * current offset.
			 */
			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
			continue;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			/* There is already a page, skip to page end */
			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
			continue;
		}

		if (!*memstall && PageWorkingset(page)) {
			psi_memstall_enter(pflags);
			*memstall = 1;
		}

		ret = set_page_extent_mapped(page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			break;
		}

		page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;
		lock_extent(tree, cur, page_end, NULL);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
		read_unlock(&em_tree->lock);

		/*
		 * At this point, we have a locked page in the page cache for
		 * these bytes in the file. But, we have to make sure they map
		 * to this compressed extent on disk.
		 */
		if (!em || cur < em->start ||
		    (cur + fs_info->sectorsize > extent_map_end(em)) ||
		    (em->block_start >> SECTOR_SHIFT) != orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, cur, page_end, NULL);
			unlock_page(page);
			put_page(page);
			break;
		}
		/* Compute add_size before the extent map is freed below. */
		add_size = min(em->start + em->len, page_end + 1) - cur;
		free_extent_map(em);

		if (page->index == end_index) {
			size_t zero_offset = offset_in_page(isize);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				memzero_page(page, zero_offset, zeros);
			}
		}

		ret = bio_add_page(orig_bio, page, add_size, offset_in_page(cur));
		if (ret != add_size) {
			unlock_extent(tree, cur, page_end, NULL);
			unlock_page(page);
			put_page(page);
			break;
		}
		/*
		 * If it's subpage, we also need to increase its
		 * subpage::readers number, as at endio we will decrease
		 * subpage::readers and unlock the page.
		 */
		if (fs_info->sectorsize < PAGE_SIZE)
			btrfs_subpage_start_reader(fs_info, page_folio(page),
						   cur, add_size);
		put_page(page);
		cur += add_size;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it. We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct compressed_bio *cb;
	unsigned int compressed_len;
	u64 file_offset = bbio->file_offset;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	unsigned long pflags;
	int memstall = 0;
	blk_status_t ret;
	int ret2;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
	read_unlock(&em_tree->lock);
	if (!em) {
		ret = BLK_STS_IOERR;
		goto out;
	}

	ASSERT(extent_map_is_compressed(em));
	compressed_len = em->block_len;

	cb = alloc_compressed_bio(inode, file_offset, REQ_OP_READ,
				  end_bbio_compressed_read);

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	cb->len = bbio->bio.bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_map_compression(em);
	cb->orig_bbio = bbio;

	free_extent_map(em);

	cb->nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(cb->nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!cb->compressed_pages) {
		ret = BLK_STS_RESOURCE;
		goto out_free_bio;
	}

	ret2 = btrfs_alloc_page_array(cb->nr_pages, cb->compressed_pages, 0);
	if (ret2) {
		ret = BLK_STS_RESOURCE;
		goto out_free_compressed_pages;
	}

	add_ra_bio_pages(&inode->vfs_inode, em_start + em_len, cb, &memstall,
			 &pflags);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bbio->bio.bi_iter.bi_size;
	cb->bbio.bio.bi_iter.bi_sector = bbio->bio.bi_iter.bi_sector;
	btrfs_add_compressed_bio_pages(cb);

	if (memstall)
		psi_memstall_leave(&pflags);

	btrfs_submit_bio(&cb->bbio, 0);
	return;

out_free_compressed_pages:
	kfree(cb->compressed_pages);
out_free_bio:
	bio_put(&cb->bbio.bio);
out:
	btrfs_bio_end_io(bbio, ret);
}

/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
 * @SAMPLING_INTERVAL  - the interval between the start of consecutive samples
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
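
/*
 * Worked example: with BTRFS_MAX_UNCOMPRESSED = 128KiB this evaluates to
 * 131072 * 16 / 256 = 8192 bytes, i.e. 512 samples of 16 bytes, matching
 * the 32 * 256 bound described above.
 */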

struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	/* Sorting buffer */
	struct bucket_item *bucket_b;
	struct list_head list;
};

static struct workspace_manager heuristic_wsm;

static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace->bucket_b);
	kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(unsigned int level)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
	if (!ws->bucket_b)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}

const struct btrfs_compress_op btrfs_heuristic_compress = {
	.workspace_manager = &heuristic_wsm,
};

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	/* The heuristic is represented as compression type 0 */
	&btrfs_heuristic_compress,
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};

static struct list_head *alloc_workspace(int type, unsigned int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
	case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
	case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
	case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void free_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
	case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
	case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void btrfs_init_workspace_manager(int type)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;

	wsm = btrfs_compress_op[type]->workspace_manager;
	INIT_LIST_HEAD(&wsm->idle_ws);
	spin_lock_init(&wsm->ws_lock);
	atomic_set(&wsm->total_ws, 0);
	init_waitqueue_head(&wsm->ws_wait);

	/*
	 * Preallocate one workspace for each compression type so we can
	 * guarantee forward progress in the worst case
	 */
	workspace = alloc_workspace(type, 0);
	if (IS_ERR(workspace)) {
		pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
	} else {
		atomic_set(&wsm->total_ws, 1);
		wsm->free_ws = 1;
		list_add(workspace, &wsm->idle_ws);
	}
}

static void btrfs_cleanup_workspace_manager(int type)
{
	struct workspace_manager *wsman;
	struct list_head *ws;

	wsman = btrfs_compress_op[type]->workspace_manager;
	while (!list_empty(&wsman->idle_ws)) {
		ws = wsman->idle_ws.next;
		list_del(ws);
		free_workspace(type, ws);
		atomic_dec(&wsman->total_ws);
	}
}
/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation provides a forward progress guarantee so we do not return
 * errors.
 */
struct list_head *btrfs_get_workspace(int type, unsigned int level)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;
	int cpus = num_online_cpus();
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws = &wsm->idle_ws;
	ws_lock = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait = &wsm->ws_wait;
	free_ws = &wsm->free_ws;

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	workspace = alloc_workspace(type, level);
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs))
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
		}
		goto again;
	}
	return workspace;
}

static struct list_head *get_workspace(int type, int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
	case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
void btrfs_put_workspace(int type, struct list_head *ws)
{
	struct workspace_manager *wsm;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws = &wsm->idle_ws;
	ws_lock = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait = &wsm->ws_wait;
	free_ws = &wsm->free_ws;

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(ws, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	free_workspace(type, ws);
	atomic_dec(total_ws);
wake:
	cond_wake_up(ws_wait);
}

static void put_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * Adjust @level according to the limits of the compression algorithm or
 * fallback to default
 */
static unsigned int btrfs_compress_set_level(int type, unsigned level)
{
	const struct btrfs_compress_op *ops = btrfs_compress_op[type];

	if (level == 0)
		level = ops->default_level;
	else
		level = min(level, ops->max_level);

	return level;
}
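
/*
 * Example: a level of 0 selects the algorithm's default_level, while a
 * level above max_level is clamped down to it, so e.g. "zstd:100" ends up
 * at zstd's advertised maximum rather than being rejected.
 */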

/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - the compression algorithm is stored in bits 0-3
 * - the level is stored in bits 4-7
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read. It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we crossed the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 */
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	int type = btrfs_compress_type(type_level);
	int level = btrfs_compress_level(type_level);
	struct list_head *workspace;
	int ret;

	level = btrfs_compress_set_level(type, level);
	workspace = get_workspace(type, level);
	ret = compression_compress_pages(type, workspace, mapping, start, pages,
					 out_pages, total_in, total_out);
	put_workspace(type, workspace);
	return ret;
}
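
/*
 * Example encoding: zlib at level 3 is passed as
 * type_level = BTRFS_COMPRESS_ZLIB | (3 << 4); btrfs_compress_type()
 * extracts the low nibble and btrfs_compress_level() the high one, per
 * the bit layout documented above.
 */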

static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = get_workspace(type, 0);
	ret = compression_decompress_bio(workspace, cb);
	put_workspace(type, workspace);

	if (!ret)
		zero_fill_bio(&cb->orig_bbio->bio);
	return ret;
}

/*
 * a less complex decompression routine. Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * @dest_pgoff tells us the offset into the destination page we're
 * interested in.
 */
int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
		     unsigned long dest_pgoff, size_t srclen, size_t destlen)
{
	struct btrfs_fs_info *fs_info = page_to_fs_info(dest_page);
	struct list_head *workspace;
	const u32 sectorsize = fs_info->sectorsize;
	int ret;

	/*
	 * The full destination page range should not exceed the page size.
	 * And the @destlen should not exceed sectorsize, as this is only
	 * called for inline file extents, which should not exceed sectorsize.
	 */
	ASSERT(dest_pgoff + destlen <= PAGE_SIZE && destlen <= sectorsize);

	workspace = get_workspace(type, 0);
	ret = compression_decompress(type, workspace, data_in, dest_page,
				     dest_pgoff, srclen, destlen);
	put_workspace(type, workspace);

	return ret;
}

int __init btrfs_init_compress(void)
{
	if (bioset_init(&btrfs_compressed_bioset, BIO_POOL_SIZE,
			offsetof(struct compressed_bio, bbio.bio),
			BIOSET_NEED_BVECS))
		return -ENOMEM;

	compr_pool.shrinker = shrinker_alloc(SHRINKER_NONSLAB, "btrfs-compr-pages");
	if (!compr_pool.shrinker)
		return -ENOMEM;

	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_init_workspace_manager();

	spin_lock_init(&compr_pool.lock);
	INIT_LIST_HEAD(&compr_pool.list);
	compr_pool.count = 0;
	/* 128K / 4K = 32, for 8 threads is 256 pages. */
	compr_pool.thresh = BTRFS_MAX_COMPRESSED / PAGE_SIZE * 8;
	compr_pool.shrinker->count_objects = btrfs_compr_pool_count;
	compr_pool.shrinker->scan_objects = btrfs_compr_pool_scan;
	compr_pool.shrinker->batch = 32;
	compr_pool.shrinker->seeks = DEFAULT_SEEKS;
	shrinker_register(compr_pool.shrinker);

	return 0;
}

void __cold btrfs_exit_compress(void)
{
	/* For now scan drains all pages and does not touch the parameters. */
	btrfs_compr_pool_scan(NULL, NULL);
	shrinker_free(compr_pool.shrinker);

	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_cleanup_workspace_manager();
	bioset_exit(&btrfs_compressed_bioset);
}

/*
 * Copy decompressed data from working buffer to pages.
 *
 * @buf:		The decompressed data buffer
 * @buf_len:		The decompressed data length
 * @decompressed:	Number of bytes that are already decompressed inside the
 *			compressed extent
 * @cb:			The compressed extent descriptor
 * @orig_bio:		The original bio that the caller wants to read for
 *
 * An easier to understand graph is like below:
 *
 *		|<- orig_bio ->|     |<- orig_bio->|
 *	|<-------      full decompressed extent      ----->|
 *	|<-----------    @cb range   ---->|
 *	|			|<-- @buf_len -->|
 *	|<--- @decompressed --->|
 *
 * Note that, @cb can be a subpage of the full decompressed extent, but
 * @cb->start always has the same value as orig_file_offset of the full
 * decompressed extent.
 *
 * When reading a compressed extent, we have to read the full compressed
 * extent, while @orig_bio may only want part of the range.
 * Thus this function will ensure only data covered by @orig_bio will be
 * copied to it.
 *
 * Return 0 if we have copied all needed contents for @orig_bio.
 * Return >0 if we need to continue decompressing.
 */
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
			      struct compressed_bio *cb, u32 decompressed)
{
	struct bio *orig_bio = &cb->orig_bbio->bio;
	/* Offset inside the full decompressed extent */
	u32 cur_offset;

	cur_offset = decompressed;
	/* The main loop to do the copy */
	while (cur_offset < decompressed + buf_len) {
		struct bio_vec bvec;
		size_t copy_len;
		u32 copy_start;
		/* Offset inside the full decompressed extent */
		u32 bvec_offset;

		bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter);
		/*
		 * cb->start may underflow, but subtracting that value can
		 * still give us correct offset inside the full decompressed
		 * extent.
		 */
		bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start;

		/* Haven't reached the bvec range, exit */
		if (decompressed + buf_len <= bvec_offset)
			return 1;

		copy_start = max(cur_offset, bvec_offset);
		copy_len = min(bvec_offset + bvec.bv_len,
			       decompressed + buf_len) - copy_start;
		ASSERT(copy_len);

		/*
		 * Extra range check to ensure we didn't go beyond
		 * @buf + @buf_len.
		 */
		ASSERT(copy_start - decompressed < buf_len);
		memcpy_to_page(bvec.bv_page, bvec.bv_offset,
			       buf + copy_start - decompressed, copy_len);
		cur_offset += copy_len;

		bio_advance(orig_bio, copy_len);
		/* Finished the bio */
		if (!orig_bio->bi_iter.bi_size)
			return 0;
	}
	return 1;
}

/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACCEPTABLE - below that threshold, sample has low byte entropy
 *			     and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACCEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)

/*
 * For increased precision in the shannon_entropy calculation, let's do
 * pow(n, M) to save more digits after the decimal point:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE) -> 13
 * - 13 * 4 = 52 < 64 -> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}
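
/*
 * Example: ilog2_w(8) = ilog2(8^4) = ilog2(4096) = 12 = 4 * log2(8), i.e.
 * the helper returns log2() scaled by 4, keeping two extra binary digits
 * of precision compared to plain ilog2().
 */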

static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}
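
/*
 * Worked example: if all 8192 sample bytes have one value, the single
 * bucket has p_base == sz_base and the result is 0%. If all 256 values
 * are equally likely, each bucket holds p = 32, sz_base - p_base =
 * ilog2_w(8192) - ilog2_w(32) = 52 - 20 = 32, so entropy_sum =
 * 256 * 32 * 32 / 8192 = 32 = entropy_max, i.e. 100%.
 */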

#define RADIX_BASE		4U
#define COUNTERS_SIZE		(1U << RADIX_BASE)

static u8 get4bits(u64 num, int shift)
{
	u8 low4bits;

	num >>= shift;
	/* Reverse order */
	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
	return low4bits;
}
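
/*
 * Example: with shift = 0, a count of 5 maps to 15 - (5 % 16) = 10 while
 * a count of 12 maps to 3; larger digits get smaller bucket indexes, so
 * the radix sort below produces a descending order.
 */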

/*
 * Use 4 bits as radix base
 * Use 16 u32 counters for calculating new position in buf array
 *
 * @array     - array that will be sorted
 * @array_buf - buffer array to store sorting results
 *              must be equal in size to @array
 * @num       - array size
 */
static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
		       int num)
{
	u64 max_num;
	u64 buf_num;
	u32 counters[COUNTERS_SIZE];
	u32 new_addr;
	u32 addr;
	int bitlen;
	int shift;
	int i;

	/*
	 * Try to avoid useless loop iterations for small numbers stored in big
	 * counters. Example: 48 33 4 ... in 64bit array
	 */
	max_num = array[0].count;
	for (i = 1; i < num; i++) {
		buf_num = array[i].count;
		if (buf_num > max_num)
			max_num = buf_num;
	}

	buf_num = ilog2(max_num);
	bitlen = ALIGN(buf_num, RADIX_BASE * 2);

	shift = 0;
	while (shift < bitlen) {
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array_buf[new_addr] = array[i];
		}

		shift += RADIX_BASE;

		/*
		 * Normal radix expects to move data from a temporary array, to
		 * the main one. But that requires some CPU time. Avoid that
		 * by doing another sort iteration to original array instead of
		 * memcpy()
		 */
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array[new_addr] = array_buf[i];
		}

		shift += RADIX_BASE;
	}
}

/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data). Unlikely to be compressible.
 *
 * Other possibility is normal (Gaussian) distribution, where the data could
 * be potentially compressible, but we have to take a few more steps to decide
 * how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *                       compression algorithms can easily handle that
 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
 *                       probability are not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)

static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);

	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
		coreset_sum += bucket[i].count;

	if (coreset_sum > core_set_threshold)
		return i;

	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
		coreset_sum += bucket[i].count;
		if (coreset_sum > core_set_threshold)
			break;
	}

	return i;
}

/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * In most text-like data the byte set is restricted to a limited number of
 * possible characters, and that restriction in most cases makes the data
 * easy to compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)

static u32 byte_set_size(const struct heuristic_ws *ws)
{
	u32 i;
	u32 byte_set_size = 0;

	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
		if (ws->bucket[i].count > 0)
			byte_set_size++;
	}

	/*
	 * Continue collecting count of byte values in buckets. If the byte
	 * set size is bigger than the threshold, it's pointless to continue,
	 * the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
		if (ws->bucket[i].count > 0) {
			byte_set_size++;
			if (byte_set_size > BYTE_SET_THRESHOLD)
				return byte_set_size;
		}
	}

	return byte_set_size;
}

static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
	const u32 half_of_sample = ws->sample_size / 2;
	const u8 *data = ws->sample;

	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}

static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!PAGE_ALIGNED(end))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap_local_page(page);
		/* Handle case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
			       SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap_local(in_data);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}
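
/*
 * Example: for a range starting at offset 0 the loop above copies bytes
 * [0, 16), [256, 272), [512, 528), ... into ws->sample, one 16 byte read
 * every 256 bytes, skipping reads that would not fit in the current page.
 */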

/*
 * Compression heuristic.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	struct list_head *ws_list = get_workspace(0, 0);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 0;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(inode, start, end, ws);

	if (sample_repeated_patterns(ws)) {
		ret = 1;
		goto out;
	}

	memset(ws->bucket, 0, sizeof(*ws->bucket) * BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	i = byte_set_size(ws);
	if (i < BYTE_SET_THRESHOLD) {
		ret = 2;
		goto out;
	}

	i = byte_core_set_size(ws);
	if (i <= BYTE_CORE_SET_LOW) {
		ret = 3;
		goto out;
	}

	if (i >= BYTE_CORE_SET_HIGH) {
		ret = 0;
		goto out;
	}

	i = shannon_entropy(ws);
	if (i <= ENTROPY_LVL_ACCEPTABLE) {
		ret = 4;
		goto out;
	}

	/*
	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
	 * needed to give green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
	 * values, every bucket has counter at level ~54. The heuristic would
	 * be confused. This can happen when data have some internal repeated
	 * patterns like "abbacbbc...". This can be detected by analyzing
	 * pairs of bytes, which is too costly.
	 */
	if (i < ENTROPY_LVL_HIGH) {
		ret = 5;
		goto out;
	} else {
		ret = 0;
		goto out;
	}

out:
	put_workspace(0, ws_list);
	return ret;
}
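
/*
 * Summary of the non-zero return values above: 1 - repeated pattern,
 * 2 - small byte set, 3 - small core byte set, 4 - acceptable entropy,
 * 5 - medium entropy (compression still advised); 0 means the data is
 * probably not compressible.
 */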

/*
 * Convert the compression suffix (eg. after "zlib" starting with ":") to
 * level, an unrecognized string will set the default level
 */
unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
{
	unsigned int level = 0;
	int ret;

	if (!type)
		return 0;

	if (str[0] == ':') {
		ret = kstrtouint(str + 1, 10, &level);
		if (ret)
			level = 0;
	}

	level = btrfs_compress_set_level(type, level);

	return level;
}
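
/*
 * Example: for the mount option "compress=zlib:9" the suffix ":9" parses
 * to level 9; "zlib" alone or an unparsable suffix leaves level at 0,
 * which btrfs_compress_set_level() then replaces with the algorithm's
 * default.
 */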