compression.c revision 1c3dc173
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2008 Oracle.  All rights reserved.
4 */
5
6#include <linux/kernel.h>
7#include <linux/bio.h>
8#include <linux/file.h>
9#include <linux/fs.h>
10#include <linux/pagemap.h>
11#include <linux/highmem.h>
12#include <linux/time.h>
13#include <linux/init.h>
14#include <linux/string.h>
15#include <linux/backing-dev.h>
16#include <linux/writeback.h>
17#include <linux/slab.h>
18#include <linux/sched/mm.h>
19#include <linux/log2.h>
20#include <crypto/hash.h>
21#include "misc.h"
22#include "ctree.h"
23#include "disk-io.h"
24#include "transaction.h"
25#include "btrfs_inode.h"
26#include "volumes.h"
27#include "ordered-data.h"
28#include "compression.h"
29#include "extent_io.h"
30#include "extent_map.h"
31#include "zoned.h"
32
33static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
34
35const char* btrfs_compress_type2str(enum btrfs_compression_type type)
36{
37	switch (type) {
38	case BTRFS_COMPRESS_ZLIB:
39	case BTRFS_COMPRESS_LZO:
40	case BTRFS_COMPRESS_ZSTD:
41	case BTRFS_COMPRESS_NONE:
42		return btrfs_compress_types[type];
43	default:
44		break;
45	}
46
47	return NULL;
48}
49
50bool btrfs_compress_is_valid_type(const char *str, size_t len)
51{
52	int i;
53
54	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
55		size_t comp_len = strlen(btrfs_compress_types[i]);
56
57		if (len < comp_len)
58			continue;
59
60		if (!strncmp(btrfs_compress_types[i], str, comp_len))
61			return true;
62	}
63	return false;
64}
65
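/*
 * Dispatch helper: hand the page range off to the selected compression
 * algorithm's *_compress_pages() implementation.  BTRFS_COMPRESS_NONE only
 * shows up here when a remount raced with us (see the comment below).
 */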
66static int compression_compress_pages(int type, struct list_head *ws,
67               struct address_space *mapping, u64 start, struct page **pages,
68               unsigned long *out_pages, unsigned long *total_in,
69               unsigned long *total_out)
70{
71	switch (type) {
72	case BTRFS_COMPRESS_ZLIB:
73		return zlib_compress_pages(ws, mapping, start, pages,
74				out_pages, total_in, total_out);
75	case BTRFS_COMPRESS_LZO:
76		return lzo_compress_pages(ws, mapping, start, pages,
77				out_pages, total_in, total_out);
78	case BTRFS_COMPRESS_ZSTD:
79		return zstd_compress_pages(ws, mapping, start, pages,
80				out_pages, total_in, total_out);
81	case BTRFS_COMPRESS_NONE:
82	default:
83		/*
84		 * This can happen when compression races with remount setting
85		 * it to 'no compress', while the caller doesn't call
86		 * inode_need_compress() to check if we really need to
87		 * compress.
88		 *
89		 * Not a big deal, just need to inform the caller that we
90		 * haven't allocated any pages yet.
91		 */
92		*out_pages = 0;
93		return -E2BIG;
94	}
95}
96
97static int compression_decompress_bio(int type, struct list_head *ws,
98		struct compressed_bio *cb)
99{
100	switch (type) {
101	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
102	case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
103	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
104	case BTRFS_COMPRESS_NONE:
105	default:
106		/*
107		 * This can't happen, the type is validated several times
108		 * before we get here.
109		 */
110		BUG();
111	}
112}
113
114static int compression_decompress(int type, struct list_head *ws,
115               unsigned char *data_in, struct page *dest_page,
116               unsigned long start_byte, size_t srclen, size_t destlen)
117{
118	switch (type) {
119	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
120						start_byte, srclen, destlen);
121	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
122						start_byte, srclen, destlen);
123	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
124						start_byte, srclen, destlen);
125	case BTRFS_COMPRESS_NONE:
126	default:
127		/*
128		 * This can't happen, the type is validated several times
129		 * before we get here.
130		 */
131		BUG();
132	}
133}
134
135static int btrfs_decompress_bio(struct compressed_bio *cb);
136
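/*
 * Size of the allocation for a struct compressed_bio: the struct itself plus
 * room for one csum per on-disk sector of the compressed data (stored in
 * cb->sums).
 */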
137static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
138				      unsigned long disk_size)
139{
140	return sizeof(struct compressed_bio) +
141		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * fs_info->csum_size;
142}
143
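/*
 * Verify the checksums of the read compressed sectors against the csums we
 * looked up at submit time (cb->sums).  Returns 0 on success or when csums
 * are disabled for the inode, -EIO on a mismatch.
 */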
144static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
145				 u64 disk_start)
146{
147	struct btrfs_fs_info *fs_info = inode->root->fs_info;
148	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
149	const u32 csum_size = fs_info->csum_size;
150	const u32 sectorsize = fs_info->sectorsize;
151	struct page *page;
152	unsigned int i;
153	char *kaddr;
154	u8 csum[BTRFS_CSUM_SIZE];
155	struct compressed_bio *cb = bio->bi_private;
156	u8 *cb_sum = cb->sums;
157
158	if (!fs_info->csum_root || (inode->flags & BTRFS_INODE_NODATASUM))
159		return 0;
160
161	shash->tfm = fs_info->csum_shash;
162
163	for (i = 0; i < cb->nr_pages; i++) {
164		u32 pg_offset;
165		u32 bytes_left = PAGE_SIZE;
166		page = cb->compressed_pages[i];
167
168		/* Determine the remaining bytes inside the page first */
169		if (i == cb->nr_pages - 1)
170			bytes_left = cb->compressed_len - i * PAGE_SIZE;
171
172		/* Hash through the page sector by sector */
173		for (pg_offset = 0; pg_offset < bytes_left;
174		     pg_offset += sectorsize) {
175			kaddr = page_address(page);
176			crypto_shash_digest(shash, kaddr + pg_offset,
177					    sectorsize, csum);
178
179			if (memcmp(&csum, cb_sum, csum_size) != 0) {
180				btrfs_print_data_csum_error(inode, disk_start,
181						csum, cb_sum, cb->mirror_num);
182				if (btrfs_io_bio(bio)->device)
183					btrfs_dev_stat_inc_and_print(
184						btrfs_io_bio(bio)->device,
185						BTRFS_DEV_STAT_CORRUPTION_ERRS);
186				return -EIO;
187			}
188			cb_sum += csum_size;
189			disk_start += sectorsize;
190		}
191	}
192	return 0;
193}
194
195/* When we finish reading compressed pages from the disk, we
196 * decompress them and then run the bio end_io routines on the
197 * decompressed pages (in the inode address space).
198 *
199 * This allows the checksumming and other IO error handling routines
200 * to work normally.
201 *
202 * The compressed pages are freed here, and this must be run
203 * in process context.
204 */
205static void end_compressed_bio_read(struct bio *bio)
206{
207	struct compressed_bio *cb = bio->bi_private;
208	struct inode *inode;
209	struct page *page;
210	unsigned int index;
211	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
212	int ret = 0;
213
214	if (bio->bi_status)
215		cb->errors = 1;
216
217	/* if there are more bios still pending for this compressed
218	 * extent, just exit
219	 */
220	if (!refcount_dec_and_test(&cb->pending_bios))
221		goto out;
222
223	/*
224	 * Record the correct mirror_num in cb->orig_bio so that
225	 * read-repair can work properly.
226	 */
227	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
228	cb->mirror_num = mirror;
229
230	/*
231	 * Some IO in this cb has failed, just skip the checksum as there
232	 * is no way it could be correct.
233	 */
234	if (cb->errors == 1)
235		goto csum_failed;
236
237	inode = cb->inode;
238	ret = check_compressed_csum(BTRFS_I(inode), bio,
239				    bio->bi_iter.bi_sector << 9);
240	if (ret)
241		goto csum_failed;
242
243	/* ok, we're the last bio for this extent, let's start
244	 * the decompression.
245	 */
246	ret = btrfs_decompress_bio(cb);
247
248csum_failed:
249	if (ret)
250		cb->errors = 1;
251
252	/* release the compressed pages */
253	index = 0;
254	for (index = 0; index < cb->nr_pages; index++) {
255		page = cb->compressed_pages[index];
256		page->mapping = NULL;
257		put_page(page);
258	}
259
260	/* do io completion on the original bio */
261	if (cb->errors) {
262		bio_io_error(cb->orig_bio);
263	} else {
264		struct bio_vec *bvec;
265		struct bvec_iter_all iter_all;
266
267		/*
268		 * we have verified the checksum already, set page
269		 * checked so the end_io handlers know about it
270		 */
271		ASSERT(!bio_flagged(bio, BIO_CLONED));
272		bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
273			SetPageChecked(bvec->bv_page);
274
275		bio_endio(cb->orig_bio);
276	}
277
278	/* finally free the cb struct */
279	kfree(cb->compressed_pages);
280	kfree(cb);
281out:
282	bio_put(bio);
283}
284
285/*
286 * Clear the writeback bits on all of the file
287 * pages for a compressed write
288 */
289static noinline void end_compressed_writeback(struct inode *inode,
290					      const struct compressed_bio *cb)
291{
292	unsigned long index = cb->start >> PAGE_SHIFT;
293	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
294	struct page *pages[16];
295	unsigned long nr_pages = end_index - index + 1;
296	int i;
297	int ret;
298
299	if (cb->errors)
300		mapping_set_error(inode->i_mapping, -EIO);
301
302	while (nr_pages > 0) {
303		ret = find_get_pages_contig(inode->i_mapping, index,
304				     min_t(unsigned long,
305				     nr_pages, ARRAY_SIZE(pages)), pages);
306		if (ret == 0) {
307			nr_pages -= 1;
308			index += 1;
309			continue;
310		}
311		for (i = 0; i < ret; i++) {
312			if (cb->errors)
313				SetPageError(pages[i]);
314			end_page_writeback(pages[i]);
315			put_page(pages[i]);
316		}
317		nr_pages -= ret;
318		index += ret;
319	}
320	/* the inode may be gone now */
321}
322
323/*
324 * do the cleanup once all the compressed pages hit the disk.
325 * This will clear writeback on the file pages and free the compressed
326 * pages.
327 *
328 * This also calls the writeback end hooks for the file pages so that
329 * metadata and checksums can be updated in the file.
330 */
331static void end_compressed_bio_write(struct bio *bio)
332{
333	struct compressed_bio *cb = bio->bi_private;
334	struct inode *inode;
335	struct page *page;
336	unsigned int index;
337
338	if (bio->bi_status)
339		cb->errors = 1;
340
341	/* if there are more bios still pending for this compressed
342	 * extent, just exit
343	 */
344	if (!refcount_dec_and_test(&cb->pending_bios))
345		goto out;
346
347	/* ok, we're the last bio for this extent, step one is to
348	 * call back into the FS and do all the end_io operations
349	 */
350	inode = cb->inode;
351	btrfs_record_physical_zoned(inode, cb->start, bio);
352	btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL,
353			cb->start, cb->start + cb->len - 1,
354			!cb->errors);
355
356	end_compressed_writeback(inode, cb);
357	/* note, our inode could be gone now */
358
359	/*
360	 * release the compressed pages, these came from alloc_page and
361	 * are not attached to the inode at all
362	 */
363	index = 0;
364	for (index = 0; index < cb->nr_pages; index++) {
365		page = cb->compressed_pages[index];
366		page->mapping = NULL;
367		put_page(page);
368	}
369
370	/* finally free the cb struct */
371	kfree(cb->compressed_pages);
372	kfree(cb);
373out:
374	bio_put(bio);
375}
376
377/*
378 * worker function to build and submit bios for previously compressed pages.
379 * The corresponding pages in the inode should be marked for writeback
380 * and the compressed pages should have a reference on them for dropping
381 * when the IO is complete.
382 *
383 * This also checksums the file bytes and gets things ready for
384 * the end io hooks.
385 */
386blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
387				 unsigned int len, u64 disk_start,
388				 unsigned int compressed_len,
389				 struct page **compressed_pages,
390				 unsigned int nr_pages,
391				 unsigned int write_flags,
392				 struct cgroup_subsys_state *blkcg_css)
393{
394	struct btrfs_fs_info *fs_info = inode->root->fs_info;
395	struct bio *bio = NULL;
396	struct compressed_bio *cb;
397	unsigned long bytes_left;
398	int pg_index = 0;
399	struct page *page;
400	u64 first_byte = disk_start;
401	blk_status_t ret;
402	int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
403	const bool use_append = btrfs_use_zone_append(inode, disk_start);
404	const unsigned int bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;
405
406	WARN_ON(!PAGE_ALIGNED(start));
407	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
408	if (!cb)
409		return BLK_STS_RESOURCE;
410	refcount_set(&cb->pending_bios, 0);
411	cb->errors = 0;
412	cb->inode = &inode->vfs_inode;
413	cb->start = start;
414	cb->len = len;
415	cb->mirror_num = 0;
416	cb->compressed_pages = compressed_pages;
417	cb->compressed_len = compressed_len;
418	cb->orig_bio = NULL;
419	cb->nr_pages = nr_pages;
420
421	bio = btrfs_bio_alloc(first_byte);
422	bio->bi_opf = bio_op | write_flags;
423	bio->bi_private = cb;
424	bio->bi_end_io = end_compressed_bio_write;
425
426	if (use_append) {
427		struct btrfs_device *device;
428
429		device = btrfs_zoned_get_device(fs_info, disk_start, PAGE_SIZE);
430		if (IS_ERR(device)) {
431			kfree(cb);
432			bio_put(bio);
433			return BLK_STS_NOTSUPP;
434		}
435
436		bio_set_dev(bio, device->bdev);
437	}
438
439	if (blkcg_css) {
440		bio->bi_opf |= REQ_CGROUP_PUNT;
441		kthread_associate_blkcg(blkcg_css);
442	}
443	refcount_set(&cb->pending_bios, 1);
444
445	/* create and submit bios for the compressed pages */
446	bytes_left = compressed_len;
447	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
448		int submit = 0;
449		int len = 0;
450
451		page = compressed_pages[pg_index];
452		page->mapping = inode->vfs_inode.i_mapping;
453		if (bio->bi_iter.bi_size)
454			submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
455							  0);
456
457		/*
458		 * Page can only be added to bio if the current bio fits in
459		 * stripe.
460		 */
461		if (!submit) {
462			if (pg_index == 0 && use_append)
463				len = bio_add_zone_append_page(bio, page,
464							       PAGE_SIZE, 0);
465			else
466				len = bio_add_page(bio, page, PAGE_SIZE, 0);
467		}
468
469		page->mapping = NULL;
470		if (submit || len < PAGE_SIZE) {
471			/*
472			 * Inc the count before we submit the bio so
473			 * we know the end IO handler won't run before
474			 * the count is incremented.  Otherwise, the cb might
475			 * get freed before we're done setting it up
476			 */
477			refcount_inc(&cb->pending_bios);
478			ret = btrfs_bio_wq_end_io(fs_info, bio,
479						  BTRFS_WQ_ENDIO_DATA);
480			BUG_ON(ret); /* -ENOMEM */
481
482			if (!skip_sum) {
483				ret = btrfs_csum_one_bio(inode, bio, start, 1);
484				BUG_ON(ret); /* -ENOMEM */
485			}
486
487			ret = btrfs_map_bio(fs_info, bio, 0);
488			if (ret) {
489				bio->bi_status = ret;
490				bio_endio(bio);
491			}
492
493			bio = btrfs_bio_alloc(first_byte);
494			bio->bi_opf = bio_op | write_flags;
495			bio->bi_private = cb;
496			bio->bi_end_io = end_compressed_bio_write;
497			if (blkcg_css)
498				bio->bi_opf |= REQ_CGROUP_PUNT;
499			/*
500			 * Use bio_add_page() to ensure the bio has at least one
501			 * page.
502			 */
503			bio_add_page(bio, page, PAGE_SIZE, 0);
504		}
505		if (bytes_left < PAGE_SIZE) {
506			btrfs_info(fs_info,
507					"bytes left %lu compress len %u nr %u",
508			       bytes_left, cb->compressed_len, cb->nr_pages);
509		}
510		bytes_left -= PAGE_SIZE;
511		first_byte += PAGE_SIZE;
512		cond_resched();
513	}
514
515	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
516	BUG_ON(ret); /* -ENOMEM */
517
518	if (!skip_sum) {
519		ret = btrfs_csum_one_bio(inode, bio, start, 1);
520		BUG_ON(ret); /* -ENOMEM */
521	}
522
523	ret = btrfs_map_bio(fs_info, bio, 0);
524	if (ret) {
525		bio->bi_status = ret;
526		bio_endio(bio);
527	}
528
529	if (blkcg_css)
530		kthread_associate_blkcg(NULL);
531
532	return 0;
533}
534
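/* Return the file offset just past the last byte currently added to @bio */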
535static u64 bio_end_offset(struct bio *bio)
536{
537	struct bio_vec *last = bio_last_bvec_all(bio);
538
539	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
540}
541
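/*
 * Speculatively add pages beyond the original bio to cb->orig_bio, as long as
 * they are part of the same compressed extent (up to @compressed_end), so the
 * upcoming decompression can fill them as well, giving us readahead for free.
 * Gives up at i_size, after a few pages that are already cached, or at the
 * first page whose extent mapping does not match this compressed extent.
 */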
542static noinline int add_ra_bio_pages(struct inode *inode,
543				     u64 compressed_end,
544				     struct compressed_bio *cb)
545{
546	unsigned long end_index;
547	unsigned long pg_index;
548	u64 last_offset;
549	u64 isize = i_size_read(inode);
550	int ret;
551	struct page *page;
552	unsigned long nr_pages = 0;
553	struct extent_map *em;
554	struct address_space *mapping = inode->i_mapping;
555	struct extent_map_tree *em_tree;
556	struct extent_io_tree *tree;
557	u64 end;
558	int misses = 0;
559
560	last_offset = bio_end_offset(cb->orig_bio);
561	em_tree = &BTRFS_I(inode)->extent_tree;
562	tree = &BTRFS_I(inode)->io_tree;
563
564	if (isize == 0)
565		return 0;
566
567	/*
568	 * For current subpage support, we only support 64K page size,
569	 * which means the maximum compressed extent size (128K) is just 2x the
570	 * page size.
571	 * This makes readahead less effective, so disable readahead for
572	 * subpage for now, until full compressed write is supported.
573	 */
574	if (btrfs_sb(inode->i_sb)->sectorsize < PAGE_SIZE)
575		return 0;
576
577	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
578
579	while (last_offset < compressed_end) {
580		pg_index = last_offset >> PAGE_SHIFT;
581
582		if (pg_index > end_index)
583			break;
584
585		page = xa_load(&mapping->i_pages, pg_index);
586		if (page && !xa_is_value(page)) {
587			misses++;
588			if (misses > 4)
589				break;
590			goto next;
591		}
592
593		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
594								 ~__GFP_FS));
595		if (!page)
596			break;
597
598		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
599			put_page(page);
600			goto next;
601		}
602
603		/*
604		 * at this point, we have a locked page in the page cache
605		 * for these bytes in the file.  But, we have to make
606		 * sure they map to this compressed extent on disk.
607		 */
608		ret = set_page_extent_mapped(page);
609		if (ret < 0) {
610			unlock_page(page);
611			put_page(page);
612			break;
613		}
614
615		end = last_offset + PAGE_SIZE - 1;
616		lock_extent(tree, last_offset, end);
617		read_lock(&em_tree->lock);
618		em = lookup_extent_mapping(em_tree, last_offset,
619					   PAGE_SIZE);
620		read_unlock(&em_tree->lock);
621
622		if (!em || last_offset < em->start ||
623		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
624		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
625			free_extent_map(em);
626			unlock_extent(tree, last_offset, end);
627			unlock_page(page);
628			put_page(page);
629			break;
630		}
631		free_extent_map(em);
632
633		if (page->index == end_index) {
634			size_t zero_offset = offset_in_page(isize);
635
636			if (zero_offset) {
637				int zeros;
638				zeros = PAGE_SIZE - zero_offset;
639				memzero_page(page, zero_offset, zeros);
640				flush_dcache_page(page);
641			}
642		}
643
644		ret = bio_add_page(cb->orig_bio, page,
645				   PAGE_SIZE, 0);
646
647		if (ret == PAGE_SIZE) {
648			nr_pages++;
649			put_page(page);
650		} else {
651			unlock_extent(tree, last_offset, end);
652			unlock_page(page);
653			put_page(page);
654			break;
655		}
656next:
657		last_offset += PAGE_SIZE;
658	}
659	return 0;
660}
661
662/*
663 * for a compressed read, the bio we get passed has all the inode pages
664 * in it.  We don't actually do IO on those pages but allocate new ones
665 * to hold the compressed pages on disk.
666 *
667 * bio->bi_iter.bi_sector points to the compressed extent on disk
668 * bio->bi_io_vec points to all of the inode pages
669 *
670 * After the compressed pages are read, we copy the bytes into the
671 * bio we were passed and then call the bio end_io calls
672 */
673blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
674				 int mirror_num, unsigned long bio_flags)
675{
676	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
677	struct extent_map_tree *em_tree;
678	struct compressed_bio *cb;
679	unsigned int compressed_len;
680	unsigned int nr_pages;
681	unsigned int pg_index;
682	struct page *page;
683	struct bio *comp_bio;
684	u64 cur_disk_byte = bio->bi_iter.bi_sector << 9;
685	u64 file_offset;
686	u64 em_len;
687	u64 em_start;
688	struct extent_map *em;
689	blk_status_t ret = BLK_STS_RESOURCE;
690	int faili = 0;
691	u8 *sums;
692
693	em_tree = &BTRFS_I(inode)->extent_tree;
694
695	file_offset = bio_first_bvec_all(bio)->bv_offset +
696		      page_offset(bio_first_page_all(bio));
697
698	/* we need the actual starting offset of this extent in the file */
699	read_lock(&em_tree->lock);
700	em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
701	read_unlock(&em_tree->lock);
702	if (!em)
703		return BLK_STS_IOERR;
704
705	ASSERT(em->compress_type != BTRFS_COMPRESS_NONE);
706	compressed_len = em->block_len;
707	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
708	if (!cb)
709		goto out;
710
711	refcount_set(&cb->pending_bios, 0);
712	cb->errors = 0;
713	cb->inode = inode;
714	cb->mirror_num = mirror_num;
715	sums = cb->sums;
716
717	cb->start = em->orig_start;
718	em_len = em->len;
719	em_start = em->start;
720
721	free_extent_map(em);
722	em = NULL;
723
724	cb->len = bio->bi_iter.bi_size;
725	cb->compressed_len = compressed_len;
726	cb->compress_type = extent_compress_type(bio_flags);
727	cb->orig_bio = bio;
728
729	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
730	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
731				       GFP_NOFS);
732	if (!cb->compressed_pages)
733		goto fail1;
734
735	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
736		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS);
737		if (!cb->compressed_pages[pg_index]) {
738			faili = pg_index - 1;
739			ret = BLK_STS_RESOURCE;
740			goto fail2;
741		}
742	}
743	faili = nr_pages - 1;
744	cb->nr_pages = nr_pages;
745
746	add_ra_bio_pages(inode, em_start + em_len, cb);
747
748	/* include any pages we added in add_ra_bio_pages */
749	cb->len = bio->bi_iter.bi_size;
750
751	comp_bio = btrfs_bio_alloc(cur_disk_byte);
752	comp_bio->bi_opf = REQ_OP_READ;
753	comp_bio->bi_private = cb;
754	comp_bio->bi_end_io = end_compressed_bio_read;
755	refcount_set(&cb->pending_bios, 1);
756
757	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
758		u32 pg_len = PAGE_SIZE;
759		int submit = 0;
760
761		/*
762		 * To handle subpage case, we need to make sure the bio only
763		 * covers the range we need.
764		 *
765		 * If we're at the last page, truncate the length to only cover
766		 * the remaining part.
767		 */
768		if (pg_index == nr_pages - 1)
769			pg_len = min_t(u32, PAGE_SIZE,
770					compressed_len - pg_index * PAGE_SIZE);
771
772		page = cb->compressed_pages[pg_index];
773		page->mapping = inode->i_mapping;
774		page->index = em_start >> PAGE_SHIFT;
775
776		if (comp_bio->bi_iter.bi_size)
777			submit = btrfs_bio_fits_in_stripe(page, pg_len,
778							  comp_bio, 0);
779
780		page->mapping = NULL;
781		if (submit || bio_add_page(comp_bio, page, pg_len, 0) < pg_len) {
782			unsigned int nr_sectors;
783
784			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
785						  BTRFS_WQ_ENDIO_DATA);
786			BUG_ON(ret); /* -ENOMEM */
787
788			/*
789			 * Inc the count before we submit the bio so
790			 * we know the end IO handler won't run before
791			 * the count is incremented.  Otherwise, the cb might
792			 * get freed before we're done setting it up
793			 */
794			refcount_inc(&cb->pending_bios);
795
796			ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
797			BUG_ON(ret); /* -ENOMEM */
798
799			nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
800						  fs_info->sectorsize);
801			sums += fs_info->csum_size * nr_sectors;
802
803			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
804			if (ret) {
805				comp_bio->bi_status = ret;
806				bio_endio(comp_bio);
807			}
808
809			comp_bio = btrfs_bio_alloc(cur_disk_byte);
810			comp_bio->bi_opf = REQ_OP_READ;
811			comp_bio->bi_private = cb;
812			comp_bio->bi_end_io = end_compressed_bio_read;
813
814			bio_add_page(comp_bio, page, pg_len, 0);
815		}
816		cur_disk_byte += pg_len;
817	}
818
819	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
820	BUG_ON(ret); /* -ENOMEM */
821
822	ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
823	BUG_ON(ret); /* -ENOMEM */
824
825	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
826	if (ret) {
827		comp_bio->bi_status = ret;
828		bio_endio(comp_bio);
829	}
830
831	return 0;
832
833fail2:
834	while (faili >= 0) {
835		__free_page(cb->compressed_pages[faili]);
836		faili--;
837	}
838
839	kfree(cb->compressed_pages);
840fail1:
841	kfree(cb);
842out:
843	free_extent_map(em);
844	return ret;
845}
846
847/*
848 * Heuristic uses systematic sampling to collect data from the input data
849 * range; the logic can be tuned by the following constants:
850 *
851 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
852 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
853 */
854#define SAMPLING_READ_SIZE	(16)
855#define SAMPLING_INTERVAL	(256)
856
857/*
858 * For statistical analysis of the input data we consider bytes that form a
859 * Galois Field of 256 objects. Each object has an attribute count, ie. how
860 * many times the object appeared in the sample.
861 */
862#define BUCKET_SIZE		(256)
863
864/*
865 * The size of the sample is based on a statistical sampling rule of thumb.
866 * The common way is to perform sampling tests as long as the number of
867 * elements in each cell is at least 5.
868 *
869 * Instead of 5, we choose 32 to obtain more accurate results.
870 * If the data contain the maximum number of symbols, which is 256, we obtain a
871 * sample size bound by 8192.
872 *
873 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
874 * from up to 512 locations.
875 */
876#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
877				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
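/*
 * With the constants above this works out to (128K / 256) * 16 == 8192 bytes,
 * matching the 8KiB bound mentioned above.
 */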
878
879struct bucket_item {
880	u32 count;
881};
882
883struct heuristic_ws {
884	/* Partial copy of input data */
885	u8 *sample;
886	u32 sample_size;
887	/* Buckets store counters for each byte value */
888	struct bucket_item *bucket;
889	/* Sorting buffer */
890	struct bucket_item *bucket_b;
891	struct list_head list;
892};
893
894static struct workspace_manager heuristic_wsm;
895
896static void free_heuristic_ws(struct list_head *ws)
897{
898	struct heuristic_ws *workspace;
899
900	workspace = list_entry(ws, struct heuristic_ws, list);
901
902	kvfree(workspace->sample);
903	kfree(workspace->bucket);
904	kfree(workspace->bucket_b);
905	kfree(workspace);
906}
907
908static struct list_head *alloc_heuristic_ws(unsigned int level)
909{
910	struct heuristic_ws *ws;
911
912	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
913	if (!ws)
914		return ERR_PTR(-ENOMEM);
915
916	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
917	if (!ws->sample)
918		goto fail;
919
920	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
921	if (!ws->bucket)
922		goto fail;
923
924	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
925	if (!ws->bucket_b)
926		goto fail;
927
928	INIT_LIST_HEAD(&ws->list);
929	return &ws->list;
930fail:
931	free_heuristic_ws(&ws->list);
932	return ERR_PTR(-ENOMEM);
933}
934
935const struct btrfs_compress_op btrfs_heuristic_compress = {
936	.workspace_manager = &heuristic_wsm,
937};
938
939static const struct btrfs_compress_op * const btrfs_compress_op[] = {
940	/* The heuristic is represented as compression type 0 */
941	&btrfs_heuristic_compress,
942	&btrfs_zlib_compress,
943	&btrfs_lzo_compress,
944	&btrfs_zstd_compress,
945};
946
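/*
 * Allocate a per-type workspace: the heuristic workspace for
 * BTRFS_COMPRESS_NONE, otherwise the workspace of the selected compression
 * algorithm.
 */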
947static struct list_head *alloc_workspace(int type, unsigned int level)
948{
949	switch (type) {
950	case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
951	case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
952	case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
953	case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
954	default:
955		/*
956		 * This can't happen, the type is validated several times
957		 * before we get here.
958		 */
959		BUG();
960	}
961}
962
963static void free_workspace(int type, struct list_head *ws)
964{
965	switch (type) {
966	case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
967	case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
968	case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
969	case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
970	default:
971		/*
972		 * This can't happen, the type is validated several times
973		 * before we get here.
974		 */
975		BUG();
976	}
977}
978
979static void btrfs_init_workspace_manager(int type)
980{
981	struct workspace_manager *wsm;
982	struct list_head *workspace;
983
984	wsm = btrfs_compress_op[type]->workspace_manager;
985	INIT_LIST_HEAD(&wsm->idle_ws);
986	spin_lock_init(&wsm->ws_lock);
987	atomic_set(&wsm->total_ws, 0);
988	init_waitqueue_head(&wsm->ws_wait);
989
990	/*
991	 * Preallocate one workspace for each compression type so we can
992	 * guarantee forward progress in the worst case
993	 */
994	workspace = alloc_workspace(type, 0);
995	if (IS_ERR(workspace)) {
996		pr_warn(
997	"BTRFS: cannot preallocate compression workspace, will try later\n");
998	} else {
999		atomic_set(&wsm->total_ws, 1);
1000		wsm->free_ws = 1;
1001		list_add(workspace, &wsm->idle_ws);
1002	}
1003}
1004
1005static void btrfs_cleanup_workspace_manager(int type)
1006{
1007	struct workspace_manager *wsman;
1008	struct list_head *ws;
1009
1010	wsman = btrfs_compress_op[type]->workspace_manager;
1011	while (!list_empty(&wsman->idle_ws)) {
1012		ws = wsman->idle_ws.next;
1013		list_del(ws);
1014		free_workspace(type, ws);
1015		atomic_dec(&wsman->total_ws);
1016	}
1017}
1018
1019/*
1020 * This finds an available workspace or allocates a new one.
1021 * If it's not possible to allocate a new one, it waits until one is available.
1022 * Preallocation provides a forward progress guarantee and we do not return
1023 * errors.
1024 */
1025struct list_head *btrfs_get_workspace(int type, unsigned int level)
1026{
1027	struct workspace_manager *wsm;
1028	struct list_head *workspace;
1029	int cpus = num_online_cpus();
1030	unsigned nofs_flag;
1031	struct list_head *idle_ws;
1032	spinlock_t *ws_lock;
1033	atomic_t *total_ws;
1034	wait_queue_head_t *ws_wait;
1035	int *free_ws;
1036
1037	wsm = btrfs_compress_op[type]->workspace_manager;
1038	idle_ws	 = &wsm->idle_ws;
1039	ws_lock	 = &wsm->ws_lock;
1040	total_ws = &wsm->total_ws;
1041	ws_wait	 = &wsm->ws_wait;
1042	free_ws	 = &wsm->free_ws;
1043
1044again:
1045	spin_lock(ws_lock);
1046	if (!list_empty(idle_ws)) {
1047		workspace = idle_ws->next;
1048		list_del(workspace);
1049		(*free_ws)--;
1050		spin_unlock(ws_lock);
1051		return workspace;
1052
1053	}
1054	if (atomic_read(total_ws) > cpus) {
1055		DEFINE_WAIT(wait);
1056
1057		spin_unlock(ws_lock);
1058		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
1059		if (atomic_read(total_ws) > cpus && !*free_ws)
1060			schedule();
1061		finish_wait(ws_wait, &wait);
1062		goto again;
1063	}
1064	atomic_inc(total_ws);
1065	spin_unlock(ws_lock);
1066
1067	/*
1068	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
1069	 * to turn it off here because we might get called from the restricted
1070	 * context of btrfs_compress_bio/btrfs_compress_pages
1071	 */
1072	nofs_flag = memalloc_nofs_save();
1073	workspace = alloc_workspace(type, level);
1074	memalloc_nofs_restore(nofs_flag);
1075
1076	if (IS_ERR(workspace)) {
1077		atomic_dec(total_ws);
1078		wake_up(ws_wait);
1079
1080		/*
1081		 * Do not return the error but go back to waiting. There's a
1082		 * workspace preallocated for each type and the compression
1083		 * time is bounded so we get to a workspace eventually. This
1084		 * makes our caller's life easier.
1085		 *
1086		 * To prevent silent and low-probability deadlocks (when the
1087		 * initial preallocation fails), check if there are any
1088		 * workspaces at all.
1089		 */
1090		if (atomic_read(total_ws) == 0) {
1091			static DEFINE_RATELIMIT_STATE(_rs,
1092					/* once per minute */ 60 * HZ,
1093					/* no burst */ 1);
1094
1095			if (__ratelimit(&_rs)) {
1096				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
1097			}
1098		}
1099		goto again;
1100	}
1101	return workspace;
1102}
1103
1104static struct list_head *get_workspace(int type, int level)
1105{
1106	switch (type) {
1107	case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
1108	case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
1109	case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
1110	case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
1111	default:
1112		/*
1113		 * This can't happen, the type is validated several times
1114		 * before we get here.
1115		 */
1116		BUG();
1117	}
1118}
1119
1120/*
1121 * put a workspace struct back on the list or free it if we have enough
1122 * idle ones sitting around
1123 */
1124void btrfs_put_workspace(int type, struct list_head *ws)
1125{
1126	struct workspace_manager *wsm;
1127	struct list_head *idle_ws;
1128	spinlock_t *ws_lock;
1129	atomic_t *total_ws;
1130	wait_queue_head_t *ws_wait;
1131	int *free_ws;
1132
1133	wsm = btrfs_compress_op[type]->workspace_manager;
1134	idle_ws	 = &wsm->idle_ws;
1135	ws_lock	 = &wsm->ws_lock;
1136	total_ws = &wsm->total_ws;
1137	ws_wait	 = &wsm->ws_wait;
1138	free_ws	 = &wsm->free_ws;
1139
1140	spin_lock(ws_lock);
1141	if (*free_ws <= num_online_cpus()) {
1142		list_add(ws, idle_ws);
1143		(*free_ws)++;
1144		spin_unlock(ws_lock);
1145		goto wake;
1146	}
1147	spin_unlock(ws_lock);
1148
1149	free_workspace(type, ws);
1150	atomic_dec(total_ws);
1151wake:
1152	cond_wake_up(ws_wait);
1153}
1154
1155static void put_workspace(int type, struct list_head *ws)
1156{
1157	switch (type) {
1158	case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
1159	case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
1160	case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
1161	case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
1162	default:
1163		/*
1164		 * This can't happen, the type is validated several times
1165		 * before we get here.
1166		 */
1167		BUG();
1168	}
1169}
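
/*
 * A typical (illustrative) get/put pairing, as done by btrfs_compress_pages()
 * below:
 *
 *	workspace = get_workspace(type, level);
 *	ret = compression_compress_pages(type, workspace, ...);
 *	put_workspace(type, workspace);
 */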
1170
1171/*
1172 * Adjust @level according to the limits of the compression algorithm or
1173 * fall back to the default.
1174 */
1175static unsigned int btrfs_compress_set_level(int type, unsigned level)
1176{
1177	const struct btrfs_compress_op *ops = btrfs_compress_op[type];
1178
1179	if (level == 0)
1180		level = ops->default_level;
1181	else
1182		level = min(level, ops->max_level);
1183
1184	return level;
1185}
1186
1187/*
1188 * Given an address space and start and length, compress the bytes into @pages
1189 * that are allocated on demand.
1190 *
1191 * @type_level is the encoded algorithm and level, where level 0 means whatever
1192 * default the algorithm chooses and is opaque here;
1193 * - the compression algorithm is stored in bits 0-3
1194 * - the level is stored in bits 4-7
1195 *
1196 * @out_pages is an in/out parameter that holds the maximum number of pages to
1197 * allocate and returns the number of pages actually allocated
1198 *
1199 * @total_in is used to return the number of bytes actually read.  It
1200 * may be smaller than the input length if we had to exit early because we
1201 * ran out of room in the pages array or because we crossed the
1202 * max_out threshold.
1203 *
1204 * @total_out is an in/out parameter, must be set to the input length and will
1205 * be also used to return the total number of compressed bytes
1206 */
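/*
 * For example, zstd at level 5 is passed in as
 * type_level == (BTRFS_COMPRESS_ZSTD | (5 << 4)) == 0x53, which
 * btrfs_compress_type() and btrfs_compress_level() below split back into
 * type 3 and level 5.
 */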
1207int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
1208			 u64 start, struct page **pages,
1209			 unsigned long *out_pages,
1210			 unsigned long *total_in,
1211			 unsigned long *total_out)
1212{
1213	int type = btrfs_compress_type(type_level);
1214	int level = btrfs_compress_level(type_level);
1215	struct list_head *workspace;
1216	int ret;
1217
1218	level = btrfs_compress_set_level(type, level);
1219	workspace = get_workspace(type, level);
1220	ret = compression_compress_pages(type, workspace, mapping, start, pages,
1221					 out_pages, total_in, total_out);
1222	put_workspace(type, workspace);
1223	return ret;
1224}
1225
1226static int btrfs_decompress_bio(struct compressed_bio *cb)
1227{
1228	struct list_head *workspace;
1229	int ret;
1230	int type = cb->compress_type;
1231
1232	workspace = get_workspace(type, 0);
1233	ret = compression_decompress_bio(type, workspace, cb);
1234	put_workspace(type, workspace);
1235
1236	return ret;
1237}
1238
1239/*
1240 * a less complex decompression routine.  Our compressed data fits in a
1241 * single page, and we want to read a single page out of it.
1242 * start_byte tells us the offset into the compressed data we're interested in
1243 */
1244int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
1245		     unsigned long start_byte, size_t srclen, size_t destlen)
1246{
1247	struct list_head *workspace;
1248	int ret;
1249
1250	workspace = get_workspace(type, 0);
1251	ret = compression_decompress(type, workspace, data_in, dest_page,
1252				     start_byte, srclen, destlen);
1253	put_workspace(type, workspace);
1254
1255	return ret;
1256}
1257
1258void __init btrfs_init_compress(void)
1259{
1260	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
1261	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
1262	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
1263	zstd_init_workspace_manager();
1264}
1265
1266void __cold btrfs_exit_compress(void)
1267{
1268	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
1269	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
1270	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
1271	zstd_cleanup_workspace_manager();
1272}
1273
1274/*
1275 * Copy decompressed data from working buffer to pages.
1276 *
1277 * @buf:		The decompressed data buffer
1278 * @buf_len:		The decompressed data length
1279 * @decompressed:	Number of bytes that are already decompressed inside the
1280 * 			compressed extent
1281 * @cb:			The compressed extent descriptor
1282 * @orig_bio:		The original bio that the caller wants to read for
1283 *
1284 * An easier to understand graph is like below:
1285 *
1286 * 		|<- orig_bio ->|     |<- orig_bio->|
1287 * 	|<-------      full decompressed extent      ----->|
1288 * 	|<-----------    @cb range   ---->|
1289 * 	|			|<-- @buf_len -->|
1290 * 	|<--- @decompressed --->|
1291 *
1292 * Note that @cb can be a subpage of the full decompressed extent, but
1293 * @cb->start always matches the file offset of the full decompressed
1294 * extent.
1295 *
1296 * When reading a compressed extent, we have to read the full compressed
1297 * extent, while @orig_bio may only want part of the range.
1298 * Thus this function will ensure only data covered by @orig_bio will be
1299 * copied to it.
1300 *
1301 * Return 0 if we have copied all needed contents for @orig_bio.
1302 * Return >0 if we need to continue decompressing.
1303 */
1304int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
1305			      struct compressed_bio *cb, u32 decompressed)
1306{
1307	struct bio *orig_bio = cb->orig_bio;
1308	/* Offset inside the full decompressed extent */
1309	u32 cur_offset;
1310
1311	cur_offset = decompressed;
1312	/* The main loop to do the copy */
1313	while (cur_offset < decompressed + buf_len) {
1314		struct bio_vec bvec;
1315		size_t copy_len;
1316		u32 copy_start;
1317		/* Offset inside the full decompressed extent */
1318		u32 bvec_offset;
1319
1320		bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter);
1321		/*
1322		 * cb->start may underflow, but subtracting that value can still
1323		 * give us correct offset inside the full decompressed extent.
1324		 */
1325		bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start;
1326
1327		/* Haven't reached the bvec range, exit */
1328		if (decompressed + buf_len <= bvec_offset)
1329			return 1;
1330
1331		copy_start = max(cur_offset, bvec_offset);
1332		copy_len = min(bvec_offset + bvec.bv_len,
1333			       decompressed + buf_len) - copy_start;
1334		ASSERT(copy_len);
1335
1336		/*
1337		 * Extra range check to ensure we didn't go beyond
1338		 * @buf + @buf_len.
1339		 */
1340		ASSERT(copy_start - decompressed < buf_len);
1341		memcpy_to_page(bvec.bv_page, bvec.bv_offset,
1342			       buf + copy_start - decompressed, copy_len);
1343		flush_dcache_page(bvec.bv_page);
1344		cur_offset += copy_len;
1345
1346		bio_advance(orig_bio, copy_len);
1347		/* Finished the bio */
1348		if (!orig_bio->bi_iter.bi_size)
1349			return 0;
1350	}
1351	return 1;
1352}
1353
1354/*
1355 * Shannon Entropy calculation
1356 *
1357 * Pure byte distribution analysis fails to determine compressibility of data.
1358 * Try calculating entropy to estimate the average minimum number of bits
1359 * needed to encode the sampled data.
1360 *
1361 * For convenience, return the percentage of needed bits, instead of amount of
1362 * bits directly.
1363 *
1364 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
1365 *			    and can be compressible with high probability
1366 *
1367 * @ENTROPY_LVL_HIGH - with high probability, the data is not compressible
1368 *
1369 * Use of ilog2() decreases precision; we lower the LVL to 5 to compensate.
1370 */
1371#define ENTROPY_LVL_ACEPTABLE		(65)
1372#define ENTROPY_LVL_HIGH		(80)
1373
1374/*
1375 * For increased precision in the shannon_entropy calculation,
1376 * let's do pow(n, M) to preserve more digits after the decimal point:
1377 *
1378 * - maximum int bit length is 64
1379 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
1380 * - 13 * 4 = 52 < 64		-> M = 4
1381 *
1382 * So use pow(n, 4).
1383 */
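/*
 * E.g. for a full 8192 byte sample, sz_base = ilog2_w(8192) = ilog2(2^52) = 52,
 * and the maximum entropy used for scaling is 8 * ilog2_w(2) = 8 * 4 = 32.
 */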
1384static inline u32 ilog2_w(u64 n)
1385{
1386	return ilog2(n * n * n * n);
1387}
1388
1389static u32 shannon_entropy(struct heuristic_ws *ws)
1390{
1391	const u32 entropy_max = 8 * ilog2_w(2);
1392	u32 entropy_sum = 0;
1393	u32 p, p_base, sz_base;
1394	u32 i;
1395
1396	sz_base = ilog2_w(ws->sample_size);
1397	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1398		p = ws->bucket[i].count;
1399		p_base = ilog2_w(p);
1400		entropy_sum += p * (sz_base - p_base);
1401	}
1402
1403	entropy_sum /= ws->sample_size;
1404	return entropy_sum * 100 / entropy_max;
1405}
1406
1407#define RADIX_BASE		4U
1408#define COUNTERS_SIZE		(1U << RADIX_BASE)
1409
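/*
 * Extract the 4-bit digit of @num at bit offset @shift, inverted so that the
 * radix sort below ends up sorting the buckets in descending order.
 */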
1410static u8 get4bits(u64 num, int shift) {
1411	u8 low4bits;
1412
1413	num >>= shift;
1414	/* Reverse order */
1415	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1416	return low4bits;
1417}
1418
1419/*
1420 * Use 4 bits as radix base
1421 * Use 16 u32 counters for calculating new position in buf array
1422 *
1423 * @array     - array that will be sorted
1424 * @array_buf - buffer array to store sorting results
1425 *              must be equal in size to @array
1426 * @num       - array size
1427 */
1428static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1429		       int num)
1430{
1431	u64 max_num;
1432	u64 buf_num;
1433	u32 counters[COUNTERS_SIZE];
1434	u32 new_addr;
1435	u32 addr;
1436	int bitlen;
1437	int shift;
1438	int i;
1439
1440	/*
1441	 * Try to avoid useless loop iterations for small numbers stored in big
1442	 * counters.  Example: 48 33 4 ... in 64bit array
1443	 */
1444	max_num = array[0].count;
1445	for (i = 1; i < num; i++) {
1446		buf_num = array[i].count;
1447		if (buf_num > max_num)
1448			max_num = buf_num;
1449	}
1450
1451	buf_num = ilog2(max_num);
1452	bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1453
1454	shift = 0;
1455	while (shift < bitlen) {
1456		memset(counters, 0, sizeof(counters));
1457
1458		for (i = 0; i < num; i++) {
1459			buf_num = array[i].count;
1460			addr = get4bits(buf_num, shift);
1461			counters[addr]++;
1462		}
1463
1464		for (i = 1; i < COUNTERS_SIZE; i++)
1465			counters[i] += counters[i - 1];
1466
1467		for (i = num - 1; i >= 0; i--) {
1468			buf_num = array[i].count;
1469			addr = get4bits(buf_num, shift);
1470			counters[addr]--;
1471			new_addr = counters[addr];
1472			array_buf[new_addr] = array[i];
1473		}
1474
1475		shift += RADIX_BASE;
1476
1477		/*
1478		 * A normal radix sort expects to move data from a temporary array
1479		 * to the main one.  But that requires some CPU time. Avoid that
1480		 * by doing another sort iteration into the original array instead
1481		 * of a memcpy()
1482		 */
1483		memset(counters, 0, sizeof(counters));
1484
1485		for (i = 0; i < num; i++) {
1486			buf_num = array_buf[i].count;
1487			addr = get4bits(buf_num, shift);
1488			counters[addr]++;
1489		}
1490
1491		for (i = 1; i < COUNTERS_SIZE; i++)
1492			counters[i] += counters[i - 1];
1493
1494		for (i = num - 1; i >= 0; i--) {
1495			buf_num = array_buf[i].count;
1496			addr = get4bits(buf_num, shift);
1497			counters[addr]--;
1498			new_addr = counters[addr];
1499			array[new_addr] = array_buf[i];
1500		}
1501
1502		shift += RADIX_BASE;
1503	}
1504}
1505
1506/*
1507 * Size of the core byte set - how many bytes cover 90% of the sample
1508 *
1509 * There are several types of structured binary data that use nearly all byte
1510 * values. The distribution can be uniform and counts in all buckets will be
1511 * nearly the same (eg. encrypted data). Unlikely to be compressible.
1512 *
1513 * Another possibility is a normal (Gaussian) distribution, where the data
1514 * could be potentially compressible, but we have to take a few more steps
1515 * to decide how much.
1516 *
1517 * @BYTE_CORE_SET_LOW  - the main part of the byte values repeats frequently,
1518 *                       a compression algorithm can easily fix that
1519 * @BYTE_CORE_SET_HIGH - the data has a uniform distribution and with high
1520 *                       probability is not compressible
1521 */
1522#define BYTE_CORE_SET_LOW		(64)
1523#define BYTE_CORE_SET_HIGH		(200)
1524
1525static int byte_core_set_size(struct heuristic_ws *ws)
1526{
1527	u32 i;
1528	u32 coreset_sum = 0;
1529	const u32 core_set_threshold = ws->sample_size * 90 / 100;
1530	struct bucket_item *bucket = ws->bucket;
1531
1532	/* Sort in reverse order */
1533	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1534
1535	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1536		coreset_sum += bucket[i].count;
1537
1538	if (coreset_sum > core_set_threshold)
1539		return i;
1540
1541	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1542		coreset_sum += bucket[i].count;
1543		if (coreset_sum > core_set_threshold)
1544			break;
1545	}
1546
1547	return i;
1548}
1549
1550/*
1551 * Count byte values in buckets.
1552 * This heuristic can detect textual data (configs, xml, json, html, etc).
1553 * In most text-like data the byte set is restricted to a limited number of
1554 * possible characters, and that restriction in most cases makes the data
1555 * easy to compress.
1556 *
1557 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1558 *	less - compressible
1559 *	more - need additional analysis
1560 */
1561#define BYTE_SET_THRESHOLD		(64)
1562
1563static u32 byte_set_size(const struct heuristic_ws *ws)
1564{
1565	u32 i;
1566	u32 byte_set_size = 0;
1567
1568	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1569		if (ws->bucket[i].count > 0)
1570			byte_set_size++;
1571	}
1572
1573	/*
1574	 * Continue collecting count of byte values in buckets.  If the byte
1575	 * set size is bigger than the threshold, it's pointless to continue,
1576	 * the detection technique would fail for this type of data.
1577	 */
1578	for (; i < BUCKET_SIZE; i++) {
1579		if (ws->bucket[i].count > 0) {
1580			byte_set_size++;
1581			if (byte_set_size > BYTE_SET_THRESHOLD)
1582				return byte_set_size;
1583		}
1584	}
1585
1586	return byte_set_size;
1587}
1588
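/*
 * Detect a sample whose second half is an exact copy of the first half, eg.
 * long runs of a repeating pattern or zeroes.  Such data is trivially
 * compressible.
 */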
1589static bool sample_repeated_patterns(struct heuristic_ws *ws)
1590{
1591	const u32 half_of_sample = ws->sample_size / 2;
1592	const u8 *data = ws->sample;
1593
1594	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1595}
1596
1597static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1598				     struct heuristic_ws *ws)
1599{
1600	struct page *page;
1601	u64 index, index_end;
1602	u32 i, curr_sample_pos;
1603	u8 *in_data;
1604
1605	/*
1606	 * Compression handles the input data in chunks of 128KiB
1607	 * (defined by BTRFS_MAX_UNCOMPRESSED)
1608	 *
1609	 * We do the same for the heuristic and loop over the whole range.
1610	 *
1611	 * MAX_SAMPLE_SIZE is calculated under the assumption that the heuristic will
1612	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1613	 */
1614	if (end - start > BTRFS_MAX_UNCOMPRESSED)
1615		end = start + BTRFS_MAX_UNCOMPRESSED;
1616
1617	index = start >> PAGE_SHIFT;
1618	index_end = end >> PAGE_SHIFT;
1619
1620	/* Don't miss unaligned end */
1621	if (!IS_ALIGNED(end, PAGE_SIZE))
1622		index_end++;
1623
1624	curr_sample_pos = 0;
1625	while (index < index_end) {
1626		page = find_get_page(inode->i_mapping, index);
1627		in_data = kmap_local_page(page);
1628		/* Handle case where the start is not aligned to PAGE_SIZE */
1629		i = start % PAGE_SIZE;
1630		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1631			/* Don't sample any garbage from the last page */
1632			if (start > end - SAMPLING_READ_SIZE)
1633				break;
1634			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1635					SAMPLING_READ_SIZE);
1636			i += SAMPLING_INTERVAL;
1637			start += SAMPLING_INTERVAL;
1638			curr_sample_pos += SAMPLING_READ_SIZE;
1639		}
1640		kunmap_local(in_data);
1641		put_page(page);
1642
1643		index++;
1644	}
1645
1646	ws->sample_size = curr_sample_pos;
1647}
1648
1649/*
1650 * Compression heuristic.
1651 *
1652 * For now it's a naive and optimistic 'return true'; we'll extend the logic
1653 * to quickly (compared to direct compression) detect data characteristics
1654 * (compressible/incompressible) to avoid wasting CPU time on incompressible
1655 * data.
1656 *
1657 * The following types of analysis can be performed:
1658 * - detect mostly zero data
1659 * - detect data with low "byte set" size (text, etc)
1660 * - detect data with low/high "core byte" set
1661 *
1662 * Return non-zero if the compression should be done, 0 otherwise.
1663 */
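/*
 * The distinct non-zero return values (1-5) only identify which of the
 * detectors below accepted the data; callers treat any non-zero value as a
 * green light for compression.
 */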
1664int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1665{
1666	struct list_head *ws_list = get_workspace(0, 0);
1667	struct heuristic_ws *ws;
1668	u32 i;
1669	u8 byte;
1670	int ret = 0;
1671
1672	ws = list_entry(ws_list, struct heuristic_ws, list);
1673
1674	heuristic_collect_sample(inode, start, end, ws);
1675
1676	if (sample_repeated_patterns(ws)) {
1677		ret = 1;
1678		goto out;
1679	}
1680
1681	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1682
1683	for (i = 0; i < ws->sample_size; i++) {
1684		byte = ws->sample[i];
1685		ws->bucket[byte].count++;
1686	}
1687
1688	i = byte_set_size(ws);
1689	if (i < BYTE_SET_THRESHOLD) {
1690		ret = 2;
1691		goto out;
1692	}
1693
1694	i = byte_core_set_size(ws);
1695	if (i <= BYTE_CORE_SET_LOW) {
1696		ret = 3;
1697		goto out;
1698	}
1699
1700	if (i >= BYTE_CORE_SET_HIGH) {
1701		ret = 0;
1702		goto out;
1703	}
1704
1705	i = shannon_entropy(ws);
1706	if (i <= ENTROPY_LVL_ACEPTABLE) {
1707		ret = 4;
1708		goto out;
1709	}
1710
1711	/*
1712	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1713	 * needed to give green light to compression.
1714	 *
1715	 * For now just assume that compression at that level is not worth the
1716	 * resources because:
1717	 *
1718	 * 1. it is possible to defrag the data later
1719	 *
1720	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
1721	 * values, every bucket has counter at level ~54. The heuristic would
1722	 * be confused. This can happen when data have some internal repeated
1723	 * patterns like "abbacbbc...". This can be detected by analyzing
1724	 * pairs of bytes, which is too costly.
1725	 */
1726	if (i < ENTROPY_LVL_HIGH) {
1727		ret = 5;
1728		goto out;
1729	} else {
1730		ret = 0;
1731		goto out;
1732	}
1733
1734out:
1735	put_workspace(0, ws_list);
1736	return ret;
1737}
1738
1739/*
1740 * Convert the compression suffix (eg. after "zlib" starting with ":") to
1741 * a level; an unrecognized string will set the default level
1742 */
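/*
 * E.g. for the mount option "compress=zstd:9" this is called with str == ":9"
 * and returns 9; levels above the algorithm's maximum are clamped and an
 * unparseable suffix falls back to the default level.
 */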
1743unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
1744{
1745	unsigned int level = 0;
1746	int ret;
1747
1748	if (!type)
1749		return 0;
1750
1751	if (str[0] == ':') {
1752		ret = kstrtouint(str + 1, 10, &level);
1753		if (ret)
1754			level = 0;
1755	}
1756
1757	level = btrfs_compress_set_level(type, level);
1758
1759	return level;
1760}
1761