// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "checksum.h"
#include "compress.h"
#include "extents.h"
#include "super-io.h"

#include <linux/lz4.h>
#include <linux/zlib.h>
#include <linux/zstd.h>

/* Bounce buffer: */
struct bbuf {
	void		*b;
	enum {
		BB_NONE,
		BB_VMAP,
		BB_KMALLOC,
		BB_MEMPOOL,
	}		type;
	int		rw;
};

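/*
 * Allocate a bounce buffer of up to encoded_extent_max bytes: try kmalloc()
 * first, falling back to the preallocated mempool - which guarantees forward
 * progress - if that fails:
 */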
static struct bbuf __bounce_alloc(struct bch_fs *c, unsigned size, int rw)
{
	void *b;

	BUG_ON(size > c->opts.encoded_extent_max);

	b = kmalloc(size, GFP_NOFS|__GFP_NOWARN);
	if (b)
		return (struct bbuf) { .b = b, .type = BB_KMALLOC, .rw = rw };

	b = mempool_alloc(&c->compression_bounce[rw], GFP_NOFS);
	if (b)
		return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw };

	BUG();
}

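/*
 * Returns true if the bio's data, from @start onwards, is physically
 * contiguous, i.e. addressable directly via page_address():
 */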
static bool bio_phys_contig(struct bio *bio, struct bvec_iter start)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	void *expected_start = NULL;

	__bio_for_each_bvec(bv, bio, iter, start) {
		if (expected_start &&
		    expected_start != page_address(bv.bv_page) + bv.bv_offset)
			return false;

		expected_start = page_address(bv.bv_page) +
			bv.bv_offset + bv.bv_len;
	}

	return true;
}

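/*
 * Get a linear mapping of a bio's data: use the pages directly if they're
 * physically contiguous (and not in highmem), otherwise try to vmap() them;
 * failing that, fall back to a bounce buffer, copying the bio's contents in
 * for reads:
 */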
static struct bbuf __bio_map_or_bounce(struct bch_fs *c, struct bio *bio,
				       struct bvec_iter start, int rw)
{
	struct bbuf ret;
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned nr_pages = 0;
	struct page *stack_pages[16];
	struct page **pages = NULL;
	void *data;

	BUG_ON(start.bi_size > c->opts.encoded_extent_max);

	if (!PageHighMem(bio_iter_page(bio, start)) &&
	    bio_phys_contig(bio, start))
		return (struct bbuf) {
			.b = page_address(bio_iter_page(bio, start)) +
				bio_iter_offset(bio, start),
			.type = BB_NONE, .rw = rw
		};

	/* check if we can map the pages contiguously: */
	__bio_for_each_segment(bv, bio, iter, start) {
		if (iter.bi_size != start.bi_size &&
		    bv.bv_offset)
			goto bounce;

		if (bv.bv_len < iter.bi_size &&
		    bv.bv_offset + bv.bv_len < PAGE_SIZE)
			goto bounce;

		nr_pages++;
	}

	BUG_ON(DIV_ROUND_UP(start.bi_size, PAGE_SIZE) > nr_pages);

	pages = nr_pages > ARRAY_SIZE(stack_pages)
		? kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS)
		: stack_pages;
	if (!pages)
		goto bounce;

	nr_pages = 0;
	__bio_for_each_segment(bv, bio, iter, start)
		pages[nr_pages++] = bv.bv_page;

	data = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (pages != stack_pages)
		kfree(pages);

	if (data)
		return (struct bbuf) {
			.b = data + bio_iter_offset(bio, start),
			.type = BB_VMAP, .rw = rw
		};
bounce:
	ret = __bounce_alloc(c, start.bi_size, rw);

	if (rw == READ)
		memcpy_from_bio(ret.b, bio, start);

	return ret;
}

static struct bbuf bio_map_or_bounce(struct bch_fs *c, struct bio *bio, int rw)
{
	return __bio_map_or_bounce(c, bio, bio->bi_iter, rw);
}

static void bio_unmap_or_unbounce(struct bch_fs *c, struct bbuf buf)
{
	switch (buf.type) {
	case BB_NONE:
		break;
	case BB_VMAP:
		vunmap((void *) ((unsigned long) buf.b & PAGE_MASK));
		break;
	case BB_KMALLOC:
		kfree(buf.b);
		break;
	case BB_MEMPOOL:
		mempool_free(buf.b, &c->compression_bounce[buf.rw]);
		break;
	}
}

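/*
 * The kernel's z_stream has a workspace member that userspace zlib lacks;
 * this helper lets the same code build in both environments:
 */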
static inline void zlib_set_workspace(z_stream *strm, void *workspace)
{
#ifdef __KERNEL__
	strm->workspace = workspace;
#endif
}

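/*
 * Decompress @src into the preallocated linear buffer @dst_data; compression
 * type and uncompressed size come from @crc. For zstd, the first 4 bytes of
 * the source hold the exact compressed length. Returns 0 on success, -EIO on
 * any decompression failure:
 */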
static int __bio_uncompress(struct bch_fs *c, struct bio *src,
			    void *dst_data, struct bch_extent_crc_unpacked crc)
{
	struct bbuf src_data = { NULL };
	size_t src_len = src->bi_iter.bi_size;
	size_t dst_len = crc.uncompressed_size << 9;
	void *workspace;
	int ret;

	src_data = bio_map_or_bounce(c, src, READ);

	switch (crc.compression_type) {
	case BCH_COMPRESSION_TYPE_lz4_old:
	case BCH_COMPRESSION_TYPE_lz4:
		ret = LZ4_decompress_safe_partial(src_data.b, dst_data,
						  src_len, dst_len, dst_len);
		if (ret != dst_len)
			goto err;
		break;
	case BCH_COMPRESSION_TYPE_gzip: {
		z_stream strm = {
			.next_in	= src_data.b,
			.avail_in	= src_len,
			.next_out	= dst_data,
			.avail_out	= dst_len,
		};

		workspace = mempool_alloc(&c->decompress_workspace, GFP_NOFS);

		zlib_set_workspace(&strm, workspace);
		zlib_inflateInit2(&strm, -MAX_WBITS);
		ret = zlib_inflate(&strm, Z_FINISH);

		mempool_free(workspace, &c->decompress_workspace);

		if (ret != Z_STREAM_END)
			goto err;
		break;
	}
	case BCH_COMPRESSION_TYPE_zstd: {
		ZSTD_DCtx *ctx;
		size_t real_src_len = le32_to_cpup(src_data.b);

		if (real_src_len > src_len - 4)
			goto err;

		workspace = mempool_alloc(&c->decompress_workspace, GFP_NOFS);
		ctx = zstd_init_dctx(workspace, zstd_dctx_workspace_bound());

		ret = zstd_decompress_dctx(ctx,
				dst_data,	dst_len,
				src_data.b + 4, real_src_len);

		mempool_free(workspace, &c->decompress_workspace);

		if (ret != dst_len)
			goto err;
		break;
	}
	default:
		BUG();
	}
	ret = 0;
out:
	bio_unmap_or_unbounce(c, src_data);
	return ret;
err:
	ret = -EIO;
	goto out;
}

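/*
 * Decompress an extent in place, reusing the bio's own pages as the
 * destination: decompress into a bounce buffer, copy the live range back into
 * the bio, and update @crc to describe the now-uncompressed data:
 */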
int bch2_bio_uncompress_inplace(struct bch_fs *c, struct bio *bio,
				struct bch_extent_crc_unpacked *crc)
{
	struct bbuf data = { NULL };
	size_t dst_len = crc->uncompressed_size << 9;

	/* bio must own its pages: */
	BUG_ON(!bio->bi_vcnt);
	BUG_ON(DIV_ROUND_UP(crc->live_size, PAGE_SECTORS) > bio->bi_max_vecs);

	if (crc->uncompressed_size << 9	> c->opts.encoded_extent_max ||
	    crc->compressed_size << 9	> c->opts.encoded_extent_max) {
		bch_err(c, "error rewriting existing data: extent too big");
		return -EIO;
	}

	data = __bounce_alloc(c, dst_len, WRITE);

	if (__bio_uncompress(c, bio, data.b, *crc)) {
		if (!c->opts.no_data_io)
			bch_err(c, "error rewriting existing data: decompression error");
		bio_unmap_or_unbounce(c, data);
		return -EIO;
	}

	/*
	 * XXX: don't have a good way to assert that the bio was allocated with
	 * enough space, we depend on bch2_move_extent doing the right thing
	 */
	bio->bi_iter.bi_size = crc->live_size << 9;

	memcpy_to_bio(bio, bio->bi_iter, data.b + (crc->offset << 9));

	crc->csum_type		= 0;
	crc->compression_type	= 0;
	crc->compressed_size	= crc->live_size;
	crc->uncompressed_size	= crc->live_size;
	crc->offset		= 0;
	crc->csum		= (struct bch_csum) { 0, 0 };

	bio_unmap_or_unbounce(c, data);
	return 0;
}

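/*
 * Decompress from @src into @dst at @dst_iter: if the destination covers the
 * full uncompressed extent, decompress directly into it (mapped or bounced);
 * otherwise decompress into a bounce buffer and copy out just the live range
 * starting at @crc.offset:
 */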
int bch2_bio_uncompress(struct bch_fs *c, struct bio *src,
		       struct bio *dst, struct bvec_iter dst_iter,
		       struct bch_extent_crc_unpacked crc)
{
	struct bbuf dst_data = { NULL };
	size_t dst_len = crc.uncompressed_size << 9;
	int ret;

	if (crc.uncompressed_size << 9	> c->opts.encoded_extent_max ||
	    crc.compressed_size << 9	> c->opts.encoded_extent_max)
		return -EIO;

	dst_data = dst_len == dst_iter.bi_size
		? __bio_map_or_bounce(c, dst, dst_iter, WRITE)
		: __bounce_alloc(c, dst_len, WRITE);

	ret = __bio_uncompress(c, src, dst_data.b, crc);
	if (ret)
		goto err;

	if (dst_data.type != BB_NONE &&
	    dst_data.type != BB_VMAP)
		memcpy_to_bio(dst, dst_iter, dst_data.b + (crc.offset << 9));
err:
	bio_unmap_or_unbounce(c, dst_data);
	return ret;
}

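/*
 * Attempt to compress @src_len bytes into a @dst_len byte buffer.
 *
 * Returns the compressed size if the data compressed and fit, 0 if
 * compression failed outright, or a negative number as a hint for how much
 * input would fit:
 */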
static int attempt_compress(struct bch_fs *c,
			    void *workspace,
			    void *dst, size_t dst_len,
			    void *src, size_t src_len,
			    struct bch_compression_opt compression)
{
	enum bch_compression_type compression_type =
		__bch2_compression_opt_to_type[compression.type];

	switch (compression_type) {
	case BCH_COMPRESSION_TYPE_lz4:
		if (compression.level < LZ4HC_MIN_CLEVEL) {
			int len = src_len;
			int ret = LZ4_compress_destSize(
					src,		dst,
					&len,		dst_len,
					workspace);
			if (len < src_len)
				return -len;

			return ret;
		} else {
			int ret = LZ4_compress_HC(
					src,		dst,
					src_len,	dst_len,
					compression.level,
					workspace);

			return ret ?: -1;
		}
	case BCH_COMPRESSION_TYPE_gzip: {
		z_stream strm = {
			.next_in	= src,
			.avail_in	= src_len,
			.next_out	= dst,
			.avail_out	= dst_len,
		};

		zlib_set_workspace(&strm, workspace);
		zlib_deflateInit2(&strm,
				  compression.level
				  ? clamp_t(unsigned, compression.level,
					    Z_BEST_SPEED, Z_BEST_COMPRESSION)
				  : Z_DEFAULT_COMPRESSION,
				  Z_DEFLATED, -MAX_WBITS, DEF_MEM_LEVEL,
				  Z_DEFAULT_STRATEGY);

		if (zlib_deflate(&strm, Z_FINISH) != Z_STREAM_END)
			return 0;

		if (zlib_deflateEnd(&strm) != Z_OK)
			return 0;

		return strm.total_out;
	}
	case BCH_COMPRESSION_TYPE_zstd: {
		/*
		 * rescale:
		 * zstd max compression level is 22, our max level is 15
		 */
		unsigned level = min((compression.level * 3) / 2, zstd_max_clevel());
		ZSTD_parameters params = zstd_get_params(level, c->opts.encoded_extent_max);
		ZSTD_CCtx *ctx = zstd_init_cctx(workspace, c->zstd_workspace_size);

		/*
		 * ZSTD requires that when we decompress we pass in the exact
		 * compressed size - rounding it up to the nearest sector
		 * doesn't work, so we use the first 4 bytes of the buffer for
		 * that.
		 *
		 * Additionally, the ZSTD code seems to have a bug where it will
		 * write just past the end of the buffer - so subtract a fudge
		 * factor (7 bytes) from the dst buffer size to account for
		 * that.
		 */
		size_t len = zstd_compress_cctx(ctx,
				dst + 4,	dst_len - 4 - 7,
				src,		src_len,
				&params);
		if (zstd_is_error(len))
			return 0;

		*((__le32 *) dst) = cpu_to_le32(len);
		return len + 4;
	}
	default:
		BUG();
	}
}

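/*
 * Compress @src into @dst, shrinking the amount of input consumed (in units
 * of the filesystem block size) until the output fits. Returns the
 * compression type actually used, or BCH_COMPRESSION_TYPE_incompressible if
 * the data didn't compress; *src_len and *dst_len are set to the bytes
 * consumed and produced:
 */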
static unsigned __bio_compress(struct bch_fs *c,
			       struct bio *dst, size_t *dst_len,
			       struct bio *src, size_t *src_len,
			       struct bch_compression_opt compression)
{
	struct bbuf src_data = { NULL }, dst_data = { NULL };
	void *workspace;
	enum bch_compression_type compression_type =
		__bch2_compression_opt_to_type[compression.type];
	unsigned pad;
	int ret = 0;

	BUG_ON(compression_type >= BCH_COMPRESSION_TYPE_NR);
	BUG_ON(!mempool_initialized(&c->compress_workspace[compression_type]));

	/* If it's only one block, don't bother trying to compress: */
	if (src->bi_iter.bi_size <= c->opts.block_size)
		return BCH_COMPRESSION_TYPE_incompressible;

	dst_data = bio_map_or_bounce(c, dst, WRITE);
	src_data = bio_map_or_bounce(c, src, READ);

	workspace = mempool_alloc(&c->compress_workspace[compression_type], GFP_NOFS);

	*src_len = src->bi_iter.bi_size;
	*dst_len = dst->bi_iter.bi_size;

	/*
	 * XXX: this algorithm sucks when the compression code doesn't tell us
	 * how much would fit, like LZ4 does:
	 */
	while (1) {
		if (*src_len <= block_bytes(c)) {
			ret = -1;
			break;
		}

		ret = attempt_compress(c, workspace,
				       dst_data.b,	*dst_len,
				       src_data.b,	*src_len,
				       compression);
		if (ret > 0) {
			*dst_len = ret;
			ret = 0;
			break;
		}

		/* Didn't fit: should we retry with a smaller amount? */
		if (*src_len <= *dst_len) {
			ret = -1;
			break;
		}

		/*
		 * If ret is negative, it's a hint as to how much data would fit
		 */
		BUG_ON(-ret >= *src_len);

		if (ret < 0)
			*src_len = -ret;
		else
			*src_len -= (*src_len - *dst_len) / 2;
		*src_len = round_down(*src_len, block_bytes(c));
	}

	mempool_free(workspace, &c->compress_workspace[compression_type]);

	if (ret)
		goto err;

	/* Didn't get smaller: */
	if (round_up(*dst_len, block_bytes(c)) >= *src_len)
		goto err;

	pad = round_up(*dst_len, block_bytes(c)) - *dst_len;

	memset(dst_data.b + *dst_len, 0, pad);
	*dst_len += pad;

	if (dst_data.type != BB_NONE &&
	    dst_data.type != BB_VMAP)
		memcpy_to_bio(dst, dst->bi_iter, dst_data.b);

	BUG_ON(!*dst_len || *dst_len > dst->bi_iter.bi_size);
	BUG_ON(!*src_len || *src_len > src->bi_iter.bi_size);
	BUG_ON(*dst_len & (block_bytes(c) - 1));
	BUG_ON(*src_len & (block_bytes(c) - 1));
	ret = compression_type;
out:
	bio_unmap_or_unbounce(c, src_data);
	bio_unmap_or_unbounce(c, dst_data);
	return ret;
err:
	ret = BCH_COMPRESSION_TYPE_incompressible;
	goto out;
}

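/*
 * Compress as much of @src into @dst as will fit; both bios' sizes are
 * clamped for the duration of the call and restored before returning:
 */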
unsigned bch2_bio_compress(struct bch_fs *c,
			   struct bio *dst, size_t *dst_len,
			   struct bio *src, size_t *src_len,
			   unsigned compression_opt)
{
	unsigned orig_dst = dst->bi_iter.bi_size;
	unsigned orig_src = src->bi_iter.bi_size;
	unsigned compression_type;

	/* Don't consume more than encoded_extent_max from @src: */
	src->bi_iter.bi_size = min_t(unsigned, src->bi_iter.bi_size,
				     c->opts.encoded_extent_max);
	/* Don't generate a bigger output than input: */
	dst->bi_iter.bi_size = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);

	compression_type =
		__bio_compress(c, dst, dst_len, src, src_len,
			       bch2_compression_decode(compression_opt));

	dst->bi_iter.bi_size = orig_dst;
	src->bi_iter.bi_size = orig_src;
	return compression_type;
}

static int __bch2_fs_compress_init(struct bch_fs *, u64);

#define BCH_FEATURE_none	0

static const unsigned bch2_compression_opt_to_feature[] = {
#define x(t, n) [BCH_COMPRESSION_OPT_##t] = BCH_FEATURE_##t,
	BCH_COMPRESSION_OPTS()
#undef x
};

#undef BCH_FEATURE_none

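/*
 * Check whether the superblock already advertises feature bits @f; if not,
 * take sb_lock, recheck, allocate the corresponding compression workspaces,
 * and write out the superblock with the new feature bits set:
 */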
static int __bch2_check_set_has_compressed_data(struct bch_fs *c, u64 f)
{
	int ret = 0;

	if ((c->sb.features & f) == f)
		return 0;

	mutex_lock(&c->sb_lock);

	if ((c->sb.features & f) == f) {
		mutex_unlock(&c->sb_lock);
		return 0;
	}

	ret = __bch2_fs_compress_init(c, c->sb.features|f);
	if (ret) {
		mutex_unlock(&c->sb_lock);
		return ret;
	}

	c->disk_sb.sb->features[0] |= cpu_to_le64(f);
	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
}

int bch2_check_set_has_compressed_data(struct bch_fs *c,
				       unsigned compression_opt)
{
	unsigned compression_type = bch2_compression_decode(compression_opt).type;

	BUG_ON(compression_type >= ARRAY_SIZE(bch2_compression_opt_to_feature));

	return compression_type
		? __bch2_check_set_has_compressed_data(c,
				1ULL << bch2_compression_opt_to_feature[compression_type])
		: 0;
}

void bch2_fs_compress_exit(struct bch_fs *c)
{
	unsigned i;

	mempool_exit(&c->decompress_workspace);
	for (i = 0; i < ARRAY_SIZE(c->compress_workspace); i++)
		mempool_exit(&c->compress_workspace[i]);
	mempool_exit(&c->compression_bounce[WRITE]);
	mempool_exit(&c->compression_bounce[READ]);
}

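/*
 * Allocate bounce buffers and per-algorithm compress workspace mempools for
 * every compression type enabled in @features; a single decompress workspace,
 * sized for the largest algorithm, is shared by all types:
 */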
static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
{
	size_t decompress_workspace_size = 0;
	ZSTD_parameters params = zstd_get_params(zstd_max_clevel(),
						 c->opts.encoded_extent_max);

	c->zstd_workspace_size = zstd_cctx_workspace_bound(&params.cParams);

	struct {
		unsigned			feature;
		enum bch_compression_type	type;
		size_t				compress_workspace;
		size_t				decompress_workspace;
	} compression_types[] = {
		{ BCH_FEATURE_lz4, BCH_COMPRESSION_TYPE_lz4,
			max_t(size_t, LZ4_MEM_COMPRESS, LZ4HC_MEM_COMPRESS),
			0 },
		{ BCH_FEATURE_gzip, BCH_COMPRESSION_TYPE_gzip,
			zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL),
			zlib_inflate_workspacesize(), },
		{ BCH_FEATURE_zstd, BCH_COMPRESSION_TYPE_zstd,
			c->zstd_workspace_size,
			zstd_dctx_workspace_bound() },
	}, *i;
	bool have_compressed = false;

	for (i = compression_types;
	     i < compression_types + ARRAY_SIZE(compression_types);
	     i++)
		have_compressed |= (features & (1 << i->feature)) != 0;

	if (!have_compressed)
		return 0;

	if (!mempool_initialized(&c->compression_bounce[READ]) &&
	    mempool_init_kvmalloc_pool(&c->compression_bounce[READ],
				       1, c->opts.encoded_extent_max))
		return -BCH_ERR_ENOMEM_compression_bounce_read_init;

	if (!mempool_initialized(&c->compression_bounce[WRITE]) &&
	    mempool_init_kvmalloc_pool(&c->compression_bounce[WRITE],
				       1, c->opts.encoded_extent_max))
		return -BCH_ERR_ENOMEM_compression_bounce_write_init;

	for (i = compression_types;
	     i < compression_types + ARRAY_SIZE(compression_types);
	     i++) {
		decompress_workspace_size =
			max(decompress_workspace_size, i->decompress_workspace);

		if (!(features & (1 << i->feature)))
			continue;

		if (mempool_initialized(&c->compress_workspace[i->type]))
			continue;

		if (mempool_init_kvmalloc_pool(
				&c->compress_workspace[i->type],
				1, i->compress_workspace))
			return -BCH_ERR_ENOMEM_compression_workspace_init;
	}

	if (!mempool_initialized(&c->decompress_workspace) &&
	    mempool_init_kvmalloc_pool(&c->decompress_workspace,
				       1, decompress_workspace_size))
		return -BCH_ERR_ENOMEM_decompression_workspace_init;

	return 0;
}

static u64 compression_opt_to_feature(unsigned v)
{
	unsigned type = bch2_compression_decode(v).type;

	return BIT_ULL(bch2_compression_opt_to_feature[type]);
}

int bch2_fs_compress_init(struct bch_fs *c)
{
	u64 f = c->sb.features;

	f |= compression_opt_to_feature(c->opts.compression);
	f |= compression_opt_to_feature(c->opts.background_compression);

	return __bch2_fs_compress_init(c, f);
}

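/*
 * Parse a compression option string of the form "type" or "type:level",
 * e.g. "zstd:7"; level 0 (or no level) means the algorithm's default, and
 * "none" doesn't accept a level:
 */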
int bch2_opt_compression_parse(struct bch_fs *c, const char *_val, u64 *res,
			       struct printbuf *err)
{
	char *val = kstrdup(_val, GFP_KERNEL);
	char *p = val, *type_str, *level_str;
	struct bch_compression_opt opt = { 0 };
	int ret;

	if (!val)
		return -ENOMEM;

	type_str = strsep(&p, ":");
	level_str = p;

	ret = match_string(bch2_compression_opts, -1, type_str);
	if (ret < 0 && err)
		prt_str(err, "invalid compression type");
	if (ret < 0)
		goto err;

	opt.type = ret;

	if (level_str) {
		unsigned level;

		ret = kstrtouint(level_str, 10, &level);
		if (!ret && !opt.type && level)
			ret = -EINVAL;
		if (!ret && level > 15)
			ret = -EINVAL;
		if (ret < 0 && err)
			prt_str(err, "invalid compression level");
		if (ret < 0)
			goto err;

		opt.level = level;
	}

	*res = bch2_compression_encode(opt);
err:
	kfree(val);
	return ret;
}

void bch2_compression_opt_to_text(struct printbuf *out, u64 v)
{
	struct bch_compression_opt opt = bch2_compression_decode(v);

	if (opt.type < BCH_COMPRESSION_OPT_NR)
		prt_str(out, bch2_compression_opts[opt.type]);
	else
		prt_printf(out, "(unknown compression opt %u)", opt.type);
	if (opt.level)
		prt_printf(out, ":%u", opt.level);
}

void bch2_opt_compression_to_text(struct printbuf *out,
				  struct bch_fs *c,
				  struct bch_sb *sb,
				  u64 v)
{
	return bch2_compression_opt_to_text(out, v);
}

int bch2_opt_compression_validate(u64 v, struct printbuf *err)
{
	if (!bch2_compression_opt_valid(v)) {
		prt_printf(err, "invalid compression opt %llu", v);
		return -BCH_ERR_invalid_sb_opt_compression;
	}

	return 0;
}