/* lz_encoder.c revision 291125 */
1139749Simp///////////////////////////////////////////////////////////////////////////////
2129449Sscottl//
3129449Sscottl/// \file       lz_encoder.c
4129449Sscottl/// \brief      LZ in window
589580Smsmith///
689580Smsmith//  Authors:    Igor Pavlov
789580Smsmith//              Lasse Collin
889580Smsmith//
989580Smsmith//  This file has been put into the public domain.
1089580Smsmith//  You can do whatever you want with this file.
1189580Smsmith//
1289580Smsmith///////////////////////////////////////////////////////////////////////////////
1389580Smsmith
1489580Smsmith#include "lz_encoder.h"
1589580Smsmith#include "lz_encoder_hash.h"
1689580Smsmith
1789580Smsmith// See lz_encoder_hash.h. This is a bit hackish but avoids making
1889580Smsmith// endianness a conditional in makefiles.
1989580Smsmith#if defined(WORDS_BIGENDIAN) && !defined(HAVE_SMALL)
2089580Smsmith#	include "lz_encoder_hash_table.h"
2189580Smsmith#endif
2289580Smsmith
2389580Smsmith#include "memcmplen.h"
2489580Smsmith
2589580Smsmith
/// Private state of an LZ-based encoder filter in the coder chain.
struct lzma_coder_s {
	/// LZ-based encoder e.g. LZMA
	lzma_lz_encoder lz;

	/// History buffer and match finder
	lzma_mf mf;

	/// Next coder in the chain
	lzma_next_coder next;
};
3689580Smsmith
3789580Smsmith
3889580Smsmith/// \brief      Moves the data in the input window to free space for new data
3989580Smsmith///
4089580Smsmith/// mf->buffer is a sliding input window, which keeps mf->keep_size_before
4189580Smsmith/// bytes of input history available all the time. Now and then we need to
42129449Sscottl/// "slide" the buffer to make space for the new data to the end of the
4389580Smsmith/// buffer. At the same time, data older than keep_size_before is dropped.
4489580Smsmith///
45119418Sobrienstatic void
46119418Sobrienmove_window(lzma_mf *mf)
4789580Smsmith{
4889580Smsmith	// Align the move to a multiple of 16 bytes. Some LZ-based encoders
4989580Smsmith	// like LZMA use the lowest bits of mf->read_pos to know the
5089580Smsmith	// alignment of the uncompressed data. We also get better speed
5189580Smsmith	// for memmove() with aligned buffers.
5289580Smsmith	assert(mf->read_pos > mf->keep_size_before);
5395533Smike	const uint32_t move_offset
5489580Smsmith		= (mf->read_pos - mf->keep_size_before) & ~UINT32_C(15);
5589580Smsmith
5689580Smsmith	assert(mf->write_pos > move_offset);
5789580Smsmith	const size_t move_size = mf->write_pos - move_offset;
5889580Smsmith
5989580Smsmith	assert(move_offset + move_size <= mf->size);
6089580Smsmith
6189580Smsmith	memmove(mf->buffer, mf->buffer + move_offset, move_size);
6289580Smsmith
6389580Smsmith	mf->offset += move_offset;
6489580Smsmith	mf->read_pos -= move_offset;
6589580Smsmith	mf->read_limit -= move_offset;
6689580Smsmith	mf->write_pos -= move_offset;
6789580Smsmith
6889580Smsmith	return;
6989580Smsmith}
7089580Smsmith
7189580Smsmith
/// \brief      Tries to fill the input window (mf->buffer)
///
/// If we are the last encoder in the chain, our input data is in in[].
/// Otherwise we call the next filter in the chain to process in[] and
/// write its output to mf->buffer.
///
/// This function must not be called once it has returned LZMA_STREAM_END.
///
/// \param      coder       Coder whose match finder window is to be filled
/// \param      allocator   Allocator passed through to the next filter
/// \param      in          Input buffer
/// \param      in_pos      in[*in_pos] is the next byte to read; updated
/// \param      in_size     Size of the input buffer
/// \param      action      LZMA_RUN, or a flush/finish request
///
/// \return     LZMA_OK or an error code from the next filter in the chain
///
static lzma_ret
fill_window(lzma_coder *coder, const lzma_allocator *allocator,
		const uint8_t *in, size_t *in_pos, size_t in_size,
		lzma_action action)
{
	assert(coder->mf.read_pos <= coder->mf.write_pos);

	// Move the sliding window if needed.
	if (coder->mf.read_pos >= coder->mf.size - coder->mf.keep_size_after)
		move_window(&coder->mf);

	// Maybe this is ugly, but lzma_mf uses uint32_t for most things
	// (which I find cleanest), but we need size_t here when filling
	// the history window.
	size_t write_pos = coder->mf.write_pos;
	lzma_ret ret;
	if (coder->next.code == NULL) {
		// Not using a filter, simply memcpy() as much as possible.
		lzma_bufcpy(in, in_pos, in_size, coder->mf.buffer,
				&write_pos, coder->mf.size);

		// End of stream is reached only when all input has been
		// consumed and the caller asked for flush/finish.
		ret = action != LZMA_RUN && *in_pos == in_size
				? LZMA_STREAM_END : LZMA_OK;

	} else {
		ret = coder->next.code(coder->next.coder, allocator,
				in, in_pos, in_size,
				coder->mf.buffer, &write_pos,
				coder->mf.size, action);
	}

	coder->mf.write_pos = write_pos;

	// Silence Valgrind. lzma_memcmplen() can read extra bytes
	// and Valgrind will give warnings if those bytes are uninitialized
	// because Valgrind cannot see that the values of the uninitialized
	// bytes are eventually ignored.
	memzero(coder->mf.buffer + write_pos, LZMA_MEMCMPLEN_EXTRA);

	// If end of stream has been reached or flushing completed, we allow
	// the encoder to process all the input (that is, read_pos is allowed
	// to reach write_pos). Otherwise we keep keep_size_after bytes
	// available as prebuffer.
	if (ret == LZMA_STREAM_END) {
		assert(*in_pos == in_size);
		ret = LZMA_OK;
		coder->mf.action = action;
		coder->mf.read_limit = coder->mf.write_pos;

	} else if (coder->mf.write_pos > coder->mf.keep_size_after) {
		// This needs to be done conditionally, because if we got
		// only little new input, there may be too little input
		// to do any encoding yet.
		coder->mf.read_limit = coder->mf.write_pos
				- coder->mf.keep_size_after;
	}

	// Restart the match finder after finished LZMA_SYNC_FLUSH.
	if (coder->mf.pending > 0
			&& coder->mf.read_pos < coder->mf.read_limit) {
		// Match finder may update coder->pending and expects it to
		// start from zero, so use a temporary variable.
		const uint32_t pending = coder->mf.pending;
		coder->mf.pending = 0;

		// Rewind read_pos so that the match finder can hash
		// the pending bytes.
		assert(coder->mf.read_pos >= pending);
		coder->mf.read_pos -= pending;

		// Call the skip function directly instead of using
		// mf_skip(), since we don't want to touch mf->read_ahead.
		coder->mf.skip(&coder->mf, pending);
	}

	return ret;
}
15789580Smsmith
15889580Smsmith
15989580Smsmithstatic lzma_ret
16089580Smsmithlz_encode(lzma_coder *coder, const lzma_allocator *allocator,
16189580Smsmith		const uint8_t *restrict in, size_t *restrict in_pos,
16289580Smsmith		size_t in_size,
16389580Smsmith		uint8_t *restrict out, size_t *restrict out_pos,
16489580Smsmith		size_t out_size, lzma_action action)
16589580Smsmith{
16689580Smsmith	while (*out_pos < out_size
16789580Smsmith			&& (*in_pos < in_size || action != LZMA_RUN)) {
16889580Smsmith		// Read more data to coder->mf.buffer if needed.
169274487Sjhb		if (coder->mf.action == LZMA_RUN && coder->mf.read_pos
17089580Smsmith				>= coder->mf.read_limit)
171274487Sjhb			return_if_error(fill_window(coder, allocator,
17289580Smsmith					in, in_pos, in_size, action));
173274487Sjhb
174274487Sjhb		// Encode
17589580Smsmith		const lzma_ret ret = coder->lz.code(coder->lz.coder,
17689580Smsmith				&coder->mf, out, out_pos, out_size);
17789580Smsmith		if (ret != LZMA_OK) {
17889580Smsmith			// Setting this to LZMA_RUN for cases when we are
17989580Smsmith			// flushing. It doesn't matter when finishing or if
18089580Smsmith			// an error occurred.
18189580Smsmith			coder->mf.action = LZMA_RUN;
18289580Smsmith			return ret;
18389580Smsmith		}
18489580Smsmith	}
18589580Smsmith
18689580Smsmith	return LZMA_OK;
18789580Smsmith}
18889580Smsmith
18989580Smsmith
/// \brief      Validates lz_options and computes the buffer/table sizes
///             into *mf, freeing old buffers whose size no longer matches
///
/// Only sizes and function pointers are set here; the actual allocations
/// happen later in lz_encoder_init().
///
/// \return     true on error (invalid options), false on success
///
static bool
lz_encoder_prepare(lzma_mf *mf, const lzma_allocator *allocator,
		const lzma_lz_options *lz_options)
{
	// For now, the dictionary size is limited to 1.5 GiB. This may grow
	// in the future if needed, but it needs a little more work than just
	// changing this check.
	if (lz_options->dict_size < LZMA_DICT_SIZE_MIN
			|| lz_options->dict_size
				> (UINT32_C(1) << 30) + (UINT32_C(1) << 29)
			|| lz_options->nice_len > lz_options->match_len_max)
		return true;

	mf->keep_size_before = lz_options->before_size + lz_options->dict_size;

	mf->keep_size_after = lz_options->after_size
			+ lz_options->match_len_max;

	// To avoid constant memmove()s, allocate some extra space. Since
	// memmove()s become more expensive when the size of the buffer
	// increases, we reserve more space when a large dictionary is
	// used to make the memmove() calls rarer.
	//
	// This works with dictionaries up to about 3 GiB. If bigger
	// dictionary is wanted, some extra work is needed:
	//   - Several variables in lzma_mf have to be changed from uint32_t
	//     to size_t.
	//   - Memory usage calculation needs something too, e.g. use uint64_t
	//     for mf->size.
	uint32_t reserve = lz_options->dict_size / 2;
	if (reserve > (UINT32_C(1) << 30))
		reserve /= 2;

	reserve += (lz_options->before_size + lz_options->match_len_max
			+ lz_options->after_size) / 2 + (UINT32_C(1) << 19);

	const uint32_t old_size = mf->size;
	mf->size = mf->keep_size_before + reserve + mf->keep_size_after;

	// Deallocate the old history buffer if it exists but has different
	// size than what is needed now.
	if (mf->buffer != NULL && old_size != mf->size) {
		lzma_free(mf->buffer, allocator);
		mf->buffer = NULL;
	}

	// Match finder options
	mf->match_len_max = lz_options->match_len_max;
	mf->nice_len = lz_options->nice_len;

	// cyclic_size has to stay smaller than 2 Gi. Note that this doesn't
	// mean limiting dictionary size to less than 2 GiB. With a match
	// finder that uses multibyte resolution (hashes start at e.g. every
	// fourth byte), cyclic_size would stay below 2 Gi even when
	// dictionary size is greater than 2 GiB.
	//
	// It would be possible to allow cyclic_size >= 2 Gi, but then we
	// would need to be careful to use 64-bit types in various places
	// (size_t could do since we would need bigger than 32-bit address
	// space anyway). It would also require either zeroing a multigigabyte
	// buffer at initialization (waste of time and RAM) or allow
	// normalization in lz_encoder_mf.c to access uninitialized
	// memory to keep the code simpler. The current way is simple and
	// still allows pretty big dictionaries, so I don't expect these
	// limits to change.
	mf->cyclic_size = lz_options->dict_size + 1;

	// Validate the match finder ID and setup the function pointers.
	switch (lz_options->match_finder) {
#ifdef HAVE_MF_HC3
	case LZMA_MF_HC3:
		mf->find = &lzma_mf_hc3_find;
		mf->skip = &lzma_mf_hc3_skip;
		break;
#endif
#ifdef HAVE_MF_HC4
	case LZMA_MF_HC4:
		mf->find = &lzma_mf_hc4_find;
		mf->skip = &lzma_mf_hc4_skip;
		break;
#endif
#ifdef HAVE_MF_BT2
	case LZMA_MF_BT2:
		mf->find = &lzma_mf_bt2_find;
		mf->skip = &lzma_mf_bt2_skip;
		break;
#endif
#ifdef HAVE_MF_BT3
	case LZMA_MF_BT3:
		mf->find = &lzma_mf_bt3_find;
		mf->skip = &lzma_mf_bt3_skip;
		break;
#endif
#ifdef HAVE_MF_BT4
	case LZMA_MF_BT4:
		mf->find = &lzma_mf_bt4_find;
		mf->skip = &lzma_mf_bt4_skip;
		break;
#endif

	default:
		return true;
	}

	// Calculate the sizes of mf->hash and mf->son and check that
	// nice_len is big enough for the selected match finder.
	// NOTE: the low nibble of the match finder ID encodes the number
	// of hashed bytes; bit 0x10 marks binary tree match finders.
	const uint32_t hash_bytes = lz_options->match_finder & 0x0F;
	if (hash_bytes > mf->nice_len)
		return true;

	const bool is_bt = (lz_options->match_finder & 0x10) != 0;
	uint32_t hs;

	if (hash_bytes == 2) {
		hs = 0xFFFF;
	} else {
		// Round dictionary size up to the next 2^n - 1 so it can
		// be used as a hash mask.
		hs = lz_options->dict_size - 1;
		hs |= hs >> 1;
		hs |= hs >> 2;
		hs |= hs >> 4;
		hs |= hs >> 8;
		hs >>= 1;
		hs |= 0xFFFF;

		if (hs > (UINT32_C(1) << 24)) {
			if (hash_bytes == 3)
				hs = (UINT32_C(1) << 24) - 1;
			else
				hs >>= 1;
		}
	}

	mf->hash_mask = hs;

	// hs becomes the total number of uint32_t slots in mf->hash:
	// the main table plus the smaller 2-byte and 3-byte subtables.
	++hs;
	if (hash_bytes > 2)
		hs += HASH_2_SIZE;
	if (hash_bytes > 3)
		hs += HASH_3_SIZE;
/*
	No match finder uses this at the moment.
	if (mf->hash_bytes > 4)
		hs += HASH_4_SIZE;
*/

	const uint32_t old_hash_count = mf->hash_count;
	const uint32_t old_sons_count = mf->sons_count;
	mf->hash_count = hs;
	mf->sons_count = mf->cyclic_size;
	if (is_bt)
		mf->sons_count *= 2;

	// Deallocate the old hash array if it exists and has different size
	// than what is needed now.
	if (old_hash_count != mf->hash_count
			|| old_sons_count != mf->sons_count) {
		lzma_free(mf->hash, allocator);
		mf->hash = NULL;

		lzma_free(mf->son, allocator);
		mf->son = NULL;
	}

	// Maximum number of match finder cycles
	mf->depth = lz_options->depth;
	if (mf->depth == 0) {
		if (is_bt)
			mf->depth = 16 + mf->nice_len / 2;
		else
			mf->depth = 4 + mf->nice_len / 4;
	}

	return false;
}
36689580Smsmith
36789580Smsmith
/// \brief      Allocates the buffers sized by lz_encoder_prepare() and
///             resets the match finder state, applying a preset
///             dictionary if one was given
///
/// \return     true on memory allocation failure, false on success
///
static bool
lz_encoder_init(lzma_mf *mf, const lzma_allocator *allocator,
		const lzma_lz_options *lz_options)
{
	// Allocate the history buffer.
	if (mf->buffer == NULL) {
		// lzma_memcmplen() is used for the dictionary buffer
		// so we need to allocate a few extra bytes to prevent
		// it from reading past the end of the buffer.
		mf->buffer = lzma_alloc(mf->size + LZMA_MEMCMPLEN_EXTRA,
				allocator);
		if (mf->buffer == NULL)
			return true;

		// Keep Valgrind happy with lzma_memcmplen() and initialize
		// the extra bytes whose value may get read but which will
		// effectively get ignored.
		memzero(mf->buffer + mf->size, LZMA_MEMCMPLEN_EXTRA);
	}

	// Use cyclic_size as initial mf->offset. This allows
	// avoiding a few branches in the match finders. The downside is
	// that match finder needs to be normalized more often, which may
	// hurt performance with huge dictionaries.
	mf->offset = mf->cyclic_size;
	mf->read_pos = 0;
	mf->read_ahead = 0;
	mf->read_limit = 0;
	mf->write_pos = 0;
	mf->pending = 0;

#if UINT32_MAX >= SIZE_MAX / 4
	// Check for integer overflow. (Huge dictionaries are not
	// possible on 32-bit CPU.)
	if (mf->hash_count > SIZE_MAX / sizeof(uint32_t)
			|| mf->sons_count > SIZE_MAX / sizeof(uint32_t))
		return true;
#endif

	// Allocate and initialize the hash table. Since EMPTY_HASH_VALUE
	// is zero, we can use lzma_alloc_zero() or memzero() for mf->hash.
	//
	// We don't need to initialize mf->son, but not doing that may
	// make Valgrind complain in normalization (see normalize() in
	// lz_encoder_mf.c). Skipping the initialization is *very* good
	// when big dictionary is used but only small amount of data gets
	// actually compressed: most of the mf->son won't get actually
	// allocated by the kernel, so we avoid wasting RAM and improve
	// initialization speed a lot.
	if (mf->hash == NULL) {
		mf->hash = lzma_alloc_zero(mf->hash_count * sizeof(uint32_t),
				allocator);
		mf->son = lzma_alloc(mf->sons_count * sizeof(uint32_t),
				allocator);

		if (mf->hash == NULL || mf->son == NULL) {
			lzma_free(mf->hash, allocator);
			mf->hash = NULL;

			lzma_free(mf->son, allocator);
			mf->son = NULL;

			return true;
		}
	} else {
/*
		for (uint32_t i = 0; i < mf->hash_count; ++i)
			mf->hash[i] = EMPTY_HASH_VALUE;
*/
		memzero(mf->hash, mf->hash_count * sizeof(uint32_t));
	}

	mf->cyclic_pos = 0;

	// Handle preset dictionary.
	if (lz_options->preset_dict != NULL
			&& lz_options->preset_dict_size > 0) {
		// If the preset dictionary is bigger than the actual
		// dictionary, use only the tail.
		mf->write_pos = my_min(lz_options->preset_dict_size, mf->size);
		memcpy(mf->buffer, lz_options->preset_dict
				+ lz_options->preset_dict_size - mf->write_pos,
				mf->write_pos);
		// Hash the preset dictionary bytes; LZMA_SYNC_FLUSH lets
		// the skip function consume up to write_pos.
		mf->action = LZMA_SYNC_FLUSH;
		mf->skip(mf, mf->write_pos);
	}

	mf->action = LZMA_RUN;

	return false;
}
45989580Smsmith
46089580Smsmith
46189580Smsmithextern uint64_t
46289580Smsmithlzma_lz_encoder_memusage(const lzma_lz_options *lz_options)
46389580Smsmith{
46489580Smsmith	// Old buffers must not exist when calling lz_encoder_prepare().
46589580Smsmith	lzma_mf mf = {
46689580Smsmith		.buffer = NULL,
46789580Smsmith		.hash = NULL,
46889580Smsmith		.son = NULL,
46989580Smsmith		.hash_count = 0,
47089580Smsmith		.sons_count = 0,
47189580Smsmith	};
472274487Sjhb
473274487Sjhb	// Setup the size information into mf.
47489580Smsmith	if (lz_encoder_prepare(&mf, NULL, lz_options))
47589580Smsmith		return UINT64_MAX;
476274487Sjhb
477156139Sscottl	// Calculate the memory usage.
478156139Sscottl	return ((uint64_t)(mf.hash_count) + mf.sons_count) * sizeof(uint32_t)
47989580Smsmith			+ mf.size + sizeof(lzma_coder);
480156139Sscottl}
48189580Smsmith
482156139Sscottl
48389580Smsmithstatic void
48489580Smsmithlz_encoder_end(lzma_coder *coder, const lzma_allocator *allocator)
48589580Smsmith{
48689580Smsmith	lzma_next_end(&coder->next, allocator);
48789580Smsmith
48889580Smsmith	lzma_free(coder->mf.son, allocator);
48989580Smsmith	lzma_free(coder->mf.hash, allocator);
49089580Smsmith	lzma_free(coder->mf.buffer, allocator);
49189580Smsmith
49289580Smsmith	if (coder->lz.end != NULL)
49389580Smsmith		coder->lz.end(coder->lz.coder, allocator);
49489580Smsmith	else
49589580Smsmith		lzma_free(coder->lz.coder, allocator);
49689580Smsmith
49789580Smsmith	lzma_free(coder, allocator);
49889580Smsmith	return;
49989580Smsmith}
50089580Smsmith
50189580Smsmith
502156139Sscottlstatic lzma_ret
50389580Smsmithlz_encoder_update(lzma_coder *coder, const lzma_allocator *allocator,
504156139Sscottl		const lzma_filter *filters_null lzma_attribute((__unused__)),
50589580Smsmith		const lzma_filter *reversed_filters)
50689580Smsmith{
50789580Smsmith	if (coder->lz.options_update == NULL)
50889580Smsmith		return LZMA_PROG_ERROR;
50989580Smsmith
51089580Smsmith	return_if_error(coder->lz.options_update(
51189580Smsmith			coder->lz.coder, reversed_filters));
51289580Smsmith
513274487Sjhb	return lzma_next_filter_update(
514274487Sjhb			&coder->next, allocator, reversed_filters + 1);
515274487Sjhb}
516170872Sscottl
51789580Smsmith
/// \brief      Initializes an LZ-based encoder as the next coder
///
/// Allocates the base lzma_coder lazily (it is reused across re-inits),
/// then lets lz_init() set up the LZ-based encoder and fill lz_options,
/// prepares and allocates the match finder, and finally initializes the
/// next filter in the chain.
///
extern lzma_ret
lzma_lz_encoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
		const lzma_filter_info *filters,
		lzma_ret (*lz_init)(lzma_lz_encoder *lz,
			const lzma_allocator *allocator, const void *options,
			lzma_lz_options *lz_options))
{
#ifdef HAVE_SMALL
	// We need that the CRC32 table has been initialized.
	lzma_crc32_init();
#endif

	// Allocate and initialize the base data structure.
	if (next->coder == NULL) {
		next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
		if (next->coder == NULL)
			return LZMA_MEM_ERROR;

		next->code = &lz_encode;
		next->end = &lz_encoder_end;
		next->update = &lz_encoder_update;

		// Mark everything unallocated so that lz_encoder_prepare()
		// and lz_encoder_init() allocate fresh buffers.
		next->coder->lz.coder = NULL;
		next->coder->lz.code = NULL;
		next->coder->lz.end = NULL;

		next->coder->mf.buffer = NULL;
		next->coder->mf.hash = NULL;
		next->coder->mf.son = NULL;
		next->coder->mf.hash_count = 0;
		next->coder->mf.sons_count = 0;

		next->coder->next = LZMA_NEXT_CODER_INIT;
	}

	// Initialize the LZ-based encoder.
	lzma_lz_options lz_options;
	return_if_error(lz_init(&next->coder->lz, allocator,
			filters[0].options, &lz_options));

	// Setup the size information into next->coder->mf and deallocate
	// old buffers if they have wrong size.
	if (lz_encoder_prepare(&next->coder->mf, allocator, &lz_options))
		return LZMA_OPTIONS_ERROR;

	// Allocate new buffers if needed, and do the rest of
	// the initialization.
	if (lz_encoder_init(&next->coder->mf, allocator, &lz_options))
		return LZMA_MEM_ERROR;

	// Initialize the next filter in the chain, if any.
	return lzma_next_filter_init(&next->coder->next, allocator,
			filters + 1);
}
57289580Smsmith
573274487Sjhb
57489580Smsmithextern LZMA_API(lzma_bool)
57589580Smsmithlzma_mf_is_supported(lzma_match_finder mf)
57689580Smsmith{
57789580Smsmith	bool ret = false;
57889580Smsmith
57989580Smsmith#ifdef HAVE_MF_HC3
58089580Smsmith	if (mf == LZMA_MF_HC3)
58189580Smsmith		ret = true;
58289580Smsmith#endif
58389580Smsmith
58489580Smsmith#ifdef HAVE_MF_HC4
58589580Smsmith	if (mf == LZMA_MF_HC4)
58689580Smsmith		ret = true;
58789580Smsmith#endif
58889580Smsmith
58989580Smsmith#ifdef HAVE_MF_BT2
59089580Smsmith	if (mf == LZMA_MF_BT2)
59189580Smsmith		ret = true;
59289580Smsmith#endif
59389580Smsmith
59489580Smsmith#ifdef HAVE_MF_BT3
59589580Smsmith	if (mf == LZMA_MF_BT3)
59689580Smsmith		ret = true;
597156139Sscottl#endif
59889580Smsmith
59989580Smsmith#ifdef HAVE_MF_BT4
60089580Smsmith	if (mf == LZMA_MF_BT4)
60189580Smsmith		ret = true;
60289580Smsmith#endif
603156139Sscottl
60489580Smsmith	return ret;
605156139Sscottl}
60689580Smsmith