lz_encoder.c revision 312517
///////////////////////////////////////////////////////////////////////////////
//
/// \file       lz_encoder.c
/// \brief      LZ in window
///
//  Authors:    Igor Pavlov
//              Lasse Collin
//
//  This file has been put into the public domain.
//  You can do whatever you want with this file.
//
///////////////////////////////////////////////////////////////////////////////

#include "lz_encoder.h"
#include "lz_encoder_hash.h"

// See lz_encoder_hash.h. This is a bit hackish but avoids making
// endianness a conditional in makefiles.
#if defined(WORDS_BIGENDIAN) && !defined(HAVE_SMALL)
#	include "lz_encoder_hash_table.h"
#endif

#include "memcmplen.h"


typedef struct {
	/// LZ-based encoder e.g. LZMA
	lzma_lz_encoder lz;

	/// History buffer and match finder
	lzma_mf mf;

	/// Next coder in the chain
	lzma_next_coder next;
} lzma_coder;

/// \brief      Moves the data in the input window to free space for new data
///
/// mf->buffer is a sliding input window, which keeps mf->keep_size_before
/// bytes of input history available all the time. Now and then we need to
/// "slide" the buffer to make space for new data at the end of the
/// buffer. At the same time, data older than keep_size_before is dropped.
///
static void
move_window(lzma_mf *mf)
{
	// Align the move to a multiple of 16 bytes. Some LZ-based encoders
	// like LZMA use the lowest bits of mf->read_pos to know the
	// alignment of the uncompressed data. We also get better speed
	// for memmove() with aligned buffers.
	assert(mf->read_pos > mf->keep_size_before);
	const uint32_t move_offset
		= (mf->read_pos - mf->keep_size_before) & ~UINT32_C(15);

	assert(mf->write_pos > move_offset);
	const size_t move_size = mf->write_pos - move_offset;

	assert(move_offset + move_size <= mf->size);

	memmove(mf->buffer, mf->buffer + move_offset, move_size);

	mf->offset += move_offset;
	mf->read_pos -= move_offset;
	mf->read_limit -= move_offset;
	mf->write_pos -= move_offset;

	return;
}
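
// A worked example with hypothetical numbers (not taken from any real
// preset): if mf->read_pos == 1000 and mf->keep_size_before == 100, then
//
//     move_offset = (1000 - 100) & ~15 = 896
//
// so 896 bytes are dropped from the front of mf->buffer and read_pos,
// read_limit and write_pos all decrease by 896 while mf->offset grows by
// 896; the sum mf->offset + mf->read_pos is unchanged, so absolute stream
// positions stay valid. Because 896 is a multiple of 16, the low bits of
// read_pos (the alignment information mentioned above) are preserved too.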


/// \brief      Tries to fill the input window (mf->buffer)
///
/// If we are the last encoder in the chain, our input data is in in[].
/// Otherwise we call the next filter in the chain to process in[] and
/// write its output to mf->buffer.
///
/// This function must not be called once it has returned LZMA_STREAM_END.
///
static lzma_ret
fill_window(lzma_coder *coder, const lzma_allocator *allocator,
		const uint8_t *in, size_t *in_pos, size_t in_size,
		lzma_action action)
{
	assert(coder->mf.read_pos <= coder->mf.write_pos);

	// Move the sliding window if needed.
	if (coder->mf.read_pos >= coder->mf.size - coder->mf.keep_size_after)
		move_window(&coder->mf);

	// Maybe this is ugly, but lzma_mf uses uint32_t for most things
	// (which I find cleanest), while we need size_t here when filling
	// the history window.
	size_t write_pos = coder->mf.write_pos;
	lzma_ret ret;
	if (coder->next.code == NULL) {
		// Not using a filter, simply memcpy() as much as possible.
		lzma_bufcpy(in, in_pos, in_size, coder->mf.buffer,
				&write_pos, coder->mf.size);

		ret = action != LZMA_RUN && *in_pos == in_size
				? LZMA_STREAM_END : LZMA_OK;

	} else {
		ret = coder->next.code(coder->next.coder, allocator,
				in, in_pos, in_size,
				coder->mf.buffer, &write_pos,
				coder->mf.size, action);
	}

	coder->mf.write_pos = write_pos;

	// Silence Valgrind. lzma_memcmplen() can read extra bytes
	// and Valgrind will give warnings if those bytes are uninitialized
	// because Valgrind cannot see that the values of the uninitialized
	// bytes are eventually ignored.
	memzero(coder->mf.buffer + write_pos, LZMA_MEMCMPLEN_EXTRA);

	// If end of stream has been reached or flushing completed, we allow
	// the encoder to process all the input (that is, read_pos is allowed
	// to reach write_pos). Otherwise we keep keep_size_after bytes
	// available as prebuffer.
	if (ret == LZMA_STREAM_END) {
		assert(*in_pos == in_size);
		ret = LZMA_OK;
		coder->mf.action = action;
		coder->mf.read_limit = coder->mf.write_pos;

	} else if (coder->mf.write_pos > coder->mf.keep_size_after) {
		// This needs to be done conditionally, because if we got
		// only a little new input, there may be too little input
		// to do any encoding yet.
		coder->mf.read_limit = coder->mf.write_pos
				- coder->mf.keep_size_after;
	}

	// Restart the match finder after finished LZMA_SYNC_FLUSH.
	if (coder->mf.pending > 0
			&& coder->mf.read_pos < coder->mf.read_limit) {
		// The match finder may update coder->mf.pending and expects
		// it to start from zero, so use a temporary variable.
		const uint32_t pending = coder->mf.pending;
		coder->mf.pending = 0;

		// Rewind read_pos so that the match finder can hash
		// the pending bytes.
		assert(coder->mf.read_pos >= pending);
		coder->mf.read_pos -= pending;

		// Call the skip function directly instead of using
		// mf_skip(), since we don't want to touch mf->read_ahead.
		coder->mf.skip(&coder->mf, pending);
	}

	return ret;
}
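
// A sketch of the read_limit bookkeeping with hypothetical numbers:
// suppose keep_size_after is 300 (it equals after_size + match_len_max,
// both taken from the LZ options) and write_pos is 4096 while more input
// may still follow. Then read_limit becomes 4096 - 300 = 3796, so the
// match finder never reads closer than keep_size_after bytes to the end
// of the buffered data. Once flushing or finishing has consumed all the
// input, read_limit is raised to write_pos and the final bytes get
// encoded as well.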


static lzma_ret
lz_encode(void *coder_ptr, const lzma_allocator *allocator,
		const uint8_t *restrict in, size_t *restrict in_pos,
		size_t in_size,
		uint8_t *restrict out, size_t *restrict out_pos,
		size_t out_size, lzma_action action)
{
	lzma_coder *coder = coder_ptr;

	while (*out_pos < out_size
			&& (*in_pos < in_size || action != LZMA_RUN)) {
		// Read more data to coder->mf.buffer if needed.
		if (coder->mf.action == LZMA_RUN && coder->mf.read_pos
				>= coder->mf.read_limit)
			return_if_error(fill_window(coder, allocator,
					in, in_pos, in_size, action));

		// Encode
		const lzma_ret ret = coder->lz.code(coder->lz.coder,
				&coder->mf, out, out_pos, out_size);
		if (ret != LZMA_OK) {
			// Setting this to LZMA_RUN for cases when we are
			// flushing. It doesn't matter when finishing or if
			// an error occurred.
			coder->mf.action = LZMA_RUN;
			return ret;
		}
	}

	return LZMA_OK;
}


static bool
lz_encoder_prepare(lzma_mf *mf, const lzma_allocator *allocator,
		const lzma_lz_options *lz_options)
{
	// For now, the dictionary size is limited to 1.5 GiB. This may grow
	// in the future if needed, but it needs a little more work than just
	// changing this check.
	if (lz_options->dict_size < LZMA_DICT_SIZE_MIN
			|| lz_options->dict_size
				> (UINT32_C(1) << 30) + (UINT32_C(1) << 29)
			|| lz_options->nice_len > lz_options->match_len_max)
		return true;

	mf->keep_size_before = lz_options->before_size + lz_options->dict_size;

	mf->keep_size_after = lz_options->after_size
			+ lz_options->match_len_max;

	// To avoid constant memmove()s, allocate some extra space. Since
	// memmove()s become more expensive when the size of the buffer
	// increases, we reserve more space when a large dictionary is
	// used to make the memmove() calls rarer.
	//
	// This works with dictionaries up to about 3 GiB. If a bigger
	// dictionary is wanted, some extra work is needed:
	//   - Several variables in lzma_mf have to be changed from uint32_t
	//     to size_t.
	//   - Memory usage calculation needs something too, e.g. use uint64_t
	//     for mf->size.
	uint32_t reserve = lz_options->dict_size / 2;
	if (reserve > (UINT32_C(1) << 30))
		reserve /= 2;

	reserve += (lz_options->before_size + lz_options->match_len_max
			+ lz_options->after_size) / 2 + (UINT32_C(1) << 19);

	const uint32_t old_size = mf->size;
	mf->size = mf->keep_size_before + reserve + mf->keep_size_after;
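
	// A worked example with hypothetical numbers: for a 64 MiB dictionary,
	// reserve starts as 32 MiB (the extra halving applies only when
	// reserve itself exceeds 1 GiB), and then roughly half of
	// before_size + match_len_max + after_size plus 512 KiB (1 << 19) is
	// added on top. mf->size therefore ends up around 1.5 * dict_size
	// plus the small before/after margins, so the buffer is slid only
	// after tens of megabytes of new input instead of on every call.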

	// Deallocate the old history buffer if it exists but has different
	// size than what is needed now.
	if (mf->buffer != NULL && old_size != mf->size) {
		lzma_free(mf->buffer, allocator);
		mf->buffer = NULL;
	}

	// Match finder options
	mf->match_len_max = lz_options->match_len_max;
	mf->nice_len = lz_options->nice_len;

	// cyclic_size has to stay smaller than 2 Gi. Note that this doesn't
	// mean limiting dictionary size to less than 2 GiB. With a match
	// finder that uses multibyte resolution (hashes start at e.g. every
	// fourth byte), cyclic_size would stay below 2 Gi even when
	// dictionary size is greater than 2 GiB.
	//
	// It would be possible to allow cyclic_size >= 2 Gi, but then we
	// would need to be careful to use 64-bit types in various places
	// (size_t could do since we would need bigger than 32-bit address
	// space anyway). It would also require either zeroing a multigigabyte
	// buffer at initialization (waste of time and RAM) or allowing
	// normalization in lz_encoder_mf.c to access uninitialized
	// memory to keep the code simpler. The current way is simple and
	// still allows pretty big dictionaries, so I don't expect these
	// limits to change.
	mf->cyclic_size = lz_options->dict_size + 1;

	// Validate the match finder ID and set up the function pointers.
	switch (lz_options->match_finder) {
#ifdef HAVE_MF_HC3
	case LZMA_MF_HC3:
		mf->find = &lzma_mf_hc3_find;
		mf->skip = &lzma_mf_hc3_skip;
		break;
#endif
#ifdef HAVE_MF_HC4
	case LZMA_MF_HC4:
		mf->find = &lzma_mf_hc4_find;
		mf->skip = &lzma_mf_hc4_skip;
		break;
#endif
#ifdef HAVE_MF_BT2
	case LZMA_MF_BT2:
		mf->find = &lzma_mf_bt2_find;
		mf->skip = &lzma_mf_bt2_skip;
		break;
#endif
#ifdef HAVE_MF_BT3
	case LZMA_MF_BT3:
		mf->find = &lzma_mf_bt3_find;
		mf->skip = &lzma_mf_bt3_skip;
		break;
#endif
#ifdef HAVE_MF_BT4
	case LZMA_MF_BT4:
		mf->find = &lzma_mf_bt4_find;
		mf->skip = &lzma_mf_bt4_skip;
		break;
#endif

	default:
		return true;
	}

	// Calculate the sizes of mf->hash and mf->son and check that
	// nice_len is big enough for the selected match finder.
	const uint32_t hash_bytes = lz_options->match_finder & 0x0F;
	if (hash_bytes > mf->nice_len)
		return true;

	const bool is_bt = (lz_options->match_finder & 0x10) != 0;
	uint32_t hs;

	if (hash_bytes == 2) {
		hs = 0xFFFF;
	} else {
		// Round dictionary size up to the next 2^n - 1 so it can
		// be used as a hash mask.
		hs = lz_options->dict_size - 1;
		hs |= hs >> 1;
		hs |= hs >> 2;
		hs |= hs >> 4;
		hs |= hs >> 8;
		hs >>= 1;
		hs |= 0xFFFF;

		if (hs > (UINT32_C(1) << 24)) {
			if (hash_bytes == 3)
				hs = (UINT32_C(1) << 24) - 1;
			else
				hs >>= 1;
		}
	}

	mf->hash_mask = hs;

	++hs;
	if (hash_bytes > 2)
		hs += HASH_2_SIZE;
	if (hash_bytes > 3)
		hs += HASH_3_SIZE;
/*
	No match finder uses this at the moment.
	if (mf->hash_bytes > 4)
		hs += HASH_4_SIZE;
*/
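
	// A worked example with a hypothetical configuration: a 1 MiB
	// dictionary and a four-byte-hash match finder (hash_bytes == 4).
	// The rounding above turns 0xFFFFF into 0x7FFFF, which stays above
	// the 0xFFFF floor and below the 1 << 24 cap, so hash_mask becomes
	// 2^19 - 1 and the main hash gets 2^19 slots. The final hs adds the
	// fixed HASH_2_SIZE and HASH_3_SIZE tables used for the two- and
	// three-byte hashes.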

	const uint32_t old_hash_count = mf->hash_count;
	const uint32_t old_sons_count = mf->sons_count;
	mf->hash_count = hs;
	mf->sons_count = mf->cyclic_size;
	if (is_bt)
		mf->sons_count *= 2;

	// Deallocate the old hash array if it exists and has different size
	// than what is needed now.
	if (old_hash_count != mf->hash_count
			|| old_sons_count != mf->sons_count) {
		lzma_free(mf->hash, allocator);
		mf->hash = NULL;

		lzma_free(mf->son, allocator);
		mf->son = NULL;
	}

	// Maximum number of match finder cycles
	mf->depth = lz_options->depth;
	if (mf->depth == 0) {
		if (is_bt)
			mf->depth = 16 + mf->nice_len / 2;
		else
			mf->depth = 4 + mf->nice_len / 4;
	}
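
	// For example, with a nice_len of 64 (a hypothetical but common
	// value), a binary tree match finder gets a default depth of
	// 16 + 32 = 48 search cycles and a hash chain match finder gets
	// 4 + 16 = 20.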

	return false;
}


static bool
lz_encoder_init(lzma_mf *mf, const lzma_allocator *allocator,
		const lzma_lz_options *lz_options)
{
	// Allocate the history buffer.
	if (mf->buffer == NULL) {
		// lzma_memcmplen() is used for the dictionary buffer
		// so we need to allocate a few extra bytes to prevent
		// it from reading past the end of the buffer.
		mf->buffer = lzma_alloc(mf->size + LZMA_MEMCMPLEN_EXTRA,
				allocator);
		if (mf->buffer == NULL)
			return true;

		// Keep Valgrind happy with lzma_memcmplen() and initialize
		// the extra bytes whose value may get read but which will
		// effectively get ignored.
		memzero(mf->buffer + mf->size, LZMA_MEMCMPLEN_EXTRA);
	}

	// Use cyclic_size as the initial mf->offset. This allows
	// avoiding a few branches in the match finders. The downside is
	// that the match finder needs to be normalized more often, which may
	// hurt performance with huge dictionaries.
	mf->offset = mf->cyclic_size;
	mf->read_pos = 0;
	mf->read_ahead = 0;
	mf->read_limit = 0;
	mf->write_pos = 0;
	mf->pending = 0;

#if UINT32_MAX >= SIZE_MAX / 4
	// Check for integer overflow. (Huge dictionaries are not
	// possible on 32-bit CPU.)
	if (mf->hash_count > SIZE_MAX / sizeof(uint32_t)
			|| mf->sons_count > SIZE_MAX / sizeof(uint32_t))
		return true;
#endif

	// Allocate and initialize the hash table. Since EMPTY_HASH_VALUE
	// is zero, we can use lzma_alloc_zero() or memzero() for mf->hash.
	//
	// We don't need to initialize mf->son, but not doing that may
	// make Valgrind complain in normalization (see normalize() in
	// lz_encoder_mf.c). Skipping the initialization is *very* good
	// when a big dictionary is used but only a small amount of data gets
	// actually compressed: most of mf->son won't actually get
	// allocated by the kernel, so we avoid wasting RAM and improve
	// initialization speed a lot.
	if (mf->hash == NULL) {
		mf->hash = lzma_alloc_zero(mf->hash_count * sizeof(uint32_t),
				allocator);
		mf->son = lzma_alloc(mf->sons_count * sizeof(uint32_t),
				allocator);

		if (mf->hash == NULL || mf->son == NULL) {
			lzma_free(mf->hash, allocator);
			mf->hash = NULL;

			lzma_free(mf->son, allocator);
			mf->son = NULL;

			return true;
		}
	} else {
/*
		for (uint32_t i = 0; i < mf->hash_count; ++i)
			mf->hash[i] = EMPTY_HASH_VALUE;
*/
		memzero(mf->hash, mf->hash_count * sizeof(uint32_t));
	}

	mf->cyclic_pos = 0;

	// Handle preset dictionary.
	if (lz_options->preset_dict != NULL
			&& lz_options->preset_dict_size > 0) {
		// If the preset dictionary is bigger than the actual
		// dictionary, use only the tail.
		mf->write_pos = my_min(lz_options->preset_dict_size, mf->size);
		memcpy(mf->buffer, lz_options->preset_dict
				+ lz_options->preset_dict_size - mf->write_pos,
				mf->write_pos);
		mf->action = LZMA_SYNC_FLUSH;
		mf->skip(mf, mf->write_pos);
	}

	mf->action = LZMA_RUN;

	return false;
}


extern uint64_t
lzma_lz_encoder_memusage(const lzma_lz_options *lz_options)
{
	// Old buffers must not exist when calling lz_encoder_prepare().
	lzma_mf mf = {
		.buffer = NULL,
		.hash = NULL,
		.son = NULL,
		.hash_count = 0,
		.sons_count = 0,
	};

	// Set up the size information in mf.
	if (lz_encoder_prepare(&mf, NULL, lz_options))
		return UINT64_MAX;

	// Calculate the memory usage.
	return ((uint64_t)(mf.hash_count) + mf.sons_count) * sizeof(uint32_t)
			+ mf.size + sizeof(lzma_coder);
}
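
// In rough terms the figure above is
//
//     4 bytes * (hash_count + sons_count) + mf.size + sizeof(lzma_coder)
//
// For a hypothetical BT4 configuration with a 1 MiB dictionary this is
// dominated by the two 4-byte son entries per cyclic_size position
// (about 8 MiB here), plus the roughly 1.5 * dict_size history buffer
// and the hash table, so the total is on the order of ten megabytes;
// the exact value depends on before_size, after_size and the hash sizes.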


static void
lz_encoder_end(void *coder_ptr, const lzma_allocator *allocator)
{
	lzma_coder *coder = coder_ptr;

	lzma_next_end(&coder->next, allocator);

	lzma_free(coder->mf.son, allocator);
	lzma_free(coder->mf.hash, allocator);
	lzma_free(coder->mf.buffer, allocator);

	if (coder->lz.end != NULL)
		coder->lz.end(coder->lz.coder, allocator);
	else
		lzma_free(coder->lz.coder, allocator);

	lzma_free(coder, allocator);
	return;
}


static lzma_ret
lz_encoder_update(void *coder_ptr, const lzma_allocator *allocator,
		const lzma_filter *filters_null lzma_attribute((__unused__)),
		const lzma_filter *reversed_filters)
{
	lzma_coder *coder = coder_ptr;

	if (coder->lz.options_update == NULL)
		return LZMA_PROG_ERROR;

	return_if_error(coder->lz.options_update(
			coder->lz.coder, reversed_filters));

	return lzma_next_filter_update(
			&coder->next, allocator, reversed_filters + 1);
}


extern lzma_ret
lzma_lz_encoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
		const lzma_filter_info *filters,
		lzma_ret (*lz_init)(lzma_lz_encoder *lz,
			const lzma_allocator *allocator, const void *options,
			lzma_lz_options *lz_options))
{
#ifdef HAVE_SMALL
	// Make sure the CRC32 table has been initialized.
	lzma_crc32_init();
#endif

	// Allocate and initialize the base data structure.
	lzma_coder *coder = next->coder;
	if (coder == NULL) {
		coder = lzma_alloc(sizeof(lzma_coder), allocator);
		if (coder == NULL)
			return LZMA_MEM_ERROR;

		next->coder = coder;
		next->code = &lz_encode;
		next->end = &lz_encoder_end;
		next->update = &lz_encoder_update;

		coder->lz.coder = NULL;
		coder->lz.code = NULL;
		coder->lz.end = NULL;

		// mf.size is initialized to silence Valgrind
		// when used on optimized binaries (GCC may reorder
		// code in a way that Valgrind gets unhappy).
		coder->mf.buffer = NULL;
		coder->mf.size = 0;
		coder->mf.hash = NULL;
		coder->mf.son = NULL;
		coder->mf.hash_count = 0;
		coder->mf.sons_count = 0;

		coder->next = LZMA_NEXT_CODER_INIT;
	}

	// Initialize the LZ-based encoder.
	lzma_lz_options lz_options;
	return_if_error(lz_init(&coder->lz, allocator,
			filters[0].options, &lz_options));

	// Set up the size information in coder->mf and deallocate
	// old buffers if they have the wrong size.
	if (lz_encoder_prepare(&coder->mf, allocator, &lz_options))
		return LZMA_OPTIONS_ERROR;

	// Allocate new buffers if needed, and do the rest of
	// the initialization.
	if (lz_encoder_init(&coder->mf, allocator, &lz_options))
		return LZMA_MEM_ERROR;

	// Initialize the next filter in the chain, if any.
	return lzma_next_filter_init(&coder->next, allocator, filters + 1);
}
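
// To illustrate how the lz_init callback is meant to be used, here is a
// rough, hypothetical sketch of an LZ-based filter's own init function.
// The names my_filter_encoder_init, my_filter_coder and my_filter_encode
// are made up for this example; the real LZMA encoder does its
// equivalent of this in lzma_encoder.c.
//
//     static lzma_ret
//     my_filter_encoder_init(lzma_lz_encoder *lz,
//             const lzma_allocator *allocator, const void *options,
//             lzma_lz_options *lz_options)
//     {
//             // Allocate the filter-specific coder and hook it up.
//             lz->coder = lzma_alloc(sizeof(my_filter_coder), allocator);
//             if (lz->coder == NULL)
//                     return LZMA_MEM_ERROR;
//
//             lz->code = &my_filter_encode; // called from lz_encode()
//             lz->end = NULL;               // plain lzma_free() is enough
//
//             // Parse `options` into lz->coder here, then tell the
//             // generic LZ layer how big a window it must keep.
//             lz_options->dict_size = ...;
//             lz_options->match_len_max = ...;
//             // ... before_size, after_size, nice_len, match_finder, depth
//
//             return LZMA_OK;
//     }
//
// lzma_lz_encoder_init() then sizes the window and the match finder from
// lz_options and calls lz->code through coder->lz.code whenever there is
// data to encode.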


extern LZMA_API(lzma_bool)
lzma_mf_is_supported(lzma_match_finder mf)
{
	bool ret = false;

#ifdef HAVE_MF_HC3
	if (mf == LZMA_MF_HC3)
		ret = true;
#endif

#ifdef HAVE_MF_HC4
	if (mf == LZMA_MF_HC4)
		ret = true;
#endif

#ifdef HAVE_MF_BT2
	if (mf == LZMA_MF_BT2)
		ret = true;
#endif

#ifdef HAVE_MF_BT3
	if (mf == LZMA_MF_BT3)
		ret = true;
#endif

#ifdef HAVE_MF_BT4
	if (mf == LZMA_MF_BT4)
		ret = true;
#endif

	return ret;
}
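
// Example use from application code (hypothetical caller, not part of
// this file): before filling in lzma_options_lzma, a program can check
// that the match finder it wants was compiled into liblzma:
//
//     if (!lzma_mf_is_supported(LZMA_MF_BT4))
//             opts.mf = LZMA_MF_HC3;  // fall back to a cheaper one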