///////////////////////////////////////////////////////////////////////////////
//
/// \file       block_encoder.c
/// \brief      Encodes .xz Blocks
//
//  Author:     Lasse Collin
//
//  This file has been put into the public domain.
//  You can do whatever you want with this file.
//
///////////////////////////////////////////////////////////////////////////////

#include "block_encoder.h"
#include "filter_encoder.h"
#include "check.h"


struct lzma_coder_s {
	/// The filters in the chain; initialized with lzma_raw_encoder_init().
	lzma_next_coder next;

	/// Encoding options; we also write Unpadded Size, Compressed Size,
	/// and Uncompressed Size back to this structure when the encoding
	/// has been finished.
	lzma_block *block;

	enum {
		SEQ_CODE,
		SEQ_PADDING,
		SEQ_CHECK,
	} sequence;

	/// Compressed Size calculated while encoding
	lzma_vli compressed_size;

	/// Uncompressed Size calculated while encoding
	lzma_vli uncompressed_size;

	/// Position in the Check field
	size_t pos;

	/// Check of the uncompressed data
	lzma_check_state check;
};


static lzma_ret
block_encode(lzma_coder *coder, lzma_allocator *allocator,
		const uint8_t *restrict in, size_t *restrict in_pos,
		size_t in_size, uint8_t *restrict out,
		size_t *restrict out_pos, size_t out_size, lzma_action action)
{
	// Check that our amount of input stays in proper limits.
	if (LZMA_VLI_MAX - coder->uncompressed_size < in_size - *in_pos)
		return LZMA_DATA_ERROR;

	switch (coder->sequence) {
	case SEQ_CODE: {
		const size_t in_start = *in_pos;
		const size_t out_start = *out_pos;

		const lzma_ret ret = coder->next.code(coder->next.coder,
				allocator, in, in_pos, in_size,
				out, out_pos, out_size, action);

		const size_t in_used = *in_pos - in_start;
		const size_t out_used = *out_pos - out_start;

		if (COMPRESSED_SIZE_MAX - coder->compressed_size < out_used)
			return LZMA_DATA_ERROR;

		coder->compressed_size += out_used;

		// No need to check for overflow because we have already
		// checked it at the beginning of this function.
		coder->uncompressed_size += in_used;

		lzma_check_update(&coder->check, coder->block->check,
				in + in_start, in_used);

		if (ret != LZMA_STREAM_END || action == LZMA_SYNC_FLUSH)
			return ret;

		assert(*in_pos == in_size);
		assert(action == LZMA_FINISH);

		// Copy the values into coder->block. The caller
		// may use this information to construct Index.
		coder->block->compressed_size = coder->compressed_size;
		coder->block->uncompressed_size = coder->uncompressed_size;

		coder->sequence = SEQ_PADDING;
	}

	// Fall through

	case SEQ_PADDING:
		// Pad Compressed Data to a multiple of four bytes. We can
		// use coder->compressed_size for this since we don't need
		// it for anything else anymore.
		while (coder->compressed_size & 3) {
			if (*out_pos >= out_size)
				return LZMA_OK;

			out[*out_pos] = 0x00;
			++*out_pos;
			++coder->compressed_size;
		}

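		// If no integrity check is used, the Check field is empty
		// and the Block ends right after the Block Padding.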
		if (coder->block->check == LZMA_CHECK_NONE)
			return LZMA_STREAM_END;

		lzma_check_finish(&coder->check, coder->block->check);

		coder->sequence = SEQ_CHECK;

	// Fall through

	case SEQ_CHECK: {
		const size_t check_size = lzma_check_size(coder->block->check);
		lzma_bufcpy(coder->check.buffer.u8, &coder->pos, check_size,
				out, out_pos, out_size);
		if (coder->pos < check_size)
			return LZMA_OK;

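		// Copy the Check field also to the lzma_block structure so
		// that the application can read it after encoding.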
		memcpy(coder->block->raw_check, coder->check.buffer.u8,
				check_size);
		return LZMA_STREAM_END;
	}
	}

	return LZMA_PROG_ERROR;
}


static void
block_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
{
	lzma_next_end(&coder->next, allocator);
	lzma_free(coder, allocator);
	return;
}


static lzma_ret
block_encoder_update(lzma_coder *coder, lzma_allocator *allocator,
		const lzma_filter *filters lzma_attribute((__unused__)),
		const lzma_filter *reversed_filters)
{
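	// The filter chain can be changed only while the actual data is
	// still being coded; after that it is too late.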
	if (coder->sequence != SEQ_CODE)
		return LZMA_PROG_ERROR;

	return lzma_next_filter_update(
			&coder->next, allocator, reversed_filters);
}


extern lzma_ret
lzma_block_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
		lzma_block *block)
{
	lzma_next_coder_init(&lzma_block_encoder_init, next, allocator);

	if (block == NULL)
		return LZMA_PROG_ERROR;

	// The contents of the structure may depend on the version so
	// check the version first.
	if (block->version != 0)
		return LZMA_OPTIONS_ERROR;

	// If the Check ID is not supported, we cannot calculate the check
	// and thus cannot create a proper Block.
	if ((unsigned int)(block->check) > LZMA_CHECK_ID_MAX)
		return LZMA_PROG_ERROR;

	if (!lzma_check_is_supported(block->check))
		return LZMA_UNSUPPORTED_CHECK;

	// Allocate and initialize *next->coder if needed.
	if (next->coder == NULL) {
		next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
		if (next->coder == NULL)
			return LZMA_MEM_ERROR;

		next->code = &block_encode;
		next->end = &block_encoder_end;
		next->update = &block_encoder_update;
		next->coder->next = LZMA_NEXT_CODER_INIT;
	}

	// Basic initializations
	next->coder->sequence = SEQ_CODE;
	next->coder->block = block;
	next->coder->compressed_size = 0;
	next->coder->uncompressed_size = 0;
	next->coder->pos = 0;

	// Initialize the check
	lzma_check_init(&next->coder->check, block->check);

	// Initialize the requested filters.
	return lzma_raw_encoder_init(&next->coder->next, allocator,
			block->filters);
}


extern LZMA_API(lzma_ret)
lzma_block_encoder(lzma_stream *strm, lzma_block *block)
{
	lzma_next_strm_init(lzma_block_encoder_init, strm, block);

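	// When the Block encoder is used directly, only LZMA_RUN and
	// LZMA_FINISH are accepted by lzma_code().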
	strm->internal->supported_actions[LZMA_RUN] = true;
	strm->internal->supported_actions[LZMA_FINISH] = true;

	return LZMA_OK;
}
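

// A minimal usage sketch (illustrative only; not part of liblzma): encoding
// a single Block with the public API from <lzma.h>, assuming an LZMA2 preset
// filter chain and CRC64 as the integrity check. Error handling is omitted.
//
//     lzma_options_lzma opt_lzma2;
//     if (lzma_lzma_preset(&opt_lzma2, LZMA_PRESET_DEFAULT))
//         abort();
//
//     lzma_filter filters[] = {
//         { .id = LZMA_FILTER_LZMA2, .options = &opt_lzma2 },
//         { .id = LZMA_VLI_UNKNOWN },
//     };
//
//     lzma_block block = {
//         .version = 0,
//         .check = LZMA_CHECK_CRC64,
//         .filters = filters,
//     };
//
//     lzma_stream strm = LZMA_STREAM_INIT;
//     if (lzma_block_encoder(&strm, &block) != LZMA_OK)
//         abort();
//
//     // Feed input with lzma_code(&strm, LZMA_RUN) and finish with
//     // LZMA_FINISH. When lzma_code() returns LZMA_STREAM_END,
//     // block.compressed_size and block.uncompressed_size have been set
//     // and can be used to build the Index. The Block Header itself is
//     // encoded separately with lzma_block_header_encode().
//
//     lzma_end(&strm);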