/*
 * bzip2 is written by Julian Seward <jseward@bzip.org>.
 * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
 * See README and LICENSE files in this directory for more information.
 */

/*-------------------------------------------------------------*/
/*--- Compression machinery (not incl block sorting)        ---*/
/*---                                            compress.c ---*/
/*-------------------------------------------------------------*/

/* ------------------------------------------------------------------
This file is part of bzip2/libbzip2, a program and library for
lossless, block-sorting data compression.

bzip2/libbzip2 version 1.0.4 of 20 December 2006
Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>

Please read the WARNING, DISCLAIMER and PATENTS sections in the
README file.

This program is released under the terms of the license contained
in the file LICENSE.
------------------------------------------------------------------ */

/* CHANGES
 * 0.9.0    -- original version.
 * 0.9.0a/b -- no changes in this file.
 * 0.9.0c   -- changed setting of nGroups in sendMTFValues()
 *             so as to do a bit better on small files
 */

/* #include "bzlib_private.h" */

/*---------------------------------------------------*/
/*--- Bit stream I/O                              ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
static
void BZ2_bsInitWrite(EState* s)
{
	s->bsLive = 0;
	s->bsBuff = 0;
}


/*---------------------------------------------------*/
static NOINLINE
void bsFinishWrite(EState* s)
{
	while (s->bsLive > 0) {
		s->zbits[s->numZ] = (uint8_t)(s->bsBuff >> 24);
		s->numZ++;
		s->bsBuff <<= 8;
		s->bsLive -= 8;
	}
}


/*---------------------------------------------------*/
static
/* Inlining helps only on level 5; on other levels it hurts. Why? */
#if CONFIG_BZIP2_FEATURE_SPEED >= 5
ALWAYS_INLINE
#endif
void bsW(EState* s, int32_t n, uint32_t v)
{
	while (s->bsLive >= 8) {
		s->zbits[s->numZ] = (uint8_t)(s->bsBuff >> 24);
		s->numZ++;
		s->bsBuff <<= 8;
		s->bsLive -= 8;
	}
	s->bsBuff |= (v << (32 - s->bsLive - n));
	s->bsLive += n;
}
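/*
 * (Added commentary) bsBuff holds the pending bits left-justified,
 * i.e. the next bit to reach the output is bit 31, and bsLive counts
 * how many of the top bits are pending.  bsW() first flushes whole
 * bytes from the top, guaranteeing bsLive <= 7, so writing up to 24
 * more bits cannot overflow the 32-bit buffer.  Worked example: with
 * bsLive == 5, bsW(s, 3, 0x5) ORs in 0x5 << (32 - 5 - 3) == 0x5 << 24,
 * placing the three new bits immediately below the five already queued.
 */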


/*---------------------------------------------------*/
static
void bsPutU32(EState* s, unsigned u)
{
	bsW(s, 8, (u >> 24) & 0xff);
	bsW(s, 8, (u >> 16) & 0xff);
	bsW(s, 8, (u >>  8) & 0xff);
	bsW(s, 8,  u        & 0xff);
}


/*---------------------------------------------------*/
static
void bsPutU16(EState* s, unsigned u)
{
	bsW(s, 8, (u >>  8) & 0xff);
	bsW(s, 8,  u        & 0xff);
}
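/*
 * (Added note) Both helpers emit bytes through bsW() rather than
 * storing to zbits[] directly, so multi-byte fields stay correct even
 * when the stream is not byte-aligned at the time of the call.
 */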


/*---------------------------------------------------*/
/*--- The back end proper                         ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
static
void makeMaps_e(EState* s)
{
	int i;
	s->nInUse = 0;
	for (i = 0; i < 256; i++) {
		if (s->inUse[i]) {
			s->unseqToSeq[i] = s->nInUse;
			s->nInUse++;
		}
	}
}
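/*
 * (Added example) makeMaps_e() renumbers the byte values actually
 * present in the block into a dense alphabet.  If the block contains
 * only 'a' (97) and 'c' (99), then unseqToSeq[97] == 0,
 * unseqToSeq[99] == 1 and nInUse == 2, so the MTF/Huffman stages work
 * over an alphabet of nInUse + 2 symbols (including RUNA/RUNB and the
 * EOB marker) instead of 258.
 */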


/*---------------------------------------------------*/
static NOINLINE
void generateMTFValues(EState* s)
{
	uint8_t yy[256];
	int32_t i, j;
	int32_t zPend;
	int32_t wr;
	int32_t EOB;

	/*
	 * After sorting (e.g., here),
	 * s->arr1[0 .. s->nblock-1] holds the sorted order,
	 * and
	 * ((uint8_t*)s->arr2)[0 .. s->nblock-1]
	 * holds the original block data.
	 *
	 * The first thing to do is generate the MTF values,
	 * and put them in
	 *	((uint16_t*)s->arr1)[0 .. s->nblock-1].
	 * Because there are never more MTF values than block values,
	 * ptr values in this area are overwritten with MTF values
	 * only when they are no longer needed.
	 *
	 * The final compressed bitstream is generated into the
	 * area starting at
	 *	&((uint8_t*)s->arr2)[s->nblock]
	 *
	 * These storage aliases are set up in bzCompressInit(),
	 * except for the last one, which is arranged in
	 * compressBlock().
	 */
	uint32_t* ptr   = s->ptr;
	uint8_t*  block = s->block;
	uint16_t* mtfv  = s->mtfv;

	makeMaps_e(s);
	EOB = s->nInUse+1;

	for (i = 0; i <= EOB; i++)
		s->mtfFreq[i] = 0;

	wr = 0;
	zPend = 0;
	for (i = 0; i < s->nInUse; i++)
		yy[i] = (uint8_t) i;

	for (i = 0; i < s->nblock; i++) {
		uint8_t ll_i;
		AssertD(wr <= i, "generateMTFValues(1)");
		j = ptr[i] - 1;
		if (j < 0)
			j += s->nblock;
		ll_i = s->unseqToSeq[block[j]];
		AssertD(ll_i < s->nInUse, "generateMTFValues(2a)");

		if (yy[0] == ll_i) {
			zPend++;
		} else {
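			/*
			 * (Added note) Flush the pending zero-run.  A run of
			 * N front-of-list symbols (N == zPend) is written as
			 * N in bijective base 2, least significant digit
			 * first, with RUNA standing for digit 1 and RUNB for
			 * digit 2:
			 *   1 -> A,  2 -> B,  3 -> AA,  4 -> BA,
			 *   5 -> AB, 6 -> BB, 7 -> AAA, ...
			 * so a run of any length costs about log2(N) symbols.
			 */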
			if (zPend > 0) {
				zPend--;
				while (1) {
					if (zPend & 1) {
						mtfv[wr] = BZ_RUNB; wr++;
						s->mtfFreq[BZ_RUNB]++;
					} else {
						mtfv[wr] = BZ_RUNA; wr++;
						s->mtfFreq[BZ_RUNA]++;
					}
					if (zPend < 2) break;
					zPend = (uint32_t)(zPend - 2) / 2;
					/* bbox: unsigned div is easier */
				}
				zPend = 0;
			}
			{
				register uint8_t  rtmp;
				register uint8_t* ryy_j;
				register uint8_t  rll_i;
				rtmp  = yy[1];
				yy[1] = yy[0];
				ryy_j = &(yy[1]);
				rll_i = ll_i;
				while (rll_i != rtmp) {
					register uint8_t rtmp2;
					ryy_j++;
					rtmp2  = rtmp;
					rtmp   = *ryy_j;
					*ryy_j = rtmp2;
				}
				yy[0] = rtmp;
				j = ryy_j - &(yy[0]);
				mtfv[wr] = j+1;
				wr++;
				s->mtfFreq[j+1]++;
			}

		}
	}

	if (zPend > 0) {
		zPend--;
		while (1) {
			if (zPend & 1) {
				mtfv[wr] = BZ_RUNB;
				wr++;
				s->mtfFreq[BZ_RUNB]++;
			} else {
				mtfv[wr] = BZ_RUNA;
				wr++;
				s->mtfFreq[BZ_RUNA]++;
			}
			if (zPend < 2)
				break;
			zPend = (uint32_t)(zPend - 2) / 2;
			/* bbox: unsigned div is easier */
		}
		zPend = 0;
	}

	mtfv[wr] = EOB;
	wr++;
	s->mtfFreq[EOB]++;

	s->nMTF = wr;
}
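
/*
 * (Added illustration) The core of the loop above is plain
 * move-to-front coding, interleaved with the RUNA/RUNB zero-run
 * folding.  The sketch below (not compiled; mtf_encode_sketch is a
 * hypothetical name, not part of bzip2) shows MTF in isolation: each
 * input symbol is replaced by its current rank in a recently-used
 * list, and then moved to the front of that list.
 */
#if 0
static void mtf_encode_sketch(const uint8_t *in, int n,
				uint8_t *out, int nInUse)
{
	uint8_t order[256];
	int i, j;

	for (i = 0; i < nInUse; i++)
		order[i] = (uint8_t)i;		/* identity start order */
	for (i = 0; i < n; i++) {
		uint8_t sym = in[i];
		for (j = 0; order[j] != sym; j++)
			continue;		/* find current rank */
		out[i] = (uint8_t)j;		/* emit the rank */
		while (j > 0) {			/* slide others back... */
			order[j] = order[j - 1];
			j--;
		}
		order[0] = sym;			/* ...and move sym to front */
	}
}
#endif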


/*---------------------------------------------------*/
#define BZ_LESSER_ICOST  0
#define BZ_GREATER_ICOST 15
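/*
 * (Added note) These are artificial code lengths ("integer costs")
 * used only to seed the table-selection iteration below: symbols a
 * table is meant to own are made to look cheap (0 bits) and all
 * others expensive (15 bits), which steers the first cost pass before
 * any real Huffman lengths exist.
 */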

static NOINLINE
void sendMTFValues(EState* s)
{
	int32_t v, t, i, j, gs, ge, totc, bt, bc, iter;
	int32_t nSelectors, alphaSize, minLen, maxLen, selCtr;
	int32_t nGroups, nBytes;

	/*
	 * uint8_t len[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * is a global since the decoder also needs it.
	 *
	 * int32_t  code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * int32_t  rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * are also globals, used only in this function.
	 * They are made global to keep the stack frame small.
	 */
#define code     sendMTFValues__code
#define rfreq    sendMTFValues__rfreq
#define len_pack sendMTFValues__len_pack

	uint16_t cost[BZ_N_GROUPS];
	int32_t  fave[BZ_N_GROUPS];

	uint16_t* mtfv = s->mtfv;

	alphaSize = s->nInUse + 2;
	for (t = 0; t < BZ_N_GROUPS; t++)
		for (v = 0; v < alphaSize; v++)
			s->len[t][v] = BZ_GREATER_ICOST;

	/*--- Decide how many coding tables to use ---*/
	AssertH(s->nMTF > 0, 3001);
	if (s->nMTF < 200)  nGroups = 2; else
	if (s->nMTF < 600)  nGroups = 3; else
	if (s->nMTF < 1200) nGroups = 4; else
	if (s->nMTF < 2400) nGroups = 5; else
	nGroups = 6;

	/*--- Generate an initial set of coding tables ---*/
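	/*
	 * (Added note) Greedy partition: walk the alphabet in order and
	 * let each table claim consecutive symbols until it has taken
	 * roughly a 1/nPart share (tFreq) of the remaining total MTF
	 * frequency (remF), so every table starts out responsible for a
	 * slice of symbols carrying about equal weight.
	 */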
	{
		int32_t nPart, remF, tFreq, aFreq;

		nPart = nGroups;
		remF  = s->nMTF;
		gs = 0;
		while (nPart > 0) {
			tFreq = remF / nPart;
			ge = gs - 1;
			aFreq = 0;
			while (aFreq < tFreq && ge < alphaSize-1) {
				ge++;
				aFreq += s->mtfFreq[ge];
			}

			if (ge > gs
			 && nPart != nGroups && nPart != 1
			 && ((nGroups - nPart) % 2 == 1) /* bbox: can this be replaced by x & 1? */
			) {
				aFreq -= s->mtfFreq[ge];
				ge--;
			}

			for (v = 0; v < alphaSize; v++)
				if (v >= gs && v <= ge)
					s->len[nPart-1][v] = BZ_LESSER_ICOST;
				else
					s->len[nPart-1][v] = BZ_GREATER_ICOST;

			nPart--;
			gs = ge + 1;
			remF -= aFreq;
		}
	}

	/*
	 * Iterate up to BZ_N_ITERS times to improve the tables.
	 */
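	/*
	 * (Added note) Each pass is a Lloyd-style refinement: assign
	 * every 50-symbol group to whichever table currently codes it
	 * most cheaply, accumulate real symbol frequencies per table
	 * (rfreq), then rebuild each table's code lengths from those
	 * frequencies.  Assignments and lengths improve each other until
	 * the loop ends.
	 */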
	for (iter = 0; iter < BZ_N_ITERS; iter++) {
		for (t = 0; t < nGroups; t++)
			fave[t] = 0;

		for (t = 0; t < nGroups; t++)
			for (v = 0; v < alphaSize; v++)
				s->rfreq[t][v] = 0;

#if CONFIG_BZIP2_FEATURE_SPEED >= 5
		/*
		 * Set up an auxiliary length table which is used to fast-track
		 * the common case (nGroups == 6).
		 */
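		/*
		 * (Added note) Each uint32_t in len_pack holds two 16-bit
		 * code lengths, so the costs for all six tables accumulate
		 * with three additions per symbol instead of six.  The
		 * halves cannot carry into each other: a group is at most
		 * 50 symbols of length <= 17 bits, so each 16-bit subtotal
		 * stays below 850.
		 */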
		if (nGroups == 6) {
			for (v = 0; v < alphaSize; v++) {
				s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v];
				s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v];
				s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v];
			}
		}
#endif
		nSelectors = 0;
		totc = 0;
		gs = 0;
		while (1) {
			/*--- Set group start & end marks. ---*/
			if (gs >= s->nMTF)
				break;
			ge = gs + BZ_G_SIZE - 1;
			if (ge >= s->nMTF)
				ge = s->nMTF-1;

			/*
			 * Calculate the cost of this group as coded
			 * by each of the coding tables.
			 */
			for (t = 0; t < nGroups; t++)
				cost[t] = 0;
#if CONFIG_BZIP2_FEATURE_SPEED >= 5
			if (nGroups == 6 && 50 == ge-gs+1) {
				/*--- fast track the common case ---*/
				register uint32_t cost01, cost23, cost45;
				register uint16_t icv;
				cost01 = cost23 = cost45 = 0;
#define BZ_ITER(nn) \
	icv = mtfv[gs+(nn)]; \
	cost01 += s->len_pack[icv][0]; \
	cost23 += s->len_pack[icv][1]; \
	cost45 += s->len_pack[icv][2];
				BZ_ITER(0);  BZ_ITER(1);  BZ_ITER(2);  BZ_ITER(3);  BZ_ITER(4);
				BZ_ITER(5);  BZ_ITER(6);  BZ_ITER(7);  BZ_ITER(8);  BZ_ITER(9);
				BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14);
				BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19);
				BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24);
				BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29);
				BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34);
				BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39);
				BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44);
				BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49);
#undef BZ_ITER
				cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16;
				cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16;
				cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16;

			} else
#endif
			{
				/*--- slow version which correctly handles all situations ---*/
				for (i = gs; i <= ge; i++) {
					uint16_t icv = mtfv[i];
					for (t = 0; t < nGroups; t++)
						cost[t] += s->len[t][icv];
				}
			}
			/*
			 * Find the coding table which is best for this group,
			 * and record its identity in the selector table.
			 */
			/*bc = 999999999;*/
			/*bt = -1;*/
			bc = cost[0];
			bt = 0;
			for (t = 1 /*0*/; t < nGroups; t++) {
				if (cost[t] < bc) {
					bc = cost[t];
					bt = t;
				}
			}
			totc += bc;
			fave[bt]++;
			s->selector[nSelectors] = bt;
			nSelectors++;

			/*
			 * Increment the symbol frequencies for the selected table.
			 */
/* 1% faster compress. +800 bytes */
#if CONFIG_BZIP2_FEATURE_SPEED >= 4
			if (nGroups == 6 && 50 == ge-gs+1) {
				/*--- fast track the common case ---*/
#define BZ_ITUR(nn) s->rfreq[bt][mtfv[gs + (nn)]]++
				BZ_ITUR(0);  BZ_ITUR(1);  BZ_ITUR(2);  BZ_ITUR(3);  BZ_ITUR(4);
				BZ_ITUR(5);  BZ_ITUR(6);  BZ_ITUR(7);  BZ_ITUR(8);  BZ_ITUR(9);
				BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14);
				BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19);
				BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24);
				BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29);
				BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34);
				BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39);
				BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44);
				BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49);
#undef BZ_ITUR
				gs = ge + 1;
			} else
#endif
			{
				/*--- slow version which correctly handles all situations ---*/
				while (gs <= ge) {
					s->rfreq[bt][mtfv[gs]]++;
					gs++;
				}
				/* already is: gs = ge + 1; */
			}
		}

		/*
		 * Recompute the tables based on the accumulated frequencies.
		 */
		/* maxLen was changed from 20 to 17 in bzip2-1.0.3.  See
		 * comment in huffman.c for details. */
		for (t = 0; t < nGroups; t++)
			BZ2_hbMakeCodeLengths(s, &(s->len[t][0]), &(s->rfreq[t][0]), alphaSize, 17 /*20*/);
	}

	AssertH(nGroups < 8, 3002);
	AssertH(nSelectors < 32768 && nSelectors <= (2 + (900000 / BZ_G_SIZE)), 3003);

	/*--- Compute MTF values for the selectors. ---*/
	{
		uint8_t pos[BZ_N_GROUPS], ll_i, tmp2, tmp;

		for (i = 0; i < nGroups; i++)
			pos[i] = i;
		for (i = 0; i < nSelectors; i++) {
			ll_i = s->selector[i];
			j = 0;
			tmp = pos[j];
			while (ll_i != tmp) {
				j++;
				tmp2 = tmp;
				tmp = pos[j];
				pos[j] = tmp2;
			}
			pos[0] = tmp;
			s->selectorMtf[i] = j;
		}
	}

	/*--- Assign actual codes for the tables. ---*/
	for (t = 0; t < nGroups; t++) {
		minLen = 32;
		maxLen = 0;
		for (i = 0; i < alphaSize; i++) {
			if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
			if (s->len[t][i] < minLen) minLen = s->len[t][i];
		}
		AssertH(!(maxLen > 17 /*20*/), 3004);
		AssertH(!(minLen < 1), 3005);
		BZ2_hbAssignCodes(&(s->code[t][0]), &(s->len[t][0]), minLen, maxLen, alphaSize);
	}

	/*--- Transmit the mapping table. ---*/
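	/*
	 * (Added note) The 256 inUse[] flags are sent as a two-level
	 * bitmap: one 16-bit word with a bit per 16-byte slice of the
	 * byte space, followed by a 16-bit mask for each slice that is
	 * actually nonempty.  Plain ASCII text, which touches only a few
	 * slices, thus needs far fewer than the naive 256 bits.
	 */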
	{
		/* bbox: optimized a bit more than in bzip2 */
		int inUse16 = 0;
		for (i = 0; i < 16; i++) {
			if (sizeof(long) <= 4) {
				inUse16 = inUse16*2 +
					((*(uint32_t*)&(s->inUse[i * 16 + 0])
					| *(uint32_t*)&(s->inUse[i * 16 + 4])
					| *(uint32_t*)&(s->inUse[i * 16 + 8])
					| *(uint32_t*)&(s->inUse[i * 16 + 12])) != 0);
			} else { /* Our CPU can do better */
				inUse16 = inUse16*2 +
					((*(uint64_t*)&(s->inUse[i * 16 + 0])
					| *(uint64_t*)&(s->inUse[i * 16 + 8])) != 0);
			}
		}

		nBytes = s->numZ;
		bsW(s, 16, inUse16);

		inUse16 <<= (sizeof(int)*8 - 16); /* move 15th bit into sign bit */
		for (i = 0; i < 16; i++) {
			if (inUse16 < 0) {
				unsigned v16 = 0;
				for (j = 0; j < 16; j++)
					v16 = v16*2 + s->inUse[i * 16 + j];
				bsW(s, 16, v16);
			}
			inUse16 <<= 1;
		}
	}

	/*--- Now the selectors. ---*/
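	/*
	 * (Added note) Selector values were MTF-coded above and are sent
	 * in unary: a value j goes out as j 1-bits followed by a 0.  MTF
	 * makes small values dominate (runs of the same table become
	 * zeros), so most selectors cost only one or two bits.
	 */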
	nBytes = s->numZ;
	bsW(s, 3, nGroups);
	bsW(s, 15, nSelectors);
	for (i = 0; i < nSelectors; i++) {
		for (j = 0; j < s->selectorMtf[i]; j++)
			bsW(s, 1, 1);
		bsW(s, 1, 0);
	}

	/*--- Now the coding tables. ---*/
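	/*
	 * (Added note) Code lengths are delta-coded: the first length is
	 * sent in 5 bits, then for each symbol the encoder emits "10"
	 * (increment) or "11" (decrement) until the running value matches
	 * that symbol's length, and a single 0 bit to move on.  Adjacent
	 * lengths are usually close, so this averages near 1 bit each.
	 */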
	nBytes = s->numZ;

	for (t = 0; t < nGroups; t++) {
		int32_t curr = s->len[t][0];
		bsW(s, 5, curr);
		for (i = 0; i < alphaSize; i++) {
			while (curr < s->len[t][i]) { bsW(s, 2, 2); curr++; /* 10 */ }
			while (curr > s->len[t][i]) { bsW(s, 2, 3); curr--; /* 11 */ }
			bsW(s, 1, 0);
		}
	}

	/*--- And finally, the block data proper ---*/
	nBytes = s->numZ;
	selCtr = 0;
	gs = 0;
	while (1) {
		if (gs >= s->nMTF)
			break;
		ge = gs + BZ_G_SIZE - 1;
		if (ge >= s->nMTF)
			ge = s->nMTF-1;
		AssertH(s->selector[selCtr] < nGroups, 3006);

/* Costs 1300 bytes and is _slower_ (on Intel Core 2) */
#if 0
		if (nGroups == 6 && 50 == ge-gs+1) {
			/*--- fast track the common case ---*/
			uint16_t mtfv_i;
			uint8_t* s_len_sel_selCtr  = &(s->len[s->selector[selCtr]][0]);
			int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
#define BZ_ITAH(nn) \
	mtfv_i = mtfv[gs+(nn)]; \
	bsW(s, s_len_sel_selCtr[mtfv_i], s_code_sel_selCtr[mtfv_i])
			BZ_ITAH(0);  BZ_ITAH(1);  BZ_ITAH(2);  BZ_ITAH(3);  BZ_ITAH(4);
			BZ_ITAH(5);  BZ_ITAH(6);  BZ_ITAH(7);  BZ_ITAH(8);  BZ_ITAH(9);
			BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14);
			BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19);
			BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24);
			BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29);
			BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34);
			BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39);
			BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44);
			BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49);
#undef BZ_ITAH
			gs = ge+1;
		} else
#endif
		{
			/*--- slow version which correctly handles all situations ---*/
			/* the code is a bit bigger, but it moves the multiply out of the loop */
			uint8_t* s_len_sel_selCtr  = &(s->len [s->selector[selCtr]][0]);
			int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
			while (gs <= ge) {
				bsW(s,
					s_len_sel_selCtr[mtfv[gs]],
					s_code_sel_selCtr[mtfv[gs]]
				);
				gs++;
			}
			/* already is: gs = ge+1; */
		}
		selCtr++;
	}
	AssertH(selCtr == nSelectors, 3007);
#undef code
#undef rfreq
#undef len_pack
}


/*---------------------------------------------------*/
static
void BZ2_compressBlock(EState* s, int is_last_block)
{
	if (s->nblock > 0) {
		BZ_FINALISE_CRC(s->blockCRC);
		s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
		s->combinedCRC ^= s->blockCRC;
		if (s->blockNo > 1)
			s->numZ = 0;

		BZ2_blockSort(s);
	}

	s->zbits = &((uint8_t*)s->arr2)[s->nblock];

	/*-- If this is the first block, create the stream header. --*/
	if (s->blockNo == 1) {
		BZ2_bsInitWrite(s);
		/*bsPutU8(s, BZ_HDR_B);*/
		/*bsPutU8(s, BZ_HDR_Z);*/
		/*bsPutU8(s, BZ_HDR_h);*/
		/*bsPutU8(s, BZ_HDR_0 + s->blockSize100k);*/
		bsPutU32(s, BZ_HDR_BZh0 + s->blockSize100k);
	}

	if (s->nblock > 0) {
		/*bsPutU8(s, 0x31);*/
		/*bsPutU8(s, 0x41);*/
		/*bsPutU8(s, 0x59);*/
		/*bsPutU8(s, 0x26);*/
		bsPutU32(s, 0x31415926);
		/*bsPutU8(s, 0x53);*/
		/*bsPutU8(s, 0x59);*/
		bsPutU16(s, 0x5359);
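		/*
		 * (Added note) The 48-bit block magic 0x314159265359 is the
		 * first twelve decimal digits of pi.
		 */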

		/*-- Now the block's CRC, so it is in a known place. --*/
		bsPutU32(s, s->blockCRC);

		/*
		 * Now a single bit indicating (non-)randomisation.
		 * As of version 0.9.5, we use a better sorting algorithm
		 * which makes randomisation unnecessary.  So always set
		 * the randomised bit to 'no'.  Of course, the decoder
		 * still needs to be able to handle randomised blocks
		 * so as to maintain backwards compatibility with
		 * older versions of bzip2.
		 */
		bsW(s, 1, 0);

		bsW(s, 24, s->origPtr);
		generateMTFValues(s);
		sendMTFValues(s);
	}

	/*-- If this is the last block, add the stream trailer. --*/
	if (is_last_block) {
		/*bsPutU8(s, 0x17);*/
		/*bsPutU8(s, 0x72);*/
		/*bsPutU8(s, 0x45);*/
		/*bsPutU8(s, 0x38);*/
		bsPutU32(s, 0x17724538);
		/*bsPutU8(s, 0x50);*/
		/*bsPutU8(s, 0x90);*/
		bsPutU16(s, 0x5090);
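		/*
		 * (Added note) Similarly, the 48-bit end-of-stream magic
		 * 0x177245385090 is sqrt(pi) to twelve digits.
		 */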
		bsPutU32(s, s->combinedCRC);
		bsFinishWrite(s);
	}
}


/*-------------------------------------------------------------*/
/*--- end                                        compress.c ---*/
/*-------------------------------------------------------------*/