inflate.c revision 30309
/*
 * Most parts of this file are not covered by:
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@login.dknet.dk> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $Id: inflate.c,v 1.9 1997/08/02 14:31:25 bde Exp $
 *
 *
 */

#include <sys/param.h>
#include <sys/inflate.h>
#ifdef KERNEL
#include <sys/systm.h>
#endif
#include <sys/malloc.h>

/* malloc(9) type tag under which all Huffman decode tables are allocated */
MALLOC_DEFINE(M_GZIP, "Gzip trees", "Gzip trees");

/* needed to make inflate() work */
#define	uch u_char
#define	ush u_short
#define	ulg u_long

/* Stuff to make inflate() work */
#ifdef KERNEL
#define	memzero(dest,len) bzero(dest,len)
#endif
#define	NOMEMCPY
#ifdef KERNEL
#define	FPRINTF printf
#else
extern void putstr (char *);
#define	FPRINTF putstr
#endif

/*
 * Hand y window bytes to the caller-supplied output routine.
 * NB: on a non-zero result this macro executes `return` from the
 * *enclosing* function, propagating the output routine's error code.
 */
#define FLUSH(x,y) {						\
	int foo = (*x->gz_output)(x->gz_private,x->gz_slide,y);	\
	if (foo)						\
		return foo;					\
	}

static const int qflag = 0;	/* never set; suppresses diagnostics when true */

#ifndef KERNEL			/* want to use this file in kzip also */
extern unsigned char *malloc (int, int, int);
extern void free (void*, int);
#endif

/*
 * This came from unzip-5.12.  I have changed it the flow to pass
 * a structure pointer around, thus hopefully making it re-entrant.
 * Poul-Henning
 */

/* inflate.c -- put in the public domain by Mark Adler
   version c14o, 23 August 1994 */

/* You can do whatever you like with this source file, though I would
   prefer that if you modify it and redistribute it that you include
   comments to that effect with your name and the date.  Thank you.
66 67 History: 68 vers date who what 69 ---- --------- -------------- ------------------------------------ 70 a ~~ Feb 92 M. Adler used full (large, one-step) lookup table 71 b1 21 Mar 92 M. Adler first version with partial lookup tables 72 b2 21 Mar 92 M. Adler fixed bug in fixed-code blocks 73 b3 22 Mar 92 M. Adler sped up match copies, cleaned up some 74 b4 25 Mar 92 M. Adler added prototypes; removed window[] (now 75 is the responsibility of unzip.h--also 76 changed name to slide[]), so needs diffs 77 for unzip.c and unzip.h (this allows 78 compiling in the small model on MSDOS); 79 fixed cast of q in huft_build(); 80 b5 26 Mar 92 M. Adler got rid of unintended macro recursion. 81 b6 27 Mar 92 M. Adler got rid of nextbyte() routine. fixed 82 bug in inflate_fixed(). 83 c1 30 Mar 92 M. Adler removed lbits, dbits environment variables. 84 changed BMAX to 16 for explode. Removed 85 OUTB usage, and replaced it with flush()-- 86 this was a 20% speed improvement! Added 87 an explode.c (to replace unimplod.c) that 88 uses the huft routines here. Removed 89 register union. 90 c2 4 Apr 92 M. Adler fixed bug for file sizes a multiple of 32k. 91 c3 10 Apr 92 M. Adler reduced memory of code tables made by 92 huft_build significantly (factor of two to 93 three). 94 c4 15 Apr 92 M. Adler added NOMEMCPY do kill use of memcpy(). 95 worked around a Turbo C optimization bug. 96 c5 21 Apr 92 M. Adler added the GZ_WSIZE #define to allow reducing 97 the 32K window size for specialized 98 applications. 99 c6 31 May 92 M. Adler added some typecasts to eliminate warnings 100 c7 27 Jun 92 G. Roelofs added some more typecasts (444: MSC bug). 101 c8 5 Oct 92 J-l. Gailly added ifdef'd code to deal with PKZIP bug. 102 c9 9 Oct 92 M. Adler removed a memory error message (~line 416). 103 c10 17 Oct 92 G. Roelofs changed ULONG/UWORD/byte to ulg/ush/uch, 104 removed old inflate, renamed inflate_entry 105 to inflate, added Mark's fix to a comment. 106 c10.5 14 Dec 92 M. 
Adler fix up error messages for incomplete trees. 107 c11 2 Jan 93 M. Adler fixed bug in detection of incomplete 108 tables, and removed assumption that EOB is 109 the longest code (bad assumption). 110 c12 3 Jan 93 M. Adler make tables for fixed blocks only once. 111 c13 5 Jan 93 M. Adler allow all zero length codes (pkzip 2.04c 112 outputs one zero length code for an empty 113 distance tree). 114 c14 12 Mar 93 M. Adler made inflate.c standalone with the 115 introduction of inflate.h. 116 c14b 16 Jul 93 G. Roelofs added (unsigned) typecast to w at 470. 117 c14c 19 Jul 93 J. Bush changed v[N_MAX], l[288], ll[28x+3x] arrays 118 to static for Amiga. 119 c14d 13 Aug 93 J-l. Gailly de-complicatified Mark's c[*p++]++ thing. 120 c14e 8 Oct 93 G. Roelofs changed memset() to memzero(). 121 c14f 22 Oct 93 G. Roelofs renamed quietflg to qflag; made Trace() 122 conditional; added inflate_free(). 123 c14g 28 Oct 93 G. Roelofs changed l/(lx+1) macro to pointer (Cray bug) 124 c14h 7 Dec 93 C. Ghisler huft_build() optimizations. 125 c14i 9 Jan 94 A. Verheijen set fixed_t{d,l} to NULL after freeing; 126 G. Roelofs check NEXTBYTE macro for GZ_EOF. 127 c14j 23 Jan 94 G. Roelofs removed Ghisler "optimizations"; ifdef'd 128 GZ_EOF check. 129 c14k 27 Feb 94 G. Roelofs added some typecasts to avoid warnings. 130 c14l 9 Apr 94 G. Roelofs fixed split comments on preprocessor lines 131 to avoid bug in Encore compiler. 132 c14m 7 Jul 94 P. Kienitz modified to allow assembler version of 133 inflate_codes() (define ASM_INFLATECODES) 134 c14n 22 Jul 94 G. Roelofs changed fprintf to FPRINTF for DLL versions 135 c14o 23 Aug 94 C. Spieler added a newline to a debug statement; 136 G. Roelofs added another typecast to avoid MSC warning 137 */ 138 139 140/* 141 Inflate deflated (PKZIP's method 8 compressed) data. The compression 142 method searches for as much of the current string of bytes (up to a 143 length of 258) in the previous 32K bytes. 
If it doesn't find any 144 matches (of at least length 3), it codes the next byte. Otherwise, it 145 codes the length of the matched string and its distance backwards from 146 the current position. There is a single Huffman code that codes both 147 single bytes (called "literals") and match lengths. A second Huffman 148 code codes the distance information, which follows a length code. Each 149 length or distance code actually represents a base value and a number 150 of "extra" (sometimes zero) bits to get to add to the base value. At 151 the end of each deflated block is a special end-of-block (EOB) literal/ 152 length code. The decoding process is basically: get a literal/length 153 code; if EOB then done; if a literal, emit the decoded byte; if a 154 length then get the distance and emit the referred-to bytes from the 155 sliding window of previously emitted data. 156 157 There are (currently) three kinds of inflate blocks: stored, fixed, and 158 dynamic. The compressor outputs a chunk of data at a time and decides 159 which method to use on a chunk-by-chunk basis. A chunk might typically 160 be 32K to 64K, uncompressed. If the chunk is uncompressible, then the 161 "stored" method is used. In this case, the bytes are simply stored as 162 is, eight bits per byte, with none of the above coding. The bytes are 163 preceded by a count, since there is no longer an EOB code. 164 165 If the data is compressible, then either the fixed or dynamic methods 166 are used. In the dynamic method, the compressed data is preceded by 167 an encoding of the literal/length and distance Huffman codes that are 168 to be used to decode this block. The representation is itself Huffman 169 coded, and so is preceded by a description of that code. These code 170 descriptions take up a little space, and so for small blocks, there is 171 a predefined set of codes, called the fixed codes. 
The fixed method is
   used if the block ends up smaller that way (usually for quite small
   chunks); otherwise the dynamic method is used.  In the latter case, the
   codes are customized to the probabilities in the current block and so
   can code it much better than the pre-determined fixed codes can.

   The Huffman codes themselves are decoded using a multi-level table
   lookup, in order to maximize the speed of decoding plus the speed of
   building the decoding tables.  See the comments below that precede the
   lbits and dbits tuning parameters.
 */


/*
   Notes beyond the 1.93a appnote.txt:

   1. Distance pointers never point before the beginning of the output
      stream.
   2. Distance pointers can point back across blocks, up to 32k away.
   3. There is an implied maximum of 7 bits for the bit length table and
      15 bits for the actual data.
   4. If only one code exists, then it is encoded using one bit.  (Zero
      would be more efficient, but perhaps a little confusing.)  If two
      codes exist, they are coded using one bit each (0 and 1).
   5. There is no way of sending zero distance codes--a dummy must be
      sent if there are none.  (History: a pre 2.0 version of PKZIP would
      store blocks with no distance codes, but this was discovered to be
      too harsh a criterion.)  Valid only for 1.93a.  2.04c does allow
      zero distance codes, which is sent as one code of zero bits in
      length.
   6. There are up to 286 literal/length codes.  Code 256 represents the
      end-of-block.  Note however that the static length tree defines
      288 codes just to fill out the Huffman codes.  Codes 286 and 287
      cannot be used though, since there is no length base or extra bits
      defined for them.  Similarly, there are up to 30 distance codes.
      However, static trees define 32 codes (all 5 bits) to fill out the
      Huffman codes, but the last two had better not show up in the data.
   7.
Unzip can check dynamic Huffman blocks for complete code sets.
      The exception is that a single code would not be complete (see #4).
   8. The five bits following the block type is really the number of
      literal codes sent minus 257.
   9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
      (1+6+6).  Therefore, to output three times the length, you output
      three codes (1+1+1), whereas to output four times the same length,
      you only need two codes (1+3).  Hmm.
   10. In the tree reconstruction algorithm, Code = Code + Increment
      only if BitLength(i) is not zero.  (Pretty obvious.)
   11. Correction: 4 Bits: # of Bit Length codes - 4     (4 - 19)
   12. Note: length code 284 can represent 227-258, but length code 285
      really is 258.  The last length deserves its own, short code
      since it gets used a lot in very redundant files.  The length
      258 is special since 258 - 3 (the min match length) is 255.
   13. The literal/length and distance code bit lengths are read as a
      single stream of lengths.  It is possible (and advantageous) for
      a repeat code (16, 17, or 18) to go across the boundary between
      the two sets of lengths.
 */


/*
 * Accept PKZIP 1.93a streams: widens the code-count limits checked in
 * inflate_dynamic() (288/32 instead of 286/30) and tolerates an
 * incomplete distance tree there.
 */
#define PKZIP_BUG_WORKAROUND	/* PKZIP 1.93a problem--live with it */

/*
   inflate.h must supply the uch slide[GZ_WSIZE] array and the NEXTBYTE,
   FLUSH() and memzero macros.  If the window size is not 32K, it
   should also define GZ_WSIZE.  If INFMOD is defined, it can include
   compiled functions to support the NEXTBYTE and/or FLUSH() macros.
   There are defaults for NEXTBYTE and FLUSH() below for use as
   examples of what those functions need to do.  Normally, you would
   also want FLUSH() to compute a crc on the data.
inflate.h also
   needs to provide these typedefs:

	typedef unsigned char uch;
	typedef unsigned short ush;
	typedef unsigned long ulg;

   This module uses the external functions malloc() and free() (and
   probably memset() or bzero() in the memzero() macro).  Their
   prototypes are normally found in <string.h> and <stdlib.h>.
 */
#define	INFMOD			/* tell inflate.h to include code to be
				 * compiled */

/* Huffman code lookup table entry--this entry is four bytes for machines
   that have 16-bit pointers (e.g. PC's in the small or medium model).
   Valid extra bits are 0..13.  e == 15 is EOB (end of block), e == 16
   means that v is a literal, 16 < e < 32 means that v is a pointer to
   the next table, which codes e - 16 bits, and lastly e == 99 indicates
   an unused code.  If a code with e == 99 is looked up, this implies an
   error in the data. */
struct huft {
	uch e;			/* number of extra bits or operation */
	uch b;			/* number of bits in this code or subcode */
	union {
		ush n;		/* literal, length base, or distance
				 * base */
		struct huft *t;	/* pointer to next level of table */
	} v;
};


/* Function prototypes */
static int huft_build __P((struct inflate *, unsigned *, unsigned, unsigned, const ush *, const ush *, struct huft **, int *));
static int huft_free __P((struct inflate *, struct huft *));
static int inflate_codes __P((struct inflate *, struct huft *, struct huft *, int, int));
static int inflate_stored __P((struct inflate *));
static int xinflate __P((struct inflate *));
static int inflate_fixed __P((struct inflate *));
static int inflate_dynamic __P((struct inflate *));
static int inflate_block __P((struct inflate *, int *));

/* The inflate algorithm uses a sliding 32K byte window on the uncompressed
   stream to find repeated byte strings.  This is implemented here as a
   circular buffer.  The index is updated simply by incrementing and then
   and'ing with 0x7fff (32K-1). */
/* It is left to other modules to supply the 32K area.  It is assumed
   to be usable as if it were declared "uch slide[32768];" or as just
   "uch *slide;" and then malloc'ed in the latter case.  The definition
   must be in unzip.h, included above. */


/* Tables for deflate from PKZIP's appnote.txt. */

/* Order of the bit length code lengths */
static const unsigned border[] = {
	16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};

static const ush cplens[] = {	/* Copy lengths for literal codes 257..285 */
	3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
	35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
	/* note: see note #13 above about the 258 in this list. */

static const ush cplext[] = {	/* Extra bits for literal codes 257..285 */
	0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
	3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99};	/* 99==invalid */

static const ush cpdist[] = {	/* Copy offsets for distance codes 0..29 */
	1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
	257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
	8193, 12289, 16385, 24577};

static const ush cpdext[] = {	/* Extra bits for distance codes */
	0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
	7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
	12, 12, 13, 13};

/* And'ing with mask[n] masks the lower n bits */
static const ush mask[] = {
	0x0000,
	0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
	0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
};


/* Macros for inflate() bit peeking and grabbing.
   The usage is:

	NEEDBITS(glbl,j)
	x = b & mask[j];
	DUMPBITS(j)

   where NEEDBITS makes sure that b has at least j bits in it, and
   DUMPBITS removes the bits from b.  The macros use the variable k
   for the number of bits in b.  Normally, b and k are register
   variables for speed, and are initialized at the beginning of a
   routine that uses these macros from a global bit buffer and count.

   In order to not ask for more bits than there are in the compressed
   stream, the Huffman tables are constructed to only ask for just
   enough bits to make up the end-of-block code (value 256).  Then no
   bytes need to be "returned" to the buffer at the end of the last
   block.  See the huft_build() routine.
 */

/*
 * The following 2 were global variables.
 * They are now fields of the inflate structure.
 */

/*
 * Top up the local bit buffer b (bit count k) to at least n bits by
 * pulling bytes from the caller-supplied input routine.
 * NB: on GZ_EOF this macro executes `return 1` from the *enclosing*
 * function; callers that own malloc'ed tables must be aware of that.
 */
#define NEEDBITS(glbl,n) {					\
	while(k<(n)) {						\
		int c=(*glbl->gz_input)(glbl->gz_private);	\
		if(c==GZ_EOF)					\
			return 1;				\
		b|=((ulg)c)<<k;					\
		k+=8;						\
	}							\
	}

#define DUMPBITS(n) {b>>=(n);k-=(n);}

/*
   Huffman code decoding is performed using a multi-level table lookup.
   The fastest way to decode is to simply build a lookup table whose
   size is determined by the longest code.  However, the time it takes
   to build this table can also be a factor if the data being decoded
   is not very long.  The most common codes are necessarily the
   shortest codes, so those codes dominate the decoding time, and hence
   the speed.  The idea is you can have a shorter table that decodes the
   shorter, more probable codes, and then point to subsidiary tables for
   the longer codes.  The time it costs to decode the longer codes is
   then traded against the time it takes to make longer tables.

   The results of this trade are in the variables lbits and dbits
   below.  lbits is the number of bits the first level table for literal/
   length codes can decode in one step, and dbits is the same thing for
   the distance codes.  Subsequent tables are also less than or equal to
   those sizes.  These values may be adjusted either when all of the
   codes are shorter than that, in which case the longest code length in
   bits is used, or when the shortest code is *longer* than the requested
   table size, in which case the length of the shortest code in bits is
   used.

   There are two different values for the two tables, since they code a
   different number of possibilities each.  The literal/length table
   codes 286 possible values, or in a flat code, a little over eight
   bits.  The distance table codes 30 possible values, or a little less
   than five bits, flat.  The optimum values for speed end up being
   about one bit more than those, so lbits is 8+1 and dbits is 5+1.
   The optimum values may differ though from machine to machine, and
   possibly even between compilers.  Your mileage may vary.
 */

static const int lbits = 9;	/* bits in base literal/length lookup table */
static const int dbits = 6;	/* bits in base distance lookup table */


/* If BMAX needs to be larger than 16, then h and x[] should be ulg. */
#define BMAX 16			/* maximum bit length of any code (16 for
				 * explode) */
#define N_MAX 288		/* maximum number of codes in any set */

/* Given a list of code lengths and a maximum table size, make a set of
   tables to decode that set of codes.  Return zero on success, one if
   the given code set is incomplete (the tables are still built in this
   case), two if the input is invalid (all zero length codes or an
   oversubscribed set of lengths), and three if not enough memory.
   The code with value 256 is special, and the tables are constructed
   so that no bits beyond that code are fetched when that code is
   decoded.
 */
static int
huft_build(glbl, b, n, s, d, e, t, m)
	struct inflate *glbl;	/* per-stream state; gz_hufts tracks table memory */
	unsigned *b;		/* code lengths in bits (all assumed <= BMAX) */
	unsigned n;		/* number of codes (assumed <= N_MAX) */
	unsigned s;		/* number of simple-valued codes (0..s-1) */
	const ush *d;		/* list of base values for non-simple codes */
	const ush *e;		/* list of extra bits for non-simple codes */
	struct huft **t;	/* result: starting table */
	int *m;			/* maximum lookup bits, returns actual */
{
	unsigned a;		/* counter for codes of length k */
	unsigned c[BMAX + 1];	/* bit length count table */
	unsigned el;		/* length of EOB code (value 256) */
	unsigned f;		/* i repeats in table every f entries */
	int g;			/* maximum code length */
	int h;			/* table level */
	register unsigned i;	/* counter, current code */
	register unsigned j;	/* counter */
	register int k;		/* number of bits in current code */
	int lx[BMAX + 1];	/* memory for l[-1..BMAX-1] */
	int *l = lx + 1;	/* stack of bits per table */
	register unsigned *p;	/* pointer into c[], b[], or v[] */
	register struct huft *q;/* points to current table */
	struct huft r;		/* table entry for structure assignment */
	struct huft *u[BMAX];	/* table stack */
	unsigned v[N_MAX];	/* values in order of bit length */
	register int w;		/* bits before this table == (l * h) */
	unsigned x[BMAX + 1];	/* bit offsets, then code stack */
	unsigned *xp;		/* pointer into x */
	int y;			/* number of dummy codes added */
	unsigned z;		/* number of entries in current table */

	/* Generate counts for each bit length */
	el = n > 256 ? b[256] : BMAX;	/* set length of EOB code, if any */
#ifdef KERNEL
	memzero((char *) c, sizeof(c));
#else
	for (i = 0; i < BMAX+1; i++)
		c[i] = 0;
#endif
	p = b;
	i = n;
	do {
		c[*p]++;
		p++;		/* assume all entries <= BMAX */
	} while (--i);
	if (c[0] == n) {	/* null input--all zero length codes */
		*t = (struct huft *) NULL;
		*m = 0;
		return 0;
	}
	/* Find minimum and maximum length, bound *m by those */
	for (j = 1; j <= BMAX; j++)
		if (c[j])
			break;
	k = j;			/* minimum code length */
	if ((unsigned) *m < j)
		*m = j;
	for (i = BMAX; i; i--)
		if (c[i])
			break;
	g = i;			/* maximum code length */
	if ((unsigned) *m > i)
		*m = i;

	/* Adjust last length count to fill out codes, if needed */
	for (y = 1 << j; j < i; j++, y <<= 1)
		if ((y -= c[j]) < 0)
			return 2;	/* bad input: more codes than bits */
	if ((y -= c[i]) < 0)
		return 2;
	c[i] += y;		/* y > 0 here means the code set is incomplete */

	/* Generate starting offsets into the value table for each length */
	x[1] = j = 0;
	p = c + 1;
	xp = x + 2;
	while (--i) {		/* note that i == g from above */
		*xp++ = (j += *p++);
	}

	/* Make a table of values in order of bit lengths */
	p = b;
	i = 0;
	do {
		if ((j = *p++) != 0)
			v[x[j]++] = i;
	} while (++i < n);

	/* Generate the Huffman codes and for each, make the table entries */
	x[0] = i = 0;		/* first Huffman code is zero */
	p = v;			/* grab values in bit order */
	h = -1;			/* no tables yet--level -1 */
	w = l[-1] = 0;		/* no bits decoded yet */
	u[0] = (struct huft *) NULL;	/* just to keep compilers happy */
	q = (struct huft *) NULL;	/* ditto */
	z = 0;			/* ditto */

	/* go through the bit lengths (k already is bits in shortest code) */
	for (; k <= g; k++) {
		a = c[k];
		while (a--) {
			/*
			 * here i is the Huffman code of length k bits for
			 * value *p
			 */
			/* make tables up to required level */
			while (k > w + l[h]) {
				w += l[h++];	/* add bits already decoded */

				/*
				 * compute minimum size table less than or
				 * equal to *m bits
				 */
				z = (z = g - w) > (unsigned) *m ? *m : z;	/* upper limit */
				if ((f = 1 << (j = k - w)) > a + 1) {	/* try a k-w bit table */
									/* too few codes for k-w
									 * bit table */
					f -= a + 1;	/* deduct codes from
							 * patterns left */
					xp = c + k;
					while (++j < z) {	/* try smaller tables up
								 * to z bits */
						if ((f <<= 1) <= *++xp)
							break;	/* enough codes to use
								 * up j bits */
						f -= *xp;	/* else deduct codes
								 * from patterns */
					}
				}
				if ((unsigned) w + j > el && (unsigned) w < el)
					j = el - w;	/* make EOB code end at
							 * table */
				z = 1 << j;	/* table entries for j-bit
						 * table */
				l[h] = j;	/* set table size in stack */

				/* allocate and link in new table; entry 0 is
				 * a dummy link used by huft_free() */
				if ((q = (struct huft *) malloc((z + 1) * sizeof(struct huft), M_GZIP, M_WAITOK)) ==
				    (struct huft *) NULL) {
					if (h)
						huft_free(glbl, u[0]);
					return 3;	/* not enough memory */
				}
				glbl->gz_hufts += z + 1;	/* track memory usage */
				*t = q + 1;	/* link to list for
						 * huft_free() */
				*(t = &(q->v.t)) = (struct huft *) NULL;
				u[h] = ++q;	/* table starts after link */

				/* connect to last table, if there is one */
				if (h) {
					x[h] = i;	/* save pattern for
							 * backing up */
					r.b = (uch) l[h - 1];	/* bits to dump before
								 * this table */
					r.e = (uch) (16 + j);	/* bits in this table */
					r.v.t = q;	/* pointer to this table */
					j = (i & ((1 << w) - 1)) >> (w - l[h - 1]);
					u[h - 1][j] = r;	/* connect to last table */
				}
			}

			/* set up table entry in r */
			r.b = (uch) (k - w);
			if (p >= v + n)
				r.e = 99;	/* out of values--invalid
						 * code */
			else if (*p < s) {
				r.e = (uch) (*p < 256 ? 16 : 15);	/* 256 is end-of-block
									 * code */
				r.v.n = *p++;	/* simple code is just the
						 * value */
			} else {
				r.e = (uch) e[*p - s];	/* non-simple--look up
							 * in lists */
				r.v.n = d[*p++ - s];
			}

			/* fill code-like entries with r */
			f = 1 << (k - w);
			for (j = i >> w; j < z; j += f)
				q[j] = r;

			/* backwards increment the k-bit code i */
			for (j = 1 << (k - 1); i & j; j >>= 1)
				i ^= j;
			i ^= j;

			/* backup over finished tables */
			while ((i & ((1 << w) - 1)) != x[h])
				w -= l[--h];	/* don't need to update q */
		}
	}

	/* return actual size of base table */
	*m = l[0];

	/* Return true (1) if we were given an incomplete table */
	return y != 0 && g != 1;
}

/*
 * Free the malloc'ed tables built by huft_build(), which makes a linked
 * list of the tables it made, with the links in a dummy first entry of
 * each table.  Safe to call with a NULL table pointer.
 */
static int
huft_free(glbl, t)
	struct inflate *glbl;
	struct huft *t;		/* table to free */
{
	register struct huft *p, *q;

	/* Go through linked list, freeing from the malloced (t[-1]) address. */
	p = t;
	while (p != (struct huft *) NULL) {
		q = (--p)->v.t;
		free(p, M_GZIP);
		p = q;
	}
	return 0;
}

/* inflate (decompress) the codes in a deflated (compressed) block.
   Return an error code or zero if it all goes ok.
 */
static int
inflate_codes(glbl, tl, td, bl, bd)
	struct inflate *glbl;
	struct huft *tl, *td;	/* literal/length and distance decoder tables */
	int bl, bd;		/* number of bits decoded by tl[] and td[] */
/* Returns 0 on end-of-block, 1 on bad code (e == 99) or input EOF
   (via NEEDBITS), or the output routine's error (via FLUSH).
   NOTE(review): on error returns the caller still owns tl/td and must
   free them. */
{
	register unsigned e;	/* table entry flag/number of extra bits */
	unsigned n, d;		/* length and index for copy */
	unsigned w;		/* current window position */
	struct huft *t;		/* pointer to table entry */
	unsigned ml, md;	/* masks for bl and bd bits */
	register ulg b;		/* bit buffer */
	register unsigned k;	/* number of bits in bit buffer */

	/* make local copies of globals */
	b = glbl->gz_bb;	/* initialize bit buffer */
	k = glbl->gz_bk;
	w = glbl->gz_wp;	/* initialize window position */

	/* inflate the coded data */
	ml = mask[bl];		/* precompute masks for speed */
	md = mask[bd];
	while (1) {		/* do until end of block */
		NEEDBITS(glbl, (unsigned) bl)
		if ((e = (t = tl + ((unsigned) b & ml))->e) > 16)
			do {	/* walk subsidiary tables until a leaf entry */
				if (e == 99)
					return 1;
				DUMPBITS(t->b)
				e -= 16;
				NEEDBITS(glbl, e)
			} while ((e = (t = t->v.t + ((unsigned) b & mask[e]))->e) > 16);
		DUMPBITS(t->b)
		if (e == 16) {	/* then it's a literal */
			glbl->gz_slide[w++] = (uch) t->v.n;
			if (w == GZ_WSIZE) {
				FLUSH(glbl, w);
				w = 0;
			}
		} else {	/* it's an EOB or a length */
			/* exit if end of block */
			if (e == 15)
				break;

			/* get length of block to copy */
			NEEDBITS(glbl, e)
			n = t->v.n + ((unsigned) b & mask[e]);
			DUMPBITS(e);

			/* decode distance of block to copy */
			NEEDBITS(glbl, (unsigned) bd)
			if ((e = (t = td + ((unsigned) b & md))->e) > 16)
				do {
					if (e == 99)
						return 1;
					DUMPBITS(t->b)
					e -= 16;
					NEEDBITS(glbl, e)
				} while ((e = (t = t->v.t + ((unsigned) b & mask[e]))->e) > 16);
			DUMPBITS(t->b)
			NEEDBITS(glbl, e)
			d = w - t->v.n - ((unsigned) b & mask[e]);
			DUMPBITS(e)
			/* do the copy, in chunks bounded by the circular
			 * window edge nearest to w or d */
			do {
				n -= (e = (e = GZ_WSIZE - ((d &= GZ_WSIZE - 1) > w ? d : w)) > n ? n : e);
#ifndef NOMEMCPY
				if (w - d >= e) {	/* (this test assumes
							 * unsigned comparison) */
					memcpy(glbl->gz_slide + w, glbl->gz_slide + d, e);
					w += e;
					d += e;
				} else	/* do it slow to avoid memcpy()
					 * overlap */
#endif				/* !NOMEMCPY */
					do {
						glbl->gz_slide[w++] = glbl->gz_slide[d++];
					} while (--e);
				if (w == GZ_WSIZE) {
					FLUSH(glbl, w);
					w = 0;
				}
			} while (n);
		}
	}

	/* restore the globals from the locals */
	glbl->gz_wp = w;	/* restore global window pointer */
	glbl->gz_bb = b;	/* restore global bit buffer */
	glbl->gz_bk = k;

	/* done */
	return 0;
}

/* "decompress" an inflated type 0 (stored) block. */
static int
inflate_stored(glbl)
	struct inflate *glbl;
/* Returns 0 on success, 1 on a length/complement mismatch or input EOF
   (via NEEDBITS), or the output routine's error (via FLUSH). */
{
	unsigned n;		/* number of bytes in block */
	unsigned w;		/* current window position */
	register ulg b;		/* bit buffer */
	register unsigned k;	/* number of bits in bit buffer */

	/* make local copies of globals */
	b = glbl->gz_bb;	/* initialize bit buffer */
	k = glbl->gz_bk;
	w = glbl->gz_wp;	/* initialize window position */

	/* go to byte boundary */
	n = k & 7;
	DUMPBITS(n);

	/* get the length and its one's complement */
	NEEDBITS(glbl, 16)
	n = ((unsigned) b & 0xffff);
	DUMPBITS(16)
	NEEDBITS(glbl, 16)
	if (n != (unsigned) ((~b) & 0xffff))
		return 1;	/* error in compressed data */
	DUMPBITS(16)
	/* read and output the compressed data */
	while (n--) {
		NEEDBITS(glbl, 8)
		glbl->gz_slide[w++] = (uch) b;
		if (w == GZ_WSIZE) {
			FLUSH(glbl, w);
			w = 0;
		}
		DUMPBITS(8)
	}

	/* restore the globals from the locals */
	glbl->gz_wp = w;	/* restore global window pointer */
	glbl->gz_bb = b;	/* restore global bit buffer */
	glbl->gz_bk = k;
	return 0;
}

/* decompress an inflated type 1 (fixed Huffman codes) block.
We should
   either replace this with a custom decoder, or at least precompute the
   Huffman tables. */
static int
inflate_fixed(glbl)
	struct inflate *glbl;
/* Builds the RFC-style fixed literal/length and distance trees on first
   use, caches them in glbl->gz_fixed_*, then decodes with them.
   gz_fixed_tl == NULL doubles as the "not yet built" flag. */
{
	/* if first time, set up tables for fixed blocks */
	if (glbl->gz_fixed_tl == (struct huft *) NULL) {
		int i;		/* temporary variable */
		static unsigned l[288];	/* length list for huft_build */
		/* NOTE(review): `l` is function-static scratch space --
		 * presumably callers are serialized; confirm if this can
		 * run concurrently for several streams. */

		/* literal table */
		for (i = 0; i < 144; i++)
			l[i] = 8;
		for (; i < 256; i++)
			l[i] = 9;
		for (; i < 280; i++)
			l[i] = 7;
		for (; i < 288; i++)	/* make a complete, but wrong code
					 * set */
			l[i] = 8;
		glbl->gz_fixed_bl = 7;
		if ((i = huft_build(glbl, l, 288, 257, cplens, cplext,
			    &glbl->gz_fixed_tl, &glbl->gz_fixed_bl)) != 0) {
			glbl->gz_fixed_tl = (struct huft *) NULL;
			return i;
		}
		/* distance table: incomplete by design (30 of 32 codes),
		 * so a huft_build() result of 1 is accepted here */
		for (i = 0; i < 30; i++)	/* make an incomplete code
						 * set */
			l[i] = 5;
		glbl->gz_fixed_bd = 5;
		if ((i = huft_build(glbl, l, 30, 0, cpdist, cpdext,
			    &glbl->gz_fixed_td, &glbl->gz_fixed_bd)) > 1) {
			huft_free(glbl, glbl->gz_fixed_tl);
			glbl->gz_fixed_tl = (struct huft *) NULL;
			return i;
		}
	}
	/* decompress until an end-of-block code */
	return inflate_codes(glbl, glbl->gz_fixed_tl, glbl->gz_fixed_td, glbl->gz_fixed_bl, glbl->gz_fixed_bd) != 0;
}

/* decompress an inflated type 2 (dynamic Huffman codes) block. */
*/ 820static int 821inflate_dynamic(glbl) 822 struct inflate *glbl; 823{ 824 int i; /* temporary variables */ 825 unsigned j; 826 unsigned l; /* last length */ 827 unsigned m; /* mask for bit lengths table */ 828 unsigned n; /* number of lengths to get */ 829 struct huft *tl; /* literal/length code table */ 830 struct huft *td; /* distance code table */ 831 int bl; /* lookup bits for tl */ 832 int bd; /* lookup bits for td */ 833 unsigned nb; /* number of bit length codes */ 834 unsigned nl; /* number of literal/length codes */ 835 unsigned nd; /* number of distance codes */ 836#ifdef PKZIP_BUG_WORKAROUND 837 unsigned ll[288 + 32]; /* literal/length and distance code 838 * lengths */ 839#else 840 unsigned ll[286 + 30]; /* literal/length and distance code 841 * lengths */ 842#endif 843 register ulg b; /* bit buffer */ 844 register unsigned k; /* number of bits in bit buffer */ 845 846 /* make local bit buffer */ 847 b = glbl->gz_bb; 848 k = glbl->gz_bk; 849 850 /* read in table lengths */ 851 NEEDBITS(glbl, 5) 852 nl = 257 + ((unsigned) b & 0x1f); /* number of 853 * literal/length codes */ 854 DUMPBITS(5) 855 NEEDBITS(glbl, 5) 856 nd = 1 + ((unsigned) b & 0x1f); /* number of distance codes */ 857 DUMPBITS(5) 858 NEEDBITS(glbl, 4) 859 nb = 4 + ((unsigned) b & 0xf); /* number of bit length codes */ 860 DUMPBITS(4) 861#ifdef PKZIP_BUG_WORKAROUND 862 if (nl > 288 || nd > 32) 863#else 864 if (nl > 286 || nd > 30) 865#endif 866 return 1; /* bad lengths */ 867 /* read in bit-length-code lengths */ 868 for (j = 0; j < nb; j++) { 869 NEEDBITS(glbl, 3) 870 ll[border[j]] = (unsigned) b & 7; 871 DUMPBITS(3) 872 } 873 for (; j < 19; j++) 874 ll[border[j]] = 0; 875 876 /* build decoding table for trees--single level, 7 bit lookup */ 877 bl = 7; 878 if ((i = huft_build(glbl, ll, 19, 19, NULL, NULL, &tl, &bl)) != 0) { 879 if (i == 1) 880 huft_free(glbl, tl); 881 return i; /* incomplete code set */ 882 } 883 /* read in literal and distance code lengths */ 884 n = nl + nd; 885 m = 
mask[bl]; 886 i = l = 0; 887 while ((unsigned) i < n) { 888 NEEDBITS(glbl, (unsigned) bl) 889 j = (td = tl + ((unsigned) b & m))->b; 890 DUMPBITS(j) 891 j = td->v.n; 892 if (j < 16) /* length of code in bits (0..15) */ 893 ll[i++] = l = j; /* save last length in l */ 894 else if (j == 16) { /* repeat last length 3 to 6 times */ 895 NEEDBITS(glbl, 2) 896 j = 3 + ((unsigned) b & 3); 897 DUMPBITS(2) 898 if ((unsigned) i + j > n) 899 return 1; 900 while (j--) 901 ll[i++] = l; 902 } else if (j == 17) { /* 3 to 10 zero length codes */ 903 NEEDBITS(glbl, 3) 904 j = 3 + ((unsigned) b & 7); 905 DUMPBITS(3) 906 if ((unsigned) i + j > n) 907 return 1; 908 while (j--) 909 ll[i++] = 0; 910 l = 0; 911 } else { /* j == 18: 11 to 138 zero length codes */ 912 NEEDBITS(glbl, 7) 913 j = 11 + ((unsigned) b & 0x7f); 914 DUMPBITS(7) 915 if ((unsigned) i + j > n) 916 return 1; 917 while (j--) 918 ll[i++] = 0; 919 l = 0; 920 } 921 } 922 923 /* free decoding table for trees */ 924 huft_free(glbl, tl); 925 926 /* restore the global bit buffer */ 927 glbl->gz_bb = b; 928 glbl->gz_bk = k; 929 930 /* build the decoding tables for literal/length and distance codes */ 931 bl = lbits; 932 i = huft_build(glbl, ll, nl, 257, cplens, cplext, &tl, &bl); 933 if (i != 0) { 934 if (i == 1 && !qflag) { 935 FPRINTF("(incomplete l-tree) "); 936 huft_free(glbl, tl); 937 } 938 return i; /* incomplete code set */ 939 } 940 bd = dbits; 941 i = huft_build(glbl, ll + nl, nd, 0, cpdist, cpdext, &td, &bd); 942 if (i != 0) { 943 if (i == 1 && !qflag) { 944 FPRINTF("(incomplete d-tree) "); 945#ifdef PKZIP_BUG_WORKAROUND 946 i = 0; 947 } 948#else 949 huft_free(glbl, td); 950 } 951 huft_free(glbl, tl); 952 return i; /* incomplete code set */ 953#endif 954 } 955 /* decompress until an end-of-block code */ 956 if (inflate_codes(glbl, tl, td, bl, bd)) 957 return 1; 958 959 /* free the decoding tables, return */ 960 huft_free(glbl, tl); 961 huft_free(glbl, td); 962 return 0; 963} 964 965/* decompress an inflated block */ 
966static int 967inflate_block(glbl, e) 968 struct inflate *glbl; 969 int *e; /* last block flag */ 970{ 971 unsigned t; /* block type */ 972 register ulg b; /* bit buffer */ 973 register unsigned k; /* number of bits in bit buffer */ 974 975 /* make local bit buffer */ 976 b = glbl->gz_bb; 977 k = glbl->gz_bk; 978 979 /* read in last block bit */ 980 NEEDBITS(glbl, 1) 981 * e = (int) b & 1; 982 DUMPBITS(1) 983 /* read in block type */ 984 NEEDBITS(glbl, 2) 985 t = (unsigned) b & 3; 986 DUMPBITS(2) 987 /* restore the global bit buffer */ 988 glbl->gz_bb = b; 989 glbl->gz_bk = k; 990 991 /* inflate that block type */ 992 if (t == 2) 993 return inflate_dynamic(glbl); 994 if (t == 0) 995 return inflate_stored(glbl); 996 if (t == 1) 997 return inflate_fixed(glbl); 998 /* bad block type */ 999 return 2; 1000} 1001 1002 1003 1004/* decompress an inflated entry */ 1005static int 1006xinflate(glbl) 1007 struct inflate *glbl; 1008{ 1009 int e; /* last block flag */ 1010 int r; /* result code */ 1011 unsigned h; /* maximum struct huft's malloc'ed */ 1012 1013 glbl->gz_fixed_tl = (struct huft *) NULL; 1014 1015 /* initialize window, bit buffer */ 1016 glbl->gz_wp = 0; 1017 glbl->gz_bk = 0; 1018 glbl->gz_bb = 0; 1019 1020 /* decompress until the last block */ 1021 h = 0; 1022 do { 1023 glbl->gz_hufts = 0; 1024 if ((r = inflate_block(glbl, &e)) != 0) 1025 return r; 1026 if (glbl->gz_hufts > h) 1027 h = glbl->gz_hufts; 1028 } while (!e); 1029 1030 /* flush out slide */ 1031 FLUSH(glbl, glbl->gz_wp); 1032 1033 /* return success */ 1034 return 0; 1035} 1036 1037/* Nobody uses this - why not? 
*/
/*
 * Public entry point: decompress a complete deflate stream described by
 * *glbl, using the gz_input/gz_output callbacks in the state structure.
 *
 * In the kernel, a GZ_WSIZE sliding window is allocated here when the
 * caller did not supply one, and freed again before returning.  Returns
 * 0 on success; ENOMEM (kernel) or 3 (userland/kzip) when no window is
 * available; otherwise the error code from xinflate().
 */
int
inflate(glbl)
	struct inflate *glbl;
{
	int i;
#ifdef KERNEL
	u_char *p = NULL;	/* non-NULL iff we allocated the window */

	if (!glbl->gz_slide)
		p = glbl->gz_slide = malloc(GZ_WSIZE, M_GZIP, M_WAITOK);
#endif
	if (!glbl->gz_slide)
#ifdef KERNEL
		return(ENOMEM);
#else
		return 3;	/* kzip expects 3 */
#endif
	i = xinflate(glbl);

	/*
	 * Release any cached fixed-block Huffman tables.
	 * NOTE(review): xinflate() explicitly NULLs only gz_fixed_tl, so
	 * this test presumably relies on gz_fixed_td being NULL whenever
	 * no fixed-block table was built (e.g. a caller-zeroed struct) --
	 * verify, otherwise huft_free() may get an indeterminate pointer.
	 */
	if (glbl->gz_fixed_td != (struct huft *) NULL) {
		huft_free(glbl, glbl->gz_fixed_td);
		glbl->gz_fixed_td = (struct huft *) NULL;
	}
	if (glbl->gz_fixed_tl != (struct huft *) NULL) {
		huft_free(glbl, glbl->gz_fixed_tl);
		glbl->gz_fixed_tl = (struct huft *) NULL;
	}
#ifdef KERNEL
	/* free the window only if we allocated it above (p still points
	 * at our allocation; a caller-supplied window leaves p == NULL) */
	if (p == glbl->gz_slide) {
		free(glbl->gz_slide, M_GZIP);
		glbl->gz_slide = NULL;
	}
#endif
	return i;
}
/* ----------------------- END INFLATE.C */