/*
 * LZMA2 decoder
 *
 * Authors: Lasse Collin <lasse.collin@tukaani.org>
 *          Igor Pavlov <http://7-zip.org/>
 *
 * This file has been put into the public domain.
 * You can do whatever you want with this file.
 */

#include "xz_private.h"
#include "xz_lzma2.h"

/*
 * Range decoder initialization eats the first five bytes of each LZMA chunk.
 */
#define RC_INIT_BYTES 5

/*
 * Minimum number of usable input bytes needed to safely decode one LZMA
 * symbol. The worst case is that we decode 22 bits using probabilities
 * and 26 direct bits. This may consume at most 20 bytes of input.
 * However, lzma_main() does an extra normalization before returning,
 * thus we need to put 21 here.
 */
#define LZMA_IN_REQUIRED 21

/*
 * Dictionary (history buffer)
 *
 * These are always true:
 *    start <= pos <= full <= end
 *    pos <= limit <= end
 *
 * In multi-call mode, also these are true:
 *    end == size
 *    size <= size_max
 *    allocated <= size
 *
 * Most of these variables are size_t to support single-call mode,
 * in which the dictionary variables address the actual output
 * buffer directly.
 */
struct dictionary {
	/* Beginning of the history buffer */
	uint8_t *buf;

	/* Old position in buf (before decoding more data) */
	size_t start;

	/* Position in buf */
	size_t pos;

	/*
	 * How full the dictionary is. This is used to detect corrupt input
	 * that would read beyond the beginning of the uncompressed stream.
	 */
	size_t full;

	/* Write limit; we don't write to buf[limit] or later bytes. */
	size_t limit;

	/*
	 * End of the dictionary buffer. In multi-call mode, this is
	 * the same as the dictionary size. In single-call mode, this
	 * indicates the size of the output buffer.
	 */
	size_t end;

	/*
	 * Size of the dictionary as specified in the Block Header. This is
	 * used together with "full" to detect corrupt input that would make
	 * us read beyond the beginning of the uncompressed stream.
	 */
	uint32_t size;

	/*
	 * Maximum allowed dictionary size in multi-call mode.
	 * This is ignored in single-call mode.
	 */
	uint32_t size_max;

	/*
	 * Amount of memory currently allocated for the dictionary.
	 * This is used only with XZ_DYNALLOC. (With XZ_PREALLOC,
	 * size_max is always the same as the allocated size.)
	 */
	uint32_t allocated;

	/* Operation mode */
	enum xz_mode mode;
};

/* Range decoder */
struct rc_dec {
	uint32_t range;
	uint32_t code;

	/*
	 * Number of initializing bytes remaining to be read
	 * by rc_read_init().
	 */
	uint32_t init_bytes_left;

	/*
	 * Buffer from which we read our input. It can be either
	 * temp.buf or the caller-provided input buffer.
	 */
	const uint8_t *in;
	size_t in_pos;
	size_t in_limit;
};

/* Probabilities for a length decoder. */
struct lzma_len_dec {
	/* Probability of match length being at least 10 */
	uint16_t choice;

	/* Probability of match length being at least 18 */
	uint16_t choice2;

	/* Probabilities for match lengths 2-9 */
	uint16_t low[POS_STATES_MAX][LEN_LOW_SYMBOLS];

	/* Probabilities for match lengths 10-17 */
	uint16_t mid[POS_STATES_MAX][LEN_MID_SYMBOLS];

	/* Probabilities for match lengths 18-273 */
	uint16_t high[LEN_HIGH_SYMBOLS];
};

struct lzma_dec {
	/* Distances of latest four matches */
	uint32_t rep0;
	uint32_t rep1;
	uint32_t rep2;
	uint32_t rep3;

	/* Types of the most recently seen LZMA symbols */
	enum lzma_state state;

	/*
	 * Length of a match. This is updated so that dict_repeat can
	 * be called again to finish repeating the whole match.
	 */
	uint32_t len;

	/*
	 * LZMA properties or related bit masks (number of literal
	 * context bits, a mask derived from the number of literal
	 * position bits, and a mask derived from the number of
	 * position bits)
	 */
	uint32_t lc;
	uint32_t literal_pos_mask; /* (1 << lp) - 1 */
	uint32_t pos_mask;         /* (1 << pb) - 1 */

	/* If 1, it's a match. Otherwise it's a single 8-bit literal. */
	uint16_t is_match[STATES][POS_STATES_MAX];

	/* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */
	uint16_t is_rep[STATES];

	/*
	 * If 0, distance of a repeated match is rep0.
	 * Otherwise check is_rep1.
	 */
	uint16_t is_rep0[STATES];

	/*
	 * If 0, distance of a repeated match is rep1.
	 * Otherwise check is_rep2.
	 */
	uint16_t is_rep1[STATES];

	/* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */
	uint16_t is_rep2[STATES];

	/*
	 * If 0, the repeated match has a length of one byte. Otherwise
	 * the length is decoded from rep_len_dec.
	 */
	uint16_t is_rep0_long[STATES][POS_STATES_MAX];

	/*
	 * Probability tree for the highest two bits of the match
	 * distance. There is a separate probability tree for match
	 * lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273].
	 */
	uint16_t dist_slot[DIST_STATES][DIST_SLOTS];

	/*
	 * Probability trees for additional bits for match distance
	 * when the distance is in the range [4, 127].
	 */
	uint16_t dist_special[FULL_DISTANCES - DIST_MODEL_END];

	/*
	 * Probability tree for the lowest four bits of a match
	 * distance that is equal to or greater than 128.
	 */
	uint16_t dist_align[ALIGN_SIZE];

	/* Length of a normal match */
	struct lzma_len_dec match_len_dec;

	/* Length of a repeated match */
	struct lzma_len_dec rep_len_dec;

	/* Probabilities of literals */
	uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE];
};

struct lzma2_dec {
	/* Position in xz_dec_lzma2_run(). */
	enum lzma2_seq {
		SEQ_CONTROL,
		SEQ_UNCOMPRESSED_1,
		SEQ_UNCOMPRESSED_2,
		SEQ_COMPRESSED_0,
		SEQ_COMPRESSED_1,
		SEQ_PROPERTIES,
		SEQ_LZMA_PREPARE,
		SEQ_LZMA_RUN,
		SEQ_COPY
	} sequence;

	/* Next position after decoding the compressed size of the chunk. */
	enum lzma2_seq next_sequence;

	/* Uncompressed size of LZMA chunk (2 MiB at maximum) */
	uint32_t uncompressed;

	/*
	 * Compressed size of LZMA chunk or compressed/uncompressed
	 * size of uncompressed chunk (64 KiB at maximum)
	 */
	uint32_t compressed;

	/*
	 * True if dictionary reset is needed. A new LZMA2 stream must
	 * always begin with a dictionary reset, so this is set to true
	 * in xz_dec_lzma2_reset().
	 */
	bool need_dict_reset;

	/*
	 * True if new LZMA properties are needed. The first LZMA chunk
	 * of a stream must always carry new properties.
	 */
	bool need_props;
};

struct xz_dec_lzma2 {
	/*
	 * The order below is important on x86 to reduce code size and
	 * it shouldn't hurt on other platforms. Everything up to and
	 * including lzma.pos_mask are in the first 128 bytes on x86-32,
	 * which allows using smaller instructions to access those
	 * variables. On x86-64, fewer variables fit into the first 128
	 * bytes, but this is still the best order without sacrificing
	 * readability by splitting the structures.
	 */
	struct rc_dec rc;
	struct dictionary dict;
	struct lzma2_dec lzma2;
	struct lzma_dec lzma;

	/*
	 * Temporary buffer which holds a small number of input bytes between
	 * decoder calls. See lzma2_lzma() for details.
	 */
	struct {
		uint32_t size;
		uint8_t buf[3 * LZMA_IN_REQUIRED];
	} temp;
};

/**************
 * Dictionary *
 **************/

/*
 * Reset the dictionary state. When in single-call mode, set up the beginning
 * of the dictionary to point to the actual output buffer.
 */
static void dict_reset(struct dictionary *dict, struct xz_buf *b)
{
	if (DEC_IS_SINGLE(dict->mode)) {
		dict->buf = b->out + b->out_pos;
		dict->end = b->out_size - b->out_pos;
	}

	dict->start = 0;
	dict->pos = 0;
	dict->limit = 0;
	dict->full = 0;
}

/* Set dictionary write limit */
static void dict_limit(struct dictionary *dict, size_t out_max)
{
	if (dict->end - dict->pos <= out_max)
		dict->limit = dict->end;
	else
		dict->limit = dict->pos + out_max;
}

/* Return true if at least one byte can be written into the dictionary. */
static inline bool dict_has_space(const struct dictionary *dict)
{
	return dict->pos < dict->limit;
}

/*
 * Get a byte from the dictionary at the given distance. The distance is
 * assumed to be valid, or as a special case, zero when the dictionary is
 * still empty. This special case is needed for single-call decoding to
 * avoid writing a '\0' to the end of the destination buffer.
 */
static inline uint32_t dict_get(const struct dictionary *dict, uint32_t dist)
{
	size_t offset = dict->pos - dist - 1;

	if (dist >= dict->pos)
		offset += dict->end;

	return dict->full > 0 ? dict->buf[offset] : 0;
}

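/*
 * An illustrative example (values chosen only for this comment): in
 * multi-call mode with dict->pos == 2, dist == 5, and dict->end == 8, the
 * expression dict->pos - dist - 1 underflows, so dict->end is added back,
 * giving offset 4. Viewed as a ring buffer, that is the byte dist + 1
 * positions behind the current write position.
 */
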
/*
 * Put one byte into the dictionary. It is assumed that there is space for it.
 */
static inline void dict_put(struct dictionary *dict, uint8_t byte)
{
	dict->buf[dict->pos++] = byte;

	if (dict->full < dict->pos)
		dict->full = dict->pos;
}

/*
 * Repeat given number of bytes from the given distance. If the distance is
 * invalid, false is returned. On success, true is returned and *len is
 * updated to indicate how many bytes are left to be repeated.
 */
static bool dict_repeat(struct dictionary *dict, uint32_t *len, uint32_t dist)
{
	size_t back;
	uint32_t left;

	if (dist >= dict->full || dist >= dict->size)
		return false;

	left = min_t(size_t, dict->limit - dict->pos, *len);
	*len -= left;

	back = dict->pos - dist - 1;
	if (dist >= dict->pos)
		back += dict->end;

	do {
		dict->buf[dict->pos++] = dict->buf[back++];
		if (back == dict->end)
			back = 0;
	} while (--left > 0);

	if (dict->full < dict->pos)
		dict->full = dict->pos;

	return true;
}

/* Copy uncompressed data as is from input to dictionary and output buffers. */
static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b,
			      uint32_t *left)
{
	size_t copy_size;

	while (*left > 0 && b->in_pos < b->in_size
			&& b->out_pos < b->out_size) {
		copy_size = min(b->in_size - b->in_pos,
				b->out_size - b->out_pos);
		if (copy_size > dict->end - dict->pos)
			copy_size = dict->end - dict->pos;
		if (copy_size > *left)
			copy_size = *left;

		*left -= copy_size;

		memcpy(dict->buf + dict->pos, b->in + b->in_pos, copy_size);
		dict->pos += copy_size;

		if (dict->full < dict->pos)
			dict->full = dict->pos;

		if (DEC_IS_MULTI(dict->mode)) {
			if (dict->pos == dict->end)
				dict->pos = 0;

			memcpy(b->out + b->out_pos, b->in + b->in_pos,
					copy_size);
		}

		dict->start = dict->pos;

		b->out_pos += copy_size;
		b->in_pos += copy_size;
	}
}

/*
 * Flush pending data from dictionary to b->out. It is assumed that there is
 * enough space in b->out. This is guaranteed because the caller uses
 * dict_limit() before decoding data into the dictionary.
 */
static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b)
{
	size_t copy_size = dict->pos - dict->start;

	if (DEC_IS_MULTI(dict->mode)) {
		if (dict->pos == dict->end)
			dict->pos = 0;

		memcpy(b->out + b->out_pos, dict->buf + dict->start,
				copy_size);
	}

	dict->start = dict->pos;
	b->out_pos += copy_size;
	return copy_size;
}

/*****************
 * Range decoder *
 *****************/

/* Reset the range decoder. */
static void rc_reset(struct rc_dec *rc)
{
	rc->range = (uint32_t)-1;
	rc->code = 0;
	rc->init_bytes_left = RC_INIT_BYTES;
}

/*
 * Read the first five initial bytes into rc->code if they haven't been
 * read already. (Yes, the first byte gets completely ignored.)
 */
static bool rc_read_init(struct rc_dec *rc, struct xz_buf *b)
{
	while (rc->init_bytes_left > 0) {
		if (b->in_pos == b->in_size)
			return false;

		rc->code = (rc->code << 8) + b->in[b->in_pos++];
		--rc->init_bytes_left;
	}

	return true;
}

/* Return true if there may not be enough input for the next decoding loop. */
static inline bool rc_limit_exceeded(const struct rc_dec *rc)
{
	return rc->in_pos > rc->in_limit;
}

/*
 * Return true if it is possible (from the range decoder's point of view)
 * that we have reached the end of the LZMA chunk.
 */
static inline bool rc_is_finished(const struct rc_dec *rc)
{
	return rc->code == 0;
}

/* Read the next input byte if needed. */
static __always_inline void rc_normalize(struct rc_dec *rc)
{
	if (rc->range < RC_TOP_VALUE) {
		rc->range <<= RC_SHIFT_BITS;
		rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++];
	}
}

/*
 * Decode one bit. In some versions, this function has been split into three
 * functions so that the compiler is supposed to be able to more easily avoid
 * an extra branch. In this particular version of the LZMA decoder, this
 * doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3
 * on x86). Using a non-split version results in nicer looking code too.
 *
 * NOTE: This must return an int. Do not make it return a bool or the speed
 * of the code generated by GCC 3.x decreases by 10-15 %. (GCC 4.3 doesn't
 * care, and it generates 10-20 % faster code than GCC 3.x from this file
 * anyway.)
 */
static __always_inline int rc_bit(struct rc_dec *rc, uint16_t *prob)
{
	uint32_t bound;
	int bit;

	rc_normalize(rc);
	bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob;
	if (rc->code < bound) {
		rc->range = bound;
		*prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS;
		bit = 0;
	} else {
		rc->range -= bound;
		rc->code -= bound;
		*prob -= *prob >> RC_MOVE_BITS;
		bit = 1;
	}

	return bit;
}

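/*
 * A worked example with hypothetical numbers, assuming the usual constants
 * from xz_lzma2.h (RC_BIT_MODEL_TOTAL = 2048, RC_BIT_MODEL_TOTAL_BITS = 11,
 * RC_MOVE_BITS = 5): a freshly reset probability is 1024, so bound is about
 * half of range and both bit values are equally likely. Decoding a 0 moves
 * the probability to 1024 + ((2048 - 1024) >> 5) = 1056, while decoding a 1
 * moves it to 1024 - (1024 >> 5) = 992, slowly skewing the split towards
 * the bit values that actually occur.
 */
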
/* Decode a bittree starting from the most significant bit. */
static __always_inline uint32_t rc_bittree(struct rc_dec *rc,
					   uint16_t *probs, uint32_t limit)
{
	uint32_t symbol = 1;

	do {
		if (rc_bit(rc, &probs[symbol]))
			symbol = (symbol << 1) + 1;
		else
			symbol <<= 1;
	} while (symbol < limit);

	return symbol;
}

/* Decode a bittree starting from the least significant bit. */
static __always_inline void rc_bittree_reverse(struct rc_dec *rc,
					       uint16_t *probs,
					       uint32_t *dest, uint32_t limit)
{
	uint32_t symbol = 1;
	uint32_t i = 0;

	do {
		if (rc_bit(rc, &probs[symbol])) {
			symbol = (symbol << 1) + 1;
			*dest += 1 << i;
		} else {
			symbol <<= 1;
		}
	} while (++i < limit);
}

/* Decode direct bits (fixed fifty-fifty probability) */
static inline void rc_direct(struct rc_dec *rc, uint32_t *dest, uint32_t limit)
{
	uint32_t mask;

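	/*
	 * Branchless decoding of one direct bit per iteration: after the
	 * subtraction below, the sign bit of rc->code tells in which half of
	 * the range the code lies. mask becomes all ones when the
	 * subtraction went "negative" (rc->range is added back and the
	 * decoded bit is 0) and zero otherwise (the bit is 1); mask + 1 is
	 * exactly the decoded bit.
	 */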
	do {
		rc_normalize(rc);
		rc->range >>= 1;
		rc->code -= rc->range;
		mask = (uint32_t)0 - (rc->code >> 31);
		rc->code += rc->range & mask;
		*dest = (*dest << 1) + (mask + 1);
	} while (--limit > 0);
}

/********
 * LZMA *
 ********/

/* Get pointer to literal coder probability array. */
static uint16_t *lzma_literal_probs(struct xz_dec_lzma2 *s)
{
	uint32_t prev_byte = dict_get(&s->dict, 0);
	uint32_t low = prev_byte >> (8 - s->lzma.lc);
	uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc;
	return s->lzma.literal[low + high];
}

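/*
 * For instance, with the common properties lc = 3 and lp = 0 (so
 * literal_pos_mask == 0), the index above reduces to prev_byte >> 5: one of
 * eight literal coders is selected purely by the top three bits of the
 * previous byte. These values are only an illustration; lc and lp come from
 * the properties byte of the current chunk.
 */
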
/* Decode a literal (one 8-bit byte) */
static void lzma_literal(struct xz_dec_lzma2 *s)
{
	uint16_t *probs;
	uint32_t symbol;
	uint32_t match_byte;
	uint32_t match_bit;
	uint32_t offset;
	uint32_t i;

	probs = lzma_literal_probs(s);

	if (lzma_state_is_literal(s->lzma.state)) {
		symbol = rc_bittree(&s->rc, probs, 0x100);
	} else {
		symbol = 1;
		match_byte = dict_get(&s->dict, s->lzma.rep0) << 1;
		offset = 0x100;

		do {
			match_bit = match_byte & offset;
			match_byte <<= 1;
			i = offset + match_bit + symbol;

			if (rc_bit(&s->rc, &probs[i])) {
				symbol = (symbol << 1) + 1;
				offset &= match_bit;
			} else {
				symbol <<= 1;
				offset &= ~match_bit;
			}
		} while (symbol < 0x100);
	}

	dict_put(&s->dict, (uint8_t)symbol);
	lzma_state_literal(&s->lzma.state);
}

/* Decode the length of the match into s->lzma.len. */
static void lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l,
		     uint32_t pos_state)
{
	uint16_t *probs;
	uint32_t limit;

	if (!rc_bit(&s->rc, &l->choice)) {
		probs = l->low[pos_state];
		limit = LEN_LOW_SYMBOLS;
		s->lzma.len = MATCH_LEN_MIN;
	} else {
		if (!rc_bit(&s->rc, &l->choice2)) {
			probs = l->mid[pos_state];
			limit = LEN_MID_SYMBOLS;
			s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS;
		} else {
			probs = l->high;
			limit = LEN_HIGH_SYMBOLS;
			s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS
					+ LEN_MID_SYMBOLS;
		}
	}

	s->lzma.len += rc_bittree(&s->rc, probs, limit) - limit;
}

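/*
 * To illustrate the ranges (assuming MATCH_LEN_MIN = 2 and the LEN_*_SYMBOLS
 * constants from xz_lzma2.h): the "low" path yields lengths 2-9, the "mid"
 * path 10-17, and the "high" path 18-273. In the high case rc_bittree()
 * returns a symbol in [256, 511], so subtracting the limit (256) and adding
 * 2 + 8 + 8 gives exactly the range 18-273.
 */
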
/* Decode a match. The distance will be stored in s->lzma.rep0. */
static void lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
{
	uint16_t *probs;
	uint32_t dist_slot;
	uint32_t limit;

	lzma_state_match(&s->lzma.state);

	s->lzma.rep3 = s->lzma.rep2;
	s->lzma.rep2 = s->lzma.rep1;
	s->lzma.rep1 = s->lzma.rep0;

	lzma_len(s, &s->lzma.match_len_dec, pos_state);

	probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)];
	dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS;

	if (dist_slot < DIST_MODEL_START) {
		s->lzma.rep0 = dist_slot;
	} else {
		limit = (dist_slot >> 1) - 1;
		s->lzma.rep0 = 2 + (dist_slot & 1);

		if (dist_slot < DIST_MODEL_END) {
			s->lzma.rep0 <<= limit;
			probs = s->lzma.dist_special + s->lzma.rep0
					- dist_slot - 1;
			rc_bittree_reverse(&s->rc, probs,
					&s->lzma.rep0, limit);
		} else {
			rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS);
			s->lzma.rep0 <<= ALIGN_BITS;
			rc_bittree_reverse(&s->rc, s->lzma.dist_align,
					&s->lzma.rep0, ALIGN_BITS);
		}
	}
}

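/*
 * A sketch of the distance coding with example numbers (illustrative only):
 * dist_slot values 0-3 are the distance itself. For dist_slot == 7, limit
 * is (7 >> 1) - 1 == 2 and the base becomes (2 + 1) << 2 == 12, so two
 * reverse-bittree bits pick a distance in 12-15. For slots >= DIST_MODEL_END
 * the middle bits come from rc_direct() and the lowest four bits from the
 * dist_align tree.
 */
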
/*
 * Decode a repeated match. The distance is one of the four most recently
 * seen matches. The distance will be stored in s->lzma.rep0.
 */
static void lzma_rep_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
{
	uint32_t tmp;

	if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state])) {
		if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[
				s->lzma.state][pos_state])) {
			lzma_state_short_rep(&s->lzma.state);
			s->lzma.len = 1;
			return;
		}
	} else {
		if (!rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state])) {
			tmp = s->lzma.rep1;
		} else {
			if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state])) {
				tmp = s->lzma.rep2;
			} else {
				tmp = s->lzma.rep3;
				s->lzma.rep3 = s->lzma.rep2;
			}

			s->lzma.rep2 = s->lzma.rep1;
		}

		s->lzma.rep1 = s->lzma.rep0;
		s->lzma.rep0 = tmp;
	}

	lzma_state_long_rep(&s->lzma.state);
	lzma_len(s, &s->lzma.rep_len_dec, pos_state);
}

/* LZMA decoder core */
static bool lzma_main(struct xz_dec_lzma2 *s)
{
	uint32_t pos_state;

	/*
	 * If the dictionary's write limit was reached during the previous
	 * call, try to finish the possibly pending repeat in the dictionary.
	 */
	if (dict_has_space(&s->dict) && s->lzma.len > 0)
		dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0);

	/*
	 * Decode more LZMA symbols. One iteration may consume up to
	 * LZMA_IN_REQUIRED - 1 bytes.
	 */
	while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc)) {
		pos_state = s->dict.pos & s->lzma.pos_mask;

		if (!rc_bit(&s->rc, &s->lzma.is_match[
				s->lzma.state][pos_state])) {
			lzma_literal(s);
		} else {
			if (rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state]))
				lzma_rep_match(s, pos_state);
			else
				lzma_match(s, pos_state);

			if (!dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0))
				return false;
		}
	}

	/*
	 * Having the range decoder always normalized when we are outside
	 * this function makes it easier to correctly handle the end of
	 * the chunk.
	 */
	rc_normalize(&s->rc);

	return true;
}

/*
 * Reset the LZMA decoder and range decoder state. The dictionary is not
 * reset here, because LZMA state may be reset without resetting the
 * dictionary.
 */
static void lzma_reset(struct xz_dec_lzma2 *s)
{
	uint16_t *probs;
	size_t i;

	s->lzma.state = STATE_LIT_LIT;
	s->lzma.rep0 = 0;
	s->lzma.rep1 = 0;
	s->lzma.rep2 = 0;
	s->lzma.rep3 = 0;

	/*
	 * All probabilities are initialized to the same value. This hack
	 * makes the code smaller by avoiding a separate loop for each
	 * probability array.
	 *
	 * This could be optimized so that only the part of the literal
	 * probabilities that is actually required gets initialized. In
	 * the common case we would write 12 KiB less.
	 */
	probs = s->lzma.is_match[0];
	for (i = 0; i < PROBS_TOTAL; ++i)
		probs[i] = RC_BIT_MODEL_TOTAL / 2;

	rc_reset(&s->rc);
}

/*
 * Decode and validate LZMA properties (lc/lp/pb) and calculate the bit masks
 * from the decoded lp and pb values. On success, the LZMA decoder state is
 * reset and true is returned.
 */
static bool lzma_props(struct xz_dec_lzma2 *s, uint8_t props)
{
	if (props > (4 * 5 + 4) * 9 + 8)
		return false;

	s->lzma.pos_mask = 0;
	while (props >= 9 * 5) {
		props -= 9 * 5;
		++s->lzma.pos_mask;
	}

	s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1;

	s->lzma.literal_pos_mask = 0;
	while (props >= 9) {
		props -= 9;
		++s->lzma.literal_pos_mask;
	}

	s->lzma.lc = props;

	if (s->lzma.lc + s->lzma.literal_pos_mask > 4)
		return false;

	s->lzma.literal_pos_mask = (1 << s->lzma.literal_pos_mask) - 1;

	lzma_reset(s);

	return true;
}

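/*
 * For example (a value picked only for illustration): the most common
 * properties byte is 0x5D (93), which decodes as pb = 2, lp = 0, and lc = 3
 * (93 = (2 * 5 + 0) * 9 + 3), giving pos_mask = 0x3 and
 * literal_pos_mask = 0. Anything above (4 * 5 + 4) * 9 + 8 = 224 is
 * rejected above as invalid.
 */
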
/*********
 * LZMA2 *
 *********/

/*
 * The LZMA decoder assumes that if the input limit (s->rc.in_limit) hasn't
 * been exceeded, it is safe to read up to LZMA_IN_REQUIRED bytes. This
 * wrapper function takes care of making the LZMA decoder's assumption safe.
 *
 * As long as there is plenty of input left to be decoded in the current LZMA
 * chunk, we decode directly from the caller-supplied input buffer until
 * there are LZMA_IN_REQUIRED bytes left. Those remaining bytes are copied
 * into s->temp.buf, which (hopefully) gets filled on the next call to this
 * function. We decode a few bytes from the temporary buffer so that we can
 * continue decoding from the caller-supplied input buffer again.
 */
static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
{
	size_t in_avail;
	uint32_t tmp;

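	/*
	 * Roughly speaking, this function runs in three stages: first decode
	 * from temp.buf when it holds leftover bytes from the previous call
	 * or when the chunk's compressed data has already been fully
	 * consumed, then decode directly from the caller-supplied buffer
	 * while plenty of input remains, and finally stash any short tail of
	 * the input into temp.buf for the next call.
	 */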
	in_avail = b->in_size - b->in_pos;
	if (s->temp.size > 0 || s->lzma2.compressed == 0) {
		tmp = 2 * LZMA_IN_REQUIRED - s->temp.size;
		if (tmp > s->lzma2.compressed - s->temp.size)
			tmp = s->lzma2.compressed - s->temp.size;
		if (tmp > in_avail)
			tmp = in_avail;

		memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp);

		if (s->temp.size + tmp == s->lzma2.compressed) {
			memzero(s->temp.buf + s->temp.size + tmp,
					sizeof(s->temp.buf)
						- s->temp.size - tmp);
			s->rc.in_limit = s->temp.size + tmp;
		} else if (s->temp.size + tmp < LZMA_IN_REQUIRED) {
			s->temp.size += tmp;
			b->in_pos += tmp;
			return true;
		} else {
			s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED;
		}

		s->rc.in = s->temp.buf;
		s->rc.in_pos = 0;

		if (!lzma_main(s) || s->rc.in_pos > s->temp.size + tmp)
			return false;

		s->lzma2.compressed -= s->rc.in_pos;

		if (s->rc.in_pos < s->temp.size) {
			s->temp.size -= s->rc.in_pos;
			memmove(s->temp.buf, s->temp.buf + s->rc.in_pos,
					s->temp.size);
			return true;
		}

		b->in_pos += s->rc.in_pos - s->temp.size;
		s->temp.size = 0;
	}

	in_avail = b->in_size - b->in_pos;
	if (in_avail >= LZMA_IN_REQUIRED) {
		s->rc.in = b->in;
		s->rc.in_pos = b->in_pos;

		if (in_avail >= s->lzma2.compressed + LZMA_IN_REQUIRED)
			s->rc.in_limit = b->in_pos + s->lzma2.compressed;
		else
			s->rc.in_limit = b->in_size - LZMA_IN_REQUIRED;

		if (!lzma_main(s))
			return false;

		in_avail = s->rc.in_pos - b->in_pos;
		if (in_avail > s->lzma2.compressed)
			return false;

		s->lzma2.compressed -= in_avail;
		b->in_pos = s->rc.in_pos;
	}

	in_avail = b->in_size - b->in_pos;
	if (in_avail < LZMA_IN_REQUIRED) {
		if (in_avail > s->lzma2.compressed)
			in_avail = s->lzma2.compressed;

		memcpy(s->temp.buf, b->in + b->in_pos, in_avail);
		s->temp.size = in_avail;
		b->in_pos += in_avail;
	}

	return true;
}

/*
 * Take care of the LZMA2 control layer, and forward the job of actual LZMA
 * decoding or copying of uncompressed chunks to other functions.
 */
XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
				       struct xz_buf *b)
{
	uint32_t tmp;

	while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN) {
		switch (s->lzma2.sequence) {
		case SEQ_CONTROL:
			/*
			 * LZMA2 control byte
			 *
			 * Exact values:
			 *   0x00   End marker
			 *   0x01   Dictionary reset followed by
			 *          an uncompressed chunk
			 *   0x02   Uncompressed chunk (no dictionary reset)
			 *
			 * Highest three bits (s->control & 0xE0):
			 *   0xE0   Dictionary reset, new properties and state
			 *          reset, followed by LZMA compressed chunk
			 *   0xC0   New properties and state reset, followed
			 *          by LZMA compressed chunk (no dictionary
			 *          reset)
			 *   0xA0   State reset using old properties,
			 *          followed by LZMA compressed chunk (no
			 *          dictionary reset)
			 *   0x80   LZMA chunk (no dictionary or state reset)
			 *
			 * For LZMA compressed chunks, the lowest five bits
			 * (s->control & 0x1F) are the highest bits of the
			 * uncompressed size (bits 16-20).
			 *
			 * A new LZMA2 stream must begin with a dictionary
			 * reset. The first LZMA chunk must set new
			 * properties and reset the LZMA state.
			 *
			 * Values that don't match anything described above
			 * are invalid and we return XZ_DATA_ERROR.
			 */
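			/*
			 * A made-up example: a control byte of 0x87 starts
			 * an LZMA chunk with no resets whose uncompressed
			 * size begins as 0x07 << 16; the next two bytes
			 * (SEQ_UNCOMPRESSED_*) complete that size and the
			 * two after them (SEQ_COMPRESSED_*) give the
			 * compressed size, both fields storing the size
			 * minus one.
			 */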
			tmp = b->in[b->in_pos++];

			if (tmp == 0x00)
				return XZ_STREAM_END;

			if (tmp >= 0xE0 || tmp == 0x01) {
				s->lzma2.need_props = true;
				s->lzma2.need_dict_reset = false;
				dict_reset(&s->dict, b);
			} else if (s->lzma2.need_dict_reset) {
				return XZ_DATA_ERROR;
			}

			if (tmp >= 0x80) {
				s->lzma2.uncompressed = (tmp & 0x1F) << 16;
				s->lzma2.sequence = SEQ_UNCOMPRESSED_1;

				if (tmp >= 0xC0) {
					/*
					 * When there are new properties,
					 * state reset is done at
					 * SEQ_PROPERTIES.
					 */
					s->lzma2.need_props = false;
					s->lzma2.next_sequence
							= SEQ_PROPERTIES;

				} else if (s->lzma2.need_props) {
					return XZ_DATA_ERROR;

				} else {
					s->lzma2.next_sequence
							= SEQ_LZMA_PREPARE;
					if (tmp >= 0xA0)
						lzma_reset(s);
				}
			} else {
				if (tmp > 0x02)
					return XZ_DATA_ERROR;

				s->lzma2.sequence = SEQ_COMPRESSED_0;
				s->lzma2.next_sequence = SEQ_COPY;
			}

			break;

		case SEQ_UNCOMPRESSED_1:
			s->lzma2.uncompressed
					+= (uint32_t)b->in[b->in_pos++] << 8;
			s->lzma2.sequence = SEQ_UNCOMPRESSED_2;
			break;

		case SEQ_UNCOMPRESSED_2:
			s->lzma2.uncompressed
					+= (uint32_t)b->in[b->in_pos++] + 1;
			s->lzma2.sequence = SEQ_COMPRESSED_0;
			break;

		case SEQ_COMPRESSED_0:
			s->lzma2.compressed
					= (uint32_t)b->in[b->in_pos++] << 8;
			s->lzma2.sequence = SEQ_COMPRESSED_1;
			break;

		case SEQ_COMPRESSED_1:
			s->lzma2.compressed
					+= (uint32_t)b->in[b->in_pos++] + 1;
			s->lzma2.sequence = s->lzma2.next_sequence;
			break;

		case SEQ_PROPERTIES:
			if (!lzma_props(s, b->in[b->in_pos++]))
				return XZ_DATA_ERROR;

			s->lzma2.sequence = SEQ_LZMA_PREPARE;

		/* Fall through */

		case SEQ_LZMA_PREPARE:
			if (s->lzma2.compressed < RC_INIT_BYTES)
				return XZ_DATA_ERROR;

			if (!rc_read_init(&s->rc, b))
				return XZ_OK;

			s->lzma2.compressed -= RC_INIT_BYTES;
			s->lzma2.sequence = SEQ_LZMA_RUN;

		/* Fall through */

		case SEQ_LZMA_RUN:
			/*
			 * Set dictionary limit to indicate how much we want
			 * to be decoded at maximum. Decode new data into the
			 * dictionary. Flush the new data from dictionary to
			 * b->out. Check if we finished decoding this chunk.
			 * In case the dictionary got full but we didn't fill
			 * the output buffer yet, we may run this loop
			 * multiple times without changing s->lzma2.sequence.
			 */
			dict_limit(&s->dict, min_t(size_t,
					b->out_size - b->out_pos,
					s->lzma2.uncompressed));
			if (!lzma2_lzma(s, b))
				return XZ_DATA_ERROR;

			s->lzma2.uncompressed -= dict_flush(&s->dict, b);

			if (s->lzma2.uncompressed == 0) {
				if (s->lzma2.compressed > 0 || s->lzma.len > 0
						|| !rc_is_finished(&s->rc))
					return XZ_DATA_ERROR;

				rc_reset(&s->rc);
				s->lzma2.sequence = SEQ_CONTROL;

			} else if (b->out_pos == b->out_size
					|| (b->in_pos == b->in_size
						&& s->temp.size
						< s->lzma2.compressed)) {
				return XZ_OK;
			}

			break;

		case SEQ_COPY:
			dict_uncompressed(&s->dict, b, &s->lzma2.compressed);
			if (s->lzma2.compressed > 0)
				return XZ_OK;

			s->lzma2.sequence = SEQ_CONTROL;
			break;
		}
	}

	return XZ_OK;
}

XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
						   uint32_t dict_max)
{
	struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (s == NULL)
		return NULL;

	s->dict.mode = mode;
	s->dict.size_max = dict_max;

	if (DEC_IS_PREALLOC(mode)) {
		s->dict.buf = vmalloc(dict_max);
		if (s->dict.buf == NULL) {
			kfree(s);
			return NULL;
		}
	} else if (DEC_IS_DYNALLOC(mode)) {
		s->dict.buf = NULL;
		s->dict.allocated = 0;
	}
	return s;
}

XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
{
	/* This limits dictionary size to 3 GiB to keep parsing simpler. */
	if (props > 39)
		return XZ_OPTIONS_ERROR;

	s->dict.size = 2 + (props & 1);
	s->dict.size <<= (props >> 1) + 11;
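	/*
	 * A few illustrative values: props == 0 gives a 4 KiB dictionary
	 * (2 << 11), props == 22 gives the common 8 MiB (2 << 22), and the
	 * maximum props == 39 gives 3 GiB (3 << 30).
	 */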

	if (DEC_IS_MULTI(s->dict.mode)) {
		if (s->dict.size > s->dict.size_max)
			return XZ_MEMLIMIT_ERROR;

		s->dict.end = s->dict.size;

		if (DEC_IS_DYNALLOC(s->dict.mode)) {
			if (s->dict.allocated < s->dict.size) {
				vfree(s->dict.buf);
				s->dict.buf = vmalloc(s->dict.size);
				if (s->dict.buf == NULL) {
					s->dict.allocated = 0;
					return XZ_MEM_ERROR;
				}
			}
		}
	}

	s->lzma.len = 0;

	s->lzma2.sequence = SEQ_CONTROL;
	s->lzma2.need_dict_reset = true;

	s->temp.size = 0;

	return XZ_OK;
}

XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s)
{
	if (DEC_IS_MULTI(s->dict.mode))
		vfree(s->dict.buf);

	kfree(s);
}