root/lib/xz/xz_dec_lzma2.c

DEFINITIONS

This source file includes the following definitions.
  1. dict_reset
  2. dict_limit
  3. dict_has_space
  4. dict_get
  5. dict_put
  6. dict_repeat
  7. dict_uncompressed
  8. dict_flush
  9. rc_reset
  10. rc_read_init
  11. rc_limit_exceeded
  12. rc_is_finished
  13. rc_normalize
  14. rc_bit
  15. rc_bittree
  16. rc_bittree_reverse
  17. rc_direct
  18. lzma_literal_probs
  19. lzma_literal
  20. lzma_len
  21. lzma_match
  22. lzma_rep_match
  23. lzma_main
  24. lzma_reset
  25. lzma_props
  26. lzma2_lzma
  27. xz_dec_lzma2_run
  28. xz_dec_lzma2_create
  29. xz_dec_lzma2_reset
  30. xz_dec_lzma2_end

/*
 * LZMA2 decoder
 *
 * Authors: Lasse Collin <lasse.collin@tukaani.org>
 *          Igor Pavlov <http://7-zip.org/>
 *
 * This file has been put into the public domain.
 * You can do whatever you want with this file.
 */

#include "xz_private.h"
#include "xz_lzma2.h"

/*
 * Range decoder initialization eats the first five bytes of each LZMA chunk.
 */
#define RC_INIT_BYTES 5

/*
 * Minimum number of usable input buffer bytes needed to safely decode one
 * LZMA symbol. The worst case is that we decode 22 bits using probabilities
 * and 26 direct bits, which may consume at most 20 bytes of input. However,
 * lzma_main() does an extra normalization before returning, thus we need
 * to put 21 here.
 */
#define LZMA_IN_REQUIRED 21

/*
 * Dictionary (history buffer)
 *
 * These are always true:
 *    start <= pos <= full <= end
 *    pos <= limit <= end
 *
 * In multi-call mode, also these are true:
 *    end == size
 *    size <= size_max
 *    allocated <= size
 *
 * Most of these variables are size_t to support single-call mode,
 * in which the dictionary variables address the actual output
 * buffer directly.
 */
struct dictionary {
        /* Beginning of the history buffer */
        uint8_t *buf;

        /* Old position in buf (before decoding more data) */
        size_t start;

        /* Position in buf */
        size_t pos;

        /*
         * How full the dictionary is. This is used to detect corrupt input
         * that would read beyond the beginning of the uncompressed stream.
         */
        size_t full;

        /* Write limit; we don't write to buf[limit] or later bytes. */
        size_t limit;

        /*
         * End of the dictionary buffer. In multi-call mode, this is
         * the same as the dictionary size. In single-call mode, this
         * indicates the size of the output buffer.
         */
        size_t end;

        /*
         * Size of the dictionary as specified in Block Header. This is used
         * together with "full" to detect corrupt input that would make us
         * read beyond the beginning of the uncompressed stream.
         */
        uint32_t size;

        /*
         * Maximum allowed dictionary size in multi-call mode.
         * This is ignored in single-call mode.
         */
        uint32_t size_max;

        /*
         * Amount of memory currently allocated for the dictionary.
         * This is used only with XZ_DYNALLOC. (With XZ_PREALLOC,
         * size_max is always the same as the allocated size.)
         */
        uint32_t allocated;

        /* Operation mode */
        enum xz_mode mode;
};
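
/*
 * Illustration (added; the values are hypothetical, not from the original
 * source): in single-call mode, dict_reset() aliases the dictionary onto
 * the caller's output buffer, so with out_size = 4096 and out_pos = 0 we
 * get buf = b->out, end = 4096, and start = pos = limit = full = 0. In
 * multi-call mode with a 64 KiB dictionary, end == size == 65536 and pos
 * wraps back to 0 when it reaches end, so buf acts as a ring buffer while
 * "full" grows monotonically until it saturates at "end".
 */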

/* Range decoder */
struct rc_dec {
        uint32_t range;
        uint32_t code;

        /*
         * Number of initializing bytes remaining to be read
         * by rc_read_init().
         */
        uint32_t init_bytes_left;

        /*
         * Buffer from which we read our input. It can be either
         * temp.buf or the caller-provided input buffer.
         */
        const uint8_t *in;
        size_t in_pos;
        size_t in_limit;
};

/* Probabilities for a length decoder. */
struct lzma_len_dec {
        /* Probability of match length being at least 10 */
        uint16_t choice;

        /* Probability of match length being at least 18 */
        uint16_t choice2;

        /* Probabilities for match lengths 2-9 */
        uint16_t low[POS_STATES_MAX][LEN_LOW_SYMBOLS];

        /* Probabilities for match lengths 10-17 */
        uint16_t mid[POS_STATES_MAX][LEN_MID_SYMBOLS];

        /* Probabilities for match lengths 18-273 */
        uint16_t high[LEN_HIGH_SYMBOLS];
};

struct lzma_dec {
        /* Distances of latest four matches */
        uint32_t rep0;
        uint32_t rep1;
        uint32_t rep2;
        uint32_t rep3;

        /* Types of the most recently seen LZMA symbols */
        enum lzma_state state;

        /*
         * Length of a match. This is updated so that dict_repeat can
         * be called again to finish repeating the whole match.
         */
        uint32_t len;

        /*
         * LZMA properties or related bit masks (number of literal
         * context bits, a mask derived from the number of literal
         * position bits, and a mask derived from the number of
         * position bits)
         */
        uint32_t lc;
        uint32_t literal_pos_mask; /* (1 << lp) - 1 */
        uint32_t pos_mask;         /* (1 << pb) - 1 */

        /* If 1, it's a match. Otherwise it's a single 8-bit literal. */
        uint16_t is_match[STATES][POS_STATES_MAX];

        /* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */
        uint16_t is_rep[STATES];

        /*
         * If 0, distance of a repeated match is rep0.
         * Otherwise check is_rep1.
         */
        uint16_t is_rep0[STATES];

        /*
         * If 0, distance of a repeated match is rep1.
         * Otherwise check is_rep2.
         */
        uint16_t is_rep1[STATES];

        /* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */
        uint16_t is_rep2[STATES];

        /*
         * If 1, the repeated match has a length of one byte. Otherwise
         * the length is decoded from rep_len_dec.
         */
        uint16_t is_rep0_long[STATES][POS_STATES_MAX];

        /*
         * Probability tree for the highest two bits of the match
         * distance. There is a separate probability tree for match
         * lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273].
         */
        uint16_t dist_slot[DIST_STATES][DIST_SLOTS];

        /*
         * Probability trees for additional bits for match distance
         * when the distance is in the range [4, 127].
         */
        uint16_t dist_special[FULL_DISTANCES - DIST_MODEL_END];

        /*
         * Probability tree for the lowest four bits of a match
         * distance that is equal to or greater than 128.
         */
        uint16_t dist_align[ALIGN_SIZE];

        /* Length of a normal match */
        struct lzma_len_dec match_len_dec;

        /* Length of a repeated match */
        struct lzma_len_dec rep_len_dec;

        /* Probabilities of literals */
        uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE];
};

struct lzma2_dec {
        /* Position in xz_dec_lzma2_run(). */
        enum lzma2_seq {
                SEQ_CONTROL,
                SEQ_UNCOMPRESSED_1,
                SEQ_UNCOMPRESSED_2,
                SEQ_COMPRESSED_0,
                SEQ_COMPRESSED_1,
                SEQ_PROPERTIES,
                SEQ_LZMA_PREPARE,
                SEQ_LZMA_RUN,
                SEQ_COPY
        } sequence;

        /* Next position after decoding the compressed size of the chunk. */
        enum lzma2_seq next_sequence;

        /* Uncompressed size of LZMA chunk (2 MiB at maximum) */
        uint32_t uncompressed;

        /*
         * Compressed size of LZMA chunk or compressed/uncompressed
         * size of uncompressed chunk (64 KiB at maximum)
         */
        uint32_t compressed;

        /*
         * True if dictionary reset is needed. This is true before
         * the first chunk (LZMA or uncompressed).
         */
        bool need_dict_reset;

        /*
         * True if new LZMA properties are needed. This is true
         * before the first LZMA chunk.
         */
        bool need_props;
};

struct xz_dec_lzma2 {
        /*
         * The order below is important on x86 to reduce code size and
         * it shouldn't hurt on other platforms. Everything up to and
         * including lzma.pos_mask is in the first 128 bytes on x86-32,
         * which allows using smaller instructions to access those
         * variables. On x86-64, fewer variables fit into the first 128
         * bytes, but this is still the best order without sacrificing
         * readability by splitting the structures.
         */
        struct rc_dec rc;
        struct dictionary dict;
        struct lzma2_dec lzma2;
        struct lzma_dec lzma;

        /*
         * Temporary buffer which holds a small number of input bytes between
         * decoder calls. See lzma2_lzma() for details.
         */
        struct {
                uint32_t size;
                uint8_t buf[3 * LZMA_IN_REQUIRED];
        } temp;
};

/**************
 * Dictionary *
 **************/

/*
 * Reset the dictionary state. When in single-call mode, set up the beginning
 * of the dictionary to point to the actual output buffer.
 */
static void dict_reset(struct dictionary *dict, struct xz_buf *b)
{
        if (DEC_IS_SINGLE(dict->mode)) {
                dict->buf = b->out + b->out_pos;
                dict->end = b->out_size - b->out_pos;
        }

        dict->start = 0;
        dict->pos = 0;
        dict->limit = 0;
        dict->full = 0;
}

/* Set dictionary write limit */
static void dict_limit(struct dictionary *dict, size_t out_max)
{
        if (dict->end - dict->pos <= out_max)
                dict->limit = dict->end;
        else
                dict->limit = dict->pos + out_max;
}

/* Return true if at least one byte can be written into the dictionary. */
static inline bool dict_has_space(const struct dictionary *dict)
{
        return dict->pos < dict->limit;
}

/*
 * Get a byte from the dictionary at the given distance. The distance is
 * assumed to be valid, or as a special case, zero when the dictionary is
 * still empty. This special case is needed for single-call decoding to
 * avoid writing a '\0' to the end of the destination buffer.
 */
static inline uint32_t dict_get(const struct dictionary *dict, uint32_t dist)
{
        size_t offset = dict->pos - dist - 1;

        if (dist >= dict->pos)
                offset += dict->end;

        return dict->full > 0 ? dict->buf[offset] : 0;
}
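
/*
 * Worked example (added for illustration, not from the original source):
 * with pos = 100 and dist = 0, offset = 100 - 0 - 1 = 99, i.e. the most
 * recently written byte. In multi-call mode the buffer is a ring, so with
 * pos = 2 and dist = 5 (dist >= pos), the size_t arithmetic wraps and
 * adding end yields offset = end - 4.
 */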

/*
 * Put one byte into the dictionary. It is assumed that there is space for it.
 */
static inline void dict_put(struct dictionary *dict, uint8_t byte)
{
        dict->buf[dict->pos++] = byte;

        if (dict->full < dict->pos)
                dict->full = dict->pos;
}

/*
 * Repeat the given number of bytes from the given distance. If the distance
 * is invalid, false is returned. On success, true is returned and *len is
 * updated to indicate how many bytes are left to be repeated.
 */
static bool dict_repeat(struct dictionary *dict, uint32_t *len, uint32_t dist)
{
        size_t back;
        uint32_t left;

        if (dist >= dict->full || dist >= dict->size)
                return false;

        left = min_t(size_t, dict->limit - dict->pos, *len);
        *len -= left;

        back = dict->pos - dist - 1;
        if (dist >= dict->pos)
                back += dict->end;

        do {
                dict->buf[dict->pos++] = dict->buf[back++];
                if (back == dict->end)
                        back = 0;
        } while (--left > 0);

        if (dict->full < dict->pos)
                dict->full = dict->pos;

        return true;
}
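
/*
 * Worked example (added for illustration, not from the original source):
 * matches may overlap their own output. With dist = 0 and *len = 4, each
 * iteration reads the byte written by the previous one, so a dictionary
 * ending in "a" gains "aaaa"; run-length encoding falls out of the normal
 * match copy as a special case.
 */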

/* Copy uncompressed data as is from input to dictionary and output buffers. */
static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b,
                              uint32_t *left)
{
        size_t copy_size;

        while (*left > 0 && b->in_pos < b->in_size
                        && b->out_pos < b->out_size) {
                copy_size = min(b->in_size - b->in_pos,
                                b->out_size - b->out_pos);
                if (copy_size > dict->end - dict->pos)
                        copy_size = dict->end - dict->pos;
                if (copy_size > *left)
                        copy_size = *left;

                *left -= copy_size;

                memcpy(dict->buf + dict->pos, b->in + b->in_pos, copy_size);
                dict->pos += copy_size;

                if (dict->full < dict->pos)
                        dict->full = dict->pos;

                if (DEC_IS_MULTI(dict->mode)) {
                        if (dict->pos == dict->end)
                                dict->pos = 0;

                        memcpy(b->out + b->out_pos, b->in + b->in_pos,
                                        copy_size);
                }

                dict->start = dict->pos;

                b->out_pos += copy_size;
                b->in_pos += copy_size;
        }
}

/*
 * Flush pending data from the dictionary to b->out. It is assumed that there
 * is enough space in b->out. This is guaranteed because the caller uses
 * dict_limit() before decoding data into the dictionary.
 */
static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b)
{
        size_t copy_size = dict->pos - dict->start;

        if (DEC_IS_MULTI(dict->mode)) {
                if (dict->pos == dict->end)
                        dict->pos = 0;

                memcpy(b->out + b->out_pos, dict->buf + dict->start,
                                copy_size);
        }

        dict->start = dict->pos;
        b->out_pos += copy_size;
        return copy_size;
}

/*****************
 * Range decoder *
 *****************/

/* Reset the range decoder. */
static void rc_reset(struct rc_dec *rc)
{
        rc->range = (uint32_t)-1;
        rc->code = 0;
        rc->init_bytes_left = RC_INIT_BYTES;
}

/*
 * Read the first five initial bytes into rc->code if they haven't been
 * read already. (Yes, the first byte gets completely ignored.)
 */
static bool rc_read_init(struct rc_dec *rc, struct xz_buf *b)
{
        while (rc->init_bytes_left > 0) {
                if (b->in_pos == b->in_size)
                        return false;

                rc->code = (rc->code << 8) + b->in[b->in_pos++];
                --rc->init_bytes_left;
        }

        return true;
}

/* Return true if there may not be enough input for the next decoding loop. */
static inline bool rc_limit_exceeded(const struct rc_dec *rc)
{
        return rc->in_pos > rc->in_limit;
}

/*
 * Return true if it is possible (from the point of view of the range
 * decoder) that we have reached the end of the LZMA chunk.
 */
static inline bool rc_is_finished(const struct rc_dec *rc)
{
        return rc->code == 0;
}

/* Read the next input byte if needed. */
static __always_inline void rc_normalize(struct rc_dec *rc)
{
        if (rc->range < RC_TOP_VALUE) {
                rc->range <<= RC_SHIFT_BITS;
                rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++];
        }
}

/*
 * Decode one bit. In some versions, this function has been split into three
 * functions so that the compiler is supposed to be able to more easily avoid
 * an extra branch. In this particular version of the LZMA decoder, this
 * doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3
 * on x86). Using a non-split version results in nicer looking code too.
 *
 * NOTE: This must return an int. Do not make it return a bool or the speed
 * of the code generated by GCC 3.x decreases 10-15 %. (GCC 4.3 doesn't care,
 * and it generates 10-20 % faster code than GCC 3.x from this file anyway.)
 */
static __always_inline int rc_bit(struct rc_dec *rc, uint16_t *prob)
{
        uint32_t bound;
        int bit;

        rc_normalize(rc);
        bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob;
        if (rc->code < bound) {
                rc->range = bound;
                *prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS;
                bit = 0;
        } else {
                rc->range -= bound;
                rc->code -= bound;
                *prob -= *prob >> RC_MOVE_BITS;
                bit = 1;
        }

        return bit;
}
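
/*
 * Worked example (added for illustration, not from the original source):
 * right after lzma_reset(), *prob == RC_BIT_MODEL_TOTAL / 2 == 1024. With
 * range == 0xFFFFFFFF:
 *
 *     bound = (0xFFFFFFFF >> 11) * 1024 = 0x7FFFFC00
 *
 * which is roughly half the range, as expected for a 50 % probability.
 * Decoding a 0 adapts *prob to 1024 + ((2048 - 1024) >> 5) = 1056, and
 * decoding a 1 adapts it to 1024 - (1024 >> 5) = 992; RC_MOVE_BITS = 5
 * means each bit moves the estimate by 1/32 of the remaining distance.
 */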

/* Decode a bittree starting from the most significant bit. */
static __always_inline uint32_t rc_bittree(struct rc_dec *rc,
                                           uint16_t *probs, uint32_t limit)
{
        uint32_t symbol = 1;

        do {
                if (rc_bit(rc, &probs[symbol]))
                        symbol = (symbol << 1) + 1;
                else
                        symbol <<= 1;
        } while (symbol < limit);

        return symbol;
}
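
/*
 * Worked example (added for illustration, not from the original source):
 * with limit = 8, three bits b2, b1, b0 are decoded and symbol ends up as
 * binary 1 b2 b1 b0 = 8 + (b2 << 2) + (b1 << 1) + b0. The leading 1 is how
 * probs[] indexes an implicit binary tree (the children of node i are 2*i
 * and 2*i + 1); callers subtract limit to drop it, as lzma_len() and
 * lzma_match() below do.
 */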

/* Decode a bittree starting from the least significant bit. */
static __always_inline void rc_bittree_reverse(struct rc_dec *rc,
                                               uint16_t *probs,
                                               uint32_t *dest, uint32_t limit)
{
        uint32_t symbol = 1;
        uint32_t i = 0;

        do {
                if (rc_bit(rc, &probs[symbol])) {
                        symbol = (symbol << 1) + 1;
                        *dest += 1 << i;
                } else {
                        symbol <<= 1;
                }
        } while (++i < limit);
}

/* Decode direct bits (fixed fifty-fifty probability) */
static inline void rc_direct(struct rc_dec *rc, uint32_t *dest, uint32_t limit)
{
        uint32_t mask;

        do {
                rc_normalize(rc);
                rc->range >>= 1;
                rc->code -= rc->range;
                mask = (uint32_t)0 - (rc->code >> 31);
                rc->code += rc->range & mask;
                *dest = (*dest << 1) + (mask + 1);
        } while (--limit > 0);
}
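
/*
 * Note on the branchless trick above (explanation added, not from the
 * original source): after "code -= range", the subtraction has underflowed
 * exactly when the decoded bit is 0. The sign bit of code is then set, so
 * mask = 0 - (code >> 31) becomes 0xFFFFFFFF and "code += range & mask"
 * undoes the subtraction, while mask + 1 == 0 appends a 0 bit to *dest.
 * Without underflow, mask == 0 and mask + 1 == 1 appends a 1 bit.
 */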

/********
 * LZMA *
 ********/

/* Get pointer to literal coder probability array. */
static uint16_t *lzma_literal_probs(struct xz_dec_lzma2 *s)
{
        uint32_t prev_byte = dict_get(&s->dict, 0);
        uint32_t low = prev_byte >> (8 - s->lzma.lc);
        uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc;
        return s->lzma.literal[low + high];
}

/* Decode a literal (one 8-bit byte) */
static void lzma_literal(struct xz_dec_lzma2 *s)
{
        uint16_t *probs;
        uint32_t symbol;
        uint32_t match_byte;
        uint32_t match_bit;
        uint32_t offset;
        uint32_t i;

        probs = lzma_literal_probs(s);

        if (lzma_state_is_literal(s->lzma.state)) {
                symbol = rc_bittree(&s->rc, probs, 0x100);
        } else {
                symbol = 1;
                match_byte = dict_get(&s->dict, s->lzma.rep0) << 1;
                offset = 0x100;

                do {
                        match_bit = match_byte & offset;
                        match_byte <<= 1;
                        i = offset + match_bit + symbol;

                        if (rc_bit(&s->rc, &probs[i])) {
                                symbol = (symbol << 1) + 1;
                                offset &= match_bit;
                        } else {
                                symbol <<= 1;
                                offset &= ~match_bit;
                        }
                } while (symbol < 0x100);
        }

        dict_put(&s->dict, (uint8_t)symbol);
        lzma_state_literal(&s->lzma.state);
}
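
/*
 * Note on the matched-literal branch above (explanation added, not from
 * the original source): right after a match, the byte at distance rep0 is
 * a strong predictor of the next literal. While the decoded bits agree
 * with the bits of that match byte, indices stay in a "matched" region of
 * probs[] (symbol + 0x100 or symbol + 0x200); on the first mismatch,
 * offset collapses to 0 and decoding continues with the plain literal
 * probabilities.
 */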

/* Decode the length of the match into s->lzma.len. */
static void lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l,
                     uint32_t pos_state)
{
        uint16_t *probs;
        uint32_t limit;

        if (!rc_bit(&s->rc, &l->choice)) {
                probs = l->low[pos_state];
                limit = LEN_LOW_SYMBOLS;
                s->lzma.len = MATCH_LEN_MIN;
        } else {
                if (!rc_bit(&s->rc, &l->choice2)) {
                        probs = l->mid[pos_state];
                        limit = LEN_MID_SYMBOLS;
                        s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS;
                } else {
                        probs = l->high;
                        limit = LEN_HIGH_SYMBOLS;
                        s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS
                                        + LEN_MID_SYMBOLS;
                }
        }

        s->lzma.len += rc_bittree(&s->rc, probs, limit) - limit;
}

/* Decode a match. The distance will be stored in s->lzma.rep0. */
static void lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
{
        uint16_t *probs;
        uint32_t dist_slot;
        uint32_t limit;

        lzma_state_match(&s->lzma.state);

        s->lzma.rep3 = s->lzma.rep2;
        s->lzma.rep2 = s->lzma.rep1;
        s->lzma.rep1 = s->lzma.rep0;

        lzma_len(s, &s->lzma.match_len_dec, pos_state);

        probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)];
        dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS;

        if (dist_slot < DIST_MODEL_START) {
                s->lzma.rep0 = dist_slot;
        } else {
                limit = (dist_slot >> 1) - 1;
                s->lzma.rep0 = 2 + (dist_slot & 1);

                if (dist_slot < DIST_MODEL_END) {
                        s->lzma.rep0 <<= limit;
                        probs = s->lzma.dist_special + s->lzma.rep0
                                        - dist_slot - 1;
                        rc_bittree_reverse(&s->rc, probs,
                                        &s->lzma.rep0, limit);
                } else {
                        rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS);
                        s->lzma.rep0 <<= ALIGN_BITS;
                        rc_bittree_reverse(&s->rc, s->lzma.dist_align,
                                        &s->lzma.rep0, ALIGN_BITS);
                }
        }
}
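
/*
 * Worked example (added for illustration, not from the original source):
 * suppose dist_slot = 9. Then limit = (9 >> 1) - 1 = 3 and rep0 starts as
 * 2 + (9 & 1) = 3. Since 9 < DIST_MODEL_END, rep0 is shifted to 3 << 3 =
 * 24 and the three low bits come from a reverse bittree, giving a distance
 * in [24, 31]. Slots >= DIST_MODEL_END instead read the middle bits as
 * direct (uncoded) bits and only model the lowest ALIGN_BITS bits.
 */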

/*
 * Decode a repeated match. The distance is one of the four most recently
 * seen matches. The distance will be stored in s->lzma.rep0.
 */
static void lzma_rep_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
{
        uint32_t tmp;

        if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state])) {
                if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[
                                s->lzma.state][pos_state])) {
                        lzma_state_short_rep(&s->lzma.state);
                        s->lzma.len = 1;
                        return;
                }
        } else {
                if (!rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state])) {
                        tmp = s->lzma.rep1;
                } else {
                        if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state])) {
                                tmp = s->lzma.rep2;
                        } else {
                                tmp = s->lzma.rep3;
                                s->lzma.rep3 = s->lzma.rep2;
                        }

                        s->lzma.rep2 = s->lzma.rep1;
                }

                s->lzma.rep1 = s->lzma.rep0;
                s->lzma.rep0 = tmp;
        }

        lzma_state_long_rep(&s->lzma.state);
        lzma_len(s, &s->lzma.rep_len_dec, pos_state);
}

/* LZMA decoder core */
static bool lzma_main(struct xz_dec_lzma2 *s)
{
        uint32_t pos_state;

        /*
         * If the dictionary limit was reached during the previous call, try
         * to finish the possibly pending repeat in the dictionary.
         */
        if (dict_has_space(&s->dict) && s->lzma.len > 0)
                dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0);

        /*
         * Decode more LZMA symbols. One iteration may consume up to
         * LZMA_IN_REQUIRED - 1 bytes.
         */
        while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc)) {
                pos_state = s->dict.pos & s->lzma.pos_mask;

                if (!rc_bit(&s->rc, &s->lzma.is_match[
                                s->lzma.state][pos_state])) {
                        lzma_literal(s);
                } else {
                        if (rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state]))
                                lzma_rep_match(s, pos_state);
                        else
                                lzma_match(s, pos_state);

                        if (!dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0))
                                return false;
                }
        }

        /*
         * Having the range decoder always normalized when we are outside
         * this function makes it easier to correctly handle end of the chunk.
         */
        rc_normalize(&s->rc);

        return true;
}

/*
 * Reset the LZMA decoder and range decoder state. The dictionary is not
 * reset here, because the LZMA state may be reset without resetting the
 * dictionary.
 */
static void lzma_reset(struct xz_dec_lzma2 *s)
{
        uint16_t *probs;
        size_t i;

        s->lzma.state = STATE_LIT_LIT;
        s->lzma.rep0 = 0;
        s->lzma.rep1 = 0;
        s->lzma.rep2 = 0;
        s->lzma.rep3 = 0;

        /*
         * All probabilities are initialized to the same value. This hack
         * makes the code smaller by avoiding a separate loop for each
         * probability array.
         *
         * This could be optimized so that only the part of the literal
         * probabilities that is actually required gets initialized. In
         * the common case we would write 12 KiB less.
         */
        probs = s->lzma.is_match[0];
        for (i = 0; i < PROBS_TOTAL; ++i)
                probs[i] = RC_BIT_MODEL_TOTAL / 2;

        rc_reset(&s->rc);
}

/*
 * Decode and validate LZMA properties (lc/lp/pb) and calculate the bit masks
 * from the decoded lp and pb values. On success, the LZMA decoder state is
 * reset and true is returned.
 */
static bool lzma_props(struct xz_dec_lzma2 *s, uint8_t props)
{
        if (props > (4 * 5 + 4) * 9 + 8)
                return false;

        s->lzma.pos_mask = 0;
        while (props >= 9 * 5) {
                props -= 9 * 5;
                ++s->lzma.pos_mask;
        }

        s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1;

        s->lzma.literal_pos_mask = 0;
        while (props >= 9) {
                props -= 9;
                ++s->lzma.literal_pos_mask;
        }

        s->lzma.lc = props;

        if (s->lzma.lc + s->lzma.literal_pos_mask > 4)
                return false;

        s->lzma.literal_pos_mask = (1 << s->lzma.literal_pos_mask) - 1;

        lzma_reset(s);

        return true;
}
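
/*
 * Worked example (added for illustration, not from the original source):
 * the common xz default lc=3, lp=0, pb=2 is encoded as
 * props = (pb * 5 + lp) * 9 + lc = (2 * 5 + 0) * 9 + 3 = 93 = 0x5D. The
 * two loops above recover pb and lp by repeated subtraction, and the
 * lc + lp <= 4 check bounds the number of literal coder tables at
 * LITERAL_CODERS_MAX (1 << (lc + lp) tables are indexed by
 * lzma_literal_probs()).
 */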

/*********
 * LZMA2 *
 *********/

/*
 * The LZMA decoder assumes that if the input limit (s->rc.in_limit) hasn't
 * been exceeded, it is safe to read up to LZMA_IN_REQUIRED bytes. This
 * wrapper function takes care of making the LZMA decoder's assumption safe.
 *
 * As long as there is plenty of input left to be decoded in the current LZMA
 * chunk, we decode directly from the caller-supplied input buffer until
 * there are LZMA_IN_REQUIRED bytes left. Those remaining bytes are copied
 * into s->temp.buf, which (hopefully) gets filled on the next call to this
 * function. We decode a few bytes from the temporary buffer so that we can
 * continue decoding from the caller-supplied input buffer again.
 */
static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
{
        size_t in_avail;
        uint32_t tmp;

        in_avail = b->in_size - b->in_pos;
        if (s->temp.size > 0 || s->lzma2.compressed == 0) {
                tmp = 2 * LZMA_IN_REQUIRED - s->temp.size;
                if (tmp > s->lzma2.compressed - s->temp.size)
                        tmp = s->lzma2.compressed - s->temp.size;
                if (tmp > in_avail)
                        tmp = in_avail;

                memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp);

                if (s->temp.size + tmp == s->lzma2.compressed) {
                        memzero(s->temp.buf + s->temp.size + tmp,
                                        sizeof(s->temp.buf)
                                                - s->temp.size - tmp);
                        s->rc.in_limit = s->temp.size + tmp;
                } else if (s->temp.size + tmp < LZMA_IN_REQUIRED) {
                        s->temp.size += tmp;
                        b->in_pos += tmp;
                        return true;
                } else {
                        s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED;
                }

                s->rc.in = s->temp.buf;
                s->rc.in_pos = 0;

                if (!lzma_main(s) || s->rc.in_pos > s->temp.size + tmp)
                        return false;

                s->lzma2.compressed -= s->rc.in_pos;

                if (s->rc.in_pos < s->temp.size) {
                        s->temp.size -= s->rc.in_pos;
                        memmove(s->temp.buf, s->temp.buf + s->rc.in_pos,
                                        s->temp.size);
                        return true;
                }

                b->in_pos += s->rc.in_pos - s->temp.size;
                s->temp.size = 0;
        }

        in_avail = b->in_size - b->in_pos;
        if (in_avail >= LZMA_IN_REQUIRED) {
                s->rc.in = b->in;
                s->rc.in_pos = b->in_pos;

                if (in_avail >= s->lzma2.compressed + LZMA_IN_REQUIRED)
                        s->rc.in_limit = b->in_pos + s->lzma2.compressed;
                else
                        s->rc.in_limit = b->in_size - LZMA_IN_REQUIRED;

                if (!lzma_main(s))
                        return false;

                in_avail = s->rc.in_pos - b->in_pos;
                if (in_avail > s->lzma2.compressed)
                        return false;

                s->lzma2.compressed -= in_avail;
                b->in_pos = s->rc.in_pos;
        }

        in_avail = b->in_size - b->in_pos;
        if (in_avail < LZMA_IN_REQUIRED) {
                if (in_avail > s->lzma2.compressed)
                        in_avail = s->lzma2.compressed;

                memcpy(s->temp.buf, b->in + b->in_pos, in_avail);
                s->temp.size = in_avail;
                b->in_pos += in_avail;
        }

        return true;
}

/*
 * Take care of the LZMA2 control layer, and forward the job of actual LZMA
 * decoding or copying of uncompressed chunks to other functions.
 */
XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
                                       struct xz_buf *b)
{
        uint32_t tmp;

        while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN) {
                switch (s->lzma2.sequence) {
                case SEQ_CONTROL:
                        /*
                         * LZMA2 control byte
                         *
                         * Exact values:
                         *   0x00   End marker
                         *   0x01   Dictionary reset followed by
                         *          an uncompressed chunk
                         *   0x02   Uncompressed chunk (no dictionary reset)
                         *
                         * Highest three bits (tmp & 0xE0):
                         *   0xE0   Dictionary reset, new properties and state
                         *          reset, followed by LZMA compressed chunk
                         *   0xC0   New properties and state reset, followed
                         *          by LZMA compressed chunk (no dictionary
                         *          reset)
                         *   0xA0   State reset using old properties,
                         *          followed by LZMA compressed chunk (no
                         *          dictionary reset)
                         *   0x80   LZMA chunk (no dictionary or state reset)
                         *
                         * For LZMA compressed chunks, the lowest five bits
                         * (tmp & 0x1F) are the highest bits of the
                         * uncompressed size (bits 16-20).
                         *
                         * A new LZMA2 stream must begin with a dictionary
                         * reset. The first LZMA chunk must set new
                         * properties and reset the LZMA state.
                         *
                         * Values that don't match anything described above
                         * are invalid and we return XZ_DATA_ERROR.
                         */
                        tmp = b->in[b->in_pos++];

                        if (tmp == 0x00)
                                return XZ_STREAM_END;

                        if (tmp >= 0xE0 || tmp == 0x01) {
                                s->lzma2.need_props = true;
                                s->lzma2.need_dict_reset = false;
                                dict_reset(&s->dict, b);
                        } else if (s->lzma2.need_dict_reset) {
                                return XZ_DATA_ERROR;
                        }

                        if (tmp >= 0x80) {
                                s->lzma2.uncompressed = (tmp & 0x1F) << 16;
                                s->lzma2.sequence = SEQ_UNCOMPRESSED_1;

                                if (tmp >= 0xC0) {
                                        /*
                                         * When there are new properties,
                                         * state reset is done at
                                         * SEQ_PROPERTIES.
                                         */
                                        s->lzma2.need_props = false;
                                        s->lzma2.next_sequence
                                                        = SEQ_PROPERTIES;

                                } else if (s->lzma2.need_props) {
                                        return XZ_DATA_ERROR;

                                } else {
                                        s->lzma2.next_sequence
                                                        = SEQ_LZMA_PREPARE;
                                        if (tmp >= 0xA0)
                                                lzma_reset(s);
                                }
                        } else {
                                if (tmp > 0x02)
                                        return XZ_DATA_ERROR;

                                s->lzma2.sequence = SEQ_COMPRESSED_0;
                                s->lzma2.next_sequence = SEQ_COPY;
                        }

                        break;

                case SEQ_UNCOMPRESSED_1:
                        s->lzma2.uncompressed
                                        += (uint32_t)b->in[b->in_pos++] << 8;
                        s->lzma2.sequence = SEQ_UNCOMPRESSED_2;
                        break;

                case SEQ_UNCOMPRESSED_2:
                        s->lzma2.uncompressed
                                        += (uint32_t)b->in[b->in_pos++] + 1;
                        s->lzma2.sequence = SEQ_COMPRESSED_0;
                        break;

                case SEQ_COMPRESSED_0:
                        s->lzma2.compressed
                                        = (uint32_t)b->in[b->in_pos++] << 8;
                        s->lzma2.sequence = SEQ_COMPRESSED_1;
                        break;

                case SEQ_COMPRESSED_1:
                        s->lzma2.compressed
                                        += (uint32_t)b->in[b->in_pos++] + 1;
                        s->lzma2.sequence = s->lzma2.next_sequence;
                        break;

                case SEQ_PROPERTIES:
                        if (!lzma_props(s, b->in[b->in_pos++]))
                                return XZ_DATA_ERROR;

                        s->lzma2.sequence = SEQ_LZMA_PREPARE;

                /* Fall through */

                case SEQ_LZMA_PREPARE:
                        if (s->lzma2.compressed < RC_INIT_BYTES)
                                return XZ_DATA_ERROR;

                        if (!rc_read_init(&s->rc, b))
                                return XZ_OK;

                        s->lzma2.compressed -= RC_INIT_BYTES;
                        s->lzma2.sequence = SEQ_LZMA_RUN;

                /* Fall through */

                case SEQ_LZMA_RUN:
                        /*
                         * Set the dictionary limit to indicate how much we
                         * want to decode at maximum. Decode new data into
                         * the dictionary. Flush the new data from the
                         * dictionary to b->out. Check if we finished
                         * decoding this chunk. In case the dictionary got
                         * full but we didn't fill the output buffer yet, we
                         * may run this loop multiple times without changing
                         * s->lzma2.sequence.
                         */
                        dict_limit(&s->dict, min_t(size_t,
                                        b->out_size - b->out_pos,
                                        s->lzma2.uncompressed));
                        if (!lzma2_lzma(s, b))
                                return XZ_DATA_ERROR;

                        s->lzma2.uncompressed -= dict_flush(&s->dict, b);

                        if (s->lzma2.uncompressed == 0) {
                                if (s->lzma2.compressed > 0 || s->lzma.len > 0
                                                || !rc_is_finished(&s->rc))
                                        return XZ_DATA_ERROR;

                                rc_reset(&s->rc);
                                s->lzma2.sequence = SEQ_CONTROL;

                        } else if (b->out_pos == b->out_size
                                        || (b->in_pos == b->in_size
                                                && s->temp.size
                                                < s->lzma2.compressed)) {
                                return XZ_OK;
                        }

                        break;

                case SEQ_COPY:
                        dict_uncompressed(&s->dict, b, &s->lzma2.compressed);
                        if (s->lzma2.compressed > 0)
                                return XZ_OK;

                        s->lzma2.sequence = SEQ_CONTROL;
                        break;
                }
        }

        return XZ_OK;
}
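
/*
 * Worked example (added for illustration, not from the original source):
 * a typical first chunk header is the byte sequence
 *
 *     0xE0 0x00 0xFF 0x00 0x20 0x5D
 *
 * 0xE0 requests a dictionary reset plus new properties and a state reset.
 * The uncompressed size is (0x00 << 16) + (0x00 << 8) + 0xFF + 1 = 256
 * bytes, the compressed size is (0x00 << 8) + 0x20 + 1 = 33 bytes, and
 * 0x5D is the properties byte that lzma_props() decodes.
 */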

XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
                                                   uint32_t dict_max)
{
        struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (s == NULL)
                return NULL;

        s->dict.mode = mode;
        s->dict.size_max = dict_max;

        if (DEC_IS_PREALLOC(mode)) {
                s->dict.buf = vmalloc(dict_max);
                if (s->dict.buf == NULL) {
                        kfree(s);
                        return NULL;
                }
        } else if (DEC_IS_DYNALLOC(mode)) {
                s->dict.buf = NULL;
                s->dict.allocated = 0;
        }

        return s;
}

XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
{
        /* This limits dictionary size to 3 GiB to keep parsing simpler. */
        if (props > 39)
                return XZ_OPTIONS_ERROR;

        s->dict.size = 2 + (props & 1);
        s->dict.size <<= (props >> 1) + 11;

        if (DEC_IS_MULTI(s->dict.mode)) {
                if (s->dict.size > s->dict.size_max)
                        return XZ_MEMLIMIT_ERROR;

                s->dict.end = s->dict.size;

                if (DEC_IS_DYNALLOC(s->dict.mode)) {
                        if (s->dict.allocated < s->dict.size) {
                                s->dict.allocated = s->dict.size;
                                vfree(s->dict.buf);
                                s->dict.buf = vmalloc(s->dict.size);
                                if (s->dict.buf == NULL) {
                                        s->dict.allocated = 0;
                                        return XZ_MEM_ERROR;
                                }
                        }
                }
        }

        s->lzma.len = 0;

        s->lzma2.sequence = SEQ_CONTROL;
        s->lzma2.need_dict_reset = true;

        s->temp.size = 0;

        return XZ_OK;
}
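
/*
 * Worked example (added for illustration, not from the original source):
 * the encoded dictionary size is (2 + (props & 1)) << ((props >> 1) + 11),
 * so props = 0 gives 2 << 11 = 4 KiB (the minimum), props = 1 gives 6 KiB,
 * and the largest accepted value, props = 39, gives 3 << 30 = 3 GiB.
 */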

XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s)
{
        if (DEC_IS_MULTI(s->dict.mode))
                vfree(s->dict.buf);

        kfree(s);
}
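
/*
 * Usage sketch (added for illustration, not part of the original file).
 * In the kernel these functions are driven by xz_dec_stream.c as part of
 * decoding a full .xz stream; a hypothetical direct caller in multi-call
 * mode would look roughly like this, refilling b.in and draining b.out
 * whenever xz_dec_lzma2_run() returns XZ_OK (dict_size_props here is the
 * encoded dictionary-size byte accepted by xz_dec_lzma2_reset()):
 *
 *     struct xz_buf b = {
 *             .in = in_buf, .in_pos = 0, .in_size = in_len,
 *             .out = out_buf, .out_pos = 0, .out_size = out_len,
 *     };
 *     struct xz_dec_lzma2 *s;
 *     enum xz_ret ret;
 *
 *     s = xz_dec_lzma2_create(XZ_PREALLOC, 1 << 20);
 *     if (s == NULL || xz_dec_lzma2_reset(s, dict_size_props) != XZ_OK)
 *             return -1;
 *
 *     do {
 *             ret = xz_dec_lzma2_run(s, &b);
 *     } while (ret == XZ_OK && b.in_pos < b.in_size);
 *
 *     xz_dec_lzma2_end(s);
 */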
