GRASS GIS 8 Programmer's Manual 8.3.2 (2024)
lz4.c
1/*
2 LZ4 - Fast LZ compression algorithm
3 Copyright (C) 2011-2017, Yann Collet.
4
5 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are
9 met:
10
11 * Redistributions of source code must retain the above copyright
12 notice, this list of conditions and the following disclaimer.
13 * Redistributions in binary form must reproduce the above
14 copyright notice, this list of conditions and the following disclaimer
15 in the documentation and/or other materials provided with the
16 distribution.
17
18 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30 You can contact the author at :
31 - LZ4 homepage : http://www.lz4.org
32 - LZ4 source repository : https://github.com/lz4/lz4
33 */
34
35/*-************************************
36 * Tuning parameters
37 **************************************/
38/*
39 * LZ4_HEAPMODE :
40 * Select how the default compression functions allocate memory for their
41 * hash table: on the stack (0: default, fastest) or on the heap (1: requires
42 * malloc()).
43 */
44#ifndef LZ4_HEAPMODE
45#define LZ4_HEAPMODE 0
46#endif
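/* Illustration (not part of upstream lz4.c): LZ4_HEAPMODE is a compile-time
 * switch, normally set on the compiler command line, e.g.
 *
 *     cc -DLZ4_HEAPMODE=1 -c lz4.c
 *
 * Heap mode trades a malloc()/free() pair per call for keeping the hash
 * table (16 KB with the default LZ4_MEMORY_USAGE) off the stack.
 */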
47
48/*
49 * ACCELERATION_DEFAULT :
50 * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
51 */
52#define ACCELERATION_DEFAULT 1
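/* Illustration: per LZ4_compress_fast_extState() below, any caller-supplied
 * acceleration < 1 falls back to this default, so these two calls behave
 * identically:
 *
 *     LZ4_compress_fast(src, dst, srcSize, dstCapacity, 0);
 *     LZ4_compress_fast(src, dst, srcSize, dstCapacity, 1);
 */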
53
54/*-************************************
55 * CPU Feature Detection
56 **************************************/
57/* LZ4_FORCE_MEMORY_ACCESS
58 * By default, access to unaligned memory is controlled by `memcpy()`, which is
59 * safe and portable. Unfortunately, on some target/compiler combinations, the
60 * generated assembly is sub-optimal. The switch below allows selecting a
61 * different access method for improved performance.
62 * Method 0 (default) : use `memcpy()`. Safe and portable.
63 * Method 1 : `__packed` statement. It depends on a compiler extension
64 * (i.e., not portable).
65 * This method is safe if your compiler supports it, and *generally*
66 * as fast or faster than `memcpy`.
67 * Method 2 : direct access. This method is portable but violates the C
68 * standard. It can generate buggy code on targets whose assembly
69 * generation depends on alignment. But in some circumstances, it's
70 * the only known way to get the most performance (e.g., GCC + ARMv6).
71 * See
72 * https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html
73 * for details.
74 * Prefer these methods in priority order (0 > 1 > 2).
75 */
76#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */
77#if defined(__GNUC__) && \
78 (defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
79 defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
80 defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__))
81#define LZ4_FORCE_MEMORY_ACCESS 2
82#elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
83#define LZ4_FORCE_MEMORY_ACCESS 1
84#endif
85#endif
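/* Illustration: as the "can be defined externally" note indicates, the
 * heuristic above can be overridden at build time, e.g.
 *
 *     cc -DLZ4_FORCE_MEMORY_ACCESS=0 -c lz4.c
 *
 * to keep the strictly portable memcpy() path on every target.
 */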
86
87/*
88 * LZ4_FORCE_SW_BITCOUNT
89 * Define this parameter if your target system or compiler does not support
90 * hardware bit count
91 */
92#if defined(_MSC_VER) && \
93 defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware \
94 bit count */
95#define LZ4_FORCE_SW_BITCOUNT
96#endif
97
98/*-************************************
99 * Dependency
100 **************************************/
101#define LZ4_STATIC_LINKING_ONLY
102#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to \
103 LZ4_decompress_safe_withPrefix64k */
104#include "lz4.h"
105/* see also "memory routines" below */
106
107/*-************************************
108 * Compiler Options
109 **************************************/
110#ifdef _MSC_VER /* Visual Studio */
111#include <intrin.h>
112#pragma warning( \
113 disable : 4127) /* disable: C4127: conditional expression is constant */
114#pragma warning(disable : 4293) /* disable: C4293: too large shift (32-bits) \
115 */
116#endif /* _MSC_VER */
117
118#ifndef LZ4_FORCE_INLINE
119#ifdef _MSC_VER /* Visual Studio */
120#define LZ4_FORCE_INLINE static __forceinline
121#else
122#if defined(__cplusplus) || \
123 defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
124#ifdef __GNUC__
125#define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
126#else
127#define LZ4_FORCE_INLINE static inline
128#endif
129#else
130#define LZ4_FORCE_INLINE static
131#endif /* __STDC_VERSION__ */
132#endif /* _MSC_VER */
133#endif /* LZ4_FORCE_INLINE */
134
135/* LZ4_FORCE_O2_GCC_PPC64LE and LZ4_FORCE_O2_INLINE_GCC_PPC64LE
136 * Gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy,
137 * together with a simple 8-byte copy loop as a fall-back path.
138 * However, this optimization hurts the decompression speed by >30%,
139 * because the execution does not go to the optimized loop
140 * for typical compressible data, and all of the preamble checks
141 * before going to the fall-back path become useless overhead.
142 * This optimization happens only with the -O3 flag, and -O2 generates
143 * a simple 8-byte copy loop.
144 * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy
145 * functions are annotated with __attribute__((optimize("O2"))),
146 * and also LZ4_wildCopy is forcibly inlined, so that the O2 attribute
147 * of LZ4_wildCopy does not affect the compression speed.
148 */
149#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__)
150#define LZ4_FORCE_O2_GCC_PPC64LE __attribute__((optimize("O2")))
151#define LZ4_FORCE_O2_INLINE_GCC_PPC64LE \
152 __attribute__((optimize("O2"))) LZ4_FORCE_INLINE
153#else
154#define LZ4_FORCE_O2_GCC_PPC64LE
155#define LZ4_FORCE_O2_INLINE_GCC_PPC64LE static
156#endif
157
158#if (defined(__GNUC__) && (__GNUC__ >= 3)) || \
159 (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || \
160 defined(__clang__)
161#define expect(expr, value) (__builtin_expect((expr), (value)))
162#else
163#define expect(expr, value) (expr)
164#endif
165
166#ifndef likely
167#define likely(expr) expect((expr) != 0, 1)
168#endif
169#ifndef unlikely
170#define unlikely(expr) expect((expr) != 0, 0)
171#endif
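/* Illustration: these wrappers feed static branch-prediction hints to the
 * compiler on hot paths, as in the main compression loop below:
 *
 *     if (unlikely(forwardIp > mflimitPlusOne))
 *         goto _last_literals;
 *
 * On compilers without __builtin_expect(), they reduce to the bare condition.
 */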
172
173/*-************************************
174 * Memory routines
175 **************************************/
176#include <stdlib.h> /* malloc, calloc, free */
177#define ALLOC(s) malloc(s)
178#define ALLOC_AND_ZERO(s) calloc(1, s)
179#define FREEMEM(p) free(p)
180#include <string.h> /* memset, memcpy */
181#define MEM_INIT(p, v, s) memset((p), (v), (s))
182
183/*-************************************
184 * Basic Types
185 **************************************/
186#if defined(__cplusplus) || \
187 (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
188#include <stdint.h>
189typedef uint8_t BYTE;
190typedef uint16_t U16;
191typedef uint32_t U32;
192typedef int32_t S32;
193typedef uint64_t U64;
194typedef uintptr_t uptrval;
195#else
196typedef unsigned char BYTE;
197typedef unsigned short U16;
198typedef unsigned int U32;
199typedef signed int S32;
200typedef unsigned long long U64;
201typedef size_t uptrval; /* generally true, except OpenVMS-64 */
202#endif
203
204#if defined(__x86_64__)
205typedef U64 reg_t; /* 64-bits in x32 mode */
206#else
207typedef size_t reg_t; /* 32-bits in x32 mode */
208#endif
209
210/*-************************************
211 * Reading and writing into memory
212 **************************************/
213static unsigned LZ4_isLittleEndian(void)
214{
215 const union {
216 U32 u;
217 BYTE c[4];
218 } one = {1}; /* don't use static : performance detrimental */
219 return one.c[0];
220}
221
222#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS == 2)
223/* lie to the compiler about data alignment; use with caution */
224
225static U16 LZ4_read16(const void *memPtr)
226{
227 return *(const U16 *)memPtr;
228}
229
230static U32 LZ4_read32(const void *memPtr)
231{
232 return *(const U32 *)memPtr;
233}
234
235static reg_t LZ4_read_ARCH(const void *memPtr)
236{
237 return *(const reg_t *)memPtr;
238}
239
240static void LZ4_write16(void *memPtr, U16 value)
241{
242 *(U16 *)memPtr = value;
243}
244
245static void LZ4_write32(void *memPtr, U32 value)
246{
247 *(U32 *)memPtr = value;
248}
249
250#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS == 1)
251
252/* __pack instructions are safer, but compiler specific, hence potentially
253 * problematic for some compilers */
254/* currently only defined for gcc and icc */
255typedef union {
256 U16 u16;
257 U32 u32;
258 reg_t uArch;
259} __attribute__((packed)) unalign;
260
261static U16 LZ4_read16(const void *ptr)
262{
263 return ((const unalign *)ptr)->u16;
264}
265
266static U32 LZ4_read32(const void *ptr)
267{
268 return ((const unalign *)ptr)->u32;
269}
270
271static reg_t LZ4_read_ARCH(const void *ptr)
272{
273 return ((const unalign *)ptr)->uArch;
274}
275
276static void LZ4_write16(void *memPtr, U16 value)
277{
278 ((unalign *)memPtr)->u16 = value;
279}
280
281static void LZ4_write32(void *memPtr, U32 value)
282{
283 ((unalign *)memPtr)->u32 = value;
284}
285
286#else /* safe and portable access through memcpy() */
287
288static U16 LZ4_read16(const void *memPtr)
289{
290 U16 val;
291
292 memcpy(&val, memPtr, sizeof(val));
293 return val;
294}
295
296static U32 LZ4_read32(const void *memPtr)
297{
298 U32 val;
299
300 memcpy(&val, memPtr, sizeof(val));
301 return val;
302}
303
304static reg_t LZ4_read_ARCH(const void *memPtr)
305{
306 reg_t val;
307
308 memcpy(&val, memPtr, sizeof(val));
309 return val;
310}
311
312static void LZ4_write16(void *memPtr, U16 value)
313{
314 memcpy(memPtr, &value, sizeof(value));
315}
316
317static void LZ4_write32(void *memPtr, U32 value)
318{
319 memcpy(memPtr, &value, sizeof(value));
320}
321
322#endif /* LZ4_FORCE_MEMORY_ACCESS */
323
324static U16 LZ4_readLE16(const void *memPtr)
325{
326 if (LZ4_isLittleEndian()) {
327 return LZ4_read16(memPtr);
328 }
329 else {
330 const BYTE *p = (const BYTE *)memPtr;
331
332 return (U16)((U16)p[0] + (p[1] << 8));
333 }
334}
335
336static void LZ4_writeLE16(void *memPtr, U16 value)
337{
338 if (LZ4_isLittleEndian()) {
339 LZ4_write16(memPtr, value);
340 }
341 else {
342 BYTE *p = (BYTE *)memPtr;
343
344 p[0] = (BYTE)value;
345 p[1] = (BYTE)(value >> 8);
346 }
347}
348
349/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd
350 */
351LZ4_FORCE_O2_INLINE_GCC_PPC64LE
352void LZ4_wildCopy(void *dstPtr, const void *srcPtr, void *dstEnd)
353{
354 BYTE *d = (BYTE *)dstPtr;
355 const BYTE *s = (const BYTE *)srcPtr;
356 BYTE *const e = (BYTE *)dstEnd;
357
358 do {
359 memcpy(d, s, 8);
360 d += 8;
361 s += 8;
362 } while (d < e);
363}
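/* Illustration of the caller contract: since the loop above copies in 8-byte
 * strides, the destination must have at least WILDCOPYLENGTH (8) bytes of
 * slack past dstEnd. A typical call from the literal-copy path below:
 *
 *     LZ4_wildCopy(op, anchor, op + litLength);  // op + litLength + 8 must
 *     op += litLength;                           // stay inside the buffer
 */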
364
365/*-************************************
366 * Common Constants
367 **************************************/
368#define MINMATCH 4
370#define WILDCOPYLENGTH 8
371#define LASTLITERALS 5
372#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)
373static const int LZ4_minLength = (MFLIMIT + 1);
375#define KB *(1 << 10)
376#define MB *(1 << 20)
377#define GB *(1U << 30)
379#define MAXD_LOG 16
380#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
382#define ML_BITS 4
383#define ML_MASK ((1U << ML_BITS) - 1)
384#define RUN_BITS (8 - ML_BITS)
385#define RUN_MASK ((1U << RUN_BITS) - 1)
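/* Worked example of the sequence token implied by these masks: each sequence
 * starts with one token byte, literal length in the high 4 bits, match
 * length in the low 4 bits. For token 0x52:
 *
 *     literal length = token >> ML_BITS;              // 0x52 >> 4 = 5
 *     match length   = (token & ML_MASK) + MINMATCH;  // 2 + 4     = 6
 *
 * A field equal to RUN_MASK / ML_MASK (15) means the length continues in the
 * following byte(s), 255 per byte, until a byte < 255.
 */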
386
387/*-************************************
388 * Error detection
389 **************************************/
390#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 1)
391#include <assert.h>
392#else
393#ifndef assert
394#define assert(condition) ((void)0)
395#endif
396#endif
398#define LZ4_STATIC_ASSERT(c) \
399 { \
400 enum { LZ4_static_assert = 1 / (int)(!!(c)) }; \
401 } /* use after variable declarations */
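/* Illustration: the 1 / (int)(!!(c)) trick makes compilation fail when the
 * condition is false, e.g. a hypothetical check:
 *
 *     LZ4_STATIC_ASSERT(sizeof(U32) == 4);   // divides by zero if false
 *
 * It expands to an enum declaration, hence "use after variable declarations"
 * in C89 blocks.
 */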
402
403#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 2)
404#include <stdio.h>
405static int g_debuglog_enable = 1;
406
407#define DEBUGLOG(l, ...) \
408 { \
409 if ((g_debuglog_enable) && (l <= LZ4_DEBUG)) { \
410 fprintf(stderr, __FILE__ ": "); \
411 fprintf(stderr, __VA_ARGS__); \
412 fprintf(stderr, " \n"); \
413 } \
414 }
415#else
416#define DEBUGLOG(l, ...) \
417 { \
418 } /* disabled */
419#endif
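/* Illustration: enable the logging path by building with e.g. -DLZ4_DEBUG=5;
 * a statement such as
 *
 *     DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
 *
 * then prints to stderr for levels l <= LZ4_DEBUG, and compiles to nothing
 * otherwise.
 */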
420
421/*-************************************
422 * Common functions
423 **************************************/
424static unsigned LZ4_NbCommonBytes(reg_t val)
425{
426 if (LZ4_isLittleEndian()) {
427 if (sizeof(val) == 8) {
428#if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
429 unsigned long r = 0;
430
431 _BitScanForward64(&r, (U64)val);
432 return (int)(r >> 3);
433#elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 3))) && \
434 !defined(LZ4_FORCE_SW_BITCOUNT)
435 return (__builtin_ctzll((U64)val) >> 3);
436#else
437 static const int DeBruijnBytePos[64] = {
438 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7,
439 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7,
440 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6,
441 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7};
442 return DeBruijnBytePos[((U64)((val & -(long long)val) *
443 0x0218A392CDABBD3FULL)) >>
444 58];
445#endif
446 }
447 else { /* 32 bits */
448#if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
449 unsigned long r;
450
451 _BitScanForward(&r, (U32)val);
452 return (int)(r >> 3);
453#elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 3))) && \
454 !defined(LZ4_FORCE_SW_BITCOUNT)
455 return (__builtin_ctz((U32)val) >> 3);
456#else
457 static const int DeBruijnBytePos[32] = {
458 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1,
459 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1};
460 return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >>
461 27];
462#endif
463 }
464 }
465 else { /* Big Endian CPU */
466 if (sizeof(val) == 8) { /* 64-bits */
467#if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
468 unsigned long r = 0;
469
470 _BitScanReverse64(&r, val);
471 return (unsigned)(r >> 3);
472#elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 3))) && \
473 !defined(LZ4_FORCE_SW_BITCOUNT)
474 return (__builtin_clzll((U64)val) >> 3);
475#else
476 static const U32 by32 =
477 sizeof(val) *
478 4; /* 32 on 64 bits (goal), 16 on 32 bits.
479 Just to avoid some static analyzer complaining about shift
480 by 32 on 32-bits target. Note that this code path is never
481 triggered in 32-bits mode. */
482 unsigned r;
483
484 if (!(val >> by32)) {
485 r = 4;
486 }
487 else {
488 r = 0;
489 val >>= by32;
490 }
491 if (!(val >> 16)) {
492 r += 2;
493 val >>= 8;
494 }
495 else {
496 val >>= 24;
497 }
498 r += (!val);
499 return r;
500#endif
501 }
502 else { /* 32 bits */
503#if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
504 unsigned long r = 0;
505
506 _BitScanReverse(&r, (unsigned long)val);
507 return (unsigned)(r >> 3);
508#elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 3))) && \
509 !defined(LZ4_FORCE_SW_BITCOUNT)
510 return (__builtin_clz((U32)val) >> 3);
511#else
512 unsigned r;
513
514 if (!(val >> 16)) {
515 r = 2;
516 val >>= 8;
517 }
518 else {
519 r = 0;
520 val >>= 24;
521 }
522 r += (!val);
523 return r;
524#endif
525 }
526 }
527}
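/* Worked example (little-endian, 64-bit): LZ4_count() below feeds this
 * function the XOR of 8 input bytes and 8 match bytes, so the lowest
 * non-zero bit marks the first mismatching byte. For
 * diff = 0x0000000000010000:
 *
 *     __builtin_ctzll(diff) = 16  ->  16 >> 3 = 2 common bytes
 *
 * The De Bruijn tables above give the same answer without a hardware
 * bit-scan instruction.
 */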
529#define STEPSIZE sizeof(reg_t)
530LZ4_FORCE_O2_INLINE_GCC_PPC64LE
531unsigned LZ4_count(const BYTE *pIn, const BYTE *pMatch, const BYTE *pInLimit)
532{
533 const BYTE *const pStart = pIn;
534
535 if (likely(pIn < pInLimit - (STEPSIZE - 1))) {
536 reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
537
538 if (!diff) {
539 pIn += STEPSIZE;
540 pMatch += STEPSIZE;
541 }
542 else {
543 return LZ4_NbCommonBytes(diff);
544 }
545 }
546
547 while (likely(pIn < pInLimit - (STEPSIZE - 1))) {
548 reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
549
550 if (!diff) {
551 pIn += STEPSIZE;
552 pMatch += STEPSIZE;
553 continue;
554 }
555 pIn += LZ4_NbCommonBytes(diff);
556 return (unsigned)(pIn - pStart);
557 }
558
559 if ((STEPSIZE == 8) && (pIn < (pInLimit - 3)) &&
560 (LZ4_read32(pMatch) == LZ4_read32(pIn))) {
561 pIn += 4;
562 pMatch += 4;
563 }
564 if ((pIn < (pInLimit - 1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) {
565 pIn += 2;
566 pMatch += 2;
567 }
568 if ((pIn < pInLimit) && (*pMatch == *pIn))
569 pIn++;
570 return (unsigned)(pIn - pStart);
571}
572
573#ifndef LZ4_COMMONDEFS_ONLY
574
575/*-************************************
576 * Local Constants
577 **************************************/
578static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT - 1));
579static const U32 LZ4_skipTrigger = 6; /* Increase this value ==> compression
580 runs slower on incompressible data */
581
582/*-************************************
583 * Local Structures and types
584 **************************************/
585typedef enum {
586 notLimited = 0,
587 limitedOutput = 1,
588 fillOutput = 2
589} limitedOutput_directive;
590typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
591
592/**
593 * This enum distinguishes several different modes of accessing previous
594 * content in the stream.
595 *
596 * - noDict : There is no preceding content.
597 * - withPrefix64k : Table entries up to ctx->dictSize before the current
598 * blob being compressed are valid and refer to the preceding
599 * content (of length ctx->dictSize), which is available
600 * contiguously preceding in memory the content currently
601 * being compressed.
602 * - usingExtDict : Like withPrefix64k, but the preceding content is somewhere
603 * else in memory, starting at ctx->dictionary with length
604 * ctx->dictSize.
605 * - usingDictCtx : Like usingExtDict, but everything concerning the preceding
606 * content is in a separate context, pointed to by
607 * ctx->dictCtx. ctx->dictionary, ctx->dictSize, and table
608 * entries in the current context that refer to positions
609 * preceding the beginning of the current compression are
610 * ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx
611 * ->dictSize describe the location and size of the preceding
612 * content, and matches are found by looking in the ctx
613 * ->dictCtx->hashTable.
614 */
615typedef enum {
616 noDict = 0,
617 withPrefix64k,
618 usingExtDict,
619 usingDictCtx
620} dict_directive;
621typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
624typedef enum { full = 0, partial = 1 } earlyEnd_directive;
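/* Usage sketch (hypothetical buffers) showing how the public streaming API
 * reaches these modes, based on the call sites later in this file:
 *
 *     LZ4_stream_t s;
 *     LZ4_resetStream(&s);
 *     LZ4_loadDict(&s, dictBuf, dictLen);           // dictionary elsewhere
 *     LZ4_compress_fast_continue(&s, in, out, n, cap, 1);  // usingExtDict
 *     LZ4_compress_fast_continue(&s, in + n, out2, n2, cap2, 1);
 *                                // withPrefix64k: block follows previous one
 *
 * usingDictCtx is selected when a dictionary stream has been attached with
 * LZ4_attach_dictionary(); single-shot compression uses noDict.
 */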
625
626/*-************************************
627 * Local Utils
628 **************************************/
629int LZ4_versionNumber(void)
630{
631 return LZ4_VERSION_NUMBER;
632}
634const char *LZ4_versionString(void)
635{
636 return LZ4_VERSION_STRING;
637}
639int LZ4_compressBound(int isize)
640{
641 return LZ4_COMPRESSBOUND(isize);
642}
644int LZ4_sizeofState(void)
645{
646 return LZ4_STREAMSIZE;
647}
648
649/*-******************************
650 * Compression functions
651 ********************************/
652static U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
653{
654 if (tableType == byU16)
655 return ((sequence * 2654435761U) >>
656 ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
657 else
658 return ((sequence * 2654435761U) >> ((MINMATCH * 8) - LZ4_HASHLOG));
659}
660
661static U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
662{
663 static const U64 prime5bytes = 889523592379ULL;
664 static const U64 prime8bytes = 11400714785074694791ULL;
665 const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG + 1 : LZ4_HASHLOG;
666
667 if (LZ4_isLittleEndian())
668 return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
669 else
670 return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
671}
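/* Illustration: both hashes are multiplicative (Fibonacci-style) hashing;
 * the constant 2654435761U is 2^32 divided by the golden ratio. A 4-byte
 * position hash:
 *
 *     U32 seq = LZ4_read32(p);
 *     U32 h   = (seq * 2654435761U) >> ((MINMATCH * 8) - LZ4_HASHLOG);
 *
 * The multiply mixes the low input bytes into the high bits, which the shift
 * then keeps as the table index.
 */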
673LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void *const p,
674 tableType_t const tableType)
675{
676 if ((sizeof(reg_t) == 8) && (tableType != byU16))
677 return LZ4_hash5(LZ4_read_ARCH(p), tableType);
678 return LZ4_hash4(LZ4_read32(p), tableType);
679}
680
681static void LZ4_putIndexOnHash(U32 idx, U32 h, void *tableBase,
682 tableType_t const tableType)
683{
684 switch (tableType) {
685 default: /* fallthrough */
686 case clearedTable: /* fallthrough */
687 case byPtr: { /* illegal! */
688 assert(0);
689 return;
690 }
691 case byU32: {
692 U32 *hashTable = (U32 *)tableBase;
693
694 hashTable[h] = idx;
695 return;
696 }
697 case byU16: {
698 U16 *hashTable = (U16 *)tableBase;
699
700 assert(idx < 65536);
701 hashTable[h] = (U16)idx;
702 return;
703 }
704 }
705}
706
707static void LZ4_putPositionOnHash(const BYTE *p, U32 h, void *tableBase,
708 tableType_t const tableType,
709 const BYTE *srcBase)
710{
711 switch (tableType) {
712 case clearedTable: { /* illegal! */
713 assert(0);
714 return;
715 }
716 case byPtr: {
717 const BYTE **hashTable = (const BYTE **)tableBase;
718
719 hashTable[h] = p;
720 return;
721 }
722 case byU32: {
723 U32 *hashTable = (U32 *)tableBase;
724
725 hashTable[h] = (U32)(p - srcBase);
726 return;
727 }
728 case byU16: {
729 U16 *hashTable = (U16 *)tableBase;
730
731 hashTable[h] = (U16)(p - srcBase);
732 return;
733 }
734 }
735}
737LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE *p, void *tableBase,
738 tableType_t tableType,
739 const BYTE *srcBase)
740{
741 U32 const h = LZ4_hashPosition(p, tableType);
742
743 LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
744}
745
746/* LZ4_getIndexOnHash() :
747 * Index of match position registered in hash table.
748 * hash position must be calculated by using base+index, or dictBase+index.
749 * Assumption 1 : only valid if tableType == byU32 or byU16.
750 * Assumption 2 : h is presumed valid (within limits of hash table)
751 */
752static U32 LZ4_getIndexOnHash(U32 h, const void *tableBase,
753 tableType_t tableType)
754{
756 if (tableType == byU32) {
757 const U32 *const hashTable = (const U32 *)tableBase;
758
759 assert(h < (1U << (LZ4_MEMORY_USAGE - 2)));
760 return hashTable[h];
761 }
762 if (tableType == byU16) {
763 const U16 *const hashTable = (const U16 *)tableBase;
764
765 assert(h < (1U << (LZ4_MEMORY_USAGE - 1)));
766 return hashTable[h];
767 }
768 assert(0);
769 return 0; /* forbidden case */
770}
771
772static const BYTE *LZ4_getPositionOnHash(U32 h, const void *tableBase,
773 tableType_t tableType,
774 const BYTE *srcBase)
775{
776 if (tableType == byPtr) {
777 const BYTE *const *hashTable = (const BYTE *const *)tableBase;
778
779 return hashTable[h];
780 }
781 if (tableType == byU32) {
782 const U32 *const hashTable = (const U32 *)tableBase;
783
784 return hashTable[h] + srcBase;
785 }
786 {
787 const U16 *const hashTable = (const U16 *)tableBase;
788
789 return hashTable[h] + srcBase;
790 } /* default, to ensure a return */
791}
792
793LZ4_FORCE_INLINE const BYTE *LZ4_getPosition(const BYTE *p,
794 const void *tableBase,
795 tableType_t tableType,
796 const BYTE *srcBase)
797{
798 U32 const h = LZ4_hashPosition(p, tableType);
799
800 return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
801}
802
803LZ4_FORCE_INLINE void LZ4_prepareTable(LZ4_stream_t_internal *const cctx,
804 const int inputSize,
805 const tableType_t tableType)
806{
807 /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
808 * therefore safe to use no matter what mode we're in. Otherwise, we figure
809 * out if it's safe to leave as is or whether it needs to be reset.
810 */
811 if (cctx->tableType != clearedTable) {
812 if (cctx->tableType != tableType ||
813 (tableType == byU16 &&
814 cctx->currentOffset + inputSize >= 0xFFFFU) ||
815 (tableType == byU32 && cctx->currentOffset > 1 GB) ||
816 tableType == byPtr || inputSize >= 4 KB) {
817 DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
818 MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
819 cctx->currentOffset = 0;
820 cctx->tableType = clearedTable;
821 }
822 else {
823 DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
824 }
825 }
826
827 /* Adding a gap, so all previous entries are > MAX_DISTANCE back, is faster
828 * than compressing without a gap. However, compressing with
829 * currentOffset == 0 is faster still, so we preserve that case.
830 */
831 if (cctx->currentOffset != 0 && tableType == byU32) {
832 DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
833 cctx->currentOffset += 64 KB;
834 }
835
836 /* Finally, clear history */
837 cctx->dictCtx = NULL;
838 cctx->dictionary = NULL;
839 cctx->dictSize = 0;
840}
841
842/** LZ4_compress_generic() :
843 inlined, to ensure branches are decided at compilation time */
844LZ4_FORCE_INLINE int LZ4_compress_generic(
845 LZ4_stream_t_internal *const cctx, const char *const source,
846 char *const dest, const int inputSize,
847 int *inputConsumed, /* only written when outputLimited == fillOutput */
848 const int maxOutputSize, const limitedOutput_directive outputLimited,
849 const tableType_t tableType, const dict_directive dictDirective,
850 const dictIssue_directive dictIssue, const U32 acceleration)
851{
852 const BYTE *ip = (const BYTE *)source;
853
854 U32 const startIndex = cctx->currentOffset;
855 const BYTE *base = (const BYTE *)source - startIndex;
856 const BYTE *lowLimit;
857
858 const LZ4_stream_t_internal *dictCtx =
859 (const LZ4_stream_t_internal *)cctx->dictCtx;
860 const BYTE *const dictionary =
861 dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
862 const U32 dictSize =
863 dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
864 const U32 dictDelta = (dictDirective == usingDictCtx)
865 ? startIndex - dictCtx->currentOffset
866 : 0; /* make indexes in dictCtx comparable with
867 index in current context */
868
869 int const maybe_extMem =
870 (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
871 U32 const prefixIdxLimit =
872 startIndex - dictSize; /* used when dictDirective == dictSmall */
873 const BYTE *const dictEnd = dictionary + dictSize;
874 const BYTE *anchor = (const BYTE *)source;
875 const BYTE *const iend = ip + inputSize;
876 const BYTE *const mflimitPlusOne = iend - MFLIMIT + 1;
877 const BYTE *const matchlimit = iend - LASTLITERALS;
878
879 /* the dictCtx currentOffset is indexed on the start of the dictionary,
880 * while a dictionary in the current context precedes the currentOffset */
881 const BYTE *dictBase = dictDirective == usingDictCtx
882 ? dictionary + dictSize - dictCtx->currentOffset
883 : dictionary + dictSize - startIndex;
884
885 BYTE *op = (BYTE *)dest;
886 BYTE *const olimit = op + maxOutputSize;
887
888 U32 offset = 0;
889 U32 forwardH;
890
891 DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, tableType=%u", inputSize,
892 tableType);
893 /* Init conditions */
894 if (outputLimited == fillOutput && maxOutputSize < 1)
895 return 0; /* Impossible to store anything */
896 if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE)
897 return 0; /* Unsupported inputSize, too large (or negative) */
898 if ((tableType == byU16) && (inputSize >= LZ4_64Klimit))
899 return 0; /* Size too large (not within 64K limit) */
900 if (tableType == byPtr)
901 assert(dictDirective ==
902 noDict); /* only supported use case with byPtr */
903 assert(acceleration >= 1);
904
905 lowLimit =
906 (const BYTE *)source - (dictDirective == withPrefix64k ? dictSize : 0);
907
908 /* Update context state */
909 if (dictDirective == usingDictCtx) {
910 /* Subsequent linked blocks can't use the dictionary. */
911 /* Instead, they use the block we just compressed. */
912 cctx->dictCtx = NULL;
913 cctx->dictSize = (U32)inputSize;
914 }
915 else {
916 cctx->dictSize += (U32)inputSize;
917 }
918 cctx->currentOffset += (U32)inputSize;
919 cctx->tableType = tableType;
920
921 if (inputSize < LZ4_minLength)
922 goto _last_literals; /* Input too small, no compression (all literals)
923 */
924
925 /* First Byte */
926 LZ4_putPosition(ip, cctx->hashTable, tableType, base);
927 ip++;
928 forwardH = LZ4_hashPosition(ip, tableType);
929
930 /* Main Loop */
931 for (;;) {
932 const BYTE *match;
933 BYTE *token;
934
935 /* Find a match */
936 if (tableType == byPtr) {
937 const BYTE *forwardIp = ip;
938 unsigned step = 1;
939 unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
940
941 do {
942 U32 const h = forwardH;
943
944 ip = forwardIp;
945 forwardIp += step;
946 step = (searchMatchNb++ >> LZ4_skipTrigger);
947
948 if (unlikely(forwardIp > mflimitPlusOne))
949 goto _last_literals;
950 assert(ip < mflimitPlusOne);
951
952 match =
953 LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
954 forwardH = LZ4_hashPosition(forwardIp, tableType);
955 LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
956
957 } while ((match + MAX_DISTANCE < ip) ||
958 (LZ4_read32(match) != LZ4_read32(ip)));
959 }
960 else { /* byU32, byU16 */
961
962 const BYTE *forwardIp = ip;
963 unsigned step = 1;
964 unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
965
966 do {
967 U32 const h = forwardH;
968 U32 const current = (U32)(forwardIp - base);
969 U32 matchIndex =
970 LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
971 assert(matchIndex <= current);
972 assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
973 ip = forwardIp;
974 forwardIp += step;
975 step = (searchMatchNb++ >> LZ4_skipTrigger);
976
977 if (unlikely(forwardIp > mflimitPlusOne))
978 goto _last_literals;
979 assert(ip < mflimitPlusOne);
980
981 if (dictDirective == usingDictCtx) {
982 if (matchIndex < startIndex) {
983 /* there was no match, try the dictionary */
984 assert(tableType == byU32);
985 matchIndex =
986 LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
987 match = dictBase + matchIndex;
988 matchIndex +=
989 dictDelta; /* make dictCtx index comparable with
990 current context */
991 lowLimit = dictionary;
992 }
993 else {
994 match = base + matchIndex;
995 lowLimit = (const BYTE *)source;
996 }
997 }
998 else if (dictDirective == usingExtDict) {
999 if (matchIndex < startIndex) {
1000 DEBUGLOG(7,
1001 "extDict candidate: matchIndex=%5u < "
1002 "startIndex=%5u",
1003 matchIndex, startIndex);
1004 assert(startIndex - matchIndex >= MINMATCH);
1005 match = dictBase + matchIndex;
1006 lowLimit = dictionary;
1007 }
1008 else {
1009 match = base + matchIndex;
1010 lowLimit = (const BYTE *)source;
1011 }
1012 }
1013 else { /* single continuous memory segment */
1014 match = base + matchIndex;
1015 }
1016 forwardH = LZ4_hashPosition(forwardIp, tableType);
1017 LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
1018
1019 if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit))
1020 continue; /* match outside of valid area */
1021 assert(matchIndex < current);
1022 if ((tableType != byU16) &&
1023 (matchIndex + MAX_DISTANCE < current))
1024 continue; /* too far */
1025 if (tableType == byU16)
1026 assert((current - matchIndex) <=
1027 MAX_DISTANCE); /* too_far presumed impossible with
1028 byU16 */
1029
1030 if (LZ4_read32(match) == LZ4_read32(ip)) {
1031 if (maybe_extMem)
1032 offset = current - matchIndex;
1033 break; /* match found */
1034 }
1035
1036 } while (1);
1037 }
1038
1039 /* Catch up */
1040 while (((ip > anchor) & (match > lowLimit)) &&
1041 (unlikely(ip[-1] == match[-1]))) {
1042 ip--;
1043 match--;
1044 }
1045
1046 /* Encode Literals */
1047 {
1048 unsigned const litLength = (unsigned)(ip - anchor);
1049
1050 token = op++;
1051 if ((outputLimited ==
1052 limitedOutput) && /* Check output buffer overflow */
1053 (unlikely(op + litLength + (2 + 1 + LASTLITERALS) +
1054 (litLength / 255) >
1055 olimit)))
1056 return 0;
1057 if ((outputLimited == fillOutput) &&
1058 (unlikely(
1059 op + (litLength + 240) / 255 /* litlen */ +
1060 litLength /* literals */ + 2 /* offset */ +
1061 1 /* token */ + MFLIMIT - MINMATCH
1062 /* min last literals so last match is <= end - MFLIMIT */
1063 > olimit))) {
1064 op--;
1065 goto _last_literals;
1066 }
1067 if (litLength >= RUN_MASK) {
1068 int len = (int)litLength - RUN_MASK;
1069
1070 *token = (RUN_MASK << ML_BITS);
1071 for (; len >= 255; len -= 255)
1072 *op++ = 255;
1073 *op++ = (BYTE)len;
1074 }
1075 else
1076 *token = (BYTE)(litLength << ML_BITS);
1077
1078 /* Copy Literals */
1079 LZ4_wildCopy(op, anchor, op + litLength);
1080 op += litLength;
1081 DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
1082 (int)(anchor - (const BYTE *)source), litLength,
1083 (int)(ip - (const BYTE *)source));
1084 }
1085
1086 _next_match:
1087 /* at this stage, the following variables must be correctly set :
1088 * - ip : at start of LZ operation
1089 * - match : at start of previous pattern occurrence; can be within
1090 * current prefix, or within extDict
1091 * - offset : if maybe_ext_memSegment==1 (constant)
1092 * - lowLimit : must be == dictionary to mean "match is within extDict";
1093 * must be == source otherwise
1094 * - token and *token : position to write 4-bits for match length;
1095 * higher 4-bits for literal length supposed already written
1096 */
1097
1098 if ((outputLimited == fillOutput) &&
1099 (op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH
1100 /* min last literals so last match is <= end - MFLIMIT */
1101 > olimit)) {
1102 /* the match was too close to the end, rewind and go to last
1103 * literals */
1104 op = token;
1105 goto _last_literals;
1106 }
1107
1108 /* Encode Offset */
1109 if (maybe_extMem) { /* static test */
1110 DEBUGLOG(6, " with offset=%u (ext if > %i)", offset,
1111 (int)(ip - (const BYTE *)source));
1112 assert(offset <= MAX_DISTANCE && offset > 0);
1113 LZ4_writeLE16(op, (U16)offset);
1114 op += 2;
1115 }
1116 else {
1117 DEBUGLOG(6, " with offset=%u (same segment)",
1118 (U32)(ip - match));
1119 assert(ip - match <= MAX_DISTANCE);
1120 LZ4_writeLE16(op, (U16)(ip - match));
1121 op += 2;
1122 }
1123
1124 /* Encode MatchLength */
1125 {
1126 unsigned matchCode;
1127
1128 if ((dictDirective == usingExtDict ||
1129 dictDirective == usingDictCtx) &&
1130 (lowLimit == dictionary) /* match within extDict */) {
1131 const BYTE *limit = ip + (dictEnd - match);
1132
1133 assert(dictEnd > match);
1134 if (limit > matchlimit)
1135 limit = matchlimit;
1136 matchCode = LZ4_count(ip + MINMATCH, match + MINMATCH, limit);
1137 ip += MINMATCH + matchCode;
1138 if (ip == limit) {
1139 unsigned const more =
1140 LZ4_count(limit, (const BYTE *)source, matchlimit);
1141 matchCode += more;
1142 ip += more;
1143 }
1144 DEBUGLOG(6,
1145 " with matchLength=%u starting in extDict",
1146 matchCode + MINMATCH);
1147 }
1148 else {
1149 matchCode =
1150 LZ4_count(ip + MINMATCH, match + MINMATCH, matchlimit);
1151 ip += MINMATCH + matchCode;
1152 DEBUGLOG(6, " with matchLength=%u",
1153 matchCode + MINMATCH);
1154 }
1155
1156 if ((outputLimited) && /* Check output buffer overflow */
1157 (unlikely(op + (1 + LASTLITERALS) + (matchCode >> 8) >
1158 olimit))) {
1159 if (outputLimited == limitedOutput)
1160 return 0;
1161 if (outputLimited == fillOutput) {
1162 /* Match description too long : reduce it */
1163 U32 newMatchCode =
1164 15 /* in token */ -
1165 1 /* to avoid needing a zero byte */ +
1166 ((U32)(olimit - op) - 2 - 1 - LASTLITERALS) * 255;
1167 ip -= matchCode - newMatchCode;
1168 matchCode = newMatchCode;
1169 }
1170 }
1171 if (matchCode >= ML_MASK) {
1172 *token += ML_MASK;
1173 matchCode -= ML_MASK;
1174 LZ4_write32(op, 0xFFFFFFFF);
1175 while (matchCode >= 4 * 255) {
1176 op += 4;
1177 LZ4_write32(op, 0xFFFFFFFF);
1178 matchCode -= 4 * 255;
1179 }
1180 op += matchCode / 255;
1181 *op++ = (BYTE)(matchCode % 255);
1182 }
1183 else
1184 *token += (BYTE)(matchCode);
1185 }
1186
1187 anchor = ip;
1188
1189 /* Test end of chunk */
1190 if (ip >= mflimitPlusOne)
1191 break;
1192
1193 /* Fill table */
1194 LZ4_putPosition(ip - 2, cctx->hashTable, tableType, base);
1195
1196 /* Test next position */
1197 if (tableType == byPtr) {
1198
1199 match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
1200 LZ4_putPosition(ip, cctx->hashTable, tableType, base);
1201 if ((match + MAX_DISTANCE >= ip) &&
1202 (LZ4_read32(match) == LZ4_read32(ip))) {
1203 token = op++;
1204 *token = 0;
1205 goto _next_match;
1206 }
1207 }
1208 else { /* byU32, byU16 */
1209
1210 U32 const h = LZ4_hashPosition(ip, tableType);
1211 U32 const current = (U32)(ip - base);
1212 U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
1213 assert(matchIndex < current);
1214 if (dictDirective == usingDictCtx) {
1215 if (matchIndex < startIndex) {
1216 /* there was no match, try the dictionary */
1217 matchIndex =
1218 LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
1219 match = dictBase + matchIndex;
1220 lowLimit =
1221 dictionary; /* required for match length counter */
1222 matchIndex += dictDelta;
1223 }
1224 else {
1225 match = base + matchIndex;
1226 lowLimit = (const BYTE *)
1227 source; /* required for match length counter */
1228 }
1229 }
1230 else if (dictDirective == usingExtDict) {
1231 if (matchIndex < startIndex) {
1232 match = dictBase + matchIndex;
1233 lowLimit =
1234 dictionary; /* required for match length counter */
1235 }
1236 else {
1237 match = base + matchIndex;
1238 lowLimit = (const BYTE *)
1239 source; /* required for match length counter */
1240 }
1241 }
1242 else { /* single memory segment */
1243 match = base + matchIndex;
1244 }
1245 LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
1246 assert(matchIndex < current);
1247 if (((dictIssue == dictSmall) ? (matchIndex >= prefixIdxLimit)
1248 : 1) &&
1249 ((tableType == byU16)
1250 ? 1
1251 : (matchIndex + MAX_DISTANCE >= current)) &&
1252 (LZ4_read32(match) == LZ4_read32(ip))) {
1253 token = op++;
1254 *token = 0;
1255 if (maybe_extMem)
1256 offset = current - matchIndex;
1257 DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
1258 (int)(anchor - (const BYTE *)source), 0,
1259 (int)(ip - (const BYTE *)source));
1260 goto _next_match;
1261 }
1262 }
1263
1264 /* Prepare next loop */
1265 forwardH = LZ4_hashPosition(++ip, tableType);
1266 }
1267
1268_last_literals:
1269 /* Encode Last Literals */
1270 {
1271 size_t lastRun = (size_t)(iend - anchor);
1272
1273 if ((outputLimited) && /* Check output buffer overflow */
1274 (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) > olimit)) {
1275 if (outputLimited == fillOutput) {
1276 /* adapt lastRun to fill 'dst' */
1277 lastRun = (olimit - op) - 1;
1278 lastRun -= (lastRun + 240) / 255;
1279 }
1280 if (outputLimited == limitedOutput)
1281 return 0;
1282 }
1283 if (lastRun >= RUN_MASK) {
1284 size_t accumulator = lastRun - RUN_MASK;
1285
1286 *op++ = RUN_MASK << ML_BITS;
1287 for (; accumulator >= 255; accumulator -= 255)
1288 *op++ = 255;
1289 *op++ = (BYTE)accumulator;
1290 }
1291 else {
1292 *op++ = (BYTE)(lastRun << ML_BITS);
1293 }
1294 memcpy(op, anchor, lastRun);
1295 ip = anchor + lastRun;
1296 op += lastRun;
1297 }
1298
1299 if (outputLimited == fillOutput) {
1300 *inputConsumed = (int)(((const char *)ip) - source);
1301 }
1302 DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes",
1303 inputSize, (int)(((char *)op) - dest));
1304 return (int)(((char *)op) - dest);
1305}
1307int LZ4_compress_fast_extState(void *state, const char *source, char *dest,
1308 int inputSize, int maxOutputSize,
1309 int acceleration)
1310{
1311 LZ4_stream_t_internal *ctx = &((LZ4_stream_t *)state)->internal_donotuse;
1312
1313 if (acceleration < 1)
1314 acceleration = ACCELERATION_DEFAULT;
1315 LZ4_resetStream((LZ4_stream_t *)state);
1316 if (maxOutputSize >= LZ4_compressBound(inputSize)) {
1317 if (inputSize < LZ4_64Klimit) {
1318 return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0,
1319 notLimited, byU16, noDict, noDictIssue,
1320 acceleration);
1321 }
1322 else {
1323 const tableType_t tableType =
1324 ((sizeof(void *) == 4) && ((uptrval)source > MAX_DISTANCE))
1325 ? byPtr
1326 : byU32;
1327 return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0,
1328 notLimited, tableType, noDict,
1329 noDictIssue, acceleration);
1330 }
1331 }
1332 else {
1333 if (inputSize < LZ4_64Klimit) {
1335 return LZ4_compress_generic(ctx, source, dest, inputSize, NULL,
1336 maxOutputSize, limitedOutput, byU16,
1337 noDict, noDictIssue, acceleration);
1338 }
1339 else {
1340 const tableType_t tableType =
1341 ((sizeof(void *) == 4) && ((uptrval)source > MAX_DISTANCE))
1342 ? byPtr
1343 : byU32;
1344 return LZ4_compress_generic(ctx, source, dest, inputSize, NULL,
1345 maxOutputSize, limitedOutput, tableType,
1346 noDict, noDictIssue, acceleration);
1347 }
1348 }
1349}
1350
1351/**
1352 * LZ4_compress_fast_extState_fastReset() :
1353 * A variant of LZ4_compress_fast_extState().
1354 *
1355 * Using this variant avoids an expensive initialization step. It is only safe
1356 * to call if the state buffer is known to be correctly initialized already
1357 * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
1358 * "correctly initialized").
1359 */
1360int LZ4_compress_fast_extState_fastReset(void *state, const char *src,
1361 char *dst, int srcSize,
1362 int dstCapacity, int acceleration)
1363{
1364 LZ4_stream_t_internal *ctx = &((LZ4_stream_t *)state)->internal_donotuse;
1365
1366 if (acceleration < 1)
1367 acceleration = ACCELERATION_DEFAULT;
1368
1369 if (dstCapacity >= LZ4_compressBound(srcSize)) {
1370 if (srcSize < LZ4_64Klimit) {
1371 const tableType_t tableType = byU16;
1372
1373 LZ4_prepareTable(ctx, srcSize, tableType);
1374 if (ctx->currentOffset) {
1375 return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0,
1376 notLimited, tableType, noDict,
1377 dictSmall, acceleration);
1378 }
1379 else {
1380 return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0,
1381 notLimited, tableType, noDict,
1382 noDictIssue, acceleration);
1383 }
1384 }
1385 else {
1386 const tableType_t tableType =
1387 ((sizeof(void *) == 4) && ((uptrval)src > MAX_DISTANCE))
1388 ? byPtr
1389 : byU32;
1390 LZ4_prepareTable(ctx, srcSize, tableType);
1391 return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0,
1392 notLimited, tableType, noDict,
1393 noDictIssue, acceleration);
1394 }
1395 }
1396 else {
1397 if (srcSize < LZ4_64Klimit) {
1398 const tableType_t tableType = byU16;
1399
1400 LZ4_prepareTable(ctx, srcSize, tableType);
1401 if (ctx->currentOffset) {
1402 return LZ4_compress_generic(
1403 ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput,
1404 tableType, noDict, dictSmall, acceleration);
1405 }
1406 else {
1407 return LZ4_compress_generic(
1408 ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput,
1409 tableType, noDict, noDictIssue, acceleration);
1410 }
1411 }
1412 else {
1413 const tableType_t tableType =
1414 ((sizeof(void *) == 4) && ((uptrval)src > MAX_DISTANCE))
1415 ? byPtr
1416 : byU32;
1417 LZ4_prepareTable(ctx, srcSize, tableType);
1418 return LZ4_compress_generic(ctx, src, dst, srcSize, NULL,
1419 dstCapacity, limitedOutput, tableType,
1420 noDict, noDictIssue, acceleration);
1421 }
1422 }
1423}
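/* Usage sketch (illustrative, names hypothetical): a state buffer needs the
 * full clearing only once, after which this cheaper entry point can be
 * reused:
 *
 *     void *state = malloc(LZ4_sizeofState());
 *     memset(state, 0, LZ4_sizeofState());      // "correctly initialized"
 *     for (i = 0; i < nBlocks; i++)             // then reuse it cheaply
 *         LZ4_compress_fast_extState_fastReset(state, src[i], dst[i],
 *                                              len[i], cap[i], 1);
 */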
1425int LZ4_compress_fast(const char *source, char *dest, int inputSize,
1426 int maxOutputSize, int acceleration)
1427{
1428 int result;
1429
1430#if (LZ4_HEAPMODE)
1431 LZ4_stream_t *ctxPtr =
1432 ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
1433
1434 if (ctxPtr == NULL)
1435 return 0;
1436#else
1437 LZ4_stream_t ctx;
1438 LZ4_stream_t *const ctxPtr = &ctx;
1439#endif
1440 result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize,
1441 maxOutputSize, acceleration);
1442
1443#if (LZ4_HEAPMODE)
1444 FREEMEM(ctxPtr);
1445#endif
1446 return result;
1447}
1449int LZ4_compress_default(const char *source, char *dest, int inputSize,
1450 int maxOutputSize)
1451{
1452 return LZ4_compress_fast(source, dest, inputSize, maxOutputSize, 1);
1453}
1454
1455/* hidden debug function */
1456/* strangely enough, gcc generates faster code when this function is
1457 * uncommented, even if unused */
1458int LZ4_compress_fast_force(const char *source, char *dest, int inputSize,
1459 int maxOutputSize, int acceleration)
1460{
1461 LZ4_stream_t ctx;
1462
1463 LZ4_resetStream(&ctx);
1464
1465 if (inputSize < LZ4_64Klimit)
1466 return LZ4_compress_generic(&ctx.internal_donotuse, source, dest,
1467 inputSize, NULL, maxOutputSize,
1468 limitedOutput, byU16, noDict, noDictIssue,
1469 acceleration);
1470 else
1471 return LZ4_compress_generic(
1472 &ctx.internal_donotuse, source, dest, inputSize, NULL,
1473 maxOutputSize, limitedOutput, sizeof(void *) == 8 ? byU32 : byPtr,
1474 noDict, noDictIssue, acceleration);
1475}
1476
1477/* Note!: This function leaves the stream in an unclean/broken state!
1478 * It is not safe to subsequently use the same state with a _fastReset() or
1479 * _continue() call without resetting it. */
1480static int LZ4_compress_destSize_extState(LZ4_stream_t *state, const char *src,
1481 char *dst, int *srcSizePtr,
1482 int targetDstSize)
1483{
1484 LZ4_resetStream(state);
1485
1486 if (targetDstSize >=
1487 LZ4_compressBound(
1488 *srcSizePtr)) { /* compression success is guaranteed */
1489 return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr,
1490 targetDstSize, 1);
1491 }
1492 else {
1493 if (*srcSizePtr < LZ4_64Klimit) {
1494 return LZ4_compress_generic(
1495 &state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr,
1496 targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1);
1497 }
1498 else {
1499 tableType_t const tableType =
1500 ((sizeof(void *) == 4) && ((uptrval)src > MAX_DISTANCE))
1501 ? byPtr
1502 : byU32;
1503 return LZ4_compress_generic(
1504 &state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr,
1505 targetDstSize, fillOutput, tableType, noDict, noDictIssue, 1);
1506 }
1507 }
1508}
1510int LZ4_compress_destSize(const char *src, char *dst, int *srcSizePtr,
1511 int targetDstSize)
1512{
1513#if (LZ4_HEAPMODE)
1514 LZ4_stream_t *ctx = (LZ4_stream_t *)ALLOC(
1515 sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
1516
1517 if (ctx == NULL)
1518 return 0;
1519#else
1520 LZ4_stream_t ctxBody;
1521 LZ4_stream_t *ctx = &ctxBody;
1522#endif
1523
1524 int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr,
1525 targetDstSize);
1526
1527#if (LZ4_HEAPMODE)
1528 FREEMEM(ctx);
1529#endif
1530 return result;
1531}
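/* Usage sketch (sizes hypothetical): *srcSizePtr is both an input (bytes
 * available) and an output (bytes actually consumed), which is what
 * distinguishes this entry point:
 *
 *     int srcLen = 100000;
 *     int written = LZ4_compress_destSize(src, dst, &srcLen, 4096);
 *     // written <= 4096; srcLen now holds how much of src was compressed
 */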
1532
1533/*-******************************
1534 * Streaming functions
1535 ********************************/
1536
1537LZ4_stream_t *LZ4_createStream(void)
1538{
1539 LZ4_stream_t *lz4s = (LZ4_stream_t *)ALLOC(sizeof(LZ4_stream_t));
1540
1541 LZ4_STATIC_ASSERT(
1542 LZ4_STREAMSIZE >=
1543 sizeof(LZ4_stream_t_internal)); /* A compilation error here means
1544 LZ4_STREAMSIZE is not large enough */
1545 DEBUGLOG(4, "LZ4_createStream %p", lz4s);
1546 if (lz4s == NULL)
1547 return NULL;
1548 LZ4_resetStream(lz4s);
1549 return lz4s;
1550}
1552void LZ4_resetStream(LZ4_stream_t *LZ4_stream)
1553{
1554 DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
1555 MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
1556}
1557
1558void LZ4_resetStream_fast(LZ4_stream_t *ctx)
1559{
1560 LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
1561}
1563int LZ4_freeStream(LZ4_stream_t *LZ4_stream)
1564{
1565 if (!LZ4_stream)
1566 return 0; /* support free on NULL */
1567 DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
1568 FREEMEM(LZ4_stream);
1569 return (0);
1570}
1572#define HASH_UNIT sizeof(reg_t)
1573int LZ4_loadDict(LZ4_stream_t *LZ4_dict, const char *dictionary, int dictSize)
1574{
1575 LZ4_stream_t_internal *dict = &LZ4_dict->internal_donotuse;
1576 const tableType_t tableType = byU32;
1577 const BYTE *p = (const BYTE *)dictionary;
1578 const BYTE *const dictEnd = p + dictSize;
1579 const BYTE *base;
1580
1581 DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary,
1582 LZ4_dict);
1583
1584 /* It's necessary to reset the context,
1585 * and not just continue it with prepareTable()
1586 * to avoid any risk of generating overflowing matchIndex
1587 * when compressing using this dictionary */
1588 LZ4_resetStream(LZ4_dict);
1589
1590 /* We always increment the offset by 64 KB, since, if the dict is longer,
1591 * we truncate it to the last 64k, and if it's shorter, we still want to
1592 * advance by a whole window length so we can provide the guarantee that
1593 * there are only valid offsets in the window, which allows an optimization
1594 * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
1595 * dictionary isn't a full 64k. */
1596
1597 if ((dictEnd - p) > 64 KB)
1598 p = dictEnd - 64 KB;
1599 base = dictEnd - 64 KB - dict->currentOffset;
1600 dict->dictionary = p;
1601 dict->dictSize = (U32)(dictEnd - p);
1602 dict->currentOffset += 64 KB;
1603 dict->tableType = tableType;
1604
1605 if (dictSize < (int)HASH_UNIT) {
1606 return 0;
1607 }
1608
1609 while (p <= dictEnd - HASH_UNIT) {
1610 LZ4_putPosition(p, dict->hashTable, tableType, base);
1611 p += 3;
1612 }
1613
1614 return dict->dictSize;
1615}
1617void LZ4_attach_dictionary(LZ4_stream_t *working_stream,
1618 const LZ4_stream_t *dictionary_stream)
1619{
1620 if (dictionary_stream != NULL) {
1621 /* If the current offset is zero, we will never look in the
1622 * external dictionary context, since there is no value a table
1623 * entry can take that indicates a miss. In that case, we need
1624 * to bump the offset to something non-zero.
1625 */
1626 if (working_stream->internal_donotuse.currentOffset == 0) {
1627 working_stream->internal_donotuse.currentOffset = 64 KB;
1628 }
1629 working_stream->internal_donotuse.dictCtx =
1630 &(dictionary_stream->internal_donotuse);
1631 }
1632 else {
1633 working_stream->internal_donotuse.dictCtx = NULL;
1634 }
1635}
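/* Usage sketch (hypothetical buffers): a dictionary context is prepared
 * once, then shared read-only by working streams:
 *
 *     LZ4_stream_t *dictStream = LZ4_createStream();
 *     LZ4_loadDict(dictStream, dictBuf, dictLen);    // fill its table once
 *     LZ4_attach_dictionary(workStream, dictStream); // usingDictCtx mode
 *     ...
 *     LZ4_attach_dictionary(workStream, NULL);       // detach again
 */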
1636
1637static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict, int nextSize)
1638{
1639 if (LZ4_dict->currentOffset + nextSize >
1640 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */
1641 /* rescale hash table */
1642 U32 const delta = LZ4_dict->currentOffset - 64 KB;
1643 const BYTE *dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
1644 int i;
1645
1646 DEBUGLOG(4, "LZ4_renormDictT");
1647 for (i = 0; i < LZ4_HASH_SIZE_U32; i++) {
1648 if (LZ4_dict->hashTable[i] < delta)
1649 LZ4_dict->hashTable[i] = 0;
1650 else
1651 LZ4_dict->hashTable[i] -= delta;
1652 }
1653 LZ4_dict->currentOffset = 64 KB;
1654 if (LZ4_dict->dictSize > 64 KB)
1655 LZ4_dict->dictSize = 64 KB;
1656 LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
1657 }
1658}
1660int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source,
1661 char *dest, int inputSize, int maxOutputSize,
1662 int acceleration)
1663{
1664 const tableType_t tableType = byU32;
1665 LZ4_stream_t_internal *streamPtr = &LZ4_stream->internal_donotuse;
1666 const BYTE *dictEnd = streamPtr->dictionary + streamPtr->dictSize;
1667
1668 DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i)", inputSize);
1669
1670 if (streamPtr->initCheck)
1671 return 0; /* Uninitialized structure detected */
1672 LZ4_renormDictT(streamPtr, inputSize); /* avoid index overflow */
1673 if (acceleration < 1)
1674 acceleration = ACCELERATION_DEFAULT;
1675
1676 /* invalidate tiny dictionaries */
1677 if ((streamPtr->dictSize - 1 < 4) /* intentional underflow */
1678 && (dictEnd != (const BYTE *)source)) {
1679 DEBUGLOG(
1680 5,
1681 "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small",
1682 streamPtr->dictSize, streamPtr->dictionary);
1683 streamPtr->dictSize = 0;
1684 streamPtr->dictionary = (const BYTE *)source;
1685 dictEnd = (const BYTE *)source;
1686 }
1687
1688 /* Check overlapping input/dictionary space */
1689 {
1690 const BYTE *sourceEnd = (const BYTE *)source + inputSize;
1691
1692 if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
1693 streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
1694 if (streamPtr->dictSize > 64 KB)
1695 streamPtr->dictSize = 64 KB;
1696 if (streamPtr->dictSize < 4)
1697 streamPtr->dictSize = 0;
1698 streamPtr->dictionary = dictEnd - streamPtr->dictSize;
1699 }
1700 }
1701
1702 /* prefix mode : source data follows dictionary */
1703 if (dictEnd == (const BYTE *)source) {
1704 if ((streamPtr->dictSize < 64 KB) &&
1705 (streamPtr->dictSize < streamPtr->currentOffset))
1706 return LZ4_compress_generic(streamPtr, source, dest, inputSize,
1707 NULL, maxOutputSize, limitedOutput,
1708 tableType, withPrefix64k, dictSmall,
1709 acceleration);
1710 else
1711 return LZ4_compress_generic(streamPtr, source, dest, inputSize,
1712 NULL, maxOutputSize, limitedOutput,
1713 tableType, withPrefix64k, noDictIssue,
1714 acceleration);
1715 }
1716
1717 /* external dictionary mode */
1718 {
1719 int result;
1720
1721 if (streamPtr->dictCtx) {
1722 /* We depend here on the fact that dictCtx'es (produced by
1723 * LZ4_loadDict) guarantee that their tables contain no references
1724 * to offsets between dictCtx->currentOffset - 64 KB and
1725 * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
1726 * to use noDictIssue even when the dict isn't a full 64 KB.
1727 */
1728 if (inputSize > 4 KB) {
1729 /* For compressing large blobs, it is faster to pay the setup
1730 * cost to copy the dictionary's tables into the active context,
1731 * so that the compression loop is only looking into one table.
1732 */
1733 memcpy(streamPtr, streamPtr->dictCtx, sizeof(LZ4_stream_t));
1734 result = LZ4_compress_generic(
1735 streamPtr, source, dest, inputSize, NULL, maxOutputSize,
1736 limitedOutput, tableType, usingExtDict, noDictIssue,
1737 acceleration);
1738 }
1739 else {
1740 result = LZ4_compress_generic(
1741 streamPtr, source, dest, inputSize, NULL, maxOutputSize,
1742 limitedOutput, tableType, usingDictCtx, noDictIssue,
1743 acceleration);
1744 }
1745 }
1746 else {
1747 if ((streamPtr->dictSize < 64 KB) &&
1748 (streamPtr->dictSize < streamPtr->currentOffset)) {
1749 result = LZ4_compress_generic(
1750 streamPtr, source, dest, inputSize, NULL, maxOutputSize,
1751 limitedOutput, tableType, usingExtDict, dictSmall,
1752 acceleration);
1753 }
1754 else {
1755 result = LZ4_compress_generic(
1756 streamPtr, source, dest, inputSize, NULL, maxOutputSize,
1757 limitedOutput, tableType, usingExtDict, noDictIssue,
1758 acceleration);
1759 }
1760 }
1761 streamPtr->dictionary = (const BYTE *)source;
1762 streamPtr->dictSize = (U32)inputSize;
1763 return result;
1764 }
1765}
1766
1767/* Hidden debug function, to force-test external dictionary mode */
1768int LZ4_compress_forceExtDict(LZ4_stream_t *LZ4_dict, const char *source,
1769 char *dest, int srcSize)
1770{
1771 LZ4_stream_t_internal *streamPtr = &LZ4_dict->internal_donotuse;
1772 int result;
1773
1774 LZ4_renormDictT(streamPtr, srcSize);
1775
1776 if ((streamPtr->dictSize < 64 KB) &&
1777 (streamPtr->dictSize < streamPtr->currentOffset)) {
1778 result =
1779 LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0,
1780 notLimited, byU32, usingExtDict, dictSmall, 1);
1781 }
1782 else {
1783 result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0,
1784 notLimited, byU32, usingExtDict,
1785 noDictIssue, 1);
1786 }
1787
1788 streamPtr->dictionary = (const BYTE *)source;
1789 streamPtr->dictSize = (U32)srcSize;
1790
1791 return result;
1792}
1793
1794/*! LZ4_saveDict() :
1795 * If previously compressed data block is not guaranteed to remain available at
1796 * its memory location, save it into a safer place (char* safeBuffer). Note :
1797 * you don't need to call LZ4_loadDict() afterwards, dictionary is immediately
1798 * usable, you can therefore call LZ4_compress_fast_continue(). Return : saved
1799 * dictionary size in bytes (necessarily <= dictSize), or 0 if error.
1800 */
1801int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize)
1802{
1803 LZ4_stream_t_internal *const dict = &LZ4_dict->internal_donotuse;
1804 const BYTE *const previousDictEnd = dict->dictionary + dict->dictSize;
1805
1806 if ((U32)dictSize > 64 KB)
1807 dictSize = 64 KB; /* useless to define a dictionary > 64 KB */
1808 if ((U32)dictSize > dict->dictSize)
1809 dictSize = dict->dictSize;
1810
1811 memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
1812
1813 dict->dictionary = (const BYTE *)safeBuffer;
1814 dict->dictSize = (U32)dictSize;
1815
1816 return dictSize;
1817}
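/* Usage sketch (names hypothetical): when the previously compressed block is
 * about to be overwritten, e.g. a reused I/O buffer, preserve the match
 * window first:
 *
 *     char window[64 * 1024];
 *     int saved = LZ4_saveDict(stream, window, (int)sizeof(window));
 *     // stream now references window; the old buffer may be reused, and
 *     // LZ4_compress_fast_continue() can be called right away
 */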
1818
1819/*-*****************************
1820 * Decompression functions
1821 *******************************/
1822/*! LZ4_decompress_generic() :
1823 * This generic decompression function covers all use cases.
1824 * It shall be instantiated several times, using different sets of directives.
1825 * Note that it is important for performance that this function really gets
1826 * inlined, in order to remove useless branches during compilation optimization.
1827 */
1828LZ4_FORCE_O2_GCC_PPC64LE LZ4_FORCE_INLINE int LZ4_decompress_generic(
1829 const char *const src, char *const dst, int srcSize,
1830 int outputSize, /* If endOnInput==endOnInputSize, this value is
1831 `dstCapacity` */
1832 int endOnInput, /* endOnOutputSize, endOnInputSize */
1833 int partialDecoding, /* full, partial */
1834 int targetOutputSize, /* only used if partialDecoding==partial */
1835 int dict, /* noDict, withPrefix64k, usingExtDict */
1836 const BYTE *const lowPrefix, /* always <= dst, == dst when no prefix */
1837 const BYTE *const dictStart, /* only if dict==usingExtDict */
1838 const size_t dictSize /* note : = 0 if noDict */
1839)
1840{
1841 const BYTE *ip = (const BYTE *)src;
1842 const BYTE *const iend = ip + srcSize;
1843
1844 BYTE *op = (BYTE *)dst;
1845 BYTE *const oend = op + outputSize;
1846 BYTE *cpy;
1847 BYTE *oexit = op + targetOutputSize;
1848
1849 const BYTE *const dictEnd = (const BYTE *)dictStart + dictSize;
1850 const unsigned inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
1851 const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
1852
1853 const int safeDecode = (endOnInput == endOnInputSize);
1854 const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));
1855
1856 /* Set up the "end" pointers for the shortcut. */
1857 const BYTE *const shortiend =
1858 iend - (endOnInput ? 14 : 8) /*maxLL */ - 2 /*offset */;
1859 const BYTE *const shortoend =
1860 oend - (endOnInput ? 14 : 8) /*maxLL */ - 18 /*maxML */;
1861
1862 DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i)", srcSize);
1863
1864 /* Special cases */
1865 if ((partialDecoding) && (oexit > oend - MFLIMIT))
1866 oexit =
1867 oend -
1868 MFLIMIT; /* targetOutputSize too high => just decode everything */
1869 if ((endOnInput) && (unlikely(outputSize == 0)))
1870 return ((srcSize == 1) && (*ip == 0)) ? 0
1871 : -1; /* Empty output buffer */
1872 if ((!endOnInput) && (unlikely(outputSize == 0)))
1873 return (*ip == 0 ? 1 : -1);
1874 if ((endOnInput) && unlikely(srcSize == 0))
1875 return -1;
1876
1877 /* Main Loop : decode sequences */
1878 while (1) {
1879 const BYTE *match;
1880 size_t offset;
1881
1882 unsigned const token = *ip++;
1883 size_t length = token >> ML_BITS; /* literal length */
1884
1885 assert(!endOnInput || ip <= iend); /* ip < iend before the increment */
1886
1887 /* A two-stage shortcut for the most common case:
1888 * 1) If the literal length is 0..14, and there is enough space,
1889 * enter the shortcut and copy 16 bytes on behalf of the literals
1890 * (in the fast mode, only 8 bytes can be safely copied this way).
1891 * 2) Further if the match length is 4..18, copy 18 bytes in a similar
1892 * manner; but we ensure that there's enough space in the output for
1893 * those 18 bytes earlier, upon entering the shortcut (in other words,
1894 * there is a combined check for both stages).
1895 */
1896 if ((endOnInput ? length != RUN_MASK : length <= 8)
1897 /* strictly "less than" on input, to re-enter the loop with at least
1898 one byte */
1899 && likely((endOnInput ? ip < shortiend : 1) & (op <= shortoend))) {
1900 /* Copy the literals */
1901 memcpy(op, ip, endOnInput ? 16 : 8);
1902 op += length;
1903 ip += length;
1904
1905 /* The second stage: prepare for match copying, decode full info.
1906 * If it doesn't work out, the info won't be wasted. */
1907 length = token & ML_MASK; /* match length */
1908 offset = LZ4_readLE16(ip);
1909 ip += 2;
1910 match = op - offset;
1911
1912 /* Do not deal with overlapping matches. */
1913 if ((length != ML_MASK) && (offset >= 8) &&
1914 (dict == withPrefix64k || match >= lowPrefix)) {
1915 /* Copy the match. */
1916 memcpy(op + 0, match + 0, 8);
1917 memcpy(op + 8, match + 8, 8);
1918 memcpy(op + 16, match + 16, 2);
1919 op += length + MINMATCH;
1920 /* Both stages worked, load the next token. */
1921 continue;
1922 }
1923
1924 /* The second stage didn't work out, but the info is ready.
1925 * Propel it right to the point of match copying. */
1926 goto _copy_match;
1927 }
1928
1929 /* decode literal length */
1930 if (length == RUN_MASK) {
1931 unsigned s;
1932
1933 if (unlikely(endOnInput ? ip >= iend - RUN_MASK : 0))
1934 goto _output_error; /* overflow detection */
1935 do {
1936 s = *ip++;
1937 length += s;
1938 } while (likely(endOnInput ? ip < iend - RUN_MASK : 1) &
1939 (s == 255));
1940 if ((safeDecode) &&
1941 unlikely((uptrval)(op) + length < (uptrval)(op)))
1942 goto _output_error; /* overflow detection */
1943 if ((safeDecode) &&
1944 unlikely((uptrval)(ip) + length < (uptrval)(ip)))
1945 goto _output_error; /* overflow detection */
1946 }
1947
1948 /* copy literals */
1949 cpy = op + length;
1950 if (((endOnInput) &&
1951 ((cpy > (partialDecoding ? oexit : oend - MFLIMIT)) ||
1952 (ip + length > iend - (2 + 1 + LASTLITERALS)))) ||
1953 ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
1954 if (partialDecoding) {
1955 if (cpy > oend)
1956 goto _output_error; /* Error : write attempt beyond end of
1957 output buffer */
1958 if ((endOnInput) && (ip + length > iend))
1959 goto _output_error; /* Error : read attempt beyond end of
1960 input buffer */
1961 }
1962 else {
1963 if ((!endOnInput) && (cpy != oend))
1964 goto _output_error; /* Error : block decoding must stop
1965 exactly there */
1966 if ((endOnInput) && ((ip + length != iend) || (cpy > oend)))
1967 goto _output_error; /* Error : input must be consumed */
1968 }
1969 memcpy(op, ip, length);
1970 ip += length;
1971 op += length;
1972 break; /* Necessarily EOF, due to parsing restrictions */
1973 }
1974 LZ4_wildCopy(op, ip, cpy);
1975 ip += length;
1976 op = cpy;
1977
1978 /* get offset */
1979 offset = LZ4_readLE16(ip);
1980 ip += 2;
1981 match = op - offset;
1982
1983 /* get matchlength */
1984 length = token & ML_MASK;
1985
1986 _copy_match:
1987 if ((checkOffset) && (unlikely(match + dictSize < lowPrefix)))
1988 goto _output_error; /* Error : offset outside buffers */
1989 LZ4_write32(
1990 op,
1991 (U32)
1992 offset); /* costs ~1%; silence an msan warning when offset==0 */
1993
1994 if (length == ML_MASK) {
1995 unsigned s;
1996
1997 do {
1998 s = *ip++;
1999 if ((endOnInput) && (ip > iend - LASTLITERALS))
2000 goto _output_error;
2001 length += s;
2002 } while (s == 255);
2003 if ((safeDecode) && unlikely((uptrval)(op) + length < (uptrval)op))
2004 goto _output_error; /* overflow detection */
2005 }
2006 length += MINMATCH;
2007
2008 /* check external dictionary */
2009 if ((dict == usingExtDict) && (match < lowPrefix)) {
2010 if (unlikely(op + length > oend - LASTLITERALS))
2011 goto _output_error; /* doesn't respect parsing restriction */
2012
2013 if (length <= (size_t)(lowPrefix - match)) {
2014 /* match can be copied as a single segment from external
2015 * dictionary */
2016 memmove(op, dictEnd - (lowPrefix - match), length);
2017 op += length;
2018 }
2019 else {
2020 /* match encompass external dictionary and current block */
2021 size_t const copySize = (size_t)(lowPrefix - match);
2022 size_t const restSize = length - copySize;
2023
2024 memcpy(op, dictEnd - copySize, copySize);
2025 op += copySize;
2026 if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
2027 BYTE *const endOfMatch = op + restSize;
2028 const BYTE *copyFrom = lowPrefix;
2029
2030 while (op < endOfMatch)
2031 *op++ = *copyFrom++;
2032 }
2033 else {
2034 memcpy(op, lowPrefix, restSize);
2035 op += restSize;
2036 }
2037 }
2038 continue;
2039 }
2040
2041 /* copy match within block */
2042 cpy = op + length;
2043 if (unlikely(offset < 8)) {
2044 op[0] = match[0];
2045 op[1] = match[1];
2046 op[2] = match[2];
2047 op[3] = match[3];
2048 match += inc32table[offset];
2049 memcpy(op + 4, match, 4);
2050 match -= dec64table[offset];
2051 }
2052 else {
2053 memcpy(op, match, 8);
2054 match += 8;
2055 }
2056 op += 8;
2057
2058 if (unlikely(cpy > oend - 12)) {
2059 BYTE *const oCopyLimit = oend - (WILDCOPYLENGTH - 1);
2060
2061 if (cpy > oend - LASTLITERALS)
2062 goto _output_error; /* Error : last LASTLITERALS bytes must be
2063 literals (uncompressed) */
2064 if (op < oCopyLimit) {
2065 LZ4_wildCopy(op, match, oCopyLimit);
2066 match += oCopyLimit - op;
2067 op = oCopyLimit;
2068 }
2069 while (op < cpy)
2070 *op++ = *match++;
2071 }
2072 else {
2073 memcpy(op, match, 8);
2074 if (length > 16)
2075 LZ4_wildCopy(op + 8, match + 8, cpy);
2076 }
2077 op = cpy; /* correction */
2078 }
2079
2080 /* end of decoding */
2081 if (endOnInput)
2082 return (int)(((char *)op) - dst); /* Nb of output bytes decoded */
2083 else
2084 return (int)(((const char *)ip) - src); /* Nb of input bytes read */
2085
2086 /* Overflow error detected */
2087_output_error:
2088 return (int)(-(((const char *)ip) - src)) - 1;
2089}
2090
2091/*===== Instantiate the API decoding functions. =====*/
2092
2093LZ4_FORCE_O2_GCC_PPC64LE
2094int LZ4_decompress_safe(const char *source, char *dest, int compressedSize,
2095 int maxDecompressedSize)
2096{
2097 return LZ4_decompress_generic(source, dest, compressedSize,
2098 maxDecompressedSize, endOnInputSize, full, 0,
2099 noDict, (BYTE *)dest, NULL, 0);
2100}
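/* Example (editorial, separate illustrative snippet): a minimal round trip
 * through LZ4_compress_default() and LZ4_decompress_safe(). The safe
 * decoder never writes past dest + maxDecompressedSize and returns a
 * negative value on malformed input. */
#include <string.h>
#include "lz4.h"

static int example_round_trip(void)
{
    const char msg[] = "LZ4 round-trip example, LZ4 round-trip example";
    char packed[LZ4_COMPRESSBOUND(sizeof msg)];
    char unpacked[sizeof msg];
    int csize, dsize;

    csize = LZ4_compress_default(msg, packed, (int)sizeof msg,
                                 (int)sizeof packed);
    if (csize <= 0)
        return -1; /* cannot happen with a compressBound-sized output */
    dsize = LZ4_decompress_safe(packed, unpacked, csize,
                                (int)sizeof unpacked);
    return (dsize == (int)sizeof msg &&
            memcmp(msg, unpacked, sizeof msg) == 0) ? 0 : -1;
}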
2101
2102LZ4_FORCE_O2_GCC_PPC64LE
2103int LZ4_decompress_safe_partial(const char *source, char *dest,
2104 int compressedSize, int targetOutputSize,
2105 int maxDecompressedSize)
2106{
2107 return LZ4_decompress_generic(
2108 source, dest, compressedSize, maxDecompressedSize, endOnInputSize,
2109 partial, targetOutputSize, noDict, (BYTE *)dest, NULL, 0);
2110}
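/* Example (editorial, separate illustrative snippet): partial decoding.
 * targetOutputSize lets the decoder stop early, which saves work when only
 * a header is needed. In this version of the code the destination should
 * still be sized for the full block (dstCapacity), since decoding may
 * overshoot the target by a few bytes. */
#include "lz4.h"

static int example_peek_prefix(const char *packed, int csize,
                               char *dst, int dstCapacity)
{
    /* Request only the first 16 bytes; the return value is the number of
     * bytes actually decoded, which may be slightly larger. */
    return LZ4_decompress_safe_partial(packed, dst, csize, 16, dstCapacity);
}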
2111
2112LZ4_FORCE_O2_GCC_PPC64LE
2113int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
2114{
2115 return LZ4_decompress_generic(source, dest, 0, originalSize,
2116 endOnOutputSize, full, 0, withPrefix64k,
2117 (BYTE *)dest - 64 KB, NULL, 0);
2118}
2119
2120/*===== Instantiate a few more decoding cases, used more than once. =====*/
2121
2122LZ4_FORCE_O2_GCC_PPC64LE /* Exported, an obsolete API function. */
2123 int
2124 LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
2125 int compressedSize, int maxOutputSize)
2126{
2127 return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
2128 endOnInputSize, full, 0, withPrefix64k,
2129 (BYTE *)dest - 64 KB, NULL, 0);
2130}
2131
2132/* Another obsolete API function, paired with the previous one. */
2133int LZ4_decompress_fast_withPrefix64k(const char *source, char *dest,
2134 int originalSize)
2135{
2136 /* LZ4_decompress_fast doesn't validate match offsets,
2137 * and thus serves well with any prefixed dictionary. */
2138 return LZ4_decompress_fast(source, dest, originalSize);
2139}
2140
2141LZ4_FORCE_O2_GCC_PPC64LE
2142static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest,
2143 int compressedSize,
2144 int maxOutputSize,
2145 size_t prefixSize)
2146{
2147 return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
2148 endOnInputSize, full, 0, noDict,
2149 (BYTE *)dest - prefixSize, NULL, 0);
2150}
2151
2152LZ4_FORCE_O2_GCC_PPC64LE /* Exported under another name, for tests/fullbench.c
2153 */
2154#define LZ4_decompress_safe_extDict LZ4_decompress_safe_forceExtDict
2155 int
2156 LZ4_decompress_safe_extDict(const char *source, char *dest,
2157 int compressedSize, int maxOutputSize,
2158 const void *dictStart, size_t dictSize)
2159{
2160 return LZ4_decompress_generic(
2161 source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0,
2162 usingExtDict, (BYTE *)dest, (const BYTE *)dictStart, dictSize);
2163}
2164
2165LZ4_FORCE_O2_GCC_PPC64LE
2166static int LZ4_decompress_fast_extDict(const char *source, char *dest,
2167 int originalSize, const void *dictStart,
2168 size_t dictSize)
2169{
2170 return LZ4_decompress_generic(
2171 source, dest, 0, originalSize, endOnOutputSize, full, 0, usingExtDict,
2172 (BYTE *)dest, (const BYTE *)dictStart, dictSize);
2173}
2174
2175/* The "double dictionary" mode, for use with e.g. ring buffers: the first part
2176 * of the dictionary is passed as prefix, and the second via dictStart +
2177 * dictSize. These routines are used only once, in LZ4_decompress_*_continue().
2178 */
2179LZ4_FORCE_INLINE
2180int LZ4_decompress_safe_doubleDict(const char *source, char *dest,
2181 int compressedSize, int maxOutputSize,
2182 size_t prefixSize, const void *dictStart,
2183 size_t dictSize)
2184{
2185 return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
2186 endOnInputSize, full, 0, usingExtDict,
2187 (BYTE *)dest - prefixSize,
2188 (const BYTE *)dictStart, dictSize);
2189}
2190
2191LZ4_FORCE_INLINE
2192int LZ4_decompress_fast_doubleDict(const char *source, char *dest,
2193 int originalSize, size_t prefixSize,
2194 const void *dictStart, size_t dictSize)
2195{
2196 return LZ4_decompress_generic(
2197 source, dest, 0, originalSize, endOnOutputSize, full, 0, usingExtDict,
2198 (BYTE *)dest - prefixSize, (const BYTE *)dictStart, dictSize);
2199}
2200
2201/*===== streaming decompression functions =====*/
2202
2203LZ4_streamDecode_t *LZ4_createStreamDecode(void)
2204{
2205 LZ4_streamDecode_t *lz4s =
2206 (LZ4_streamDecode_t *)ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));
2207 return lz4s;
2208}
2209
2210int LZ4_freeStreamDecode(LZ4_streamDecode_t *LZ4_stream)
2211{
2212 if (!LZ4_stream)
2213 return 0; /* support free on NULL */
2214 FREEMEM(LZ4_stream);
2215 return 0;
2216}
2217
2218/*! LZ4_setStreamDecode() :
2219 * Use this function to instruct where to find the dictionary.
2220 * This function is not necessary if previous data is still available where it
2221 * was decoded. Loading a size of 0 is allowed (same effect as no dictionary).
2222 * @return : 1 if OK, 0 if error
2223 */
2224int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
2225 const char *dictionary, int dictSize)
2226{
2227 LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
2228
2229 lz4sd->prefixSize = (size_t)dictSize;
2230 lz4sd->prefixEnd = (const BYTE *)dictionary + dictSize;
2231 lz4sd->externalDict = NULL;
2232 lz4sd->extDictSize = 0;
2233 return 1;
2234}
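/* Example (editorial, separate illustrative snippet): priming a decoding
 * stream with a dictionary before the first *_continue() call. The
 * dictionary bytes must stay valid while decoding proceeds. */
#include "lz4.h"

static LZ4_streamDecode_t *example_open_decoder(const char *dict, int dictSize)
{
    LZ4_streamDecode_t *sd = LZ4_createStreamDecode();

    if (sd && !LZ4_setStreamDecode(sd, dict, dictSize)) {
        LZ4_freeStreamDecode(sd);
        sd = NULL;
    }
    return sd;
}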
2235
2236/*! LZ4_decoderRingBufferSize() :
2237 * when setting a ring buffer for streaming decompression (optional scenario),
2238 * provides the minimum size of this ring buffer
2239 * to be compatible with any source respecting maxBlockSize condition.
2240 * Note : in a ring buffer scenario,
2241 * blocks are presumed decompressed next to each other.
2242 * When not enough space remains for next block (remainingSize < maxBlockSize),
2243 * decoding resumes from beginning of ring buffer.
2244 * @return : minimum ring buffer size,
2245 * or 0 if there is an error (invalid maxBlockSize).
2246 */
2247int LZ4_decoderRingBufferSize(int maxBlockSize)
2248{
2249 if (maxBlockSize < 0)
2250 return 0;
2251 if (maxBlockSize > LZ4_MAX_INPUT_SIZE)
2252 return 0;
2253 if (maxBlockSize < 16)
2254 maxBlockSize = 16;
2255 return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);
2256}
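/* Example (editorial, separate illustrative snippet): sizing a decoder
 * ring buffer for 4 KB blocks. The macro LZ4_DECODER_RING_BUFFER_SIZE()
 * yields the same value at compile time. */
#include <stdlib.h>
#include "lz4.h"

static char *example_alloc_ring(int maxBlockSize, int *ringSize)
{
    *ringSize = LZ4_decoderRingBufferSize(maxBlockSize); /* 0 if invalid */
    return *ringSize ? (char *)malloc((size_t)*ringSize) : NULL;
}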
2257
2258/*
2259 *_continue() :
2260 These decoding functions allow decompression of multiple blocks in "streaming"
2261 mode. Previously decoded blocks must still be available at the memory position
2262 where they were decoded. If it's not possible, save the relevant part of
2263 decoded data into a safe buffer, and indicate where it stands using
2264 LZ4_setStreamDecode()
2265 */
2266LZ4_FORCE_O2_GCC_PPC64LE
2267int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
2268 const char *source, char *dest,
2269 int compressedSize, int maxOutputSize)
2270{
2271 LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
2272 int result;
2273
2274 if (lz4sd->prefixSize == 0) {
2275 /* The first call, no dictionary yet. */
2276 assert(lz4sd->extDictSize == 0);
2277 result =
2278 LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
2279 if (result <= 0)
2280 return result;
2281 lz4sd->prefixSize = result;
2282 lz4sd->prefixEnd = (BYTE *)dest + result;
2283 }
2284 else if (lz4sd->prefixEnd == (BYTE *)dest) {
2285 /* They're rolling the current segment. */
2286 if (lz4sd->prefixSize >= 64 KB - 1)
2287 result = LZ4_decompress_safe_withPrefix64k(
2288 source, dest, compressedSize, maxOutputSize);
2289 else if (lz4sd->extDictSize == 0)
2290 result = LZ4_decompress_safe_withSmallPrefix(
2291 source, dest, compressedSize, maxOutputSize, lz4sd->prefixSize);
2292 else
2293 result = LZ4_decompress_safe_doubleDict(
2294 source, dest, compressedSize, maxOutputSize, lz4sd->prefixSize,
2295 lz4sd->externalDict, lz4sd->extDictSize);
2296 if (result <= 0)
2297 return result;
2298 lz4sd->prefixSize += result;
2299 lz4sd->prefixEnd += result;
2300 }
2301 else {
2302 /* The buffer wraps around, or they're switching to another buffer. */
2303 lz4sd->extDictSize = lz4sd->prefixSize;
2304 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
2305 result = LZ4_decompress_safe_extDict(source, dest, compressedSize,
2306 maxOutputSize, lz4sd->externalDict,
2307 lz4sd->extDictSize);
2308 if (result <= 0)
2309 return result;
2310 lz4sd->prefixSize = result;
2311 lz4sd->prefixEnd = (BYTE *)dest + result;
2312 }
2313
2314 return result;
2315}
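/* Example (editorial, separate illustrative snippet): the streaming decode
 * loop described above, with blocks decoded next to each other in a ring
 * buffer and a wrap to the start when less than a worst-case block fits.
 * example_read_block() is a hypothetical callback supplied by the caller. */
#include "lz4.h"

#define EXAMPLE_MAX_BLOCK 4096

static int example_decode_stream(int (*example_read_block)(char *, int),
                                 void (*consume)(const char *, int))
{
    static char ring[LZ4_DECODER_RING_BUFFER_SIZE(EXAMPLE_MAX_BLOCK)];
    char packed[LZ4_COMPRESSBOUND(EXAMPLE_MAX_BLOCK)];
    LZ4_streamDecode_t sd;
    int pos = 0, csize;

    LZ4_setStreamDecode(&sd, NULL, 0); /* start with no dictionary */
    while ((csize = example_read_block(packed, (int)sizeof packed)) > 0) {
        int dsize;

        if (pos + EXAMPLE_MAX_BLOCK > (int)sizeof ring)
            pos = 0; /* wrap: decoding resumes at the buffer start */
        dsize = LZ4_decompress_safe_continue(&sd, packed, ring + pos, csize,
                                             EXAMPLE_MAX_BLOCK);
        if (dsize < 0)
            return -1; /* corrupt block */
        consume(ring + pos, dsize);
        pos += dsize;
    }
    return 0;
}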
2316
2317LZ4_FORCE_O2_GCC_PPC64LE
2318int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
2319 const char *source, char *dest,
2320 int originalSize)
2321{
2322 LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
2323 int result;
2324
2325 if (lz4sd->prefixSize == 0) {
2326 assert(lz4sd->extDictSize == 0);
2327 result = LZ4_decompress_fast(source, dest, originalSize);
2328 if (result <= 0)
2329 return result;
2330 lz4sd->prefixSize = originalSize;
2331 lz4sd->prefixEnd = (BYTE *)dest + originalSize;
2332 }
2333 else if (lz4sd->prefixEnd == (BYTE *)dest) {
2334 if (lz4sd->prefixSize >= 64 KB - 1 || lz4sd->extDictSize == 0)
2335 result = LZ4_decompress_fast(source, dest, originalSize);
2336 else
2337 result = LZ4_decompress_fast_doubleDict(
2338 source, dest, originalSize, lz4sd->prefixSize,
2339 lz4sd->externalDict, lz4sd->extDictSize);
2340 if (result <= 0)
2341 return result;
2342 lz4sd->prefixSize += originalSize;
2343 lz4sd->prefixEnd += originalSize;
2344 }
2345 else {
2346 lz4sd->extDictSize = lz4sd->prefixSize;
2347 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
2348 result = LZ4_decompress_fast_extDict(source, dest, originalSize,
2349 lz4sd->externalDict,
2350 lz4sd->extDictSize);
2351 if (result <= 0)
2352 return result;
2353 lz4sd->prefixSize = originalSize;
2354 lz4sd->prefixEnd = (BYTE *)dest + originalSize;
2355 }
2356
2357 return result;
2358}
2359
2360/*
2361 Advanced decoding functions :
2362 *_usingDict() :
2363 These decoding functions work the same as "_continue" ones,
2364 the dictionary must be explicitly provided within parameters
2365 */
2366LZ4_FORCE_O2_GCC_PPC64LE
2367int LZ4_decompress_safe_usingDict(const char *source, char *dest,
2368 int compressedSize, int maxOutputSize,
2369 const char *dictStart, int dictSize)
2370{
2371 if (dictSize == 0)
2372 return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
2373 if (dictStart + dictSize == dest) {
2374 if (dictSize >= 64 KB - 1)
2375 return LZ4_decompress_safe_withPrefix64k(
2376 source, dest, compressedSize, maxOutputSize);
2377 return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize,
2378 maxOutputSize, dictSize);
2379 }
2380 return LZ4_decompress_safe_extDict(source, dest, compressedSize,
2381 maxOutputSize, dictStart, dictSize);
2382}
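/* Example (editorial, separate illustrative snippet): one-shot decoding
 * against an explicit dictionary. The same dictionary bytes must have been
 * given to the compressor, e.g. via LZ4_loadDict(). */
#include "lz4.h"

static int example_decode_with_dict(const char *packed, int csize,
                                    char *dst, int dstCapacity,
                                    const char *dict, int dictSize)
{
    /* Internally dispatches to the prefix or external-dictionary variant,
     * depending on whether dict ends exactly where dst begins. */
    return LZ4_decompress_safe_usingDict(packed, dst, csize, dstCapacity,
                                         dict, dictSize);
}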
2383LZ4_FORCE_O2_GCC_PPC64LE
2384int LZ4_decompress_fast_usingDict(const char *source, char *dest,
2385 int originalSize, const char *dictStart,
2386 int dictSize)
2387{
2388 if (dictSize == 0 || dictStart + dictSize == dest)
2389 return LZ4_decompress_fast(source, dest, originalSize);
2390 return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart,
2391 dictSize);
2392}
2393
2394/*=*************************************************
2395 * Obsolete Functions
2396 ***************************************************/
2397/* obsolete compression functions */
2398int LZ4_compress_limitedOutput(const char *source, char *dest, int inputSize,
2399 int maxOutputSize)
2400{
2401 return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
2402}
2403
2404int LZ4_compress(const char *source, char *dest, int inputSize)
2405{
2406 return LZ4_compress_default(source, dest, inputSize,
2407 LZ4_compressBound(inputSize));
2408}
2409
2410int LZ4_compress_limitedOutput_withState(void *state, const char *src,
2411 char *dst, int srcSize, int dstSize)
2412{
2413 return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);
2414}
2415
2416int LZ4_compress_withState(void *state, const char *src, char *dst, int srcSize)
2417{
2418 return LZ4_compress_fast_extState(state, src, dst, srcSize,
2419 LZ4_compressBound(srcSize), 1);
2420}
2421
2422int LZ4_compress_limitedOutput_continue(LZ4_stream_t *LZ4_stream,
2423 const char *src, char *dst, int srcSize,
2424 int dstCapacity)
2425{
2426 return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize,
2427 dstCapacity, 1);
2428}
2429
2430int LZ4_compress_continue(LZ4_stream_t *LZ4_stream, const char *source,
2431 char *dest, int inputSize)
2432{
2433 return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize,
2434 LZ4_compressBound(inputSize), 1);
2435}
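/* Example (editorial, separate illustrative snippet): the obsolete entry
 * points above are thin wrappers, so new code can switch to the modern API
 * without changing the produced output. */
#include "lz4.h"

static int example_migrate(const char *src, char *dst,
                           int srcSize, int dstCapacity)
{
    /* old: return LZ4_compress_limitedOutput(src, dst, srcSize, dstCapacity); */
    return LZ4_compress_default(src, dst, srcSize, dstCapacity);
}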
2436
2437/*
2438 These decompression functions are deprecated and should no longer be used.
2439 They are only provided here for compatibility with older user programs.
2440 - LZ4_uncompress is totally equivalent to LZ4_decompress_fast
2441 - LZ4_uncompress_unknownOutputSize is totally equivalent to
2442 LZ4_decompress_safe
2443*/
2444int LZ4_uncompress(const char *source, char *dest, int outputSize)
2445{
2446 return LZ4_decompress_fast(source, dest, outputSize);
2447}
2448
2449int LZ4_uncompress_unknownOutputSize(const char *source, char *dest, int isize,
2450 int maxOutputSize)
2451{
2452 return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
2453}
2454
2455/* Obsolete Streaming functions */
2456
2457int LZ4_sizeofStreamState(void)
2458{
2459 return LZ4_STREAMSIZE;
2460}
2461
2462int LZ4_resetStreamState(void *state, char *inputBuffer)
2463{
2464 (void)inputBuffer;
2465 LZ4_resetStream((LZ4_stream_t *)state);
2466 return 0;
2467}
2468
2469void *LZ4_create(char *inputBuffer)
2470{
2471 (void)inputBuffer;
2472 return LZ4_createStream();
2473}
2474
2475char *LZ4_slideInputBuffer(void *state)
2476{
2477 /* avoid const char * -> char * conversion warning */
2478 return (char *)(uptrval)((LZ4_stream_t *)state)
2479 ->internal_donotuse.dictionary;
2480}
2481
2482#endif /* LZ4_COMMONDEFS_ONLY */