• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2    LZ4 - Fast LZ compression algorithm
3    Copyright (C) 2011-2016, Yann Collet.
4 
5    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6 
7    Redistribution and use in source and binary forms, with or without
8    modification, are permitted provided that the following conditions are
9    met:
10 
11        * Redistributions of source code must retain the above copyright
12    notice, this list of conditions and the following disclaimer.
13        * Redistributions in binary form must reproduce the above
14    copyright notice, this list of conditions and the following disclaimer
15    in the documentation and/or other materials provided with the
16    distribution.
17 
18    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 
30    You can contact the author at :
31     - LZ4 homepage : http://www.lz4.org
32     - LZ4 source repository : https://github.com/lz4/lz4
33 */
34 
35 
36 /*-************************************
37 *  Tuning parameters
38 **************************************/
39 /*
40  * HEAPMODE :
41  * Select how default compression functions will allocate memory for their hash table,
42  * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
43  */
44 #ifndef HEAPMODE
45 #  define HEAPMODE 0
46 #endif
47 
48 /*
49  * ACCELERATION_DEFAULT :
50  * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
51  */
52 #define ACCELERATION_DEFAULT 1
53 
54 
55 /*-************************************
56 *  CPU Feature Detection
57 **************************************/
58 /* LZ4_FORCE_MEMORY_ACCESS
59  * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
60  * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
61  * The below switch allow to select different access method for improved performance.
62  * Method 0 (default) : use `memcpy()`. Safe and portable.
63  * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
64  *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
65  * Method 2 : direct access. This method is portable but violate C standard.
66  *            It can generate buggy code on targets which generate assembly depending on alignment.
67  *            But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
68  * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
69  * Prefer these methods in priority order (0 > 1 > 2)
70  */
71 #ifndef LZ4_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
72 #  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
73 #    define LZ4_FORCE_MEMORY_ACCESS 2
74 #  elif defined(__INTEL_COMPILER) || \
75   (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
76 #    define LZ4_FORCE_MEMORY_ACCESS 1
77 #  endif
78 #endif
79 
80 /*
81  * LZ4_FORCE_SW_BITCOUNT
82  * Define this parameter if your target system or compiler does not support hardware bit count
83  */
84 #if defined(_MSC_VER) && defined(_WIN32_WCE)   /* Visual Studio for Windows CE does not support Hardware bit count */
85 #  define LZ4_FORCE_SW_BITCOUNT
86 #endif
87 
88 
89 /*-************************************
90 *  Dependency
91 **************************************/
92 #include "lz4.h"
93 /* see also "memory routines" below */
94 
95 
96 /*-************************************
97 *  Compiler Options
98 **************************************/
99 #ifdef _MSC_VER    /* Visual Studio */
100 #  define FORCE_INLINE static __forceinline
101 #  include <intrin.h>
102 #  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
103 #  pragma warning(disable : 4293)        /* disable: C4293: too large shift (32-bits) */
104 #else
105 #  if defined(__GNUC__) || defined(__clang__)
106 #    define FORCE_INLINE static inline __attribute__((always_inline))
107 #  elif defined(__cplusplus) || (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
108 #    define FORCE_INLINE static inline
109 #  else
110 #    define FORCE_INLINE static
111 #  endif
112 #endif  /* _MSC_VER */
113 
114 #if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
115 #  define expect(expr,value)    (__builtin_expect ((expr),(value)) )
116 #else
117 #  define expect(expr,value)    (expr)
118 #endif
119 
120 #define likely(expr)     expect((expr) != 0, 1)
121 #define unlikely(expr)   expect((expr) != 0, 0)
122 
123 
124 /*-************************************
125 *  Memory routines
126 **************************************/
127 #include <stdlib.h>   /* malloc, calloc, free */
128 #define ALLOCATOR(n,s) calloc(n,s)
129 #define FREEMEM        free
130 #include <string.h>   /* memset, memcpy */
131 #define MEM_INIT       memset
132 
133 
134 /*-************************************
135 *  Basic Types
136 **************************************/
/* Use C99 <stdint.h> fixed-width types when available; otherwise fall back
 * to typedefs whose sizes match on all supported targets. */
#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# include <stdint.h>
  typedef  uint8_t BYTE;
  typedef uint16_t U16;
  typedef uint32_t U32;
  typedef  int32_t S32;
  typedef uint64_t U64;
  typedef uintptr_t uptrval;
#else
  typedef unsigned char       BYTE;
  typedef unsigned short      U16;
  typedef unsigned int        U32;
  typedef   signed int        S32;
  typedef unsigned long long  U64;
  typedef size_t              uptrval;   /* generally true, except OpenVMS-64 */
#endif

/* reg_t : natural register width, used for word-at-a-time comparisons (LZ4_read_ARCH) */
#if defined(__x86_64__)
  typedef U64    reg_t;   /* 64-bits in x32 mode */
#else
  typedef size_t reg_t;   /* 32-bits in x32 mode */
#endif
159 
160 /*-************************************
161 *  Reading and writing into memory
162 **************************************/
LZ4_isLittleEndian(void)163 static unsigned LZ4_isLittleEndian(void)
164 {
165     const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
166     return one.c[0];
167 }
168 
169 
#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
/* lie to the compiler about data alignment; use with caution */
/* Method 2 : direct casts. Only enabled on targets where unaligned access
 * is known safe (see LZ4_FORCE_MEMORY_ACCESS selection above); elsewhere
 * this would be undefined behavior. */

static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }

static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }

#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)

/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) unalign;

static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; }

static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }

#else  /* safe and portable access through memcpy() */

/* Method 0 (default) : fixed-size memcpy. Safe on any alignment and any
 * target; modern compilers usually lower these to plain load/store. */
static U16 LZ4_read16(const void* memPtr)
{
    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

static U32 LZ4_read32(const void* memPtr)
{
    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

static reg_t LZ4_read_ARCH(const void* memPtr)
{
    reg_t val; memcpy(&val, memPtr, sizeof(val)); return val;
}

static void LZ4_write16(void* memPtr, U16 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

static void LZ4_write32(void* memPtr, U32 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

#endif /* LZ4_FORCE_MEMORY_ACCESS */
221 
222 
LZ4_readLE16(const void * memPtr)223 static U16 LZ4_readLE16(const void* memPtr)
224 {
225     if (LZ4_isLittleEndian()) {
226         return LZ4_read16(memPtr);
227     } else {
228         const BYTE* p = (const BYTE*)memPtr;
229         return (U16)((U16)p[0] + (p[1]<<8));
230     }
231 }
232 
/* Store a 16-bit value in little-endian byte order,
 * independently of the host's endianness. */
static void LZ4_writeLE16(void* memPtr, U16 value)
{
    if (!LZ4_isLittleEndian()) {
        BYTE* bytes = (BYTE*)memPtr;
        bytes[0] = (BYTE) value;
        bytes[1] = (BYTE)(value>>8);
        return;
    }
    LZ4_write16(memPtr, value);
}
243 
/* Copy exactly 8 bytes from src to dst (regions must not overlap). */
static void LZ4_copy8(void* dst, const void* src)
{
    memcpy(dst,src,8);
}
248 
249 /* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
LZ4_wildCopy(void * dstPtr,const void * srcPtr,void * dstEnd)250 static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
251 {
252     BYTE* d = (BYTE*)dstPtr;
253     const BYTE* s = (const BYTE*)srcPtr;
254     BYTE* const e = (BYTE*)dstEnd;
255 
256     do { LZ4_copy8(d,s); d+=8; s+=8; } while (d<e);
257 }
258 
259 
/*-************************************
*  Common Constants
**************************************/
#define MINMATCH 4            /* minimum match length, per the LZ4 block format */

#define WILDCOPYLENGTH 8      /* LZ4_wildCopy stride; copies may overrun by up to this many bytes */
#define LASTLITERALS 5        /* the last 5 bytes of a block are always encoded as literals */
#define MFLIMIT (WILDCOPYLENGTH+MINMATCH)   /* matches are not searched within MFLIMIT bytes of the end */
static const int LZ4_minLength = (MFLIMIT+1);   /* inputs shorter than this are emitted as pure literals */

#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)

#define MAXD_LOG 16
#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)   /* maximum match offset : 65535 (16-bit offset field) */

/* Token layout : high 4 bits = literal run length, low 4 bits = match length */
#define ML_BITS  4
#define ML_MASK  ((1U<<ML_BITS)-1)
#define RUN_BITS (8-ML_BITS)
#define RUN_MASK ((1U<<RUN_BITS)-1)


/*-************************************
*  Common Utils
**************************************/
#define LZ4_STATIC_ASSERT(c)    { enum { LZ4_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
287 
288 
289 /*-************************************
290 *  Common functions
291 **************************************/
/* LZ4_NbCommonBytes() :
 * val is the XOR of two differing words (val != 0, caller guarantees it).
 * Returns how many bytes, starting from the lower memory address, the two
 * words had in common : trailing zero bytes on little-endian targets,
 * leading zero bytes on big-endian ones.
 * Uses hardware bit-scan intrinsics when available; otherwise falls back to
 * a De Bruijn multiply (LE) or a shift cascade (BE). */
static unsigned LZ4_NbCommonBytes (register reg_t val)
{
    if (LZ4_isLittleEndian()) {
        if (sizeof(val)==8) {
#       if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r = 0;
            _BitScanForward64( &r, (U64)val );
            return (int)(r>>3);
#       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (__builtin_ctzll((U64)val) >> 3);
#       else
            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
#       endif
        } else /* 32 bits */ {
#       if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r;
            _BitScanForward( &r, (U32)val );
            return (int)(r>>3);
#       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (__builtin_ctz((U32)val) >> 3);
#       else
            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
#       endif
        }
    } else   /* Big Endian CPU */ {
        if (sizeof(val)==8) {
#       if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r = 0;
            _BitScanReverse64( &r, val );
            return (unsigned)(r>>3);
#       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (__builtin_clzll((U64)val) >> 3);
#       else
            unsigned r;
            if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
            r += (!val);
            return r;
#       endif
        } else /* 32 bits */ {
#       if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r = 0;
            _BitScanReverse( &r, (unsigned long)val );
            return (unsigned)(r>>3);
#       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (__builtin_clz((U32)val) >> 3);
#       else
            unsigned r;
            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
            r += (!val);
            return r;
#       endif
        }
    }
}
349 
#define STEPSIZE sizeof(reg_t)

/* LZ4_count() :
 * Returns the length (in bytes) of the common prefix of pIn and pMatch,
 * reading pIn no further than pInLimit.
 * Compares one register-width word at a time, locating the first differing
 * byte via the XOR pattern, then narrows to 4/2/1-byte steps near the limit. */
static unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
{
    const BYTE* const pStart = pIn;

    /* word-at-a-time main loop; the -(STEPSIZE-1) margin keeps reads in bounds */
    while (likely(pIn<pInLimit-(STEPSIZE-1))) {
        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
        if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
        pIn += LZ4_NbCommonBytes(diff);
        return (unsigned)(pIn - pStart);
    }

    /* tail : finish with progressively narrower comparisons */
    if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
    if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
    return (unsigned)(pIn - pStart);
}
367 
368 
369 #ifndef LZ4_COMMONDEFS_ONLY
370 /*-************************************
371 *  Local Constants
372 **************************************/
static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));   /* inputs below this can use 16-bit hash-table offsets */
static const U32 LZ4_skipTrigger = 6;  /* Increase this value ==> compression run slower on incompressible data */


/*-************************************
*  Local Structures and types
**************************************/
/* Compile-time directives for LZ4_compress_generic(); each call site passes
 * constants so the inlined code keeps only the branches it needs. */
typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;   /* bound-check output writes or not */
typedef enum { byPtr, byU32, byU16 } tableType_t;                             /* hash-table cell representation */

typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;      /* dictionary mode */
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;              /* whether dict is too small to trust refs */

typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;   /* decompression stop condition */
typedef enum { full = 0, partial = 1 } earlyEnd_directive;                         /* allow partial decoding */
388 
389 
390 /*-************************************
391 *  Local Utils
392 **************************************/
LZ4_versionNumber(void)393 int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
LZ4_versionString(void)394 const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
LZ4_compressBound(int isize)395 int LZ4_compressBound(int isize)  { return LZ4_COMPRESSBOUND(isize); }
LZ4_sizeofState()396 int LZ4_sizeofState() { return LZ4_STREAMSIZE; }
397 
398 
399 /*-******************************
400 *  Compression functions
401 ********************************/
/* Multiplicative hash of a 4-byte sequence (Knuth constant 2654435761).
 * byU16 tables are twice as large, so they keep one extra hash bit. */
static U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
{
    U32 const hashLog = (tableType == byU16) ? (LZ4_HASHLOG+1) : LZ4_HASHLOG;
    return (sequence * 2654435761U) >> ((MINMATCH*8) - hashLog);
}
409 
LZ4_hash5(U64 sequence,tableType_t const tableType)410 static U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
411 {
412     static const U64 prime5bytes = 889523592379ULL;
413     static const U64 prime8bytes = 11400714785074694791ULL;
414     const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
415     if (LZ4_isLittleEndian())
416         return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
417     else
418         return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
419 }
420 
LZ4_hashPosition(const void * const p,tableType_t const tableType)421 FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
422 {
423     if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
424     return LZ4_hash4(LZ4_read32(p), tableType);
425 }
426 
/* Record position p in slot h of the hash table, using the storage
 * representation selected by tableType (pointer, 32-bit or 16-bit offset). */
static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t const tableType, const BYTE* srcBase)
{
    if (tableType == byPtr) {
        const BYTE** hashTable = (const BYTE**)tableBase;
        hashTable[h] = p;
    } else if (tableType == byU32) {
        U32* hashTable = (U32*)tableBase;
        hashTable[h] = (U32)(p - srcBase);
    } else {   /* byU16 */
        U16* hashTable = (U16*)tableBase;
        hashTable[h] = (U16)(p - srcBase);
    }
}
436 
LZ4_putPosition(const BYTE * p,void * tableBase,tableType_t tableType,const BYTE * srcBase)437 FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
438 {
439     U32 const h = LZ4_hashPosition(p, tableType);
440     LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
441 }
442 
/* Fetch the position stored in slot h, converting offset-based
 * representations back into absolute pointers. */
static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    switch (tableType) {
    case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; return hashTable[h]; }
    case byU32: { const U32* const hashTable = (const U32*)tableBase; return hashTable[h] + srcBase; }
    default:    { const U16* const hashTable = (const U16*)tableBase; return hashTable[h] + srcBase; }   /* byU16; ensures a return */
    }
}
449 
LZ4_getPosition(const BYTE * p,void * tableBase,tableType_t tableType,const BYTE * srcBase)450 FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
451 {
452     U32 const h = LZ4_hashPosition(p, tableType);
453     return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
454 }
455 
456 
/** LZ4_compress_generic() :
    inlined, to ensure branches are decided at compilation time.
    outputLimited / tableType / dict / dictIssue are constants at every call
    site, so the optimizer prunes the branches that do not apply.
    @return : number of bytes written into dest,
              or 0 if compression fails (input too large, or output would
              exceed maxOutputSize when outputLimited is set). */
FORCE_INLINE int LZ4_compress_generic(
                 LZ4_stream_t_internal* const cctx,
                 const char* const source,
                 char* const dest,
                 const int inputSize,
                 const int maxOutputSize,
                 const limitedOutput_directive outputLimited,
                 const tableType_t tableType,
                 const dict_directive dict,
                 const dictIssue_directive dictIssue,
                 const U32 acceleration)
{
    const BYTE* ip = (const BYTE*) source;
    const BYTE* base;                                   /* reference point for hash-table offsets */
    const BYTE* lowLimit;                               /* lowest address a match may extend back to */
    const BYTE* const lowRefLimit = ip - cctx->dictSize;
    const BYTE* const dictionary = cctx->dictionary;
    const BYTE* const dictEnd = dictionary + cctx->dictSize;
    const ptrdiff_t dictDelta = dictEnd - (const BYTE*)source;   /* translates ext-dict refs into dict space */
    const BYTE* anchor = (const BYTE*) source;          /* start of the pending literal run */
    const BYTE* const iend = ip + inputSize;
    const BYTE* const mflimit = iend - MFLIMIT;         /* no match search beyond this point */
    const BYTE* const matchlimit = iend - LASTLITERALS; /* matches may not consume the last literals */

    BYTE* op = (BYTE*) dest;
    BYTE* const olimit = op + maxOutputSize;

    U32 forwardH;

    /* Init conditions */
    if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0;   /* Unsupported inputSize, too large (or negative) */
    switch(dict)
    {
    case noDict:
    default:
        base = (const BYTE*)source;
        lowLimit = (const BYTE*)source;
        break;
    case withPrefix64k:
        base = (const BYTE*)source - cctx->currentOffset;
        lowLimit = (const BYTE*)source - cctx->dictSize;
        break;
    case usingExtDict:
        base = (const BYTE*)source - cctx->currentOffset;
        lowLimit = (const BYTE*)source;
        break;
    }
    if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */
    if (inputSize<LZ4_minLength) goto _last_literals;                  /* Input too small, no compression (all literals) */

    /* First Byte */
    LZ4_putPosition(ip, cctx->hashTable, tableType, base);
    ip++; forwardH = LZ4_hashPosition(ip, tableType);

    /* Main Loop */
    for ( ; ; ) {
        ptrdiff_t refDelta = 0;
        const BYTE* match;
        BYTE* token;

        /* Find a match */
        {   const BYTE* forwardIp = ip;
            unsigned step = 1;
            unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
            do {
                U32 const h = forwardH;
                ip = forwardIp;
                forwardIp += step;
                step = (searchMatchNb++ >> LZ4_skipTrigger);   /* step grows as failed attempts accumulate */

                if (unlikely(forwardIp > mflimit)) goto _last_literals;

                match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
                if (dict==usingExtDict) {
                    if (match < (const BYTE*)source) {
                        /* candidate lives in the external dictionary */
                        refDelta = dictDelta;
                        lowLimit = dictionary;
                    } else {
                        refDelta = 0;
                        lowLimit = (const BYTE*)source;
                }   }
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);

            } while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0)
                || ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
                || (LZ4_read32(match+refDelta) != LZ4_read32(ip)) );
        }

        /* Catch up : extend the match backwards over preceding equal bytes */
        while (((ip>anchor) & (match+refDelta > lowLimit)) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; }

        /* Encode Literals */
        {   unsigned const litLength = (unsigned)(ip - anchor);
            token = op++;
            if ((outputLimited) &&  /* Check output buffer overflow */
                (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
                return 0;
            if (litLength >= RUN_MASK) {
                /* run length >= 15 : emit 15 in the token, remainder as 255-bytes */
                int len = (int)litLength-RUN_MASK;
                *token = (RUN_MASK<<ML_BITS);
                for(; len >= 255 ; len-=255) *op++ = 255;
                *op++ = (BYTE)len;
            }
            else *token = (BYTE)(litLength<<ML_BITS);

            /* Copy Literals */
            LZ4_wildCopy(op, anchor, op+litLength);
            op+=litLength;
        }

_next_match:
        /* Encode Offset */
        LZ4_writeLE16(op, (U16)(ip-match)); op+=2;

        /* Encode MatchLength */
        {   unsigned matchCode;

            if ((dict==usingExtDict) && (lowLimit==dictionary)) {
                /* match starts in the external dictionary : count within the
                 * dictionary first, then continue in the current block */
                const BYTE* limit;
                match += refDelta;
                limit = ip + (dictEnd-match);
                if (limit > matchlimit) limit = matchlimit;
                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
                ip += MINMATCH + matchCode;
                if (ip==limit) {
                    unsigned const more = LZ4_count(ip, (const BYTE*)source, matchlimit);
                    matchCode += more;
                    ip += more;
                }
            } else {
                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
                ip += MINMATCH + matchCode;
            }

            if ( outputLimited &&    /* Check output buffer overflow */
                (unlikely(op + (1 + LASTLITERALS) + (matchCode>>8) > olimit)) )
                return 0;
            if (matchCode >= ML_MASK) {
                /* match length >= 15 : emit 15 in the token, remainder as 255-bytes */
                *token += ML_MASK;
                matchCode -= ML_MASK;
                LZ4_write32(op, 0xFFFFFFFF);
                while (matchCode >= 4*255) op+=4, LZ4_write32(op, 0xFFFFFFFF), matchCode -= 4*255;
                op += matchCode / 255;
                *op++ = (BYTE)(matchCode % 255);
            } else
                *token += (BYTE)(matchCode);
        }

        anchor = ip;

        /* Test end of chunk */
        if (ip > mflimit) break;

        /* Fill table */
        LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);

        /* Test next position : if it matches immediately, skip the search loop */
        match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
        if (dict==usingExtDict) {
            if (match < (const BYTE*)source) {
                refDelta = dictDelta;
                lowLimit = dictionary;
            } else {
                refDelta = 0;
                lowLimit = (const BYTE*)source;
        }   }
        LZ4_putPosition(ip, cctx->hashTable, tableType, base);
        if ( ((dictIssue==dictSmall) ? (match>=lowRefLimit) : 1)
            && (match+MAX_DISTANCE>=ip)
            && (LZ4_read32(match+refDelta)==LZ4_read32(ip)) )
        { token=op++; *token=0; goto _next_match; }

        /* Prepare next loop */
        forwardH = LZ4_hashPosition(++ip, tableType);
    }

_last_literals:
    /* Encode Last Literals */
    {   size_t const lastRun = (size_t)(iend - anchor);
        if ( (outputLimited) &&  /* Check output buffer overflow */
            ((op - (BYTE*)dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize) )
            return 0;
        if (lastRun >= RUN_MASK) {
            size_t accumulator = lastRun - RUN_MASK;
            *op++ = RUN_MASK << ML_BITS;
            for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
            *op++ = (BYTE) accumulator;
        } else {
            *op++ = (BYTE)(lastRun<<ML_BITS);
        }
        memcpy(op, anchor, lastRun);
        op += lastRun;
    }

    /* End */
    return (int) (((char*)op)-dest);
}
657 
658 
LZ4_compress_fast_extState(void * state,const char * source,char * dest,int inputSize,int maxOutputSize,int acceleration)659 int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
660 {
661     LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
662     LZ4_resetStream((LZ4_stream_t*)state);
663     if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
664 
665     if (maxOutputSize >= LZ4_compressBound(inputSize)) {
666         if (inputSize < LZ4_64Klimit)
667             return LZ4_compress_generic(ctx, source, dest, inputSize,             0,    notLimited,                        byU16, noDict, noDictIssue, acceleration);
668         else
669             return LZ4_compress_generic(ctx, source, dest, inputSize,             0,    notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration);
670     } else {
671         if (inputSize < LZ4_64Klimit)
672             return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput,                        byU16, noDict, noDictIssue, acceleration);
673         else
674             return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration);
675     }
676 }
677 
678 
LZ4_compress_fast(const char * source,char * dest,int inputSize,int maxOutputSize,int acceleration)679 int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
680 {
681 #if (HEAPMODE)
682     void* ctxPtr = ALLOCATOR(1, sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
683 #else
684     LZ4_stream_t ctx;
685     void* const ctxPtr = &ctx;
686 #endif
687 
688     int const result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
689 
690 #if (HEAPMODE)
691     FREEMEM(ctxPtr);
692 #endif
693     return result;
694 }
695 
696 
/* Default compression entry point : LZ4_compress_fast() with acceleration 1. */
int LZ4_compress_default(const char* source, char* dest, int inputSize, int maxOutputSize)
{
    return LZ4_compress_fast(source, dest, inputSize, maxOutputSize, 1);
}
701 
702 
703 /* hidden debug function */
704 /* strangely enough, gcc generates faster code when this function is uncommented, even if unused */
LZ4_compress_fast_force(const char * source,char * dest,int inputSize,int maxOutputSize,int acceleration)705 int LZ4_compress_fast_force(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
706 {
707     LZ4_stream_t ctx;
708     LZ4_resetStream(&ctx);
709 
710     if (inputSize < LZ4_64Klimit)
711         return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, byU16,                        noDict, noDictIssue, acceleration);
712     else
713         return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, sizeof(void*)==8 ? byU32 : byPtr, noDict, noDictIssue, acceleration);
714 }
715 
716 
717 /*-******************************
718 *  *_destSize() variant
719 ********************************/
720 
/*! LZ4_compress_destSize_generic() :
 *  Compresses as many bytes of `src` as can fit into `dst` of capacity `targetDstSize`.
 *  On return, *srcSizePtr holds the number of source bytes actually consumed.
 *  @return : number of bytes written into dst (0 if targetDstSize is too small for anything). */
static int LZ4_compress_destSize_generic(
                       LZ4_stream_t_internal* const ctx,
                 const char* const src,
                       char* const dst,
                       int*  const srcSizePtr,
                 const int targetDstSize,
                 const tableType_t tableType)
{
    const BYTE* ip = (const BYTE*) src;
    const BYTE* base = (const BYTE*) src;
    const BYTE* lowLimit = (const BYTE*) src;
    const BYTE* anchor = ip;                        /* start of the pending literal run */
    const BYTE* const iend = ip + *srcSizePtr;
    const BYTE* const mflimit = iend - MFLIMIT;
    const BYTE* const matchlimit = iend - LASTLITERALS;

    BYTE* op = (BYTE*) dst;
    BYTE* const oend = op + targetDstSize;
    /* Output-budget ceilings : beyond these points, a full sequence can no longer fit. */
    BYTE* const oMaxLit = op + targetDstSize - 2 /* offset */ - 8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */;
    BYTE* const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */);
    BYTE* const oMaxSeq = oMaxLit - 1 /* token */;

    U32 forwardH;


    /* Init conditions */
    if (targetDstSize < 1) return 0;                                     /* Impossible to store anything */
    if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0;            /* Unsupported input size, too large (or negative) */
    if ((tableType == byU16) && (*srcSizePtr>=LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */
    if (*srcSizePtr<LZ4_minLength) goto _last_literals;                  /* Input too small, no compression (all literals) */

    /* First Byte */
    *srcSizePtr = 0;
    LZ4_putPosition(ip, ctx->hashTable, tableType, base);
    ip++; forwardH = LZ4_hashPosition(ip, tableType);

    /* Main Loop */
    for ( ; ; ) {
        const BYTE* match;
        BYTE* token;

        /* Find a match : probe increasingly far apart positions (skip-search),
         * giving up for this block once the search passes mflimit. */
        {   const BYTE* forwardIp = ip;
            unsigned step = 1;
            unsigned searchMatchNb = 1 << LZ4_skipTrigger;

            do {
                U32 h = forwardH;
                ip = forwardIp;
                forwardIp += step;
                step = (searchMatchNb++ >> LZ4_skipTrigger);

                if (unlikely(forwardIp > mflimit)) goto _last_literals;

                match = LZ4_getPositionOnHash(h, ctx->hashTable, tableType, base);
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putPositionOnHash(ip, h, ctx->hashTable, tableType, base);

            } while ( ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
                || (LZ4_read32(match) != LZ4_read32(ip)) );
        }

        /* Catch up : extend the match backwards over equal preceding bytes */
        while ((ip>anchor) && (match > lowLimit) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }

        /* Encode Literal length */
        {   unsigned litLength = (unsigned)(ip - anchor);
            token = op++;
            if (op + ((litLength+240)/255) + litLength > oMaxLit) {
                /* Not enough space for a last match */
                op--;
                goto _last_literals;
            }
            if (litLength>=RUN_MASK) {
                unsigned len = litLength - RUN_MASK;
                *token=(RUN_MASK<<ML_BITS);
                for(; len >= 255 ; len-=255) *op++ = 255;
                *op++ = (BYTE)len;
            }
            else *token = (BYTE)(litLength<<ML_BITS);

            /* Copy Literals */
            LZ4_wildCopy(op, anchor, op+litLength);
            op += litLength;
        }

_next_match:
        /* Encode Offset */
        LZ4_writeLE16(op, (U16)(ip-match)); op+=2;

        /* Encode MatchLength */
        {   size_t matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);

            if (op + ((matchLength+240)/255) > oMaxMatch) {
                /* Match description too long : reduce it */
                matchLength = (15-1) + (oMaxMatch-op) * 255;
            }
            ip += MINMATCH + matchLength;

            if (matchLength>=ML_MASK) {
                *token += ML_MASK;
                matchLength -= ML_MASK;
                while (matchLength >= 255) { matchLength-=255; *op++ = 255; }
                *op++ = (BYTE)matchLength;
            }
            else *token += (BYTE)(matchLength);
        }

        anchor = ip;

        /* Test end of block */
        if (ip > mflimit) break;
        if (op > oMaxSeq) break;   /* output budget : no room for another full sequence */

        /* Fill table */
        LZ4_putPosition(ip-2, ctx->hashTable, tableType, base);

        /* Test next position : immediate re-match avoids re-entering the search loop */
        match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
        LZ4_putPosition(ip, ctx->hashTable, tableType, base);
        if ( (match+MAX_DISTANCE>=ip)
            && (LZ4_read32(match)==LZ4_read32(ip)) )
        { token=op++; *token=0; goto _next_match; }

        /* Prepare next loop */
        forwardH = LZ4_hashPosition(++ip, tableType);
    }

_last_literals:
    /* Encode Last Literals : emit as many remaining bytes as still fit in dst */
    {   size_t lastRunSize = (size_t)(iend - anchor);
        if (op + 1 /* token */ + ((lastRunSize+240)/255) /* litLength */ + lastRunSize /* literals */ > oend) {
            /* adapt lastRunSize to fill 'dst' */
            lastRunSize  = (oend-op) - 1;
            lastRunSize -= (lastRunSize+240)/255;
        }
        ip = anchor + lastRunSize;

        if (lastRunSize >= RUN_MASK) {
            size_t accumulator = lastRunSize - RUN_MASK;
            *op++ = RUN_MASK << ML_BITS;
            for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
            *op++ = (BYTE) accumulator;
        } else {
            *op++ = (BYTE)(lastRunSize<<ML_BITS);
        }
        memcpy(op, anchor, lastRunSize);
        op += lastRunSize;
    }

    /* End : report both consumed input and produced output */
    *srcSizePtr = (int) (((const char*)ip)-src);
    return (int) (((char*)op)-dst);
}
875 
876 
/* Dispatches a destSize compression onto a caller-provided (reset) state. */
static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
    LZ4_resetStream(state);

    /* When the budget covers the worst case, plain (faster) compression cannot overflow dst. */
    if (targetDstSize >= LZ4_compressBound(*srcSizePtr))
        return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);

    /* Otherwise run the dedicated destSize variant, with the table type suited to input size. */
    {   tableType_t const tableType = (*srcSizePtr < LZ4_64Klimit) ? byU16 : ((sizeof(void*)==8) ? byU32 : byPtr);
        return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, tableType);
    }
}
890 
891 
LZ4_compress_destSize(const char * src,char * dst,int * srcSizePtr,int targetDstSize)892 int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
893 {
894 #if (HEAPMODE)
895     LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOCATOR(1, sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
896 #else
897     LZ4_stream_t ctxBody;
898     LZ4_stream_t* ctx = &ctxBody;
899 #endif
900 
901     int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
902 
903 #if (HEAPMODE)
904     FREEMEM(ctx);
905 #endif
906     return result;
907 }
908 
909 
910 
911 /*-******************************
912 *  Streaming functions
913 ********************************/
914 
LZ4_createStream(void)915 LZ4_stream_t* LZ4_createStream(void)
916 {
917     LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64);
918     LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal));    /* A compilation error here means LZ4_STREAMSIZE is not large enough */
919     LZ4_resetStream(lz4s);
920     return lz4s;
921 }
922 
/*! LZ4_resetStream() :
 *  Re-initializes a streaming state so a brand new stream can start.
 *  Zero-fills the whole structure; a zeroed initCheck field is what marks the
 *  state as "initialized" for LZ4_compress_fast_continue(). */
void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
{
    MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
}
927 
/*! LZ4_freeStream() :
 *  Releases a state allocated by LZ4_createStream(). Always reports success (0). */
int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
{
    FREEMEM(LZ4_stream);
    return 0;
}
933 
934 
935 #define HASH_UNIT sizeof(reg_t)
/*! LZ4_loadDict() :
 *  References `dictionary` (only its last 64 KB are kept) as prefix history for the
 *  next compression on LZ4_dict, and indexes its positions into the hash table.
 *  @return : number of dictionary bytes retained (0 if dictionary too small). */
int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
{
    LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
    const BYTE* p = (const BYTE*)dictionary;
    const BYTE* const dictEnd = p + dictSize;
    const BYTE* base;

    if ((dict->initCheck) || (dict->currentOffset > 1 GB))  /* Uninitialized structure, or reuse overflow */
        LZ4_resetStream(LZ4_dict);

    if (dictSize < (int)HASH_UNIT) {
        /* too small to index : behave as "no dictionary" */
        dict->dictionary = NULL;
        dict->dictSize = 0;
        return 0;
    }

    if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;   /* only the last 64 KB are reachable (16-bit offsets) */
    dict->currentOffset += 64 KB;   /* keep dictionary offsets strictly below future input offsets */
    base = p - dict->currentOffset;
    dict->dictionary = p;
    dict->dictSize = (U32)(dictEnd - p);
    dict->currentOffset += dict->dictSize;

    /* index dictionary positions; stride 3 trades indexing density for load speed */
    while (p <= dictEnd-HASH_UNIT) {
        LZ4_putPosition(p, dict->hashTable, byU32, base);
        p+=3;
    }

    return dict->dictSize;
}
966 
967 
/*! LZ4_renormDictT() :
 *  Keeps internal offsets small : when currentOffset risks overflowing (or exceeds
 *  the address of the upcoming input `src`), rebase the hash table so offsets
 *  restart just above 64 KB. No-op otherwise. */
static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
{
    if ((LZ4_dict->currentOffset > 0x80000000) ||
        ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {   /* address space overflow */
        /* rescale hash table */
        U32 const delta = LZ4_dict->currentOffset - 64 KB;
        const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
        int i;
        for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
            if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;   /* entry now out of reach : drop it */
            else LZ4_dict->hashTable[i] -= delta;
        }
        LZ4_dict->currentOffset = 64 KB;
        if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
        LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
    }
}
985 
986 
/*! LZ4_compress_fast_continue() :
 *  Compresses `source` as the next block of an ongoing stream, using previously
 *  compressed data (tracked in LZ4_stream) as dictionary.
 *  @return : compressed size written into dest, or 0 on error / uninitialized state. */
int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
    const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;

    /* renormalize against the lowest address the compressor will reference */
    const BYTE* smallest = (const BYTE*) source;
    if (streamPtr->initCheck) return 0;   /* Uninitialized structure detected */
    if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd;
    LZ4_renormDictT(streamPtr, smallest);
    if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;

    /* Check overlapping input/dictionary space : shrink the dictionary so it
     * never overlaps the region about to be (over)written */
    {   const BYTE* sourceEnd = (const BYTE*) source + inputSize;
        if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
            streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
            if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
            if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
            streamPtr->dictionary = dictEnd - streamPtr->dictSize;
        }
    }

    /* prefix mode : source data follows dictionary */
    if (dictEnd == (const BYTE*)source) {
        int result;
        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
            result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, dictSmall, acceleration);
        else
            result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, noDictIssue, acceleration);
        streamPtr->dictSize += (U32)inputSize;
        streamPtr->currentOffset += (U32)inputSize;
        return result;
    }

    /* external dictionary mode : after compression, the new block becomes the dictionary */
    {   int result;
        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
            result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, dictSmall, acceleration);
        else
            result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, noDictIssue, acceleration);
        streamPtr->dictionary = (const BYTE*)source;
        streamPtr->dictSize = (U32)inputSize;
        streamPtr->currentOffset += (U32)inputSize;
        return result;
    }
}
1032 
1033 
1034 /* Hidden debug function, to force external dictionary mode */
/* Hidden debug function, to force external dictionary mode */
int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize)
{
    LZ4_stream_t_internal* const streamPtr = &LZ4_dict->internal_donotuse;
    const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
    const BYTE* lowest = (const BYTE*) source;
    int result;

    /* renormalize against the lowest address that will be referenced */
    if (dictEnd < lowest) lowest = dictEnd;
    LZ4_renormDictT(streamPtr, lowest);

    result = LZ4_compress_generic(streamPtr, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);

    /* the block just compressed becomes the new (external) dictionary */
    streamPtr->dictionary = (const BYTE*)source;
    streamPtr->dictSize = (U32)inputSize;
    streamPtr->currentOffset += (U32)inputSize;

    return result;
}
1053 
1054 
1055 /*! LZ4_saveDict() :
1056  *  If previously compressed data block is not guaranteed to remain available at its memory location,
1057  *  save it into a safer place (char* safeBuffer).
1058  *  Note : you don't need to call LZ4_loadDict() afterwards,
1059  *         dictionary is immediately usable, you can therefore call LZ4_compress_fast_continue().
1060  *  Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
1061  */
int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
{
    LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
    const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;

    if ((U32)dictSize > 64 KB) dictSize = 64 KB;   /* useless to define a dictionary > 64 KB */
    if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize;

    /* fix : when dictSize==0 (e.g. no dictionary loaded, dict->dictionary may be NULL),
     * skip memmove — passing NULL to memmove is undefined behavior even with size 0 */
    if (dictSize > 0)
        memmove(safeBuffer, previousDictEnd - dictSize, dictSize);

    dict->dictionary = (const BYTE*)safeBuffer;
    dict->dictSize = (U32)dictSize;

    return dictSize;
}
1077 
1078 
1079 
1080 /*-*****************************
1081 *  Decompression functions
1082 *******************************/
1083 /*! LZ4_decompress_generic() :
1084  *  This generic decompression function cover all use cases.
1085  *  It shall be instantiated several times, using different sets of directives
1086  *  Note that it is important this generic function is really inlined,
1087  *  in order to remove useless branches during compilation optimization.
1088  */
FORCE_INLINE int LZ4_decompress_generic(
                 const char* const source,
                 char* const dest,
                 int inputSize,
                 int outputSize,         /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */

                 int endOnInput,         /* endOnOutputSize, endOnInputSize */
                 int partialDecoding,    /* full, partial */
                 int targetOutputSize,   /* only used if partialDecoding==partial */
                 int dict,               /* noDict, withPrefix64k, usingExtDict */
                 const BYTE* const lowPrefix,  /* == dest when no prefix */
                 const BYTE* const dictStart,  /* only if dict==usingExtDict */
                 const size_t dictSize         /* note : = 0 if noDict */
                 )
{
    /* Local Variables */
    const BYTE* ip = (const BYTE*) source;
    const BYTE* const iend = ip + inputSize;

    BYTE* op = (BYTE*) dest;
    BYTE* const oend = op + outputSize;
    BYTE* cpy;
    BYTE* oexit = op + targetOutputSize;
    const BYTE* const lowLimit = lowPrefix - dictSize;   /* lowest valid match address */

    const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize;
    /* correction tables for overlapping matches with offset < 8 (see "copy match within block") */
    const unsigned dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};
    const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};

    const int safeDecode = (endOnInput==endOnInputSize);
    const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));


    /* Special cases */
    if ((partialDecoding) && (oexit > oend-MFLIMIT)) oexit = oend-MFLIMIT;                        /* targetOutputSize too high => decode everything */
    if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1;  /* Empty output buffer */
    if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);

    /* Main Loop : decode sequences */
    while (1) {
        size_t length;
        const BYTE* match;
        size_t offset;

        /* get literal length */
        unsigned const token = *ip++;
        if ((length=(token>>ML_BITS)) == RUN_MASK) {
            /* extended length : accumulate additional 255-capped bytes */
            unsigned s;
            do {
                s = *ip++;
                length += s;
            } while ( likely(endOnInput ? ip<iend-RUN_MASK : 1) & (s==255) );
            if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) goto _output_error;   /* overflow detection */
            if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) goto _output_error;   /* overflow detection */
        }

        /* copy literals */
        cpy = op+length;
        if ( ((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
            || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
        {
            /* near end of buffers : switch to exact, bounds-checked copy */
            if (partialDecoding) {
                if (cpy > oend) goto _output_error;                           /* Error : write attempt beyond end of output buffer */
                if ((endOnInput) && (ip+length > iend)) goto _output_error;   /* Error : read attempt beyond end of input buffer */
            } else {
                if ((!endOnInput) && (cpy != oend)) goto _output_error;       /* Error : block decoding must stop exactly there */
                if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error;   /* Error : input must be consumed */
            }
            memcpy(op, ip, length);
            ip += length;
            op += length;
            break;     /* Necessarily EOF, due to parsing restrictions */
        }
        LZ4_wildCopy(op, ip, cpy);
        ip += length; op = cpy;

        /* get offset */
        offset = LZ4_readLE16(ip); ip+=2;
        match = op - offset;
        if ((checkOffset) && (unlikely(match < lowLimit))) goto _output_error;   /* Error : offset outside buffers */
        LZ4_write32(op, (U32)offset);   /* costs ~1%; silence an msan warning when offset==0 */

        /* get matchlength */
        length = token & ML_MASK;
        if (length == ML_MASK) {
            unsigned s;
            do {
                s = *ip++;
                if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error;
                length += s;
            } while (s==255);
            if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error;   /* overflow detection */
        }
        length += MINMATCH;

        /* check external dictionary : match (partially) located before lowPrefix */
        if ((dict==usingExtDict) && (match < lowPrefix)) {
            if (unlikely(op+length > oend-LASTLITERALS)) goto _output_error;   /* doesn't respect parsing restriction */

            if (length <= (size_t)(lowPrefix-match)) {
                /* match can be copied as a single segment from external dictionary */
                memmove(op, dictEnd - (lowPrefix-match), length);
                op += length;
            } else {
                /* match encompass external dictionary and current block */
                size_t const copySize = (size_t)(lowPrefix-match);
                size_t const restSize = length - copySize;
                memcpy(op, dictEnd - copySize, copySize);
                op += copySize;
                if (restSize > (size_t)(op-lowPrefix)) {  /* overlap copy */
                    BYTE* const endOfMatch = op + restSize;
                    const BYTE* copyFrom = lowPrefix;
                    while (op < endOfMatch) *op++ = *copyFrom++;
                } else {
                    memcpy(op, lowPrefix, restSize);
                    op += restSize;
            }   }
            continue;
        }

        /* copy match within block */
        cpy = op + length;
        if (unlikely(offset<8)) {
            /* overlapping match : copy byte-by-byte for the first 8 bytes,
             * using the correction tables to re-align the source pointer */
            const int dec64 = dec64table[offset];
            op[0] = match[0];
            op[1] = match[1];
            op[2] = match[2];
            op[3] = match[3];
            match += dec32table[offset];
            memcpy(op+4, match, 4);
            match -= dec64;
        } else { LZ4_copy8(op, match); match+=8; }
        op += 8;

        if (unlikely(cpy>oend-12)) {
            /* close to the end of output : finish with exact copies */
            BYTE* const oCopyLimit = oend-(WILDCOPYLENGTH-1);
            if (cpy > oend-LASTLITERALS) goto _output_error;    /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
            if (op < oCopyLimit) {
                LZ4_wildCopy(op, match, oCopyLimit);
                match += oCopyLimit - op;
                op = oCopyLimit;
            }
            while (op<cpy) *op++ = *match++;
        } else {
            LZ4_copy8(op, match);
            if (length>16) LZ4_wildCopy(op+8, match+8, cpy);
        }
        op=cpy;   /* correction */
    }

    /* end of decoding */
    if (endOnInput)
       return (int) (((char*)op)-dest);     /* Nb of output bytes decoded */
    else
       return (int) (((const char*)ip)-source);   /* Nb of input bytes read */

    /* Overflow error detected */
_output_error:
    return (int) (-(((const char*)ip)-source))-1;
}
1249 
1250 
LZ4_decompress_safe(const char * source,char * dest,int compressedSize,int maxDecompressedSize)1251 int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
1252 {
1253     return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, full, 0, noDict, (BYTE*)dest, NULL, 0);
1254 }
1255 
LZ4_decompress_safe_partial(const char * source,char * dest,int compressedSize,int targetOutputSize,int maxDecompressedSize)1256 int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize)
1257 {
1258     return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, partial, targetOutputSize, noDict, (BYTE*)dest, NULL, 0);
1259 }
1260 
LZ4_decompress_fast(const char * source,char * dest,int originalSize)1261 int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
1262 {
1263     return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB);
1264 }
1265 
1266 
1267 /*===== streaming decompression functions =====*/
1268 
1269 /*
1270  * If you prefer dynamic allocation methods,
1271  * LZ4_createStreamDecode()
1272  * provides a pointer (void*) towards an initialized LZ4_streamDecode_t structure.
1273  */
LZ4_createStreamDecode(void)1274 LZ4_streamDecode_t* LZ4_createStreamDecode(void)
1275 {
1276     LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOCATOR(1, sizeof(LZ4_streamDecode_t));
1277     return lz4s;
1278 }
1279 
/* Counterpart of LZ4_createStreamDecode(); always reports success (0). */
int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
{
    FREEMEM(LZ4_stream);
    return 0;
}
1285 
1286 /*!
1287  * LZ4_setStreamDecode() :
1288  * Use this function to instruct where to find the dictionary.
1289  * This function is not necessary if previous data is still available where it was decoded.
1290  * Loading a size of 0 is allowed (same effect as no dictionary).
1291  * Return : 1 if OK, 0 if error
1292  */
int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
{
    LZ4_streamDecode_t_internal* const lz4sd = &LZ4_streamDecode->internal_donotuse;
    /* The provided dictionary becomes the current prefix; no external dictionary yet. */
    lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;
    lz4sd->prefixSize = (size_t) dictSize;
    lz4sd->externalDict = NULL;
    lz4sd->extDictSize  = 0;
    return 1;
}
1302 
1303 /*
1304 *_continue() :
1305     These decoding functions allow decompression of multiple blocks in "streaming" mode.
1306     Previously decoded blocks must still be available at the memory position where they were decoded.
1307     If it's not possible, save the relevant part of decoded data into a safe buffer,
1308     and indicate where it stands using LZ4_setStreamDecode()
1309 */
/*! LZ4_decompress_safe_continue() :
 *  Decodes the next block of a stream, using previously decoded data as dictionary.
 *  @return : number of bytes decoded into dest, or a negative value on error. */
int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
{
    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
    int result;

    if (lz4sd->prefixEnd == (BYTE*)dest) {
        /* new block is contiguous with the previous one : extend the prefix */
        result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                        endOnInputSize, full, 0,
                                        usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize += result;
        lz4sd->prefixEnd  += result;
    } else {
        /* output moved : the previous prefix becomes the external dictionary */
        lz4sd->extDictSize = lz4sd->prefixSize;
        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
        result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                        endOnInputSize, full, 0,
                                        usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize = result;
        lz4sd->prefixEnd  = (BYTE*)dest + result;
    }

    return result;
}
1335 
/*! LZ4_decompress_fast_continue() :
 *  Fast-mode variant of LZ4_decompress_safe_continue() : trusts originalSize,
 *  and advances the stream state by originalSize bytes on success. */
int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
{
    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
    int result;

    if (lz4sd->prefixEnd == (BYTE*)dest) {
        /* new block is contiguous with the previous one : extend the prefix */
        result = LZ4_decompress_generic(source, dest, 0, originalSize,
                                        endOnOutputSize, full, 0,
                                        usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize += originalSize;
        lz4sd->prefixEnd  += originalSize;
    } else {
        /* output moved : the previous prefix becomes the external dictionary */
        lz4sd->extDictSize = lz4sd->prefixSize;
        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
        result = LZ4_decompress_generic(source, dest, 0, originalSize,
                                        endOnOutputSize, full, 0,
                                        usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize = originalSize;
        lz4sd->prefixEnd  = (BYTE*)dest + originalSize;
    }

    return result;
}
1361 
1362 
1363 /*
1364 Advanced decoding functions :
1365 *_usingDict() :
1366     These decoding functions work the same as "_continue" ones,
1367     the dictionary must be explicitly provided within parameters
1368 */
1369 
LZ4_decompress_usingDict_generic(const char * source,char * dest,int compressedSize,int maxOutputSize,int safe,const char * dictStart,int dictSize)1370 FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize)
1371 {
1372     if (dictSize==0)
1373         return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest, NULL, 0);
1374     if (dictStart+dictSize == dest) {
1375         if (dictSize >= (int)(64 KB - 1))
1376             return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, (BYTE*)dest-64 KB, NULL, 0);
1377         return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest-dictSize, NULL, 0);
1378     }
1379     return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
1380 }
1381 
/* Safe (bounds-checked) decompression with an explicit dictionary. */
int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
{
    int const safeMode = 1;
    return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize, safeMode, dictStart, dictSize);
}
1386 
/* Fast (trusting originalSize) decompression with an explicit dictionary. */
int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
{
    int const safeMode = 0;
    return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, safeMode, dictStart, dictSize);
}
1391 
1392 /* debug function */
LZ4_decompress_safe_forceExtDict(const char * source,char * dest,int compressedSize,int maxOutputSize,const char * dictStart,int dictSize)1393 int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
1394 {
1395     return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
1396 }
1397 
1398 
1399 /*=*************************************************
1400 *  Obsolete Functions
1401 ***************************************************/
1402 /* obsolete compression functions */
/* Obsolete alias : identical to LZ4_compress_default(). */
int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
{
    return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
}
/* Obsolete alias : compresses with a worst-case-sized output budget. */
int LZ4_compress(const char* source, char* dest, int inputSize)
{
    const int dstCapacity = LZ4_compressBound(inputSize);
    return LZ4_compress_default(source, dest, inputSize, dstCapacity);
}
/* Obsolete alias : forwards to LZ4_compress_fast_extState() at acceleration 1. */
int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)
{
    return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);
}
/* Obsolete alias : extState compression with a worst-case output budget. */
int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)
{
    const int dstCapacity = LZ4_compressBound(srcSize);
    return LZ4_compress_fast_extState(state, src, dst, srcSize, dstCapacity, 1);
}
/* Obsolete alias : forwards to LZ4_compress_fast_continue() at acceleration 1. */
int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int maxDstSize)
{
    return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, maxDstSize, 1);
}
/* Obsolete alias : streaming compression with a worst-case output budget. */
int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)
{
    const int dstCapacity = LZ4_compressBound(inputSize);
    return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, dstCapacity, 1);
}
1409 
1410 /*
1411 These function names are deprecated and should no longer be used.
1412 They are only provided here for compatibility with older user programs.
1413 - LZ4_uncompress is totally equivalent to LZ4_decompress_fast
1414 - LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
1415 */
/* Deprecated name : strictly equivalent to LZ4_decompress_fast(). */
int LZ4_uncompress (const char* source, char* dest, int outputSize)
{
    return LZ4_decompress_fast(source, dest, outputSize);
}
/* Deprecated name : strictly equivalent to LZ4_decompress_safe(). */
int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize)
{
    return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
}
1418 
1419 
1420 /* Obsolete Streaming functions */
1421 
LZ4_sizeofStreamState()1422 int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; }
1423 
/* Resets a streaming state to zero and records the start of the ring buffer.
 * Internal helper for the obsolete streaming API. */
static void LZ4_init(LZ4_stream_t* lz4ds, BYTE* base)
{
    LZ4_stream_t_internal* const internal = &lz4ds->internal_donotuse;
    MEM_INIT(lz4ds, 0, sizeof(LZ4_stream_t));
    internal->bufferStart = base;
}
1429 
LZ4_resetStreamState(void * state,char * inputBuffer)1430 int LZ4_resetStreamState(void* state, char* inputBuffer)
1431 {
1432     if ((((uptrval)state) & 3) != 0) return 1;   /* Error : pointer is not aligned on 4-bytes boundary */
1433     LZ4_init((LZ4_stream_t*)state, (BYTE*)inputBuffer);
1434     return 0;
1435 }
1436 
LZ4_create(char * inputBuffer)1437 void* LZ4_create (char* inputBuffer)
1438 {
1439     LZ4_stream_t* lz4ds = (LZ4_stream_t*)ALLOCATOR(8, sizeof(LZ4_stream_t));
1440     LZ4_init (lz4ds, (BYTE*)inputBuffer);
1441     return lz4ds;
1442 }
1443 
LZ4_slideInputBuffer(void * LZ4_Data)1444 char* LZ4_slideInputBuffer (void* LZ4_Data)
1445 {
1446     LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)LZ4_Data)->internal_donotuse;
1447     int dictSize = LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)ctx->bufferStart, 64 KB);
1448     return (char*)(ctx->bufferStart + dictSize);
1449 }
1450 
1451 /* Obsolete streaming decompression functions */
1452 
LZ4_decompress_safe_withPrefix64k(const char * source,char * dest,int compressedSize,int maxOutputSize)1453 int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
1454 {
1455     return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
1456 }
1457 
LZ4_decompress_fast_withPrefix64k(const char * source,char * dest,int originalSize)1458 int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
1459 {
1460     return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
1461 }
1462 
1463 #endif   /* LZ4_COMMONDEFS_ONLY */
1464