1/*
2 LZ4 - Fast LZ compression algorithm
3 Copyright (C) 2011-2013, Yann Collet.
4 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are
8 met:
9
10 * Redistributions of source code must retain the above copyright
11 notice, this list of conditions and the following disclaimer.
12 * Redistributions in binary form must reproduce the above
13 copyright notice, this list of conditions and the following disclaimer
14 in the documentation and/or other materials provided with the
15 distribution.
16
17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
29 You can contact the author at :
30 - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
31 - LZ4 source repository : http://code.google.com/p/lz4/
32*/
33
34/*
35Note : this source file requires "lz4_encoder.h"
36*/
37
38//**************************************
39// Tuning parameters
40//**************************************
41// MEMORY_USAGE :
42// Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
43// Increasing memory usage improves compression ratio
44// Reduced memory usage can improve speed, due to cache effect
45// Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
46#define MEMORY_USAGE 14
47
48// HEAPMODE :
49// Select how default compression function will allocate memory for its hash table,
50// in memory stack (0:default, fastest), or in memory heap (1:requires memory allocation (malloc)).
51// Default allocation strategy is to use stack (HEAPMODE 0)
52// Note : explicit functions *_stack* and *_heap* are unaffected by this setting
53#define HEAPMODE 0
54
55// BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE :
56// This will provide a small boost to performance for big endian cpu, but the resulting compressed stream will be incompatible with little-endian CPU.
57// You can set this option to 1 in situations where data will remain within closed environment
58// This option is useless on Little_Endian CPU (such as x86)
59//#define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1
60
61
62
63//**************************************
64// CPU Feature Detection
65//**************************************
66// 32 or 64 bits ?
67#if (defined(__x86_64__) || defined(_M_X64) || defined(_WIN64) \
68 || defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__) \
69 || defined(__64BIT__) || defined(_LP64) || defined(__LP64__) \
70 || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) ) // Detects 64 bits mode
71# define LZ4_ARCH64 1
72#else
73# define LZ4_ARCH64 0
74#endif
75
76// Little Endian or Big Endian ?
// Overwrite the #define below if you know your architecture endianness
78#if defined (__GLIBC__)
79# include <endian.h>
80# if (__BYTE_ORDER == __BIG_ENDIAN)
81# define LZ4_BIG_ENDIAN 1
82# endif
83#elif (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(_LITTLE_ENDIAN))
84# define LZ4_BIG_ENDIAN 1
85#elif defined(__sparc) || defined(__sparc__) \
86 || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \
87 || defined(__hpux) || defined(__hppa) \
88 || defined(_MIPSEB) || defined(__s390__)
89# define LZ4_BIG_ENDIAN 1
90#else
91// Little Endian assumed. PDP Endian and other very rare endian format are unsupported.
92#endif
93
94// Unaligned memory access is automatically enabled for "common" CPU, such as x86.
95// For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected
96// If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance
97#if defined(__ARM_FEATURE_UNALIGNED)
98# define LZ4_FORCE_UNALIGNED_ACCESS 1
99#endif
100
101// Define this parameter if your target system or compiler does not support hardware bit count
102#if defined(_MSC_VER) && defined(_WIN32_WCE) // Visual Studio for Windows CE does not support Hardware bit count
103# define LZ4_FORCE_SW_BITCOUNT
104#endif
105
106
107//**************************************
108// Compiler Options
109//**************************************
110#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99
111/* "restrict" is a known keyword */
112#else
113# define restrict // Disable restrict
114#endif
115
116#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
117
118#ifdef _MSC_VER // Visual Studio
119# define forceinline static __forceinline
120# include <intrin.h> // For Visual 2005
121# if LZ4_ARCH64 // 64-bits
122# pragma intrinsic(_BitScanForward64) // For Visual 2005
123# pragma intrinsic(_BitScanReverse64) // For Visual 2005
124# else // 32-bits
125# pragma intrinsic(_BitScanForward) // For Visual 2005
126# pragma intrinsic(_BitScanReverse) // For Visual 2005
127# endif
128# pragma warning(disable : 4127) // disable: C4127: conditional expression is constant
129#else
130# ifdef __GNUC__
131# define forceinline static inline __attribute__((always_inline))
132# else
133# define forceinline static inline
134# endif
135#endif
136
137#ifdef _MSC_VER
138# define lz4_bswap16(x) _byteswap_ushort(x)
139#else
140# define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
141#endif
142
143#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
144# define expect(expr,value) (__builtin_expect ((expr),(value)) )
145#else
146# define expect(expr,value) (expr)
147#endif
148
149#define likely(expr) expect((expr) != 0, 1)
150#define unlikely(expr) expect((expr) != 0, 0)
151
152
153//**************************************
154// Includes
155//**************************************
156#include <stdlib.h> // for malloc
157#include <string.h> // for memset
158#include "lz4.h"
159
160
161//**************************************
162// Basic Types
163//**************************************
164#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99
165# include <stdint.h>
166 typedef uint8_t BYTE;
167 typedef uint16_t U16;
168 typedef uint32_t U32;
169 typedef int32_t S32;
170 typedef uint64_t U64;
171#else
172 typedef unsigned char BYTE;
173 typedef unsigned short U16;
174 typedef unsigned int U32;
175 typedef signed int S32;
176 typedef unsigned long long U64;
177#endif
178
179#if defined(__GNUC__) && !defined(LZ4_FORCE_UNALIGNED_ACCESS)
180# define _PACKED __attribute__ ((packed))
181#else
182# define _PACKED
183#endif
184
// On compilers without GCC's packed attribute, fall back to #pragma pack so the
// wrapper structs below are byte-aligned (only needed when unaligned access is
// not explicitly forced on)
#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
# ifdef __IBMC__
# pragma pack(1)
# else
# pragma pack(push, 1)
# endif
#endif

// Wrapper structs used to access potentially-unaligned memory through a
// (possibly packed) struct member, instead of dereferencing a raw casted pointer
typedef struct _U16_S { U16 v; } _PACKED U16_S;
typedef struct _U32_S { U32 v; } _PACKED U32_S;
typedef struct _U64_S { U64 v; } _PACKED U64_S;

#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
# pragma pack(pop)
#endif

// Unaligned accessors : read or write a 64/32/16-bit word at address 'x'
#define A64(x) (((U64_S *)(x))->v)
#define A32(x) (((U32_S *)(x))->v)
#define A16(x) (((U16_S *)(x))->v)
204
205
206//**************************************
207// Constants
208//**************************************
209#define HASHTABLESIZE (1 << MEMORY_USAGE)
210
211#define MINMATCH 4
212
213#define COPYLENGTH 8
214#define LASTLITERALS 5
215#define MFLIMIT (COPYLENGTH+MINMATCH)
216#define MINLENGTH (MFLIMIT+1)
217
218#define LZ4_64KLIMIT ((1<<16) + (MFLIMIT-1))
219#define SKIPSTRENGTH 6 // Increasing this value will make the compression run slower on incompressible data
220
221#define MAXD_LOG 16
222#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
223
224#define ML_BITS 4
225#define ML_MASK ((1U<<ML_BITS)-1)
226#define RUN_BITS (8-ML_BITS)
227#define RUN_MASK ((1U<<RUN_BITS)-1)
228
229
230//**************************************
231// Architecture-specific macros
232//**************************************
233#if LZ4_ARCH64 // 64-bit
234# define STEPSIZE 8
235# define UARCH U64
236# define AARCH A64
237# define LZ4_COPYSTEP(s,d) A64(d) = A64(s); d+=8; s+=8;
238# define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d)
239# define LZ4_SECURECOPY(s,d,e) if (d<e) LZ4_WILDCOPY(s,d,e)
240# define HTYPE U32
241# define INITBASE(base) const BYTE* const base = ip
242#else // 32-bit
243# define STEPSIZE 4
244# define UARCH U32
245# define AARCH A32
246# define LZ4_COPYSTEP(s,d) A32(d) = A32(s); d+=4; s+=4;
247# define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d); LZ4_COPYSTEP(s,d);
248# define LZ4_SECURECOPY LZ4_WILDCOPY
249# define HTYPE const BYTE*
250# define INITBASE(base) const int base = 0
251#endif
252
253#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
254# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
255# define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
256#else // Little Endian
257# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
258# define LZ4_WRITE_LITTLEENDIAN_16(p,v) { A16(p) = v; p+=2; }
259#endif
260
261
262//**************************************
263// Macros
264//**************************************
265#define LZ4_WILDCOPY(s,d,e) do { LZ4_COPYPACKET(s,d) } while (d<e);
266#define LZ4_BLINDCOPY(s,d,l) { BYTE* e=(d)+(l); LZ4_WILDCOPY(s,d,e); d=e; }
267
268
269//****************************
270// Private functions
271//****************************
272#if LZ4_ARCH64
273
// Given val = XOR of two STEPSIZE-wide words (val != 0), returns how many of
// their bytes are identical : trailing zero bytes on little-endian targets,
// leading zero bytes on big-endian ones. Used to extend match length word-by-word.
forceinline int LZ4_NbCommonBytes (register U64 val)
{
#if defined(LZ4_BIG_ENDIAN)
    #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    // Hardware bit scan : index of the highest set bit, converted to a byte count
    unsigned long r = 0;
    _BitScanReverse64( &r, val );
    return (int)(r>>3);
    #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_clzll(val) >> 3);   // leading zero bits / 8 => leading zero bytes
    #else
    // Software fallback : binary search for the first non-zero byte from the top
    int r;
    if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
    if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
    r += (!val);
    return r;
    #endif
#else
    #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    // Hardware bit scan : index of the lowest set bit, converted to a byte count
    unsigned long r = 0;
    _BitScanForward64( &r, val );
    return (int)(r>>3);
    #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_ctzll(val) >> 3);   // trailing zero bits / 8 => trailing zero bytes
    #else
    // Software fallback : De Bruijn multiply-and-lookup on the isolated lowest set bit
    static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
    return DeBruijnBytePos[((U64)((val & -val) * 0x0218A392CDABBD3F)) >> 58];
    #endif
#endif
}
303
304#else
305
// 32-bit counterpart of LZ4_NbCommonBytes : given val = XOR of two 4-byte words
// (val != 0), returns the number of identical bytes (trailing zero bytes on
// little-endian targets, leading zero bytes on big-endian ones).
forceinline int LZ4_NbCommonBytes (register U32 val)
{
#if defined(LZ4_BIG_ENDIAN)
#  if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    // Hardware bit scan : index of the highest set bit, converted to a byte count
    unsigned long r = 0;
    _BitScanReverse( &r, val );
    return (int)(r>>3);
#  elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_clz(val) >> 3);   // leading zero bits / 8 => leading zero bytes
#  else
    // Software fallback : binary search for the first non-zero byte from the top
    int r;
    if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
    r += (!val);
    return r;
#  endif
#else
#  if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    unsigned long r;
    _BitScanForward( &r, val );
    return (int)(r>>3);
#  elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_ctz(val) >> 3);   // trailing zero bits / 8 => trailing zero bytes
#  else
    // Software fallback : De Bruijn multiply-and-lookup on the isolated lowest set bit
    static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
    return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
#  endif
#endif
}
334
335#endif
336
337
338
339//******************************
340// Compression functions
341//******************************
342
343/*
344int LZ4_compress_stack(
345 const char* source,
346 char* dest,
347 int inputSize)
348
349Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
350Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
351return : the number of bytes written in buffer 'dest'
352*/
353#define FUNCTION_NAME LZ4_compress_stack
354#include "lz4_encoder.h"
355
356
357/*
358int LZ4_compress_stack_limitedOutput(
359 const char* source,
360 char* dest,
361 int inputSize,
362 int maxOutputSize)
363
364Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
365If it cannot achieve it, compression will stop, and result of the function will be zero.
366return : the number of bytes written in buffer 'dest', or 0 if the compression fails
367*/
368#define FUNCTION_NAME LZ4_compress_stack_limitedOutput
369#define LIMITED_OUTPUT
370#include "lz4_encoder.h"
371
372
373/*
374int LZ4_compress64k_stack(
375 const char* source,
376 char* dest,
377 int inputSize)
378
379Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
380This function compresses better than LZ4_compress_stack(), on the condition that
381'inputSize' must be < to LZ4_64KLIMIT, or the function will fail.
382Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
383return : the number of bytes written in buffer 'dest', or 0 if compression fails
384*/
385#define FUNCTION_NAME LZ4_compress64k_stack
386#define COMPRESS_64K
387#include "lz4_encoder.h"
388
389
390/*
391int LZ4_compress64k_stack_limitedOutput(
392 const char* source,
393 char* dest,
394 int inputSize,
395 int maxOutputSize)
396
397Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
398This function compresses better than LZ4_compress_stack_limitedOutput(), on the condition that
399'inputSize' must be < to LZ4_64KLIMIT, or the function will fail.
400If it cannot achieve it, compression will stop, and result of the function will be zero.
401return : the number of bytes written in buffer 'dest', or 0 if the compression fails
402*/
403#define FUNCTION_NAME LZ4_compress64k_stack_limitedOutput
404#define COMPRESS_64K
405#define LIMITED_OUTPUT
406#include "lz4_encoder.h"
407
408
/*
void* LZ4_create();
int LZ4_free(void* ctx);

Used to allocate and free hashTable memory
to be used by the LZ4_compress_heap* family of functions.
LZ4_create() returns NULL if memory allocation fails.
*/
417void* LZ4_create() { return malloc(HASHTABLESIZE); }
// Releases a context obtained from LZ4_create().
// Always returns 0; passing NULL is a safe no-op (as with free()).
int LZ4_free(void* ctx)
{
    free(ctx);
    return 0;
}
419
420
421/*
422int LZ4_compress_heap(
423 void* ctx,
424 const char* source,
425 char* dest,
426 int inputSize)
427
428Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
429The memory used for compression must be created by LZ4_createHeapMemory() and provided by pointer 'ctx'.
430Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
431return : the number of bytes written in buffer 'dest'
432*/
433#define FUNCTION_NAME LZ4_compress_heap
434#define USE_HEAPMEMORY
435#include "lz4_encoder.h"
436
437
438/*
439int LZ4_compress_heap_limitedOutput(
440 void* ctx,
441 const char* source,
442 char* dest,
443 int inputSize,
444 int maxOutputSize)
445
446Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
447If it cannot achieve it, compression will stop, and result of the function will be zero.
448The memory used for compression must be created by LZ4_createHeapMemory() and provided by pointer 'ctx'.
449return : the number of bytes written in buffer 'dest', or 0 if the compression fails
450*/
451#define FUNCTION_NAME LZ4_compress_heap_limitedOutput
452#define LIMITED_OUTPUT
453#define USE_HEAPMEMORY
454#include "lz4_encoder.h"
455
456
457/*
458int LZ4_compress64k_heap(
459 void* ctx,
460 const char* source,
461 char* dest,
462 int inputSize)
463
464Compress 'inputSize' bytes from 'source' into an output buffer 'dest'.
465The memory used for compression must be created by LZ4_createHeapMemory() and provided by pointer 'ctx'.
466'inputSize' must be < to LZ4_64KLIMIT, or the function will fail.
467Destination buffer must be already allocated, and sized at a minimum of LZ4_compressBound(inputSize).
468return : the number of bytes written in buffer 'dest'
469*/
470#define FUNCTION_NAME LZ4_compress64k_heap
471#define COMPRESS_64K
472#define USE_HEAPMEMORY
473#include "lz4_encoder.h"
474
475
476/*
477int LZ4_compress64k_heap_limitedOutput(
478 void* ctx,
479 const char* source,
480 char* dest,
481 int inputSize,
482 int maxOutputSize)
483
484Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
485If it cannot achieve it, compression will stop, and result of the function will be zero.
486The memory used for compression must be created by LZ4_createHeapMemory() and provided by pointer 'ctx'.
487'inputSize' must be < to LZ4_64KLIMIT, or the function will fail.
488return : the number of bytes written in buffer 'dest', or 0 if the compression fails
489*/
490#define FUNCTION_NAME LZ4_compress64k_heap_limitedOutput
491#define COMPRESS_64K
492#define LIMITED_OUTPUT
493#define USE_HEAPMEMORY
494#include "lz4_encoder.h"
495
496
497int LZ4_compress(const char* source, char* dest, int inputSize)
498{
499#if HEAPMODE
500 void* ctx = LZ4_create();
501 int result;
502 if (ctx == NULL) return 0; // Failed allocation => compression not done
503 if (inputSize < LZ4_64KLIMIT)
504 result = LZ4_compress64k_heap(ctx, source, dest, inputSize);
505 else result = LZ4_compress_heap(ctx, source, dest, inputSize);
506 LZ4_free(ctx);
507 return result;
508#else
509 if (inputSize < (int)LZ4_64KLIMIT) return LZ4_compress64k_stack(source, dest, inputSize);
510 return LZ4_compress_stack(source, dest, inputSize);
511#endif
512}
513
514
515int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
516{
517#if HEAPMODE
518 void* ctx = LZ4_create();
519 int result;
520 if (ctx == NULL) return 0; // Failed allocation => compression not done
521 if (inputSize < LZ4_64KLIMIT)
522 result = LZ4_compress64k_heap_limitedOutput(ctx, source, dest, inputSize, maxOutputSize);
523 else result = LZ4_compress_heap_limitedOutput(ctx, source, dest, inputSize, maxOutputSize);
524 LZ4_free(ctx);
525 return result;
526#else
527 if (inputSize < (int)LZ4_64KLIMIT) return LZ4_compress64k_stack_limitedOutput(source, dest, inputSize, maxOutputSize);
528 return LZ4_compress_stack_limitedOutput(source, dest, inputSize, maxOutputSize);
529#endif
530}
531
532
533//****************************
534// Decompression functions
535//****************************
536
// Directive enums, passed as int flags to LZ4_decompress_generic()
typedef enum { noPrefix = 0, withPrefix = 1 } prefix64k_directive;        // may matches reference data located before 'dest' ?
typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } end_directive;   // which buffer bound terminates decoding
typedef enum { full = 0, partial = 1 } exit_directive;                    // decode everything, or stop near targetOutputSize
540
541
// This generic decompression function covers all use cases.
// It shall be instantiated several times, using different sets of directives.
// Note that it is essential this generic function is really inlined,
// in order to remove useless branches during compilation optimisation.
// Returns : number of output bytes decoded (endOnInput) or input bytes read
//           (endOnOutput); a negative value signals a malformed stream.
forceinline int LZ4_decompress_generic(
                 const char* source,
                 char* dest,
                 int inputSize,          // exact compressed size; only meaningful when endOnInput==endOnInputSize
                 int outputSize,         // OutputSize must be != 0; if endOnInput==endOnInputSize, this value is the max size of Output Buffer.

                 int endOnInput,         // endOnOutputSize, endOnInputSize
                 int prefix64k,          // noPrefix, withPrefix
                 int partialDecoding,    // full, partial
                 int targetOutputSize    // only used if partialDecoding==partial
                 )
{
    // Local Variables
    const BYTE* restrict ip = (const BYTE*) source;    // read cursor in compressed input
    const BYTE* ref;                                   // back-reference (match source) within output
    const BYTE* const iend = ip + inputSize;           // end of input (valid only when endOnInput)

    BYTE* op = (BYTE*) dest;                           // write cursor in decompressed output
    BYTE* const oend = op + outputSize;                // end of output buffer
    BYTE* cpy;                                         // end position of the copy in progress
    BYTE* oexit = op + targetOutputSize;               // early-exit point for partial decoding

    // Correction tables used below when the match offset is < STEPSIZE
    // (overlapping copy) : they re-aim 'ref' so wider copies read already-written bytes
    size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
#if LZ4_ARCH64
    size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
#endif


    // Special case
    if ((partialDecoding) && (oexit> oend-MFLIMIT)) oexit = oend-MFLIMIT;                        // targetOutputSize too high => decode everything
    if ((endOnInput) && unlikely(outputSize==0)) return ((inputSize==1) && (*ip==0)) ? 0 : -1;   // Empty output buffer
    if ((!endOnInput) && unlikely(outputSize==0)) return (*ip==0?1:-1);


    // Main Loop : one sequence (literal run + match copy) per iteration
    while (1)
    {
        unsigned token;
        size_t length;

        // get runlength : high 4 bits of token; RUN_MASK (15) means the
        // length continues in following bytes (each 255 byte adds and continues)
        token = *ip++;
        if ((length=(token>>ML_BITS)) == RUN_MASK)
        {
            unsigned s=255;
            while (((endOnInput)?ip<iend:1) && (s==255))
            {
                s = *ip++;
                length += s;
            }
        }

        // copy literals
        cpy = op+length;
        // Near the end of either buffer, leave the fast wildcopy path and
        // finish with an exact, fully-bounds-checked memcpy
        if (((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
            || ((!endOnInput) && (cpy>oend-COPYLENGTH)))
        {
            if (partialDecoding)
            {
                if (cpy > oend) goto _output_error;                           // Error : write attempt beyond end of output buffer
                if ((endOnInput) && (ip+length > iend)) goto _output_error;   // Error : read attempt beyond end of input buffer
            }
            else
            {
                if ((!endOnInput) && (cpy != oend)) goto _output_error;       // Error : block decoding must stop exactly there
                if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error;   // Error : input must be consumed
            }
            memcpy(op, ip, length);
            ip += length;
            op += length;
            break;                                     // Necessarily EOF, due to parsing restrictions
        }
        // Wildcopy may overrun by up to COPYLENGTH-1 bytes; the guard above ensures room
        LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;

        // get offset : 2 bytes, little-endian, counted backwards from 'op'
        LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;
        if ((prefix64k==noPrefix) && unlikely(ref < (BYTE* const)dest)) goto _output_error;   // Error : offset outside destination buffer

        // get matchlength : low 4 bits of token; ML_MASK (15) means continued bytes follow
        if ((length=(token&ML_MASK)) == ML_MASK)
        {
            for ( ; (!endOnInput) || (ip<iend-(LASTLITERALS+1)) ; )   // Ensure enough bytes remain for LASTLITERALS + token
            {
                unsigned s = *ip++;
                length += s;
                if (s==255) continue;
                break;
            }
        }

        // copy repeated sequence
        // Offsets smaller than STEPSIZE overlap the bytes being written :
        // copy the first 4 bytes one at a time, then use the dec tables to
        // rewind 'ref' onto data that has already been produced
        if unlikely((op-ref)<STEPSIZE)
        {
#if LZ4_ARCH64
            size_t dec64 = dec64table[op-ref];
#else
            const size_t dec64 = 0;
#endif
            op[0] = ref[0];
            op[1] = ref[1];
            op[2] = ref[2];
            op[3] = ref[3];
            op += 4, ref += 4; ref -= dec32table[op-ref];
            A32(op) = A32(ref);
            op += STEPSIZE-4; ref -= dec64;
        } else { LZ4_COPYSTEP(ref,op); }
        cpy = op + length - (STEPSIZE-4);

        // Match ends close to the output end => switch to the careful copy path
        if unlikely(cpy>oend-(COPYLENGTH)-(STEPSIZE-4))
        {
            if (cpy > oend-LASTLITERALS) goto _output_error;    // Error : last 5 bytes must be literals
            LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH));         // wide copies only while safely inside the buffer
            while(op<cpy) *op++=*ref++;                         // finish the tail byte by byte
            op=cpy;
            continue;
        }
        LZ4_WILDCOPY(ref, op, cpy);
        op=cpy;   // correction
    }

    // end of decoding
    if (endOnInput)
        return (int) (((char*)op)-dest);     // Nb of output bytes decoded
    else
        return (int) (((char*)ip)-source);   // Nb of input bytes read

    // Overflow error detected
_output_error:
    return (int) (-(((char*)ip)-source))-1;  // negative offset of the failure point in input, minus 1
}
676
677
678int LZ4_decompress_safe(const char* source, char* dest, int inputSize, int maxOutputSize)
679{
680 return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, noPrefix, full, 0);
681}
682
683int LZ4_decompress_fast(const char* source, char* dest, int outputSize)
684{
685 return LZ4_decompress_generic(source, dest, 0, outputSize, endOnOutputSize, noPrefix, full, 0);
686}
687
688int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int inputSize, int maxOutputSize)
689{
690 return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, withPrefix, full, 0);
691}
692
693int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int outputSize)
694{
695 return LZ4_decompress_generic(source, dest, 0, outputSize, endOnOutputSize, withPrefix, full, 0);
696}
697
698int LZ4_decompress_safe_partial(const char* source, char* dest, int inputSize, int targetOutputSize, int maxOutputSize)
699{
700 return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, noPrefix, partial, targetOutputSize);
701}
702
703