GRASS 8 Programmer's Manual 8.6.0dev(2026)-1d1e47ad9d
lz4.c
/*
   LZ4 - Fast LZ compression algorithm
   Copyright (C) 2011-2023, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at:
    - LZ4 homepage : http://www.lz4.org
    - LZ4 source repository : https://github.com/lz4/lz4
*/

/*-************************************
 * Tuning parameters
 **************************************/
/*
 * LZ4_HEAPMODE :
 * Selects how stateless compression functions like `LZ4_compress_default()`
 * allocate memory for their hash table:
 * on the stack (0: default, fastest), or on the heap (1: requires malloc()).
 */
#ifndef LZ4_HEAPMODE
#define LZ4_HEAPMODE 0
#endif
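
/* Illustrative sketch (not part of lz4.c): with the default LZ4_HEAPMODE == 0,
 * a one-shot call such as
 *
 *     char dst[LZ4_COMPRESSBOUND(1024)];
 *     int const n = LZ4_compress_default(src, dst, 1024, (int)sizeof(dst));
 *
 * keeps its hash table on the stack; building with -DLZ4_HEAPMODE=1 makes the
 * same call allocate the table with ALLOC()/FREEMEM() instead, which suits
 * small-stack environments. (`src` is assumed to be a 1024-byte input buffer.)
 */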

/*
 * LZ4_ACCELERATION_DEFAULT :
 * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
 */
#define LZ4_ACCELERATION_DEFAULT 1
/*
 * LZ4_ACCELERATION_MAX :
 * Any "acceleration" value higher than this threshold
 * gets treated as LZ4_ACCELERATION_MAX instead (fix #876)
 */
#define LZ4_ACCELERATION_MAX 65537
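
/* Illustrative sketch (not part of lz4.c): acceleration trades compression
 * ratio for speed, and out-of-range values are normalized by the library:
 *
 *     LZ4_compress_fast(src, dst, n, cap, 0);       // <= 0 : treated as 1
 *     LZ4_compress_fast(src, dst, n, cap, 1 << 20); // clamped to 65537
 */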

/*-************************************
 * CPU Feature Detection
 **************************************/
/* LZ4_FORCE_MEMORY_ACCESS
 * By default, access to unaligned memory is controlled by `memcpy()`, which is
 * safe and portable. Unfortunately, on some target/compiler combinations, the
 * generated assembly is sub-optimal. The switch below allows selecting a
 * different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on a compiler extension (i.e.,
 * not portable). This method is safe if your compiler supports it, and
 * *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable but violates the C
 * standard. It can generate buggy code on targets whose assembly generation
 * depends on alignment. But in some circumstances, it's the only known way to
 * get the most performance (i.e., GCC + ARMv6). See
 * https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html
 * for details. Prefer these methods in priority order (0 > 1 > 2).
 */
#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */
#if defined(__GNUC__) && \
    (defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
     defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
     defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__))
#define LZ4_FORCE_MEMORY_ACCESS 2
#elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__) || \
    defined(_MSC_VER)
#define LZ4_FORCE_MEMORY_ACCESS 1
#endif
#endif
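
/* Illustrative note (not part of lz4.c): since LZ4_FORCE_MEMORY_ACCESS "can be
 * defined externally", the detection above can be overridden at build time,
 * e.g. to force the safe memcpy()-based method 0:
 *
 *     cc -DLZ4_FORCE_MEMORY_ACCESS=0 -c lz4.c
 */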

/*
 * LZ4_FORCE_SW_BITCOUNT
 * Define this parameter if your target system or compiler does not support
 * hardware bit count
 */
#if defined(_MSC_VER) && \
    defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware \
                           bit count */
#undef LZ4_FORCE_SW_BITCOUNT /* avoid double def */
#define LZ4_FORCE_SW_BITCOUNT
#endif

/*-************************************
 * Dependency
 **************************************/
/*
 * LZ4_SRC_INCLUDED:
 * Amalgamation flag, whether lz4.c is included
 */
#ifndef LZ4_SRC_INCLUDED
#define LZ4_SRC_INCLUDED 1
#endif

#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to \
                                          LZ4_decompress_safe_withPrefix64k */
#endif

#ifndef LZ4_STATIC_LINKING_ONLY
#define LZ4_STATIC_LINKING_ONLY
#endif
#include "lz4.h"
/* see also "memory routines" below */

/*-************************************
 * Compiler Options
 **************************************/
#if defined(_MSC_VER) && (_MSC_VER >= 1400) /* Visual Studio 2005+ */
#include <intrin.h> /* only present in VS2005+ */
#pragma warning( \
    disable : 4127) /* disable: C4127: conditional expression is constant */
#pragma warning( \
    disable : 6237) /* disable: C6237: conditional expression is always 0 */
#pragma warning( \
    disable : 6239) /* disable: C6239: (<non-zero constant> && <expression>) \
                       always evaluates to the result of <expression> */
#pragma warning( \
    disable : 6240) /* disable: C6240: (<expression> && <non-zero constant>) \
                       always evaluates to the result of <expression> */
#pragma warning(disable : 6326) /* disable: C6326: Potential comparison of a \
                                   constant with another constant */
#endif /* _MSC_VER */

#ifndef LZ4_FORCE_INLINE
#if defined(_MSC_VER) && !defined(__clang__) /* MSVC */
#define LZ4_FORCE_INLINE static __forceinline
#else
#if defined(__cplusplus) || \
    (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) /* C99 */
#if defined(__GNUC__) || defined(__clang__)
#define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
#else
#define LZ4_FORCE_INLINE static inline
#endif
#else
#define LZ4_FORCE_INLINE static
#endif /* __STDC_VERSION__ */
#endif /* _MSC_VER */
#endif /* LZ4_FORCE_INLINE */

/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE
 * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,
 * together with a simple 8-byte copy loop as a fall-back path.
 * However, this optimization hurts the decompression speed by >30%,
 * because the execution does not go to the optimized loop
 * for typical compressible data, and all of the preamble checks
 * before going to the fall-back path become useless overhead.
 * This optimization happens only with the -O3 flag, and -O2 generates
 * a simple 8-byte copy loop.
 * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8
 * functions are annotated with __attribute__((optimize("O2"))),
 * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute
 * of LZ4_wildCopy8 does not affect the compression speed.
 */
#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && \
    !defined(__clang__)
#define LZ4_FORCE_O2 __attribute__((optimize("O2")))
#undef LZ4_FORCE_INLINE
#define LZ4_FORCE_INLINE \
    static __inline __attribute__((optimize("O2"), always_inline))
#else
#define LZ4_FORCE_O2
#endif

#if (defined(__GNUC__) && (__GNUC__ >= 3)) || \
    (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || \
    defined(__clang__)
#define expect(expr, value) (__builtin_expect((expr), (value)))
#else
#define expect(expr, value) (expr)
#endif

#ifndef likely
#define likely(expr) expect((expr) != 0, 1)
#endif
#ifndef unlikely
#define unlikely(expr) expect((expr) != 0, 0)
#endif

/* Should the alignment test prove unreliable, for some reason,
 * it can be disabled by setting LZ4_ALIGN_TEST to 0 */
#ifndef LZ4_ALIGN_TEST /* can be externally provided */
#define LZ4_ALIGN_TEST 1
#endif

/*-************************************
 * Memory routines
 **************************************/

/*! LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION :
 * Disable relatively high-level LZ4/HC functions that use dynamic memory
 * allocation functions (malloc(), calloc(), free()).
 *
 * Note that this is a compile-time switch. And since it disables
 * public/stable LZ4 v1 API functions, we don't recommend using this
 * symbol to generate a library for distribution.
 *
 * The following public functions are removed when this symbol is defined.
 * - lz4 : LZ4_createStream, LZ4_freeStream,
 *         LZ4_createStreamDecode, LZ4_freeStreamDecode, LZ4_create
 *         (deprecated)
 * - lz4hc : LZ4_createStreamHC, LZ4_freeStreamHC,
 *           LZ4_createHC (deprecated), LZ4_freeHC (deprecated)
 * - lz4frame, lz4file : All LZ4F_* functions
 */
#if defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
#define ALLOC(s) lz4_error_memory_allocation_is_disabled
#define ALLOC_AND_ZERO(s) lz4_error_memory_allocation_is_disabled
#define FREEMEM(p) lz4_error_memory_allocation_is_disabled
#elif defined(LZ4_USER_MEMORY_FUNCTIONS)
/* memory management functions can be customized by user project.
 * The functions below must exist somewhere in the project
 * and be available at link time */
void *LZ4_malloc(size_t s);
void *LZ4_calloc(size_t n, size_t s);
void LZ4_free(void *p);
#define ALLOC(s) LZ4_malloc(s)
#define ALLOC_AND_ZERO(s) LZ4_calloc(1, s)
#define FREEMEM(p) LZ4_free(p)
#else
#include <stdlib.h> /* malloc, calloc, free */
#define ALLOC(s) malloc(s)
#define ALLOC_AND_ZERO(s) calloc(1, s)
#define FREEMEM(p) free(p)
#endif
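
/* Illustrative sketch (not part of lz4.c): when building with
 * -DLZ4_USER_MEMORY_FUNCTIONS, the project supplies the three functions
 * declared above at link time, e.g. forwarding to a custom arena
 * (the my_arena_* names are hypothetical):
 *
 *     void *LZ4_malloc(size_t s) { return my_arena_alloc(s); }
 *     void *LZ4_calloc(size_t n, size_t s) { return my_arena_zalloc(n, s); }
 *     void LZ4_free(void *p) { my_arena_free(p); }
 */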

#if !LZ4_FREESTANDING
#include <string.h> /* memset, memcpy */
#endif
#if !defined(LZ4_memset)
#define LZ4_memset(p, v, s) memset((p), (v), (s))
#endif
#define MEM_INIT(p, v, s) LZ4_memset((p), (v), (s))

/*-************************************
 * Common Constants
 **************************************/
#define MINMATCH 4

#define WILDCOPYLENGTH 8
#define LASTLITERALS \
    5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
#define MATCH_SAFEGUARD_DISTANCE \
    ((2 * WILDCOPYLENGTH) - \
     MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without \
                  overflowing output buffer */
#define FASTLOOP_SAFE_DISTANCE 64
static const int LZ4_minLength = (MFLIMIT + 1);

#define KB *(1 << 10)
#define MB *(1 << 20)
#define GB *(1U << 30)

#define LZ4_DISTANCE_ABSOLUTE_MAX 65535
#if (LZ4_DISTANCE_MAX > \
     LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */
#error "LZ4_DISTANCE_MAX is too big : must be <= 65535"
#endif

#define ML_BITS 4
#define ML_MASK ((1U << ML_BITS) - 1)
#define RUN_BITS (8 - ML_BITS)
#define RUN_MASK ((1U << RUN_BITS) - 1)

/*-************************************
 * Error detection
 **************************************/
#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 1)
#include <assert.h>
#else
#ifndef assert
#define assert(condition) ((void)0)
#endif
#endif

#define LZ4_STATIC_ASSERT(c) \
    { \
        enum { LZ4_static_assert = 1 / (int)(!!(c)) }; \
    } /* use after variable declarations */

#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 2)
#include <stdio.h>
static int g_debuglog_enable = 1;
#define DEBUGLOG(l, ...) \
    { \
        if ((g_debuglog_enable) && (l <= LZ4_DEBUG)) { \
            fprintf(stderr, __FILE__ " %i: ", __LINE__); \
            fprintf(stderr, __VA_ARGS__); \
            fprintf(stderr, " \n"); \
        } \
    }
#else
#define DEBUGLOG(l, ...) \
    { \
    } /* disabled */
#endif

static int LZ4_isAligned(const void *ptr, size_t alignment)
{
    return ((size_t)ptr & (alignment - 1)) == 0;
}

/*-************************************
 * Types
 **************************************/
#include <limits.h>
#if defined(__cplusplus) || \
    (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#include <stdint.h>
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
typedef uintptr_t uptrval;
#else
#if UINT_MAX != 4294967295UL
#error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4"
#endif
typedef unsigned char BYTE;
typedef unsigned short U16;
typedef unsigned int U32;
typedef signed int S32;
typedef unsigned long long U64;
typedef size_t uptrval; /* generally true, except OpenVMS-64 */
#endif

#if defined(__x86_64__)
typedef U64 reg_t; /* 64-bits in x32 mode */
#else
typedef size_t reg_t; /* 32-bits in x32 mode */
#endif

typedef enum {
    notLimited = 0,
    limitedOutput = 1,
    fillOutput = 2
} limitedOutput_directive;
/*-************************************
 * Reading and writing into memory
 **************************************/

/**
 * LZ4 relies on memcpy with a constant size being inlined. In freestanding
 * environments, the compiler can't assume the implementation of memcpy() is
 * standard compliant, so it can't apply its specialized memcpy() inlining
 * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze
 * memcpy() as if it were standard compliant, so it can inline it in
 * freestanding environments. This is needed when decompressing the Linux
 * Kernel, for example.
 */
#if !defined(LZ4_memcpy)
#if defined(__GNUC__) && (__GNUC__ >= 4)
#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
#else
#define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)
#endif
#endif

#if !defined(LZ4_memmove)
#if defined(__GNUC__) && (__GNUC__ >= 4)
#define LZ4_memmove __builtin_memmove
#else
#define LZ4_memmove memmove
#endif
#endif
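
/* Illustrative note (not part of lz4.c): both wrappers above are guarded by
 * #if !defined(...), so a freestanding build may map them onto its own
 * primitives before this point, e.g. (my_memcpy/my_memmove are hypothetical):
 *
 *     #define LZ4_memcpy(d, s, n) my_memcpy(d, s, n)
 *     #define LZ4_memmove my_memmove
 */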
static unsigned LZ4_isLittleEndian(void)
{
    const union {
        U32 u;
        BYTE c[4];
    } one = {1}; /* don't use static : performance detrimental */
    return one.c[0];
}

#if defined(__GNUC__) || defined(__INTEL_COMPILER)
#define LZ4_PACK(__Declaration__) __Declaration__ __attribute__((__packed__))
#elif defined(_MSC_VER)
#define LZ4_PACK(__Declaration__) \
    __pragma(pack(push, 1)) __Declaration__ __pragma(pack(pop))
#endif
#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS == 2)
/* lie to the compiler about data alignment; use with caution */

static U16 LZ4_read16(const void *memPtr)
{
    return *(const U16 *)memPtr;
}
static U32 LZ4_read32(const void *memPtr)
{
    return *(const U32 *)memPtr;
}
static reg_t LZ4_read_ARCH(const void *memPtr)
{
    return *(const reg_t *)memPtr;
}

static void LZ4_write16(void *memPtr, U16 value)
{
    *(U16 *)memPtr = value;
}
static void LZ4_write32(void *memPtr, U32 value)
{
    *(U32 *)memPtr = value;
}

#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS == 1)

/* __pack instructions are safer, but compiler specific, hence potentially
 * problematic for some compilers */
/* currently only defined for gcc and icc */
LZ4_PACK(typedef struct { U16 u16; }) LZ4_unalign16;
LZ4_PACK(typedef struct { U32 u32; }) LZ4_unalign32;
LZ4_PACK(typedef struct { reg_t uArch; }) LZ4_unalignST;

static U16 LZ4_read16(const void *ptr)
{
    return ((const LZ4_unalign16 *)ptr)->u16;
}
static U32 LZ4_read32(const void *ptr)
{
    return ((const LZ4_unalign32 *)ptr)->u32;
}
static reg_t LZ4_read_ARCH(const void *ptr)
{
    return ((const LZ4_unalignST *)ptr)->uArch;
}

static void LZ4_write16(void *memPtr, U16 value)
{
    ((LZ4_unalign16 *)memPtr)->u16 = value;
}
static void LZ4_write32(void *memPtr, U32 value)
{
    ((LZ4_unalign32 *)memPtr)->u32 = value;
}

#else /* safe and portable access using memcpy() */

static U16 LZ4_read16(const void *memPtr)
{
    U16 val;
    LZ4_memcpy(&val, memPtr, sizeof(val));
    return val;
}

static U32 LZ4_read32(const void *memPtr)
{
    U32 val;
    LZ4_memcpy(&val, memPtr, sizeof(val));
    return val;
}

static reg_t LZ4_read_ARCH(const void *memPtr)
{
    reg_t val;
    LZ4_memcpy(&val, memPtr, sizeof(val));
    return val;
}

static void LZ4_write16(void *memPtr, U16 value)
{
    LZ4_memcpy(memPtr, &value, sizeof(value));
}

static void LZ4_write32(void *memPtr, U32 value)
{
    LZ4_memcpy(memPtr, &value, sizeof(value));
}

#endif /* LZ4_FORCE_MEMORY_ACCESS */
static U16 LZ4_readLE16(const void *memPtr)
{
    if (LZ4_isLittleEndian()) {
        return LZ4_read16(memPtr);
    }
    else {
        const BYTE *p = (const BYTE *)memPtr;
        return (U16)((U16)p[0] | (p[1] << 8));
    }
}

#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT
static U32 LZ4_readLE32(const void *memPtr)
{
    if (LZ4_isLittleEndian()) {
        return LZ4_read32(memPtr);
    }
    else {
        const BYTE *p = (const BYTE *)memPtr;
        return (U32)p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
    }
}
#endif

static void LZ4_writeLE16(void *memPtr, U16 value)
{
    if (LZ4_isLittleEndian()) {
        LZ4_write16(memPtr, value);
    }
    else {
        BYTE *p = (BYTE *)memPtr;
        p[0] = (BYTE)value;
        p[1] = (BYTE)(value >> 8);
    }
}
/* customized variant of memcpy, which can overwrite up to 8 bytes beyond
 * dstEnd */
LZ4_FORCE_INLINE
void LZ4_wildCopy8(void *dstPtr, const void *srcPtr, void *dstEnd)
{
    BYTE *d = (BYTE *)dstPtr;
    const BYTE *s = (const BYTE *)srcPtr;
    BYTE *const e = (BYTE *)dstEnd;

    do {
        LZ4_memcpy(d, s, 8);
        d += 8;
        s += 8;
    } while (d < e);
}
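
/* Illustrative note (not part of lz4.c): callers of LZ4_wildCopy8() must
 * reserve up to WILDCOPYLENGTH (8) bytes of slack past dstEnd, because the
 * loop rounds the copy length up to a multiple of 8. E.g., copying 13 bytes
 * issues two 8-byte copies and may touch bytes [0, 16) of the destination.
 */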

static const unsigned inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};

#ifndef LZ4_FAST_DEC_LOOP
#if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64
#define LZ4_FAST_DEC_LOOP 1
#elif defined(__aarch64__) && defined(__APPLE__)
#define LZ4_FAST_DEC_LOOP 1
#elif defined(__aarch64__) && !defined(__clang__)
/* On non-Apple aarch64, we disable this optimization for clang because
 * on certain mobile chipsets, performance is reduced with clang. For
 * more information refer to https://github.com/lz4/lz4/pull/707 */
#define LZ4_FAST_DEC_LOOP 1
#else
#define LZ4_FAST_DEC_LOOP 0
#endif
#endif
#if LZ4_FAST_DEC_LOOP

LZ4_FORCE_INLINE void LZ4_memcpy_using_offset_base(BYTE *dstPtr,
                                                   const BYTE *srcPtr,
                                                   BYTE *dstEnd,
                                                   const size_t offset)
{
    assert(srcPtr + offset == dstPtr);
    if (offset < 8) {
        LZ4_write32(dstPtr, 0); /* silence an msan warning when offset==0 */
        dstPtr[0] = srcPtr[0];
        dstPtr[1] = srcPtr[1];
        dstPtr[2] = srcPtr[2];
        dstPtr[3] = srcPtr[3];
        srcPtr += inc32table[offset];
        LZ4_memcpy(dstPtr + 4, srcPtr, 4);
        srcPtr -= dec64table[offset];
        dstPtr += 8;
    }
    else {
        LZ4_memcpy(dstPtr, srcPtr, 8);
        dstPtr += 8;
        srcPtr += 8;
    }

    LZ4_wildCopy8(dstPtr, srcPtr, dstEnd);
}
/* customized variant of memcpy, which can overwrite up to 32 bytes beyond
 * dstEnd; this version copies two times 16 bytes (instead of one time 32
 * bytes) because it must be compatible with offsets >= 16. */
LZ4_FORCE_INLINE void LZ4_wildCopy32(void *dstPtr, const void *srcPtr,
                                     void *dstEnd)
{
    BYTE *d = (BYTE *)dstPtr;
    const BYTE *s = (const BYTE *)srcPtr;
    BYTE *const e = (BYTE *)dstEnd;

    do {
        LZ4_memcpy(d, s, 16);
        LZ4_memcpy(d + 16, s + 16, 16);
        d += 32;
        s += 32;
    } while (d < e);
}
/* LZ4_memcpy_using_offset() presumes :
 * - dstEnd >= dstPtr + MINMATCH
 * - there is at least 12 bytes available to write after dstEnd */
LZ4_FORCE_INLINE void LZ4_memcpy_using_offset(BYTE *dstPtr, const BYTE *srcPtr,
                                              BYTE *dstEnd, const size_t offset)
{
    BYTE v[8];

    assert(dstEnd >= dstPtr + MINMATCH);

    switch (offset) {
    case 1:
        MEM_INIT(v, *srcPtr, 8);
        break;
    case 2:
        LZ4_memcpy(v, srcPtr, 2);
        LZ4_memcpy(&v[2], srcPtr, 2);
#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */
#pragma warning(push)
#pragma warning(disable \
                : 6385) /* warning C6385: Reading invalid data from 'v'. */
#endif
        LZ4_memcpy(&v[4], v, 4);
#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */
#pragma warning(pop)
#endif
        break;
    case 4:
        LZ4_memcpy(v, srcPtr, 4);
        LZ4_memcpy(&v[4], srcPtr, 4);
        break;
    default:
        LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
        return;
    }

    LZ4_memcpy(dstPtr, v, 8);
    dstPtr += 8;
    while (dstPtr < dstEnd) {
        LZ4_memcpy(dstPtr, v, 8);
        dstPtr += 8;
    }
}
#endif
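
/* Illustrative note (not part of lz4.c): for offsets smaller than the copy
 * width, an LZ4 match copy must replicate a repeating pattern. E.g., with
 * offset == 2 and source bytes "ab", the output continues "abababab...";
 * that is why v[] is first filled with the 2-byte pattern, then broadcast to
 * the destination in 8-byte stores.
 */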

/*-************************************
 * Common functions
 **************************************/
static unsigned LZ4_NbCommonBytes(reg_t val)
{
    assert(val != 0);
    if (LZ4_isLittleEndian()) {
        if (sizeof(val) == 8) {
#if defined(_MSC_VER) && (_MSC_VER >= 1800) && \
    (defined(_M_AMD64) && !defined(_M_ARM64EC)) && \
    !defined(LZ4_FORCE_SW_BITCOUNT)
/*-*************************************************************************************************
 * ARM64EC is a Microsoft-designed ARM64 ABI compatible with AMD64 applications
 * on ARM64 Windows 11. The ARM64EC ABI does not support AVX/AVX2/AVX512
 * instructions, nor their relevant intrinsics including _tzcnt_u64. Therefore,
 * we need to neuter the _tzcnt_u64 code path for ARM64EC.
 ****************************************************************************************************/
#if defined(__clang__) && (__clang_major__ < 10)
            /* Avoid undefined clang-cl intrinsics issue.
             * See https://github.com/lz4/lz4/pull/1017 for details. */
            return (unsigned)__builtin_ia32_tzcnt_u64(val) >> 3;
#else
            /* x64 CPUs without BMI support interpret `TZCNT` as `REP BSF` */
            return (unsigned)_tzcnt_u64(val) >> 3;
#endif
#elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r = 0;
            _BitScanForward64(&r, (U64)val);
            return (unsigned)r >> 3;
#elif (defined(__clang__) || \
       (defined(__GNUC__) && \
        ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
    !defined(LZ4_FORCE_SW_BITCOUNT)
            return (unsigned)__builtin_ctzll((U64)val) >> 3;
#else
            const U64 m = 0x0101010101010101ULL;
            val ^= val - 1;
            return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56);
#endif
        }
        else /* 32 bits */ {
#if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r;
            _BitScanForward(&r, (U32)val);
            return (unsigned)r >> 3;
#elif (defined(__clang__) || \
       (defined(__GNUC__) && \
        ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
    !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (unsigned)__builtin_ctz((U32)val) >> 3;
#else
            const U32 m = 0x01010101;
            return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
#endif
        }
    }
    else /* Big Endian CPU */ {
        if (sizeof(val) == 8) {
#if (defined(__clang__) || \
     (defined(__GNUC__) && \
      ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
    !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (unsigned)__builtin_clzll((U64)val) >> 3;
#else
#if 1
            /* this method is probably faster,
             * but adds a 128 bytes lookup table */
            static const unsigned char ctz7_tab[128] = {
                7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1,
                0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0,
                1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3,
                0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0,
                2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1,
                0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0,
                1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
            };
            U64 const mask = 0x0101010101010101ULL;
            U64 const t = (((val >> 8) - mask) | val) & mask;
            return ctz7_tab[(t * 0x0080402010080402ULL) >> 57];
#else
            /* this method doesn't consume memory space like the previous one,
             * but it contains several branches,
             * that may end up slowing execution */
            static const U32 by32 =
                sizeof(val) * 4; /* 32 on 64 bits (goal), 16 on 32 bits.
Just to avoid some static analyzer complaining about shift by 32 on 32-bits
target. Note that this code path is never triggered in 32-bits mode. */
            unsigned r;
            if (!(val >> by32)) {
                r = 4;
            }
            else {
                r = 0;
                val >>= by32;
            }
            if (!(val >> 16)) {
                r += 2;
                val >>= 8;
            }
            else {
                val >>= 24;
            }
            r += (!val);
            return r;
#endif
#endif
        }
        else /* 32 bits */ {
#if (defined(__clang__) || \
     (defined(__GNUC__) && \
      ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
    !defined(LZ4_FORCE_SW_BITCOUNT)
            return (unsigned)__builtin_clz((U32)val) >> 3;
#else
            val >>= 8;
            val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
                   (val + 0x00FF0000)) >>
                  24;
            return (unsigned)val ^ 3;
#endif
        }
    }
}
#define STEPSIZE sizeof(reg_t)
LZ4_FORCE_INLINE
unsigned LZ4_count(const BYTE *pIn, const BYTE *pMatch, const BYTE *pInLimit)
{
    const BYTE *const pStart = pIn;

    if (likely(pIn < pInLimit - (STEPSIZE - 1))) {
        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
        if (!diff) {
            pIn += STEPSIZE;
            pMatch += STEPSIZE;
        }
        else {
            return LZ4_NbCommonBytes(diff);
        }
    }

    while (likely(pIn < pInLimit - (STEPSIZE - 1))) {
        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
        if (!diff) {
            pIn += STEPSIZE;
            pMatch += STEPSIZE;
            continue;
        }
        pIn += LZ4_NbCommonBytes(diff);
        return (unsigned)(pIn - pStart);
    }

    if ((STEPSIZE == 8) && (pIn < (pInLimit - 3)) &&
        (LZ4_read32(pMatch) == LZ4_read32(pIn))) {
        pIn += 4;
        pMatch += 4;
    }
    if ((pIn < (pInLimit - 1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) {
        pIn += 2;
        pMatch += 2;
    }
    if ((pIn < pInLimit) && (*pMatch == *pIn))
        pIn++;
    return (unsigned)(pIn - pStart);
}
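/* Illustrative note (not part of lz4.c): LZ4_count() returns the length of
 * the common prefix of pIn and pMatch, scanning one register at a time;
 * LZ4_NbCommonBytes() converts the first non-zero XOR of two words into a
 * byte count. E.g., comparing "abcdefXY" with "abcdefZW" yields 6.
 */
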
#ifndef LZ4_COMMONDEFS_ONLY
/*-************************************
 * Local Constants
 **************************************/
static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT - 1));
static const U32 LZ4_skipTrigger = 6; /* Increase this value ==> compression run
                                         slower on incompressible data */

/*-************************************
 * Local Structures and types
 **************************************/
typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
/**
 * This enum distinguishes several different modes of accessing previous
 * content in the stream.
 *
 * - noDict : There is no preceding content.
 * - withPrefix64k : Table entries up to ctx->dictSize before the current
 *                   blob being compressed are valid and refer to the preceding
 *                   content (of length ctx->dictSize), which is available
 *                   contiguously preceding in memory the content currently
 *                   being compressed.
 * - usingExtDict : Like withPrefix64k, but the preceding content is somewhere
 *                  else in memory, starting at ctx->dictionary with length
 *                  ctx->dictSize.
 * - usingDictCtx : Everything concerning the preceding content is
 *                  in a separate context, pointed to by ctx->dictCtx.
 *                  ctx->dictionary, ctx->dictSize, and table entries
 *                  in the current context that refer to positions
 *                  preceding the beginning of the current compression are
 *                  ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx
 *                  ->dictSize describe the location and size of the preceding
 *                  content, and matches are found by looking in the ctx
 *                  ->dictCtx->hashTable.
 */
typedef enum {
    noDict = 0,
    withPrefix64k,
    usingExtDict,
    usingDictCtx
} dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
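
/* Illustrative sketch (not part of lz4.c): the usingExtDict mode is what the
 * public streaming API exercises when compressing against an external
 * dictionary:
 *
 *     LZ4_stream_t st;
 *     LZ4_initStream(&st, sizeof(st));
 *     LZ4_loadDict(&st, dictBuf, dictLen);  // preceding content, elsewhere
 *     LZ4_compress_fast_continue(&st, src, dst, srcLen, dstCap, 1);
 */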

/*-************************************
 * Local Utils
 **************************************/
int LZ4_versionNumber(void)
{
    return LZ4_VERSION_NUMBER;
}
const char *LZ4_versionString(void)
{
    return LZ4_VERSION_STRING;
}
int LZ4_compressBound(int isize)
{
    return LZ4_COMPRESSBOUND(isize);
}
int LZ4_sizeofState(void)
{
    return sizeof(LZ4_stream_t);
}

/*-****************************************
 * Internal Definitions, used only in Tests
 *******************************************/
#if defined(__cplusplus)
extern "C" {
#endif

int LZ4_compress_forceExtDict(LZ4_stream_t *LZ4_dict, const char *source,
                              char *dest, int srcSize);

int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
                                     int compressedSize, int maxOutputSize,
                                     const void *dictStart, size_t dictSize);
int LZ4_decompress_safe_partial_forceExtDict(
    const char *source, char *dest, int compressedSize, int targetOutputSize,
    int dstCapacity, const void *dictStart, size_t dictSize);
#if defined(__cplusplus)
}
#endif
/*-******************************
 * Compression functions
 ********************************/
LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
{
    if (tableType == byU16)
        return ((sequence * 2654435761U) >>
                ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
    else
        return ((sequence * 2654435761U) >> ((MINMATCH * 8) - LZ4_HASHLOG));
}

LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
{
    const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG + 1 : LZ4_HASHLOG;
    if (LZ4_isLittleEndian()) {
        const U64 prime5bytes = 889523592379ULL;
        return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
    }
    else {
        const U64 prime8bytes = 11400714785074694791ULL;
        return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
    }
}

LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void *const p,
                                      tableType_t const tableType)
{
    if ((sizeof(reg_t) == 8) && (tableType != byU16))
        return LZ4_hash5(LZ4_read_ARCH(p), tableType);

#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT
    return LZ4_hash4(LZ4_readLE32(p), tableType);
#else
    return LZ4_hash4(LZ4_read32(p), tableType);
#endif
}
LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void *tableBase,
                                    tableType_t const tableType)
{
    switch (tableType) {
    default: /* fallthrough */
    case clearedTable: { /* illegal! */
        assert(0);
        return;
    }
    case byPtr: {
        const BYTE **hashTable = (const BYTE **)tableBase;
        hashTable[h] = NULL;
        return;
    }
    case byU32: {
        U32 *hashTable = (U32 *)tableBase;
        hashTable[h] = 0;
        return;
    }
    case byU16: {
        U16 *hashTable = (U16 *)tableBase;
        hashTable[h] = 0;
        return;
    }
    }
}

LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void *tableBase,
                                         tableType_t const tableType)
{
    switch (tableType) {
    default: /* fallthrough */
    case clearedTable: /* fallthrough */
    case byPtr: { /* illegal! */
        assert(0);
        return;
    }
    case byU32: {
        U32 *hashTable = (U32 *)tableBase;
        hashTable[h] = idx;
        return;
    }
    case byU16: {
        U16 *hashTable = (U16 *)tableBase;
        assert(idx < 65536);
        hashTable[h] = (U16)idx;
        return;
    }
    }
}

/* LZ4_putPosition*() : only used in byPtr mode */
LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE *p, U32 h,
                                            void *tableBase,
                                            tableType_t const tableType)
{
    const BYTE **const hashTable = (const BYTE **)tableBase;
    assert(tableType == byPtr);
    (void)tableType;
    hashTable[h] = p;
}

LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE *p, void *tableBase,
                                      tableType_t tableType)
{
    U32 const h = LZ4_hashPosition(p, tableType);
    LZ4_putPositionOnHash(p, h, tableBase, tableType);
}
/* LZ4_getIndexOnHash() :
 * Index of match position registered in hash table.
 * hash position must be calculated by using base+index, or dictBase+index.
 * Assumption 1 : only valid if tableType == byU32 or byU16.
 * Assumption 2 : h is presumed valid (within limits of hash table)
 */
LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void *tableBase,
                                        tableType_t tableType)
{
    LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
    if (tableType == byU32) {
        const U32 *const hashTable = (const U32 *)tableBase;
        assert(h < (1U << (LZ4_MEMORY_USAGE - 2)));
        return hashTable[h];
    }
    if (tableType == byU16) {
        const U16 *const hashTable = (const U16 *)tableBase;
        assert(h < (1U << (LZ4_MEMORY_USAGE - 1)));
        return hashTable[h];
    }
    assert(0);
    return 0; /* forbidden case */
}

static const BYTE *LZ4_getPositionOnHash(U32 h, const void *tableBase,
                                         tableType_t tableType)
{
    assert(tableType == byPtr);
    (void)tableType;
    {
        const BYTE *const *hashTable = (const BYTE *const *)tableBase;
        return hashTable[h];
    }
}

LZ4_FORCE_INLINE const BYTE *
LZ4_getPosition(const BYTE *p, const void *tableBase, tableType_t tableType)
{
    U32 const h = LZ4_hashPosition(p, tableType);
    return LZ4_getPositionOnHash(h, tableBase, tableType);
}
LZ4_FORCE_INLINE void LZ4_prepareTable(LZ4_stream_t_internal *const cctx,
                                       const int inputSize,
                                       const tableType_t tableType)
{
    /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
     * therefore safe to use no matter what mode we're in. Otherwise, we figure
     * out if it's safe to leave as is or whether it needs to be reset.
     */
    if ((tableType_t)cctx->tableType != clearedTable) {
        assert(inputSize >= 0);
        if ((tableType_t)cctx->tableType != tableType ||
            ((tableType == byU16) &&
             cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU) ||
            ((tableType == byU32) && cctx->currentOffset > 1 GB) ||
            tableType == byPtr || inputSize >= 4 KB) {
            DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
            MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
            cctx->currentOffset = 0;
            cctx->tableType = (U32)clearedTable;
        }
        else {
            DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
        }
    }

    /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back,
     * is faster than compressing without a gap.
     * However, compressing with currentOffset == 0 is faster still,
     * so we preserve that case.
     */
    if (cctx->currentOffset != 0 && tableType == byU32) {
        DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
        cctx->currentOffset += 64 KB;
    }

    /* Finally, clear history */
    cctx->dictCtx = NULL;
    cctx->dictionary = NULL;
    cctx->dictSize = 0;
}
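/* Illustrative note (not part of lz4.c): the 64 KB bump of currentOffset in
 * LZ4_prepareTable() makes every stale hash-table entry appear at least
 * LZ4_DISTANCE_MAX bytes away, so the match search rejects it with its normal
 * distance check instead of requiring a full table wipe.
 */
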
/** LZ4_compress_generic_validated() :
 * inlined, to ensure branches are decided at compilation time.
 * The following conditions are presumed already validated:
 * - source != NULL
 * - inputSize > 0
 */
LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
    LZ4_stream_t_internal *const cctx, const char *const source,
    char *const dest, const int inputSize,
    int *inputConsumed, /* only written when outputDirective == fillOutput */
    const int maxOutputSize, const limitedOutput_directive outputDirective,
    const tableType_t tableType, const dict_directive dictDirective,
    const dictIssue_directive dictIssue, const int acceleration)
{
    int result;
    const BYTE *ip = (const BYTE *)source;

    U32 const startIndex = cctx->currentOffset;
    const BYTE *base = (const BYTE *)source - startIndex;
    const BYTE *lowLimit;

    const LZ4_stream_t_internal *dictCtx =
        (const LZ4_stream_t_internal *)cctx->dictCtx;
    const BYTE *const dictionary =
        dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
    const U32 dictSize =
        dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
    const U32 dictDelta = (dictDirective == usingDictCtx)
                              ? startIndex - dictCtx->currentOffset
                              : 0; /* make indexes in dictCtx comparable with
                                      indexes in current context */

    int const maybe_extMem =
        (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
    U32 const prefixIdxLimit =
        startIndex - dictSize; /* used when dictDirective == dictSmall */
    const BYTE *const dictEnd = dictionary ? dictionary + dictSize : dictionary;
    const BYTE *anchor = (const BYTE *)source;
    const BYTE *const iend = ip + inputSize;
    const BYTE *const mflimitPlusOne = iend - MFLIMIT + 1;
    const BYTE *const matchlimit = iend - LASTLITERALS;

    /* the dictCtx currentOffset is indexed on the start of the dictionary,
     * while a dictionary in the current context precedes the currentOffset */
    const BYTE *dictBase = (dictionary == NULL) ? NULL
                           : (dictDirective == usingDictCtx)
                               ? dictionary + dictSize - dictCtx->currentOffset
                               : dictionary + dictSize - startIndex;

    BYTE *op = (BYTE *)dest;
    BYTE *const olimit = op + maxOutputSize;

    U32 offset = 0;
    U32 forwardH;
    DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u",
             inputSize, tableType);
    assert(ip != NULL);
    if (tableType == byU16)
        assert(inputSize <
               LZ4_64Klimit); /* Size too large (not within 64K limit) */
    if (tableType == byPtr)
        assert(dictDirective ==
               noDict); /* only supported use case with byPtr */
    /* If init conditions are not met, we don't have to mark stream
     * as having dirty context, since no action was taken yet */
    if (outputDirective == fillOutput && maxOutputSize < 1) {
        return 0;
    } /* Impossible to store anything */
    assert(acceleration >= 1);

    lowLimit =
        (const BYTE *)source - (dictDirective == withPrefix64k ? dictSize : 0);

    /* Update context state */
    if (dictDirective == usingDictCtx) {
        /* Subsequent linked blocks can't use the dictionary. */
        /* Instead, they use the block we just compressed. */
        cctx->dictCtx = NULL;
        cctx->dictSize = (U32)inputSize;
    }
    else {
        cctx->dictSize += (U32)inputSize;
    }
    cctx->currentOffset += (U32)inputSize;
    cctx->tableType = (U32)tableType;

    if (inputSize < LZ4_minLength)
        goto _last_literals; /* Input too small, no compression (all literals)
                              */

    /* First Byte */
    {
        U32 const h = LZ4_hashPosition(ip, tableType);
        if (tableType == byPtr) {
            LZ4_putPositionOnHash(ip, h, cctx->hashTable, byPtr);
        }
        else {
            LZ4_putIndexOnHash(startIndex, h, cctx->hashTable, tableType);
        }
    }
    ip++;
    forwardH = LZ4_hashPosition(ip, tableType);
    /* Main Loop */
    for (;;) {
        const BYTE *match;
        BYTE *token;
        const BYTE *filledIp;

        /* Find a match */
        if (tableType == byPtr) {
            const BYTE *forwardIp = ip;
            int step = 1;
            int searchMatchNb = acceleration << LZ4_skipTrigger;
            do {
                U32 const h = forwardH;
                ip = forwardIp;
                forwardIp += step;
                step = (searchMatchNb++ >> LZ4_skipTrigger);

                if (unlikely(forwardIp > mflimitPlusOne))
                    goto _last_literals;
                assert(ip < mflimitPlusOne);

                match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType);
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType);

            } while ((match + LZ4_DISTANCE_MAX < ip) ||
                     (LZ4_read32(match) != LZ4_read32(ip)));
        }
        else { /* byU32, byU16 */

            const BYTE *forwardIp = ip;
            int step = 1;
            int searchMatchNb = acceleration << LZ4_skipTrigger;
            do {
                U32 const h = forwardH;
                U32 const current = (U32)(forwardIp - base);
                U32 matchIndex =
                    LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
                assert(matchIndex <= current);
                assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
                ip = forwardIp;
                forwardIp += step;
                step = (searchMatchNb++ >> LZ4_skipTrigger);

                if (unlikely(forwardIp > mflimitPlusOne))
                    goto _last_literals;
                assert(ip < mflimitPlusOne);

                if (dictDirective == usingDictCtx) {
                    if (matchIndex < startIndex) {
                        /* there was no match, try the dictionary */
                        assert(tableType == byU32);
                        matchIndex =
                            LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
                        match = dictBase + matchIndex;
                        matchIndex +=
                            dictDelta; /* make dictCtx index comparable with
                                          current context */
                        lowLimit = dictionary;
                    }
                    else {
                        match = base + matchIndex;
                        lowLimit = (const BYTE *)source;
                    }
                }
                else if (dictDirective == usingExtDict) {
                    if (matchIndex < startIndex) {
                        DEBUGLOG(7,
                                 "extDict candidate: matchIndex=%5u < "
                                 "startIndex=%5u",
                                 matchIndex, startIndex);
                        assert(startIndex - matchIndex >= MINMATCH);
                        assert(dictBase);
                        match = dictBase + matchIndex;
                        lowLimit = dictionary;
                    }
                    else {
                        match = base + matchIndex;
                        lowLimit = (const BYTE *)source;
                    }
                }
                else { /* single continuous memory segment */
                    match = base + matchIndex;
                }
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);

                DEBUGLOG(7, "candidate at pos=%u (offset=%u \n", matchIndex,
                         current - matchIndex);
                if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) {
                    continue;
                } /* match outside of valid area */
                assert(matchIndex < current);
                if (((tableType != byU16) ||
                     (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX)) &&
                    (matchIndex + LZ4_DISTANCE_MAX < current)) {
                    continue;
                } /* too far */
                assert(
                    (current - matchIndex) <=
                    LZ4_DISTANCE_MAX); /* match now expected within distance */

                if (LZ4_read32(match) == LZ4_read32(ip)) {
                    if (maybe_extMem)
                        offset = current - matchIndex;
                    break; /* match found */
                }

            } while (1);
        }
        /* Catch up */
        filledIp = ip;
        assert(ip > anchor); /* this is always true as ip has been advanced
                                before entering the main loop */
        if ((match > lowLimit) && unlikely(ip[-1] == match[-1])) {
            do {
                ip--;
                match--;
            } while (((ip > anchor) & (match > lowLimit)) &&
                     (unlikely(ip[-1] == match[-1])));
        }

        /* Encode Literals */
        {
            unsigned const litLength = (unsigned)(ip - anchor);
            token = op++;
            if ((outputDirective ==
                 limitedOutput) && /* Check output buffer overflow */
                (unlikely(op + litLength + (2 + 1 + LASTLITERALS) +
                              (litLength / 255) >
                          olimit))) {
                return 0; /* cannot compress within `dst` budget. Stored indexes
                             in hash table are nonetheless fine */
            }
            if ((outputDirective == fillOutput) &&
                (unlikely(op + (litLength + 240) / 255 /* litlen */ +
                              litLength /* literals */ + 2 /* offset */ +
                              1 /* token */ + MFLIMIT -
                              MINMATCH /* min last literals so last match is <=
                                          end - MFLIMIT */
                          > olimit))) {
                op--;
                goto _last_literals;
            }
            if (litLength >= RUN_MASK) {
                unsigned len = litLength - RUN_MASK;
                *token = (RUN_MASK << ML_BITS);
                for (; len >= 255; len -= 255)
                    *op++ = 255;
                *op++ = (BYTE)len;
            }
            else
                *token = (BYTE)(litLength << ML_BITS);

            /* Copy Literals */
            LZ4_wildCopy8(op, anchor, op + litLength);
            op += litLength;
            DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
                     (int)(anchor - (const BYTE *)source), litLength,
                     (int)(ip - (const BYTE *)source));
        }
1367
1369 /* at this stage, the following variables must be correctly set :
1370 * - ip : at start of LZ operation
1371 * - match : at start of previous pattern occurrence; can be within
1372 * current prefix, or within extDict
1373 * - offset : if maybe_ext_memSegment==1 (constant)
1374 * - lowLimit : must be == dictionary to mean "match is within extDict";
1375 * must be == source otherwise
1376 * - token and *token : position to write 4-bits for match length;
1377 * higher 4-bits for literal length supposed already written
1378 */
1379
1380 if ((outputDirective == fillOutput) &&
1381 (op + 2 /* offset */ + 1 /* token */ + MFLIMIT -
1382 MINMATCH /* min last literals so last match is <= end - MFLIMIT
1383 */
1384 > olimit)) {
1385 /* the match was too close to the end, rewind and go to last
1386 * literals */
1387 op = token;
1388 goto _last_literals;
1389 }
1390
1391 /* Encode Offset */
1392 if (maybe_extMem) { /* static test */
1393 DEBUGLOG(6, " with offset=%u (ext if > %i)", offset,
1394 (int)(ip - (const BYTE *)source));
1396 LZ4_writeLE16(op, (U16)offset);
1397 op += 2;
1398 }
1399 else {
1400 DEBUGLOG(6, " with offset=%u (same segment)",
1401 (U32)(ip - match));
1402 assert(ip - match <= LZ4_DISTANCE_MAX);
1403 LZ4_writeLE16(op, (U16)(ip - match));
1404 op += 2;
1405 }
1406
        /* Encode MatchLength */
        {
            unsigned matchCode;

            if ((dictDirective == usingExtDict ||
                 dictDirective == usingDictCtx) &&
                (lowLimit == dictionary) /* match within extDict */) {
                const BYTE *limit = ip + (dictEnd - match);
                assert(dictEnd > match);
                if (limit > matchlimit)
                    limit = matchlimit;
                matchCode = LZ4_count(ip + MINMATCH, match + MINMATCH, limit);
                ip += (size_t)matchCode + MINMATCH;
                if (ip == limit) {
                    unsigned const more =
                        LZ4_count(limit, (const BYTE *)source, matchlimit);
                    matchCode += more;
                    ip += more;
                }
                DEBUGLOG(6,
                         " with matchLength=%u starting in extDict",
                         matchCode + MINMATCH);
            }
            else {
                matchCode =
                    LZ4_count(ip + MINMATCH, match + MINMATCH, matchlimit);
                ip += (size_t)matchCode + MINMATCH;
                DEBUGLOG(6, " with matchLength=%u",
                         matchCode + MINMATCH);
            }

            if ((outputDirective) && /* Check output buffer overflow */
                (unlikely(op + (1 + LASTLITERALS) + (matchCode + 240) / 255 >
                          olimit))) {
                if (outputDirective == fillOutput) {
                    /* Match description too long : reduce it */
                    U32 newMatchCode =
                        15 /* in token */ -
                        1 /* to avoid needing a zero byte */ +
                        ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;
                    ip -= matchCode - newMatchCode;
                    assert(newMatchCode < matchCode);
                    matchCode = newMatchCode;
                    if (unlikely(ip <= filledIp)) {
                        /* We have already filled up to filledIp so if ip ends
                         * up less than filledIp we have positions in the hash
                         * table beyond the current position. This is a problem
                         * if we reuse the hash table. So we have to remove
                         * these positions from the hash table.
                         */
                        const BYTE *ptr;
                        DEBUGLOG(5, "Clearing %u positions",
                                 (U32)(filledIp - ip));
                        for (ptr = ip; ptr <= filledIp; ++ptr) {
                            U32 const h = LZ4_hashPosition(ptr, tableType);
                            LZ4_clearHash(h, cctx->hashTable, tableType);
                        }
                    }
                }
                else {
                    assert(outputDirective == limitedOutput);
                    return 0; /* cannot compress within `dst` budget. Stored
                                 indexes in hash table are nonetheless fine */
                }
            }
            if (matchCode >= ML_MASK) {
                *token += ML_MASK;
                matchCode -= ML_MASK;
                LZ4_write32(op, 0xFFFFFFFF);
                while (matchCode >= 4 * 255) {
                    op += 4;
                    LZ4_write32(op, 0xFFFFFFFF);
                    matchCode -= 4 * 255;
                }
                op += matchCode / 255;
                *op++ = (BYTE)(matchCode % 255);
            }
            else
                *token += (BYTE)(matchCode);
        }
        /* Ensure we have enough space for the last literals. */
        assert(!(outputDirective == fillOutput &&
                 op + 1 + LASTLITERALS > olimit));
        anchor = ip;

        /* Test end of chunk */
        if (ip >= mflimitPlusOne)
            break;

        /* Fill table */
        {
            U32 const h = LZ4_hashPosition(ip - 2, tableType);
            if (tableType == byPtr) {
                LZ4_putPositionOnHash(ip - 2, h, cctx->hashTable, byPtr);
            }
            else {
                U32 const idx = (U32)((ip - 2) - base);
                LZ4_putIndexOnHash(idx, h, cctx->hashTable, tableType);
            }
        }

        /* Test next position */
        if (tableType == byPtr) {

            match = LZ4_getPosition(ip, cctx->hashTable, tableType);
            LZ4_putPosition(ip, cctx->hashTable, tableType);
            if ((match + LZ4_DISTANCE_MAX >= ip) &&
                (LZ4_read32(match) == LZ4_read32(ip))) {
                token = op++;
                *token = 0;
                goto _next_match;
            }
        }
        else { /* byU32, byU16 */

            U32 const h = LZ4_hashPosition(ip, tableType);
            U32 const current = (U32)(ip - base);
            U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
            assert(matchIndex < current);
            if (dictDirective == usingDictCtx) {
                if (matchIndex < startIndex) {
                    /* there was no match, try the dictionary */
                    assert(tableType == byU32);
                    matchIndex =
                        LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
                    match = dictBase + matchIndex;
                    lowLimit =
                        dictionary; /* required for match length counter */
                    matchIndex += dictDelta;
                }
                else {
                    match = base + matchIndex;
                    lowLimit = (const BYTE *)
                        source; /* required for match length counter */
                }
            }
            else if (dictDirective == usingExtDict) {
                if (matchIndex < startIndex) {
                    assert(dictBase);
                    match = dictBase + matchIndex;
                    lowLimit =
                        dictionary; /* required for match length counter */
                }
                else {
                    match = base + matchIndex;
                    lowLimit = (const BYTE *)
                        source; /* required for match length counter */
                }
            }
            else { /* single memory segment */
                match = base + matchIndex;
            }
            LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
            assert(matchIndex < current);
            if (((dictIssue == dictSmall) ? (matchIndex >= prefixIdxLimit)
                                          : 1) &&
                (((tableType == byU16) &&
                  (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX))
                     ? 1
                     : (matchIndex + LZ4_DISTANCE_MAX >= current)) &&
                (LZ4_read32(match) == LZ4_read32(ip))) {
                token = op++;
                *token = 0;
                if (maybe_extMem)
                    offset = current - matchIndex;
                DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
                         (int)(anchor - (const BYTE *)source), 0,
                         (int)(ip - (const BYTE *)source));
                goto _next_match;
            }
        }
        /* Prepare next loop */
        forwardH = LZ4_hashPosition(++ip, tableType);
    }

_last_literals:
    /* Encode Last Literals */
    {
        size_t lastRun = (size_t)(iend - anchor);
        if ((outputDirective) && /* Check output buffer overflow */
            (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) > olimit)) {
            if (outputDirective == fillOutput) {
                /* adapt lastRun to fill 'dst' */
                assert(olimit >= op);
                lastRun = (size_t)(olimit - op) - 1 /*token*/;
                lastRun -= (lastRun + 256 - RUN_MASK) /
                           256; /*additional length tokens*/
            }
            else {
                assert(outputDirective == limitedOutput);
                return 0; /* cannot compress within `dst` budget. Stored indexes
                             in hash table are nonetheless fine */
            }
        }
        DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun);
        if (lastRun >= RUN_MASK) {
            size_t accumulator = lastRun - RUN_MASK;
            *op++ = RUN_MASK << ML_BITS;
            for (; accumulator >= 255; accumulator -= 255)
                *op++ = 255;
            *op++ = (BYTE)accumulator;
        }
        else {
            *op++ = (BYTE)(lastRun << ML_BITS);
        }
        LZ4_memcpy(op, anchor, lastRun);
        ip = anchor + lastRun;
        op += lastRun;
    }

    if (outputDirective == fillOutput) {
        *inputConsumed = (int)(((const char *)ip) - source);
    }
    result = (int)(((char *)op) - dest);
    assert(result > 0);
    DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes",
             inputSize, result);
    return result;
}

/** LZ4_compress_generic() :
 * inlined, to ensure branches are decided at compilation time;
 * takes care of src == (NULL, 0)
 * and forwards the rest to LZ4_compress_generic_validated */
LZ4_FORCE_INLINE int LZ4_compress_generic(
    LZ4_stream_t_internal *const cctx, const char *const src, char *const dst,
    const int srcSize,
    int *inputConsumed, /* only written when outputDirective == fillOutput */
    const int dstCapacity, const limitedOutput_directive outputDirective,
    const tableType_t tableType, const dict_directive dictDirective,
    const dictIssue_directive dictIssue, const int acceleration)
{
    DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, dstCapacity=%i", srcSize,
             dstCapacity);

    if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) {
        return 0;
    } /* Unsupported srcSize, too large (or negative) */
    if (srcSize == 0) { /* src == NULL supported if srcSize == 0 */
        if (outputDirective != notLimited && dstCapacity <= 0)
            return 0; /* no output, can't write anything */
        DEBUGLOG(5, "Generating an empty block");
        assert(outputDirective == notLimited || dstCapacity >= 1);
        assert(dst != NULL);
        dst[0] = 0;
        if (outputDirective == fillOutput) {
            assert(inputConsumed != NULL);
            *inputConsumed = 0;
        }
        return 1;
    }
    assert(src != NULL);

    return LZ4_compress_generic_validated(
        cctx, src, dst, srcSize,
        inputConsumed, /* only written into if outputDirective == fillOutput */
        dstCapacity, outputDirective, tableType, dictDirective, dictIssue,
        acceleration);
}
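
/* Illustrative note (not part of lz4.c): the empty-block path above also
 * documents the smallest legal compressed block: a zero-byte input produces
 * the single token byte 0x00 (literal length 0 and no match), hence
 * `return 1`.
 */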

int LZ4_compress_fast_extState(void *state, const char *source, char *dest,
                               int inputSize, int maxOutputSize,
                               int acceleration)
{
    LZ4_stream_t_internal *const ctx =
        &LZ4_initStream(state, sizeof(LZ4_stream_t))->internal_donotuse;
    assert(ctx != NULL);
    if (acceleration < 1)
        acceleration = LZ4_ACCELERATION_DEFAULT;
    if (acceleration > LZ4_ACCELERATION_MAX)
        acceleration = LZ4_ACCELERATION_MAX;
    if (maxOutputSize >= LZ4_compressBound(inputSize)) {
        if (inputSize < LZ4_64Klimit) {
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0,
                                        notLimited, byU16, noDict, noDictIssue,
                                        acceleration);
        }
        else {
            const tableType_t tableType =
                ((sizeof(void *) == 4) && ((uptrval)source > LZ4_DISTANCE_MAX))
                    ? byPtr
                    : byU32;
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0,
                                        notLimited, tableType, noDict,
                                        noDictIssue, acceleration);
        }
    }
    else {
        if (inputSize < LZ4_64Klimit) {
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL,
                                        maxOutputSize, limitedOutput, byU16,
                                        noDict, noDictIssue, acceleration);
        }
        else {
            const tableType_t tableType =
                ((sizeof(void *) == 4) && ((uptrval)source > LZ4_DISTANCE_MAX))
                    ? byPtr
                    : byU32;
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL,
                                        maxOutputSize, limitedOutput, tableType,
                                        noDict, noDictIssue, acceleration);
        }
    }
}
/**
 * LZ4_compress_fast_extState_fastReset() :
 * A variant of LZ4_compress_fast_extState().
 *
 * Using this variant avoids an expensive initialization step. It is only safe
 * to call if the state buffer is known to be correctly initialized already
 * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
 * "correctly initialized").
 */
int LZ4_compress_fast_extState_fastReset(void *state, const char *src,
                                         char *dst, int srcSize,
                                         int dstCapacity, int acceleration)
{
    LZ4_stream_t_internal *const ctx =
        &((LZ4_stream_t *)state)->internal_donotuse;
    if (acceleration < 1)
        acceleration = LZ4_ACCELERATION_DEFAULT;
    if (acceleration > LZ4_ACCELERATION_MAX)
        acceleration = LZ4_ACCELERATION_MAX;
    assert(ctx != NULL);

    if (dstCapacity >= LZ4_compressBound(srcSize)) {
        if (srcSize < LZ4_64Klimit) {
            const tableType_t tableType = byU16;
            LZ4_prepareTable(ctx, srcSize, tableType);
            if (ctx->currentOffset) {
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0,
                                            notLimited, tableType, noDict,
                                            dictSmall, acceleration);
            }
            else {
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0,
                                            notLimited, tableType, noDict,
                                            noDictIssue, acceleration);
            }
        }
        else {
            const tableType_t tableType =
                ((sizeof(void *) == 4) && ((uptrval)src > LZ4_DISTANCE_MAX))
                    ? byPtr
                    : byU32;
            LZ4_prepareTable(ctx, srcSize, tableType);
            return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0,
                                        notLimited, tableType, noDict,
                                        noDictIssue, acceleration);
        }
    }
    else {
        if (srcSize < LZ4_64Klimit) {
            const tableType_t tableType = byU16;
            LZ4_prepareTable(ctx, srcSize, tableType);
            if (ctx->currentOffset) {
                return LZ4_compress_generic(
                    ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput,
                    tableType, noDict, dictSmall, acceleration);
            }
            else {
                return LZ4_compress_generic(
                    ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput,
                    tableType, noDict, noDictIssue, acceleration);
            }
        }
        else {
            const tableType_t tableType =
                ((sizeof(void *) == 4) && ((uptrval)src > LZ4_DISTANCE_MAX))
                    ? byPtr
                    : byU32;
            LZ4_prepareTable(ctx, srcSize, tableType);
            return LZ4_compress_generic(ctx, src, dst, srcSize, NULL,
                                        dstCapacity, limitedOutput, tableType,
                                        noDict, noDictIssue, acceleration);
        }
    }
}
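
/* Illustrative sketch (not part of lz4.c): reusing one correctly initialized
 * state across many small inputs avoids re-running the full MEM_INIT on each
 * call:
 *
 *     LZ4_stream_t st;
 *     LZ4_initStream(&st, sizeof(st)); // full initialization, once
 *     for (i = 0; i < nblocks; i++)    // i, nblocks, in[], out[]: hypothetical
 *         LZ4_compress_fast_extState_fastReset(&st, in[i], out[i], n, cap, 1);
 */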

int LZ4_compress_fast(const char *src, char *dest, int srcSize, int dstCapacity,
                      int acceleration)
{
    int result;
#if (LZ4_HEAPMODE)
    LZ4_stream_t *const ctxPtr = (LZ4_stream_t *)ALLOC(
        sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
    if (ctxPtr == NULL)
        return 0;
#else
    LZ4_stream_t ctx;
    LZ4_stream_t *const ctxPtr = &ctx;
#endif
    result = LZ4_compress_fast_extState(ctxPtr, src, dest, srcSize, dstCapacity,
                                        acceleration);

#if (LZ4_HEAPMODE)
    FREEMEM(ctxPtr);
#endif
    return result;
}

int LZ4_compress_default(const char *src, char *dst, int srcSize,
                         int dstCapacity)
{
    return LZ4_compress_fast(src, dst, srcSize, dstCapacity, 1);
}
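
/* Illustrative sketch (not part of lz4.c): the canonical one-shot round trip
 * through the public API (LZ4_decompress_safe() lives in the decompression
 * half of this file; SRC_LEN is a hypothetical input size):
 *
 *     char comp[LZ4_COMPRESSBOUND(SRC_LEN)];
 *     int const csize = LZ4_compress_default(src, comp, SRC_LEN,
 *                                            (int)sizeof(comp));
 *     int const dsize = LZ4_decompress_safe(comp, back, csize, SRC_LEN);
 *     // success: csize > 0 and dsize == SRC_LEN
 */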

/* Note!: This function leaves the stream in an unclean/broken state!
 * It is not safe to subsequently use the same state with a _fastReset() or
 * _continue() call without resetting it. */
static int LZ4_compress_destSize_extState_internal(LZ4_stream_t *state,
                                                   const char *src, char *dst,
                                                   int *srcSizePtr,
                                                   int targetDstSize,
                                                   int acceleration)
{
    void *const s = LZ4_initStream(state, sizeof(*state));
    assert(s != NULL);
    (void)s;

    if (targetDstSize >=
        LZ4_compressBound(
            *srcSizePtr)) { /* compression success is guaranteed */
        return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr,
                                          targetDstSize, acceleration);
    }
    else {
        if (*srcSizePtr < LZ4_64Klimit) {
            return LZ4_compress_generic(&state->internal_donotuse, src, dst,
                                        *srcSizePtr, srcSizePtr, targetDstSize,
                                        fillOutput, byU16, noDict, noDictIssue,
                                        acceleration);
        }
        else {
            tableType_t const addrMode =
                ((sizeof(void *) == 4) && ((uptrval)src > LZ4_DISTANCE_MAX))
                    ? byPtr
                    : byU32;
            return LZ4_compress_generic(&state->internal_donotuse, src, dst,
                                        *srcSizePtr, srcSizePtr, targetDstSize,
                                        fillOutput, addrMode, noDict,
                                        noDictIssue, acceleration);
        }
    }
}
1856int LZ4_compress_destSize_extState(void *state, const char *src, char *dst,
1857 int *srcSizePtr, int targetDstSize,
1858 int acceleration)
1859{
1860 int const r = LZ4_compress_destSize_extState_internal(
1861 (LZ4_stream_t *)state, src, dst, srcSizePtr, targetDstSize,
1862 acceleration);
1863 /* clean the state on exit */
1864 LZ4_initStream(state, sizeof(LZ4_stream_t));
1865 return r;
1866}
1867
1868int LZ4_compress_destSize(const char *src, char *dst, int *srcSizePtr,
1869 int targetDstSize)
1870{
1871#if (LZ4_HEAPMODE)
1872 LZ4_stream_t *const ctx = (LZ4_stream_t *)ALLOC(
1873 sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
1874 if (ctx == NULL)
1875 return 0;
1876#else
1877 LZ4_stream_t ctxBody;
1878 LZ4_stream_t *const ctx = &ctxBody;
1879#endif
1880
1881 int result = LZ4_compress_destSize_extState_internal(
1882 ctx, src, dst, srcSizePtr, targetDstSize, 1);
1883
1884#if (LZ4_HEAPMODE)
1885 FREEMEM(ctx);
1886#endif
1887 return result;
1888}
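Editor's note (illustration, not part of lz4.c): LZ4_compress_destSize() inverts the usual contract, filling a fixed-size destination with as much source as fits. A hedged sketch with an arbitrary 512-byte slot; the hypothetical fill_slot() name is ours.

#include <lz4.h>

/* On return, `consumed` holds how many source bytes the slot represents,
 * so a caller can resume compressing from src + consumed. */
int fill_slot(const char *src, int srcLen, char *slot /* 512 bytes */)
{
    int consumed = srcLen; /* in: bytes available, out: bytes compressed */
    int const written = LZ4_compress_destSize(src, slot, &consumed, 512);
    return written;        /* compressed bytes placed in slot, or 0 on error */
}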
1889
1890/*-******************************
1891 * Streaming functions
1892 ********************************/
1893
1894#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
1895LZ4_stream_t *LZ4_createStream(void)
1896{
1897 LZ4_stream_t *const lz4s = (LZ4_stream_t *)ALLOC(sizeof(LZ4_stream_t));
1898 LZ4_STATIC_ASSERT(sizeof(LZ4_stream_t) >= sizeof(LZ4_stream_t_internal));
1899 DEBUGLOG(4, "LZ4_createStream %p", lz4s);
1900 if (lz4s == NULL)
1901 return NULL;
1902 LZ4_initStream(lz4s, sizeof(*lz4s));
1903 return lz4s;
1904}
1905#endif
1906
1907static size_t LZ4_stream_t_alignment(void)
1908{
1909#if LZ4_ALIGN_TEST
1910 typedef struct {
1911 char c;
1912 LZ4_stream_t t;
1913 } t_a;
1914 return sizeof(t_a) - sizeof(LZ4_stream_t);
1915#else
1916 return 1; /* effectively disabled */
1917#endif
1918}
1919
1920LZ4_stream_t *LZ4_initStream(void *buffer, size_t size)
1921{
1922 DEBUGLOG(5, "LZ4_initStream");
1923 if (buffer == NULL) {
1924 return NULL;
1925 }
1926 if (size < sizeof(LZ4_stream_t)) {
1927 return NULL;
1928 }
1929 if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment()))
1930 return NULL;
1931 MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));
1932 return (LZ4_stream_t *)buffer;
1933}
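Editor's note (illustration, not part of lz4.c): a minimal sketch of the extState pattern that LZ4_initStream() enables, reusing one caller-owned scratch buffer of LZ4_sizeofState() bytes across many independent blocks; the helper name is hypothetical.

#include <stdlib.h>
#include <lz4.h>

int compress_with_own_state(const char *src, int srcSize,
                            char *dst, int dstCap)
{
    /* malloc'd memory satisfies the alignment LZ4_initStream() checks */
    void *const state = malloc((size_t)LZ4_sizeofState());
    int cSize;
    if (state == NULL)
        return 0;
    cSize = LZ4_compress_fast_extState(state, src, dst, srcSize, dstCap, 1);
    free(state); /* in real use, keep and reuse the state instead */
    return cSize;
}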
1935/* resetStream is now deprecated,
1936 * prefer initStream() which is more general */
1937void LZ4_resetStream(LZ4_stream_t *LZ4_stream)
1938{
1939 DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
1940 MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));
1941}
1942
1943void LZ4_resetStream_fast(LZ4_stream_t *ctx)
1944{
1945 LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
1946}
1947
1948#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
1949int LZ4_freeStream(LZ4_stream_t *LZ4_stream)
1950{
1951 if (!LZ4_stream)
1952 return 0; /* support free on NULL */
1953 DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
1954 FREEMEM(LZ4_stream);
1955 return (0);
1956}
1957#endif
1958
1959typedef enum { _ld_fast, _ld_slow } LoadDict_mode_e;
1960#define HASH_UNIT sizeof(reg_t)
1961int LZ4_loadDict_internal(LZ4_stream_t *LZ4_dict, const char *dictionary,
1962 int dictSize, LoadDict_mode_e _ld)
1963{
1964 LZ4_stream_t_internal *const dict = &LZ4_dict->internal_donotuse;
1965 const tableType_t tableType = byU32;
1966 const BYTE *p = (const BYTE *)dictionary;
1967 const BYTE *const dictEnd = p + dictSize;
1968 U32 idx32;
1969
1970 DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary,
1971 LZ4_dict);
1972
1973 /* It's necessary to reset the context,
1974 * and not just continue it with prepareTable()
1975 * to avoid any risk of generating overflowing matchIndex
1976 * when compressing using this dictionary */
1977 LZ4_resetStream(LZ4_dict);
1978
1979 /* We always increment the offset by 64 KB, since, if the dict is longer,
1980 * we truncate it to the last 64k, and if it's shorter, we still want to
1981 * advance by a whole window length so we can provide the guarantee that
1982 * there are only valid offsets in the window, which allows an optimization
1983 * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
1984 * dictionary isn't a full 64k. */
1985 dict->currentOffset += 64 KB;
1986
1987 if (dictSize < (int)HASH_UNIT) {
1988 return 0;
1989 }
1990
1991 if ((dictEnd - p) > 64 KB)
1992 p = dictEnd - 64 KB;
1993 dict->dictionary = p;
1994 dict->dictSize = (U32)(dictEnd - p);
1995 dict->tableType = (U32)tableType;
1996 idx32 = dict->currentOffset - dict->dictSize;
1997
1998 while (p <= dictEnd - HASH_UNIT) {
1999 U32 const h = LZ4_hashPosition(p, tableType);
2000 /* Note: overwriting => favors positions end of dictionary */
2001 LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);
2002 p += 3;
2003 idx32 += 3;
2004 }
2005
2006 if (_ld == _ld_slow) {
2007 /* Fill hash table with additional references, to improve compression
2008 * capability */
2009 p = dict->dictionary;
2010 idx32 = dict->currentOffset - dict->dictSize;
2011 while (p <= dictEnd - HASH_UNIT) {
2012 U32 const h = LZ4_hashPosition(p, tableType);
2013 U32 const limit = dict->currentOffset - 64 KB;
2014 if (LZ4_getIndexOnHash(h, dict->hashTable, tableType) <= limit) {
2015 /* Note: not overwriting => favors positions beginning of
2016 * dictionary */
2017 LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);
2018 }
2019 p++;
2020 idx32++;
2021 }
2022 }
2023
2024 return (int)dict->dictSize;
2025}
2026
2027int LZ4_loadDict(LZ4_stream_t *LZ4_dict, const char *dictionary, int dictSize)
2028{
2029 return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_fast);
2030}
2031
2032int LZ4_loadDictSlow(LZ4_stream_t *LZ4_dict, const char *dictionary,
2033 int dictSize)
2034{
2035 return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_slow);
2036}
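Editor's note (illustration, not part of lz4.c): a hedged sketch of dictionary compression built from the two loaders above; the wrapper name is ours. Note that LZ4_stream_t is a sizeable object (LZ4_MEMORY_USAGE controlled), so stack allocation may not suit every caller.

#include <lz4.h>

int compress_with_dict(const char *dict, int dictSize,
                       const char *src, int srcSize, char *dst, int dstCap)
{
    LZ4_stream_t stream;
    if (LZ4_initStream(&stream, sizeof(stream)) == NULL)
        return 0;
    /* LZ4_loadDictSlow() trades load time for a denser hash table */
    LZ4_loadDict(&stream, dict, dictSize);
    return LZ4_compress_fast_continue(&stream, src, dst, srcSize, dstCap, 1);
}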
2037
2038void LZ4_attach_dictionary(LZ4_stream_t *workingStream,
2039 const LZ4_stream_t *dictionaryStream)
2040{
2041 const LZ4_stream_t_internal *dictCtx =
2042 dictionaryStream == NULL ? NULL
2043 : &(dictionaryStream->internal_donotuse);
2044
2045 DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)", workingStream,
2046 dictionaryStream, dictCtx != NULL ? dictCtx->dictSize : 0);
2047
2048 if (dictCtx != NULL) {
2049 /* If the current offset is zero, we will never look in the
2050 * external dictionary context, since there is no value a table
2051 * entry can take that indicate a miss. In that case, we need
2052 * to bump the offset to something non-zero.
2053 */
2054 if (workingStream->internal_donotuse.currentOffset == 0) {
2055 workingStream->internal_donotuse.currentOffset = 64 KB;
2056 }
2057
2058 /* Don't actually attach an empty dictionary.
2059 */
2060 if (dictCtx->dictSize == 0) {
2061 dictCtx = NULL;
2062 }
2063 }
2064 workingStream->internal_donotuse.dictCtx = dictCtx;
2065}
2066
2067static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict, int nextSize)
2068{
2069 assert(nextSize >= 0);
2070 if (LZ4_dict->currentOffset + (unsigned)nextSize >
2071 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */
2072 /* rescale hash table */
2073 U32 const delta = LZ4_dict->currentOffset - 64 KB;
2074 const BYTE *dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
2075 int i;
2076 DEBUGLOG(4, "LZ4_renormDictT");
2077 for (i = 0; i < LZ4_HASH_SIZE_U32; i++) {
2078 if (LZ4_dict->hashTable[i] < delta)
2079 LZ4_dict->hashTable[i] = 0;
2080 else
2081 LZ4_dict->hashTable[i] -= delta;
2082 }
2083 LZ4_dict->currentOffset = 64 KB;
2084 if (LZ4_dict->dictSize > 64 KB)
2085 LZ4_dict->dictSize = 64 KB;
2086 LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
2087 }
2088}
2089
2090int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source,
2091 char *dest, int inputSize, int maxOutputSize,
2092 int acceleration)
2093{
2094 const tableType_t tableType = byU32;
2095 LZ4_stream_t_internal *const streamPtr = &LZ4_stream->internal_donotuse;
2096 const char *dictEnd =
2097 streamPtr->dictSize
2098 ? (const char *)streamPtr->dictionary + streamPtr->dictSize
2099 : NULL;
2100
2101 DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i, dictSize=%u)",
2102 inputSize, streamPtr->dictSize);
2103
2104 LZ4_renormDictT(streamPtr, inputSize); /* fix index overflow */
2105 if (acceleration < 1)
2106 acceleration = LZ4_ACCELERATION_DEFAULT;
2107 if (acceleration > LZ4_ACCELERATION_MAX)
2108 acceleration = LZ4_ACCELERATION_MAX;
2109
2110 /* invalidate tiny dictionaries */
2111 if ((streamPtr->dictSize < 4) /* tiny dictionary : not enough for a hash */
2112 && (dictEnd != source) /* prefix mode */
2113 && (inputSize > 0) /* tolerance : don't lose history, in case next
2114 invocation would use prefix mode */
2115 && (streamPtr->dictCtx == NULL) /* usingDictCtx */
2116 ) {
2117 DEBUGLOG(
2118 5,
2119 "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small",
2120 streamPtr->dictSize, streamPtr->dictionary);
2121 /* remove dictionary existence from history, to employ faster prefix
2122 * mode */
2123 streamPtr->dictSize = 0;
2124 streamPtr->dictionary = (const BYTE *)source;
2125 dictEnd = source;
2126 }
2127
2128 /* Check overlapping input/dictionary space */
2129 {
2130 const char *const sourceEnd = source + inputSize;
2131 if ((sourceEnd > (const char *)streamPtr->dictionary) &&
2132 (sourceEnd < dictEnd)) {
2133 streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
2134 if (streamPtr->dictSize > 64 KB)
2135 streamPtr->dictSize = 64 KB;
2136 if (streamPtr->dictSize < 4)
2137 streamPtr->dictSize = 0;
2138 streamPtr->dictionary = (const BYTE *)dictEnd - streamPtr->dictSize;
2139 }
2140 }
2141
2142 /* prefix mode : source data follows dictionary */
2143 if (dictEnd == source) {
2144 if ((streamPtr->dictSize < 64 KB) &&
2145 (streamPtr->dictSize < streamPtr->currentOffset))
2146 return LZ4_compress_generic(streamPtr, source, dest, inputSize,
2147 NULL, maxOutputSize, limitedOutput,
2148 tableType, withPrefix64k, dictSmall,
2149 acceleration);
2150 else
2151 return LZ4_compress_generic(streamPtr, source, dest, inputSize,
2152 NULL, maxOutputSize, limitedOutput,
2153 tableType, withPrefix64k, noDictIssue,
2154 acceleration);
2155 }
2156
2157 /* external dictionary mode */
2158 {
2159 int result;
2160 if (streamPtr->dictCtx) {
2161 /* We depend here on the fact that dictCtx'es (produced by
2162 * LZ4_loadDict) guarantee that their tables contain no references
2163 * to offsets between dictCtx->currentOffset - 64 KB and
2164 * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
2165 * to use noDictIssue even when the dict isn't a full 64 KB.
2166 */
2167 if (inputSize > 4 KB) {
2168 /* For compressing large blobs, it is faster to pay the setup
2169 * cost to copy the dictionary's tables into the active context,
2170 * so that the compression loop is only looking into one table.
2171 */
2172 LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr));
2173 result = LZ4_compress_generic(
2174 streamPtr, source, dest, inputSize, NULL, maxOutputSize,
2175 limitedOutput, tableType, usingExtDict, noDictIssue,
2176 acceleration);
2177 }
2178 else {
2179 result = LZ4_compress_generic(
2180 streamPtr, source, dest, inputSize, NULL, maxOutputSize,
2181 limitedOutput, tableType, usingDictCtx, noDictIssue,
2182 acceleration);
2183 }
2184 }
2185 else { /* small data <= 4 KB */
2186 if ((streamPtr->dictSize < 64 KB) &&
2187 (streamPtr->dictSize < streamPtr->currentOffset)) {
2188 result = LZ4_compress_generic(
2189 streamPtr, source, dest, inputSize, NULL, maxOutputSize,
2190 limitedOutput, tableType, usingExtDict, dictSmall,
2191 acceleration);
2192 }
2193 else {
2194 result = LZ4_compress_generic(
2195 streamPtr, source, dest, inputSize, NULL, maxOutputSize,
2196 limitedOutput, tableType, usingExtDict, noDictIssue,
2197 acceleration);
2198 }
2199 }
2200 streamPtr->dictionary = (const BYTE *)source;
2201 streamPtr->dictSize = (U32)inputSize;
2202 return result;
2203 }
2204}
2205
2206/* Hidden debug function, to force-test external dictionary mode */
2207int LZ4_compress_forceExtDict(LZ4_stream_t *LZ4_dict, const char *source,
2208 char *dest, int srcSize)
2209{
2210 LZ4_stream_t_internal *const streamPtr = &LZ4_dict->internal_donotuse;
2211 int result;
2212
2213 LZ4_renormDictT(streamPtr, srcSize);
2214
2215 if ((streamPtr->dictSize < 64 KB) &&
2216 (streamPtr->dictSize < streamPtr->currentOffset)) {
2217 result =
2218 LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0,
2219 notLimited, byU32, usingExtDict, dictSmall, 1);
2220 }
2221 else {
2222 result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0,
2223 notLimited, byU32, usingExtDict,
2224 noDictIssue, 1);
2225 }
2226
2227 streamPtr->dictionary = (const BYTE *)source;
2228 streamPtr->dictSize = (U32)srcSize;
2229
2230 return result;
2231}
2232
2233/*! LZ4_saveDict() :
2234 * If previously compressed data block is not guaranteed to remain available at
2235 * its memory location, save it into a safer place (char* safeBuffer). Note : no
2236 * need to call LZ4_loadDict() afterwards, dictionary is immediately usable, one
2237 * can therefore call LZ4_compress_fast_continue() right after.
2238 * @return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if
2239 * error.
2240 */
2241int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize)
2242{
2243 LZ4_stream_t_internal *const dict = &LZ4_dict->internal_donotuse;
2244
2245 DEBUGLOG(5, "LZ4_saveDict : dictSize=%i, safeBuffer=%p", dictSize,
2246 safeBuffer);
2247
2248 if ((U32)dictSize > 64 KB) {
2249 dictSize = 64 KB;
2250 } /* useless to define a dictionary > 64 KB */
2251 if ((U32)dictSize > dict->dictSize) {
2252 dictSize = (int)dict->dictSize;
2253 }
2254
2255 if (safeBuffer == NULL)
2256 assert(dictSize == 0);
2257 if (dictSize > 0) {
2258 const BYTE *const previousDictEnd = dict->dictionary + dict->dictSize;
2259 assert(dict->dictionary);
2260 LZ4_memmove(safeBuffer, previousDictEnd - dictSize, (size_t)dictSize);
2261 }
2262
2263 dict->dictionary = (const BYTE *)safeBuffer;
2264 dict->dictSize = (U32)dictSize;
2265
2266 return dictSize;
2267}
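Editor's note (illustration, not part of lz4.c): a hedged sketch of one step of a streaming compressor whose input buffer is overwritten between blocks, the scenario LZ4_saveDict() exists for; the wrapper name is ours.

#include <lz4.h>

int compress_block_and_save(LZ4_stream_t *stream, char dictBuf[64 * 1024],
                            const char *block, int blockSize,
                            char *dst, int dstCap)
{
    int const cSize =
        LZ4_compress_fast_continue(stream, block, dst, blockSize, dstCap, 1);
    if (cSize > 0) {
        /* history now lives in dictBuf; the input buffer may be reused */
        (void)LZ4_saveDict(stream, dictBuf, 64 * 1024);
    }
    return cSize;
}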
2268
2269/*-*******************************
2270 * Decompression functions
2271 ********************************/
2272
2273typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
2274
2275#undef MIN
2276#define MIN(a, b) ((a) < (b) ? (a) : (b))
2277
2278/* variant for decompress_unsafe()
2279 * does not know end of input
2280 * presumes input is well formed
2281 * note : will consume at least one byte */
2282static size_t read_long_length_no_check(const BYTE **pp)
2283{
2284 size_t b, l = 0;
2285 do {
2286 b = **pp;
2287 (*pp)++;
2288 l += b;
2289 } while (b == 255);
2290 DEBUGLOG(6, "read_long_length_no_check: +length=%zu using %zu input bytes",
2291 l, l / 255 + 1)
2292 return l;
2293}
2294
2295/* core decoder variant for LZ4_decompress_fast*()
2296 * for legacy support only : these entry points are deprecated.
2297 * - Presumes input is correctly formed (no defense vs malformed inputs)
2298 * - Does not know input size (presume input buffer is "large enough")
2299 * - Decompress a full block (only)
2300 * @return : nb of bytes read from input.
2301 * Note : this variant is not optimized for speed, just for maintenance.
2302 * the goal is to remove support of decompress_fast*() variants by v2.0
2303 **/
2304LZ4_FORCE_INLINE int LZ4_decompress_unsafe_generic(
2305 const BYTE *const istart, BYTE *const ostart, int decompressedSize,
2306
2307 size_t prefixSize,
2308 const BYTE *const dictStart, /* only if dict==usingExtDict */
2309 const size_t dictSize /* note: =0 if dictStart==NULL */
2310)
2311{
2312 const BYTE *ip = istart;
2313 BYTE *op = (BYTE *)ostart;
2314 BYTE *const oend = ostart + decompressedSize;
2315 const BYTE *const prefixStart = ostart - prefixSize;
2316
2317 DEBUGLOG(5, "LZ4_decompress_unsafe_generic");
2318 if (dictStart == NULL)
2319 assert(dictSize == 0);
2320
2321 while (1) {
2322 /* start new sequence */
2323 unsigned token = *ip++;
2324
2325 /* literals */
2326 {
2327 size_t ll = token >> ML_BITS;
2328 if (ll == 15) {
2329 /* long literal length */
2330 ll += read_long_length_no_check(&ip);
2331 }
2332 if ((size_t)(oend - op) < ll)
2333 return -1; /* output buffer overflow */
2334 LZ4_memmove(op, ip, ll); /* support in-place decompression */
2335 op += ll;
2336 ip += ll;
2337 if ((size_t)(oend - op) < MFLIMIT) {
2338 if (op == oend)
2339 break; /* end of block */
2340 DEBUGLOG(
2341 5,
2342 "invalid: literals end at distance %zi from end of block",
2343 oend - op);
2344 /* incorrect end of block :
2345 * last match must start at least MFLIMIT==12 bytes before end
2346 * of output block */
2347 return -1;
2348 }
2349 }
2350
2351 /* match */
2352 {
2353 size_t ml = token & 15;
2354 size_t const offset = LZ4_readLE16(ip);
2355 ip += 2;
2356
2357 if (ml == 15) {
2358 /* long literal length */
2359 ml += read_long_length_no_check(&ip);
2360 }
2361 ml += MINMATCH;
2362
2363 if ((size_t)(oend - op) < ml)
2364 return -1; /* output buffer overflow */
2365
2366 {
2367 const BYTE *match = op - offset;
2368
2369 /* out of range */
2370 if (offset > (size_t)(op - prefixStart) + dictSize) {
2371 DEBUGLOG(6, "offset out of range");
2372 return -1;
2373 }
2374
2375 /* check special case : extDict */
2376 if (offset > (size_t)(op - prefixStart)) {
2377 /* extDict scenario */
2378 const BYTE *const dictEnd = dictStart + dictSize;
2379 const BYTE *extMatch =
2380 dictEnd - (offset - (size_t)(op - prefixStart));
2381 size_t const extml = (size_t)(dictEnd - extMatch);
2382 if (extml > ml) {
2383 /* match entirely within extDict */
2384 LZ4_memmove(op, extMatch, ml);
2385 op += ml;
2386 ml = 0;
2387 }
2388 else {
2389 /* match split between extDict & prefix */
2390 LZ4_memmove(op, extMatch, extml);
2391 op += extml;
2392 ml -= extml;
2393 }
2394 match = prefixStart;
2395 }
2396
2397 /* match copy - slow variant, supporting overlap copy */
2398 {
2399 size_t u;
2400 for (u = 0; u < ml; u++) {
2401 op[u] = match[u];
2402 }
2403 }
2404 }
2405 op += ml;
2406 if ((size_t)(oend - op) < LASTLITERALS) {
2407 DEBUGLOG(
2408 5, "invalid: match ends at distance %zi from end of block",
2409 oend - op);
2410 /* incorrect end of block :
2411 * last match must stop at least LASTLITERALS==5 bytes before
2412 * end of output block */
2413 return -1;
2414 }
2415 } /* match */
2416 } /* main loop */
2417 return (int)(ip - istart);
2418}
2419
2420/* Read the variable-length literal or match length.
2421 *
2422 * @ip : input pointer
2423 * @ilimit : position after which if length is not decoded, the input is
2424 * necessarily corrupted.
2425 * @initial_check - check ip >= ipmax before start of loop. Returns
2426 * initial_error if so.
2427 * @error (output) - error code. Must be set to 0 before call.
2428 **/
2429typedef size_t Rvl_t;
2430static const Rvl_t rvl_error = (Rvl_t)(-1);
2431LZ4_FORCE_INLINE Rvl_t read_variable_length(const BYTE **ip, const BYTE *ilimit,
2432 int initial_check)
2433{
2434 Rvl_t s, length = 0;
2435 assert(ip != NULL);
2436 assert(*ip != NULL);
2437 assert(ilimit != NULL);
2438 if (initial_check && unlikely((*ip) >= ilimit)) { /* read limit reached */
2439 return rvl_error;
2440 }
2441 s = **ip;
2442 (*ip)++;
2443 length += s;
2444 if (unlikely((*ip) > ilimit)) { /* read limit reached */
2445 return rvl_error;
2446 }
2447 /* accumulator overflow detection (32-bit mode only) */
2448 if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1) / 2))) {
2449 return rvl_error;
2450 }
2451 if (likely(s != 255))
2452 return length;
2453 do {
2454 s = **ip;
2455 (*ip)++;
2456 length += s;
2457 if (unlikely((*ip) > ilimit)) { /* read limit reached */
2458 return rvl_error;
2459 }
2460 /* accumulator overflow detection (32-bit mode only) */
2461 if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1) / 2))) {
2462 return rvl_error;
2463 }
2464 } while (s == 255);
2465
2466 return length;
2467}
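Editor's note (worked example, not part of lz4.c): a literal run of 300 bytes is coded as the token nibble 15 (RUN_MASK) followed by the extension bytes 255 and 30, since 15 + 255 + 30 = 300. read_variable_length() itself returns the 285 contributed by the extension bytes, which the caller adds to the 15 taken from the token; any extension byte equal to 255 forces another read, which is why the ilimit and overflow checks above are needed to stop a malformed stream of endless 255s.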
2468
2469/*! LZ4_decompress_generic() :
2470 * This generic decompression function covers all use cases.
2471 * It shall be instantiated several times, using different sets of directives.
2472 * Note that it is important for performance that this function really get
2473 * inlined, in order to remove useless branches during compilation optimization.
2474 */
2475LZ4_FORCE_INLINE int LZ4_decompress_generic(
2476 const char *const src, char *const dst, int srcSize,
2477 int outputSize, /* If endOnInput==endOnInputSize, this value is
2478 `dstCapacity` */
2479
2480 earlyEnd_directive partialDecoding, /* full, partial */
2481 dict_directive dict, /* noDict, withPrefix64k, usingExtDict */
2482 const BYTE *const lowPrefix, /* always <= dst, == dst when no prefix */
2483 const BYTE *const dictStart, /* only if dict==usingExtDict */
2484 const size_t dictSize /* note : = 0 if noDict */
2485)
2486{
2487 if ((src == NULL) || (outputSize < 0)) {
2488 return -1;
2489 }
2490
2491 {
2492 const BYTE *ip = (const BYTE *)src;
2493 const BYTE *const iend = ip + srcSize;
2494
2495 BYTE *op = (BYTE *)dst;
2496 BYTE *const oend = op + outputSize;
2497 BYTE *cpy;
2498
2499 const BYTE *const dictEnd =
2500 (dictStart == NULL) ? NULL : dictStart + dictSize;
2501
2502 const int checkOffset = (dictSize < (int)(64 KB));
2503
2504 /* Set up the "end" pointers for the shortcut. */
2505 const BYTE *const shortiend = iend - 14 /*maxLL*/ - 2 /*offset*/;
2506 const BYTE *const shortoend = oend - 14 /*maxLL*/ - 18 /*maxML*/;
2507
2508 const BYTE *match;
2509 size_t offset;
2510 unsigned token;
2511 size_t length;
2512
2513 DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)", srcSize,
2514 outputSize);
2515
2516 /* Special cases */
2517 assert(lowPrefix <= op);
2518 if (unlikely(outputSize == 0)) {
2519 /* Empty output buffer */
2520 if (partialDecoding)
2521 return 0;
2522 return ((srcSize == 1) && (*ip == 0)) ? 0 : -1;
2523 }
2524 if (unlikely(srcSize == 0)) {
2525 return -1;
2526 }
2527
2528 /* LZ4_FAST_DEC_LOOP:
2529 * designed for modern OoO performance cpus,
2530 * where copying reliably 32-bytes is preferable to an unpredictable
2531 * branch. note : fast loop may show a regression for some client arm
2532 * chips. */
2533#if LZ4_FAST_DEC_LOOP
2534 if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {
2535 DEBUGLOG(6, "move to safe decode loop");
2536 goto safe_decode;
2537 }
2538
2539 /* Fast loop : decode sequences as long as output <
2540 * oend-FASTLOOP_SAFE_DISTANCE */
2541 DEBUGLOG(6, "using fast decode loop");
2542 while (1) {
2543 /* Main fastloop assertion: We can always wildcopy
2544 * FASTLOOP_SAFE_DISTANCE */
2545 assert(oend - op >= FASTLOOP_SAFE_DISTANCE);
2546 assert(ip < iend);
2547 token = *ip++;
2548 length = token >> ML_BITS; /* literal length */
2549 DEBUGLOG(7, "blockPos%6u: litLength token = %u",
2550 (unsigned)(op - (BYTE *)dst), (unsigned)length);
2551
2552 /* decode literal length */
2553 if (length == RUN_MASK) {
2554 size_t const addl =
2555 read_variable_length(&ip, iend - RUN_MASK, 1);
2556 if (addl == rvl_error) {
2557 DEBUGLOG(6, "error reading long literal length");
2558 goto _output_error;
2559 }
2560 length += addl;
2561 if (unlikely((uptrval)(op) + length < (uptrval)(op))) {
2562 goto _output_error;
2563 } /* overflow detection */
2564 if (unlikely((uptrval)(ip) + length < (uptrval)(ip))) {
2565 goto _output_error;
2566 } /* overflow detection */
2567
2568 /* copy literals */
2569 LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
2570 if ((op + length > oend - 32) || (ip + length > iend - 32)) {
2571 goto safe_literal_copy;
2572 }
2573 LZ4_wildCopy32(op, ip, op + length);
2574 ip += length;
2575 op += length;
2576 }
2577 else if (ip <= iend - (16 + 1 /*max lit + offset + nextToken*/)) {
2578 /* We don't need to check oend, since we check it once for each
2579 * loop below */
2580 DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe",
2581 (unsigned)length);
2582 /* Literals can only be <= 14, but hope compilers optimize
2583 * better when copy by a register size */
2584 LZ4_memcpy(op, ip, 16);
2585 ip += length;
2586 op += length;
2587 }
2588 else {
2589 goto safe_literal_copy;
2590 }
2591
2592 /* get offset */
2593 offset = LZ4_readLE16(ip);
2594 ip += 2;
2595 DEBUGLOG(6, "blockPos%6u: offset = %u",
2596 (unsigned)(op - (BYTE *)dst), (unsigned)offset);
2597 match = op - offset;
2598 assert(match <= op); /* overflow check */
2599
2600 /* get matchlength */
2601 length = token & ML_MASK;
2602 DEBUGLOG(7, " match length token = %u (len==%u)", (unsigned)length,
2603 (unsigned)length + MINMATCH);
2604
2605 if (length == ML_MASK) {
2606 size_t const addl =
2607 read_variable_length(&ip, iend - LASTLITERALS + 1, 0);
2608 if (addl == rvl_error) {
2609 DEBUGLOG(5, "error reading long match length");
2610 goto _output_error;
2611 }
2612 length += addl;
2613 length += MINMATCH;
2614 DEBUGLOG(7, " long match length == %u", (unsigned)length);
2615 if (unlikely((uptrval)(op) + length < (uptrval)op)) {
2616 goto _output_error;
2617 } /* overflow detection */
2618 if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
2619 goto safe_match_copy;
2620 }
2621 }
2622 else {
2623 length += MINMATCH;
2624 if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
2625 DEBUGLOG(7, "moving to safe_match_copy (ml==%u)",
2626 (unsigned)length);
2627 goto safe_match_copy;
2628 }
2629
2630 /* Fastpath check: skip LZ4_wildCopy32 when true */
2631 if ((dict == withPrefix64k) || (match >= lowPrefix)) {
2632 if (offset >= 8) {
2633 assert(match >= lowPrefix);
2634 assert(match <= op);
2635 assert(op + 18 <= oend);
2636
2637 LZ4_memcpy(op, match, 8);
2638 LZ4_memcpy(op + 8, match + 8, 8);
2639 LZ4_memcpy(op + 16, match + 16, 2);
2640 op += length;
2641 continue;
2642 }
2643 }
2644 }
2645
2646 if (checkOffset && (unlikely(match + dictSize < lowPrefix))) {
2647 DEBUGLOG(5, "Error : pos=%zi, offset=%zi => outside buffers",
2648 op - lowPrefix, op - match);
2649 goto _output_error;
2650 }
2651 /* match starting within external dictionary */
2652 if ((dict == usingExtDict) && (match < lowPrefix)) {
2653 assert(dictEnd != NULL);
2654 if (unlikely(op + length > oend - LASTLITERALS)) {
2655 if (partialDecoding) {
2656 DEBUGLOG(7, "partialDecoding: dictionary match, close "
2657 "to dstEnd");
2658 length = MIN(length, (size_t)(oend - op));
2659 }
2660 else {
2661 DEBUGLOG(6, "end-of-block condition violated")
2662 goto _output_error;
2663 }
2664 }
2665
2666 if (length <= (size_t)(lowPrefix - match)) {
2667 /* match fits entirely within external dictionary : just
2668 * copy */
2669 LZ4_memmove(op, dictEnd - (lowPrefix - match), length);
2670 op += length;
2671 }
2672 else {
2673 /* match stretches into both external dictionary and current
2674 * block */
2675 size_t const copySize = (size_t)(lowPrefix - match);
2676 size_t const restSize = length - copySize;
2677 LZ4_memcpy(op, dictEnd - copySize, copySize);
2678 op += copySize;
2679 if (restSize >
2680 (size_t)(op - lowPrefix)) { /* overlap copy */
2681 BYTE *const endOfMatch = op + restSize;
2682 const BYTE *copyFrom = lowPrefix;
2683 while (op < endOfMatch) {
2684 *op++ = *copyFrom++;
2685 }
2686 }
2687 else {
2688 LZ4_memcpy(op, lowPrefix, restSize);
2689 op += restSize;
2690 }
2691 }
2692 continue;
2693 }
2694
2695 /* copy match within block */
2696 cpy = op + length;
2697
2698 assert((op <= oend) && (oend - op >= 32));
2699 if (unlikely(offset < 16)) {
2700 LZ4_memcpy_using_offset(op, match, cpy, offset);
2701 }
2702 else {
2703 LZ4_wildCopy32(op, match, cpy);
2704 }
2705
2706 op = cpy; /* wildcopy correction */
2707 }
2708 safe_decode:
2709#endif
2710
2711 /* Main Loop : decode remaining sequences where output <
2712 * FASTLOOP_SAFE_DISTANCE */
2713 DEBUGLOG(6, "using safe decode loop");
2714 while (1) {
2715 assert(ip < iend);
2716 token = *ip++;
2717 length = token >> ML_BITS; /* literal length */
2718 DEBUGLOG(7, "blockPos%6u: litLength token = %u",
2719 (unsigned)(op - (BYTE *)dst), (unsigned)length);
2720
2721 /* A two-stage shortcut for the most common case:
2722 * 1) If the literal length is 0..14, and there is enough space,
2723 * enter the shortcut and copy 16 bytes on behalf of the literals
2724 * (in the fast mode, only 8 bytes can be safely copied this way).
2725 * 2) Further if the match length is 4..18, copy 18 bytes in a
2726 * similar manner; but we ensure that there's enough space in the
2727 * output for those 18 bytes earlier, upon entering the shortcut (in
2728 * other words, there is a combined check for both stages).
2729 */
2730 if ((length != RUN_MASK)
2731 /* strictly "less than" on input, to re-enter the loop with at
2732 least one byte */
2733 && likely((ip < shortiend) & (op <= shortoend))) {
2734 /* Copy the literals */
2735 LZ4_memcpy(op, ip, 16);
2736 op += length;
2737 ip += length;
2738
2739 /* The second stage: prepare for match copying, decode full
2740 * info. If it doesn't work out, the info won't be wasted. */
2741 length = token & ML_MASK; /* match length */
2742 DEBUGLOG(7, "blockPos%6u: matchLength token = %u (len=%u)",
2743 (unsigned)(op - (BYTE *)dst), (unsigned)length,
2744 (unsigned)length + 4);
2745 offset = LZ4_readLE16(ip);
2746 ip += 2;
2747 match = op - offset;
2748 assert(match <= op); /* check overflow */
2749
2750 /* Do not deal with overlapping matches. */
2751 if ((length != ML_MASK) && (offset >= 8) &&
2752 (dict == withPrefix64k || match >= lowPrefix)) {
2753 /* Copy the match. */
2754 LZ4_memcpy(op + 0, match + 0, 8);
2755 LZ4_memcpy(op + 8, match + 8, 8);
2756 LZ4_memcpy(op + 16, match + 16, 2);
2757 op += length + MINMATCH;
2758 /* Both stages worked, load the next token. */
2759 continue;
2760 }
2761
2762 /* The second stage didn't work out, but the info is ready.
2763 * Propel it right to the point of match copying. */
2764 goto _copy_match;
2765 }
2766
2767 /* decode literal length */
2768 if (length == RUN_MASK) {
2769 size_t const addl =
2770 read_variable_length(&ip, iend - RUN_MASK, 1);
2771 if (addl == rvl_error) {
2772 goto _output_error;
2773 }
2774 length += addl;
2775 if (unlikely((uptrval)(op) + length < (uptrval)(op))) {
2776 goto _output_error;
2777 } /* overflow detection */
2778 if (unlikely((uptrval)(ip) + length < (uptrval)(ip))) {
2779 goto _output_error;
2780 } /* overflow detection */
2781 }
2782
2783#if LZ4_FAST_DEC_LOOP
2784 safe_literal_copy:
2785#endif
2786 /* copy literals */
2787 cpy = op + length;
2788
2789 LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
2790 if ((cpy > oend - MFLIMIT) ||
2791 (ip + length > iend - (2 + 1 + LASTLITERALS))) {
2792 /* We've either hit the input parsing restriction or the output
2793 * parsing restriction. In the normal scenario, decoding a full
2794 * block, it must be the last sequence, otherwise it's an error
2795 * (invalid input or dimensions). In partialDecoding scenario,
2796 * it's necessary to ensure there is no buffer overflow.
2797 */
2798 if (partialDecoding) {
2799 /* Since we are partial decoding we may be in this block
2800 * because of the output parsing restriction, which is not
2801 * valid since the output buffer is allowed to be
2802 * undersized.
2803 */
2804 DEBUGLOG(7, "partialDecoding: copying literals, close to "
2805 "input or output end")
2806 DEBUGLOG(7, "partialDecoding: literal length = %u",
2807 (unsigned)length);
2808 DEBUGLOG(
2809 7, "partialDecoding: remaining space in dstBuffer : %i",
2810 (int)(oend - op));
2811 DEBUGLOG(
2812 7, "partialDecoding: remaining space in srcBuffer : %i",
2813 (int)(iend - ip));
2814 /* Finishing in the middle of a literals segment,
2815 * due to lack of input.
2816 */
2817 if (ip + length > iend) {
2818 length = (size_t)(iend - ip);
2819 cpy = op + length;
2820 }
2821 /* Finishing in the middle of a literals segment,
2822 * due to lack of output space.
2823 */
2824 if (cpy > oend) {
2825 cpy = oend;
2826 assert(op <= oend);
2827 length = (size_t)(oend - op);
2828 }
2829 }
2830 else {
2831 /* We must be on the last sequence (or invalid) because of
2832 * the parsing limitations so check that we exactly consume
2833 * the input and don't overrun the output buffer.
2834 */
2835 if ((ip + length != iend) || (cpy > oend)) {
2836 DEBUGLOG(5, "should have been last run of literals")
2837 DEBUGLOG(5, "ip(%p) + length(%i) = %p != iend (%p)", ip,
2838 (int)length, ip + length, iend);
2839 DEBUGLOG(5, "or cpy(%p) > (oend-MFLIMIT)(%p)", cpy,
2840 oend - MFLIMIT);
2841 DEBUGLOG(5,
2842 "after writing %u bytes / %i bytes available",
2843 (unsigned)(op - (BYTE *)dst), outputSize);
2844 goto _output_error;
2845 }
2846 }
2847 LZ4_memmove(op, ip,
2848 length); /* supports overlapping memory regions, for
2849 in-place decompression scenarios */
2850 ip += length;
2851 op += length;
2852 /* Necessarily EOF when !partialDecoding.
2853 * When partialDecoding, it is EOF if we've either
2854 * filled the output buffer or
2855 * can't proceed with reading an offset for following match.
2856 */
2857 if (!partialDecoding || (cpy == oend) || (ip >= (iend - 2))) {
2858 break;
2859 }
2860 }
2861 else {
2862 LZ4_wildCopy8(op, ip,
2863 cpy); /* can overwrite up to 8 bytes beyond cpy */
2864 ip += length;
2865 op = cpy;
2866 }
2867
2868 /* get offset */
2869 offset = LZ4_readLE16(ip);
2870 ip += 2;
2871 match = op - offset;
2872
2873 /* get matchlength */
2874 length = token & ML_MASK;
2875 DEBUGLOG(7, "blockPos%6u: matchLength token = %u",
2876 (unsigned)(op - (BYTE *)dst), (unsigned)length);
2877
2878 _copy_match:
2879 if (length == ML_MASK) {
2880 size_t const addl =
2881 read_variable_length(&ip, iend - LASTLITERALS + 1, 0);
2882 if (addl == rvl_error) {
2883 goto _output_error;
2884 }
2885 length += addl;
2886 if (unlikely((uptrval)(op) + length < (uptrval)op))
2887 goto _output_error; /* overflow detection */
2888 }
2889 length += MINMATCH;
2890
2891#if LZ4_FAST_DEC_LOOP
2892 safe_match_copy:
2893#endif
2894 if ((checkOffset) && (unlikely(match + dictSize < lowPrefix)))
2895 goto _output_error; /* Error : offset outside buffers */
2896 /* match starting within external dictionary */
2897 if ((dict == usingExtDict) && (match < lowPrefix)) {
2898 assert(dictEnd != NULL);
2899 if (unlikely(op + length > oend - LASTLITERALS)) {
2900 if (partialDecoding)
2901 length = MIN(length, (size_t)(oend - op));
2902 else
2903 goto _output_error; /* doesn't respect parsing
2904 restriction */
2905 }
2906
2907 if (length <= (size_t)(lowPrefix - match)) {
2908 /* match fits entirely within external dictionary : just
2909 * copy */
2910 LZ4_memmove(op, dictEnd - (lowPrefix - match), length);
2911 op += length;
2912 }
2913 else {
2914 /* match stretches into both external dictionary and current
2915 * block */
2916 size_t const copySize = (size_t)(lowPrefix - match);
2917 size_t const restSize = length - copySize;
2918 LZ4_memcpy(op, dictEnd - copySize, copySize);
2919 op += copySize;
2920 if (restSize >
2921 (size_t)(op - lowPrefix)) { /* overlap copy */
2922 BYTE *const endOfMatch = op + restSize;
2923 const BYTE *copyFrom = lowPrefix;
2924 while (op < endOfMatch)
2925 *op++ = *copyFrom++;
2926 }
2927 else {
2928 LZ4_memcpy(op, lowPrefix, restSize);
2929 op += restSize;
2930 }
2931 }
2932 continue;
2933 }
2934 assert(match >= lowPrefix);
2935
2936 /* copy match within block */
2937 cpy = op + length;
2938
2939 /* partialDecoding : may end anywhere within the block */
2940 assert(op <= oend);
2941 if (partialDecoding && (cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
2942 size_t const mlen = MIN(length, (size_t)(oend - op));
2943 const BYTE *const matchEnd = match + mlen;
2944 BYTE *const copyEnd = op + mlen;
2945 if (matchEnd > op) { /* overlap copy */
2946 while (op < copyEnd) {
2947 *op++ = *match++;
2948 }
2949 }
2950 else {
2951 LZ4_memcpy(op, match, mlen);
2952 }
2953 op = copyEnd;
2954 if (op == oend) {
2955 break;
2956 }
2957 continue;
2958 }
2959
2960 if (unlikely(offset < 8)) {
2961 LZ4_write32(op, 0); /* silence msan warning when offset==0 */
2962 op[0] = match[0];
2963 op[1] = match[1];
2964 op[2] = match[2];
2965 op[3] = match[3];
2966 match += inc32table[offset];
2967 LZ4_memcpy(op + 4, match, 4);
2968 match -= dec64table[offset];
2969 }
2970 else {
2971 LZ4_memcpy(op, match, 8);
2972 match += 8;
2973 }
2974 op += 8;
2975
2976 if (unlikely(cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
2977 BYTE *const oCopyLimit = oend - (WILDCOPYLENGTH - 1);
2978 if (cpy > oend - LASTLITERALS) {
2979 goto _output_error;
2980 } /* Error : last LASTLITERALS bytes must be literals
2981 (uncompressed) */
2982 if (op < oCopyLimit) {
2983 LZ4_wildCopy8(op, match, oCopyLimit);
2984 match += oCopyLimit - op;
2985 op = oCopyLimit;
2986 }
2987 while (op < cpy) {
2988 *op++ = *match++;
2989 }
2990 }
2991 else {
2992 LZ4_memcpy(op, match, 8);
2993 if (length > 16) {
2994 LZ4_wildCopy8(op + 8, match + 8, cpy);
2995 }
2996 }
2997 op = cpy; /* wildcopy correction */
2998 }
2999
3000 /* end of decoding */
3001 DEBUGLOG(5, "decoded %i bytes", (int)(((char *)op) - dst));
3002 return (int)(((char *)op) - dst); /* Nb of output bytes decoded */
3003
3004 /* Overflow error detected */
3005 _output_error:
3006 return (int)(-(((const char *)ip) - src)) - 1;
3007 }
3008}
3009
3010/*===== Instantiate the API decoding functions. =====*/
3011
3012LZ4_FORCE_O2
3013int LZ4_decompress_safe(const char *source, char *dest, int compressedSize,
3014 int maxDecompressedSize)
3015{
3016 return LZ4_decompress_generic(source, dest, compressedSize,
3017 maxDecompressedSize, decode_full_block,
3018 noDict, (BYTE *)dest, NULL, 0);
3019}
3020
3021LZ4_FORCE_O2
3022int LZ4_decompress_safe_partial(const char *src, char *dst, int compressedSize,
3023 int targetOutputSize, int dstCapacity)
3024{
3025 dstCapacity = MIN(targetOutputSize, dstCapacity);
3026 return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
3027 partial_decode, noDict, (BYTE *)dst, NULL, 0);
3028}
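Editor's note (illustration, not part of lz4.c): LZ4_decompress_safe_partial() lets a caller stop early, e.g. to inspect a header without decoding the whole block. A hedged sketch with a hypothetical helper name; the return value may be smaller than 100 if the block itself is shorter.

#include <lz4.h>

int peek_first_100(const char *comp, int compSize, char out[100])
{
    /* target 100 bytes; out has exactly 100 bytes of capacity */
    return LZ4_decompress_safe_partial(comp, out, compSize, 100, 100);
}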
3029
3031int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
3032{
3033 DEBUGLOG(5, "LZ4_decompress_fast");
3034 return LZ4_decompress_unsafe_generic((const BYTE *)source, (BYTE *)dest,
3035 originalSize, 0, NULL, 0);
3036}
3037
3038/*===== Instantiate a few more decoding cases, used more than once. =====*/
3039
3040LZ4_FORCE_O2 /* Exported, an obsolete API function. */
3041 int
3042 LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
3043 int compressedSize, int maxOutputSize)
3044{
3045 return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
3046 decode_full_block, withPrefix64k,
3047 (BYTE *)dest - 64 KB, NULL, 0);
3048}
3049
3050LZ4_FORCE_O2
3051static int LZ4_decompress_safe_partial_withPrefix64k(const char *source,
3052 char *dest,
3053 int compressedSize,
3054 int targetOutputSize,
3055 int dstCapacity)
3056{
3057 dstCapacity = MIN(targetOutputSize, dstCapacity);
3058 return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
3059 partial_decode, withPrefix64k,
3060 (BYTE *)dest - 64 KB, NULL, 0);
3061}
3062
3063/* Another obsolete API function, paired with the previous one. */
3064int LZ4_decompress_fast_withPrefix64k(const char *source, char *dest,
3065 int originalSize)
3066{
3067 return LZ4_decompress_unsafe_generic((const BYTE *)source, (BYTE *)dest,
3068 originalSize, 64 KB, NULL, 0);
3069}
3070
3071LZ4_FORCE_O2
3072static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest,
3073 int compressedSize,
3074 int maxOutputSize,
3075 size_t prefixSize)
3076{
3077 return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
3078 decode_full_block, noDict,
3079 (BYTE *)dest - prefixSize, NULL, 0);
3080}
3081
3082LZ4_FORCE_O2
3083static int LZ4_decompress_safe_partial_withSmallPrefix(
3084 const char *source, char *dest, int compressedSize, int targetOutputSize,
3085 int dstCapacity, size_t prefixSize)
3086{
3087 dstCapacity = MIN(targetOutputSize, dstCapacity);
3088 return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
3089 partial_decode, noDict,
3090 (BYTE *)dest - prefixSize, NULL, 0);
3091}
3092
3093LZ4_FORCE_O2
3094int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
3095 int compressedSize, int maxOutputSize,
3096 const void *dictStart, size_t dictSize)
3097{
3098 DEBUGLOG(5, "LZ4_decompress_safe_forceExtDict");
3099 return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
3100 decode_full_block, usingExtDict, (BYTE *)dest,
3101 (const BYTE *)dictStart, dictSize);
3102}
3103
3104LZ4_FORCE_O2
3105int LZ4_decompress_safe_partial_forceExtDict(
3106 const char *source, char *dest, int compressedSize, int targetOutputSize,
3107 int dstCapacity, const void *dictStart, size_t dictSize)
3108{
3109 dstCapacity = MIN(targetOutputSize, dstCapacity);
3110 return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
3111 partial_decode, usingExtDict, (BYTE *)dest,
3112 (const BYTE *)dictStart, dictSize);
3113}
3114
3115LZ4_FORCE_O2
3116static int LZ4_decompress_fast_extDict(const char *source, char *dest,
3117 int originalSize, const void *dictStart,
3118 size_t dictSize)
3119{
3120 return LZ4_decompress_unsafe_generic((const BYTE *)source, (BYTE *)dest,
3121 originalSize, 0,
3122 (const BYTE *)dictStart, dictSize);
3123}
3124
3125/* The "double dictionary" mode, for use with e.g. ring buffers: the first part
3126 * of the dictionary is passed as prefix, and the second via dictStart +
3127 * dictSize. These routines are used only once, in LZ4_decompress_*_continue().
3128 */
3129LZ4_FORCE_INLINE
3130int LZ4_decompress_safe_doubleDict(const char *source, char *dest,
3131 int compressedSize, int maxOutputSize,
3132 size_t prefixSize, const void *dictStart,
3133 size_t dictSize)
3134{
3135 return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
3136 decode_full_block, usingExtDict,
3137 (BYTE *)dest - prefixSize,
3138 (const BYTE *)dictStart, dictSize);
3139}
3140
3141/*===== streaming decompression functions =====*/
3142
3143#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
3144LZ4_streamDecode_t *LZ4_createStreamDecode(void)
3145{
3146 LZ4_STATIC_ASSERT(sizeof(LZ4_streamDecode_t) >=
3147 sizeof(LZ4_streamDecode_t_internal));
3148 return (LZ4_streamDecode_t *)ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));
3149}
3150
3151int LZ4_freeStreamDecode(LZ4_streamDecode_t *LZ4_stream)
3152{
3153 if (LZ4_stream == NULL) {
3154 return 0;
3155 } /* support free on NULL */
3156 FREEMEM(LZ4_stream);
3157 return 0;
3158}
3159#endif
3160
3161/*! LZ4_setStreamDecode() :
3162 * Use this function to instruct where to find the dictionary.
3163 * This function is not necessary if previous data is still available where it
3164 * was decoded. Loading a size of 0 is allowed (same effect as no dictionary).
3165 * @return : 1 if OK, 0 if error
3166 */
3167int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
3168 const char *dictionary, int dictSize)
3169{
3170 LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
3171 lz4sd->prefixSize = (size_t)dictSize;
3172 if (dictSize) {
3173 assert(dictionary != NULL);
3174 lz4sd->prefixEnd = (const BYTE *)dictionary + dictSize;
3175 }
3176 else {
3177 lz4sd->prefixEnd = (const BYTE *)dictionary;
3178 }
3179 lz4sd->externalDict = NULL;
3180 lz4sd->extDictSize = 0;
3181 return 1;
3182}
3183
3184/*! LZ4_decoderRingBufferSize() :
3185 * when setting a ring buffer for streaming decompression (optional scenario),
3186 * provides the minimum size of this ring buffer
3187 * to be compatible with any source respecting maxBlockSize condition.
3188 * Note : in a ring buffer scenario,
3189 * blocks are presumed decompressed next to each other.
3190 * When not enough space remains for next block (remainingSize < maxBlockSize),
3191 * decoding resumes from beginning of ring buffer.
3192 * @return : minimum ring buffer size,
3193 * or 0 if there is an error (invalid maxBlockSize).
3194 */
3195int LZ4_decoderRingBufferSize(int maxBlockSize)
3196{
3197 if (maxBlockSize < 0)
3198 return 0;
3199 if (maxBlockSize > LZ4_MAX_INPUT_SIZE)
3200 return 0;
3201 if (maxBlockSize < 16)
3202 maxBlockSize = 16;
3203 return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);
3204}
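Editor's note (worked example, not part of lz4.c): with the lz4.h definition LZ4_DECODER_RING_BUFFER_SIZE(mbs) = 65536 + 14 + mbs, a maxBlockSize of 4 KB yields 65536 + 14 + 4096 = 69646 bytes: one full 64 KB history window, a small margin, plus room for the largest block that may still be written before decoding wraps to the start of the ring.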
3205
3206/*
3207*_continue() :
3208 These decoding functions allow decompression of multiple blocks in
3209"streaming" mode. Previously decoded blocks must still be available at the
3210memory position where they were decoded. If it's not possible, save the relevant
3211part of decoded data into a safe buffer, and indicate where it stands using
3212LZ4_setStreamDecode()
3213*/
3214LZ4_FORCE_O2 int
3215LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
3216 const char *source, char *dest,
3217 int compressedSize, int maxOutputSize)
3218{
3219 LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
3220 int result;
3221
3222 if (lz4sd->prefixSize == 0) {
3223 /* The first call, no dictionary yet. */
3224 assert(lz4sd->extDictSize == 0);
3225 result =
3226 LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
3227 if (result <= 0)
3228 return result;
3229 lz4sd->prefixSize = (size_t)result;
3230 lz4sd->prefixEnd = (BYTE *)dest + result;
3231 }
3232 else if (lz4sd->prefixEnd == (BYTE *)dest) {
3233 /* They're rolling the current segment. */
3234 if (lz4sd->prefixSize >= 64 KB - 1)
3235 result = LZ4_decompress_safe_withPrefix64k(
3236 source, dest, compressedSize, maxOutputSize);
3237 else if (lz4sd->extDictSize == 0)
3238 result = LZ4_decompress_safe_withSmallPrefix(
3239 source, dest, compressedSize, maxOutputSize, lz4sd->prefixSize);
3240 else
3241 result = LZ4_decompress_safe_doubleDict(
3242 source, dest, compressedSize, maxOutputSize, lz4sd->prefixSize,
3243 lz4sd->externalDict, lz4sd->extDictSize);
3244 if (result <= 0)
3245 return result;
3246 lz4sd->prefixSize += (size_t)result;
3247 lz4sd->prefixEnd += result;
3248 }
3249 else {
3250 /* The buffer wraps around, or they're switching to another buffer. */
3251 lz4sd->extDictSize = lz4sd->prefixSize;
3252 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
3253 result = LZ4_decompress_safe_forceExtDict(
3254 source, dest, compressedSize, maxOutputSize, lz4sd->externalDict,
3255 lz4sd->extDictSize);
3256 if (result <= 0)
3257 return result;
3258 lz4sd->prefixSize = (size_t)result;
3259 lz4sd->prefixEnd = (BYTE *)dest + result;
3260 }
3261
3262 return result;
3263}
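Editor's note (illustration, not part of lz4.c): a hedged sketch of streaming decompression using the function above. Blocks are decoded back-to-back so that earlier output stays in place and serves as the dictionary for later blocks; the helper name is ours.

#include <lz4.h>

int decode_blocks(const char *const blocks[], const int sizes[], int n,
                  char *dst, int dstCap)
{
    LZ4_streamDecode_t sd;
    char *op = dst;
    int i;
    if (!LZ4_setStreamDecode(&sd, NULL, 0)) /* start with no dictionary */
        return -1;
    for (i = 0; i < n; i++) {
        int const d = LZ4_decompress_safe_continue(
            &sd, blocks[i], op, sizes[i], (int)(dst + dstCap - op));
        if (d < 0)
            return -1; /* corrupt block or insufficient room */
        op += d;
    }
    return (int)(op - dst); /* total decoded bytes */
}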
3264
3265LZ4_FORCE_O2 int
3266LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
3267 const char *source, char *dest, int originalSize)
3268{
3269 LZ4_streamDecode_t_internal *const lz4sd =
3270 (assert(LZ4_streamDecode != NULL),
3271 &LZ4_streamDecode->internal_donotuse);
3272 int result;
3273
3274 DEBUGLOG(5, "LZ4_decompress_fast_continue (toDecodeSize=%i)", originalSize);
3275 assert(originalSize >= 0);
3276
3277 if (lz4sd->prefixSize == 0) {
3278 DEBUGLOG(5, "first invocation : no prefix nor extDict");
3279 assert(lz4sd->extDictSize == 0);
3280 result = LZ4_decompress_fast(source, dest, originalSize);
3281 if (result <= 0)
3282 return result;
3283 lz4sd->prefixSize = (size_t)originalSize;
3284 lz4sd->prefixEnd = (BYTE *)dest + originalSize;
3285 }
3286 else if (lz4sd->prefixEnd == (BYTE *)dest) {
3287 DEBUGLOG(5, "continue using existing prefix");
3288 result = LZ4_decompress_unsafe_generic(
3289 (const BYTE *)source, (BYTE *)dest, originalSize, lz4sd->prefixSize,
3290 lz4sd->externalDict, lz4sd->extDictSize);
3291 if (result <= 0)
3292 return result;
3293 lz4sd->prefixSize += (size_t)originalSize;
3294 lz4sd->prefixEnd += originalSize;
3295 }
3296 else {
3297 DEBUGLOG(5, "prefix becomes extDict");
3298 lz4sd->extDictSize = lz4sd->prefixSize;
3299 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
3300 result = LZ4_decompress_fast_extDict(source, dest, originalSize,
3301 lz4sd->externalDict,
3302 lz4sd->extDictSize);
3303 if (result <= 0)
3304 return result;
3305 lz4sd->prefixSize = (size_t)originalSize;
3306 lz4sd->prefixEnd = (BYTE *)dest + originalSize;
3307 }
3308
3309 return result;
3310}
3311
3312/*
3313Advanced decoding functions :
3314*_usingDict() :
3315 These decoding functions work the same as "_continue" ones,
3316 the dictionary must be explicitly provided within parameters
3317*/
3318
3319int LZ4_decompress_safe_usingDict(const char *source, char *dest,
3320 int compressedSize, int maxOutputSize,
3321 const char *dictStart, int dictSize)
3322{
3323 if (dictSize == 0)
3324 return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
3325 if (dictStart + dictSize == dest) {
3326 if (dictSize >= 64 KB - 1) {
3327 return LZ4_decompress_safe_withPrefix64k(
3328 source, dest, compressedSize, maxOutputSize);
3329 }
3330 assert(dictSize >= 0);
3331 return LZ4_decompress_safe_withSmallPrefix(
3332 source, dest, compressedSize, maxOutputSize, (size_t)dictSize);
3333 }
3334 assert(dictSize >= 0);
3335 return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize,
3336 maxOutputSize, dictStart,
3337 (size_t)dictSize);
3338}
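Editor's note (illustration, not part of lz4.c): a hedged one-call sketch of dictionary decompression via the function above. The dictionary bytes must be identical to those supplied at compression time and remain valid for the duration of the call; the wrapper name is ours.

#include <lz4.h>

int decode_with_dict(const char *comp, int compSize, char *dst, int dstCap,
                     const char *dict, int dictSize)
{
    return LZ4_decompress_safe_usingDict(comp, dst, compSize, dstCap,
                                         dict, dictSize);
}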
3339
3340int LZ4_decompress_safe_partial_usingDict(const char *source, char *dest,
3341 int compressedSize,
3342 int targetOutputSize, int dstCapacity,
3343 const char *dictStart, int dictSize)
3344{
3345 if (dictSize == 0)
3346 return LZ4_decompress_safe_partial(source, dest, compressedSize,
3347 targetOutputSize, dstCapacity);
3348 if (dictStart + dictSize == dest) {
3349 if (dictSize >= 64 KB - 1) {
3350 return LZ4_decompress_safe_partial_withPrefix64k(
3351 source, dest, compressedSize, targetOutputSize, dstCapacity);
3352 }
3353 assert(dictSize >= 0);
3354 return LZ4_decompress_safe_partial_withSmallPrefix(
3355 source, dest, compressedSize, targetOutputSize, dstCapacity,
3356 (size_t)dictSize);
3357 }
3358 assert(dictSize >= 0);
3359 return LZ4_decompress_safe_partial_forceExtDict(
3360 source, dest, compressedSize, targetOutputSize, dstCapacity, dictStart,
3361 (size_t)dictSize);
3362}
3363
3364int LZ4_decompress_fast_usingDict(const char *source, char *dest,
3365 int originalSize, const char *dictStart,
3366 int dictSize)
3367{
3368 if (dictSize == 0 || dictStart + dictSize == dest)
3369 return LZ4_decompress_unsafe_generic((const BYTE *)source, (BYTE *)dest,
3370 originalSize, (size_t)dictSize,
3371 NULL, 0);
3372 assert(dictSize >= 0);
3373 return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart,
3374 (size_t)dictSize);
3375}
3376
3377/*=*************************************************
3378 * Obsolete Functions
3379 ***************************************************/
3380/* obsolete compression functions */
3381int LZ4_compress_limitedOutput(const char *source, char *dest, int inputSize,
3382 int maxOutputSize)
3383{
3384 return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
3385}
3386int LZ4_compress(const char *src, char *dest, int srcSize)
3387{
3388 return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize));
3389}
3390int LZ4_compress_limitedOutput_withState(void *state, const char *src,
3391 char *dst, int srcSize, int dstSize)
3392{
3393 return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);
3394}
3395int LZ4_compress_withState(void *state, const char *src, char *dst, int srcSize)
3396{
3397 return LZ4_compress_fast_extState(state, src, dst, srcSize,
3398 LZ4_compressBound(srcSize), 1);
3399}
3400int LZ4_compress_limitedOutput_continue(LZ4_stream_t *LZ4_streamPtr,
3401 const char *src, char *dst, int srcSize,
3402 int dstCapacity)
3403{
3404 return LZ4_compress_fast_continue(LZ4_streamPtr, src, dst, srcSize,
3405 dstCapacity, 1);
3406}
3407int LZ4_compress_continue(LZ4_stream_t *LZ4_streamPtr, const char *source,
3408 char *dest, int inputSize)
3409{
3410 return LZ4_compress_fast_continue(LZ4_streamPtr, source, dest, inputSize,
3411 LZ4_compressBound(inputSize), 1);
3412}
3413
3414/*
3415These decompression functions are deprecated and should no longer be used.
3416They are only provided here for compatibility with older user programs.
3417- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
3418- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
3419*/
3420int LZ4_uncompress(const char *source, char *dest, int outputSize)
3421{
3422 return LZ4_decompress_fast(source, dest, outputSize);
3423}
3424int LZ4_uncompress_unknownOutputSize(const char *source, char *dest, int isize,
3425 int maxOutputSize)
3426{
3427 return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
3428}
3429
3430/* Obsolete Streaming functions */
3431
3432int LZ4_sizeofStreamState(void)
3433{
3434 return sizeof(LZ4_stream_t);
3435}
3436
3437int LZ4_resetStreamState(void *state, char *inputBuffer)
3438{
3439 (void)inputBuffer;
3440 LZ4_resetStream((LZ4_stream_t *)state);
3441 return 0;
3442}
3443
3444#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
3445void *LZ4_create(char *inputBuffer)
3446{
3447 (void)inputBuffer;
3448 return LZ4_createStream();
3449}
3450#endif
3451
3452char *LZ4_slideInputBuffer(void *state)
3453{
3454 /* avoid const char * -> char * conversion warning */
3455 return (char *)(uptrval)((LZ4_stream_t *)state)
3456 ->internal_donotuse.dictionary;
3457}
3458
3459#endif /* LZ4_COMMONDEFS_ONLY */