make style

This commit is contained in:
Philippe Teuwen 2020-06-08 03:15:10 +02:00
parent a1eb8e78b6
commit 224cb2ffd7
38 changed files with 1531 additions and 1550 deletions

View file

@ -20,15 +20,13 @@ void *memcpy(void *dest, const void *src, int len) {
return dest;
}
void *memmove (void *dest, const void *src, size_t len)
{
void *memmove(void *dest, const void *src, size_t len) {
char *d = dest;
const char *s = src;
if (d < s)
while (len--)
*d++ = *s++;
else
{
else {
char *lasts = (char *)s + (len - 1);
char *lastd = d + (len - 1);
while (len--)

View file

@ -273,8 +273,7 @@ typedef enum {
/*-************************************
* Reading and writing into memory
**************************************/
static unsigned LZ4_isLittleEndian(void)
{
static unsigned LZ4_isLittleEndian(void) {
const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
return one.c[0];
}
@ -305,36 +304,36 @@ static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = val
#else /* safe and portable access using memcpy() */
static U16 LZ4_read16(const void* memPtr)
{
U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
static U16 LZ4_read16(const void *memPtr) {
U16 val;
memcpy(&val, memPtr, sizeof(val));
return val;
}
static U32 LZ4_read32(const void* memPtr)
{
U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
static U32 LZ4_read32(const void *memPtr) {
U32 val;
memcpy(&val, memPtr, sizeof(val));
return val;
}
static reg_t LZ4_read_ARCH(const void* memPtr)
{
reg_t val; memcpy(&val, memPtr, sizeof(val)); return val;
static reg_t LZ4_read_ARCH(const void *memPtr) {
reg_t val;
memcpy(&val, memPtr, sizeof(val));
return val;
}
static void LZ4_write16(void* memPtr, U16 value)
{
static void LZ4_write16(void *memPtr, U16 value) {
memcpy(memPtr, &value, sizeof(value));
}
static void LZ4_write32(void* memPtr, U32 value)
{
static void LZ4_write32(void *memPtr, U32 value) {
memcpy(memPtr, &value, sizeof(value));
}
#endif /* LZ4_FORCE_MEMORY_ACCESS */
static U16 LZ4_readLE16(const void* memPtr)
{
static U16 LZ4_readLE16(const void *memPtr) {
if (LZ4_isLittleEndian()) {
return LZ4_read16(memPtr);
} else {
@ -343,8 +342,7 @@ static U16 LZ4_readLE16(const void* memPtr)
}
}
static void LZ4_writeLE16(void* memPtr, U16 value)
{
static void LZ4_writeLE16(void *memPtr, U16 value) {
if (LZ4_isLittleEndian()) {
LZ4_write16(memPtr, value);
} else {
@ -356,13 +354,13 @@ static void LZ4_writeLE16(void* memPtr, U16 value)
/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
LZ4_FORCE_O2_INLINE_GCC_PPC64LE
void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)
{
void LZ4_wildCopy8(void *dstPtr, const void *srcPtr, void *dstEnd) {
BYTE *d = (BYTE *)dstPtr;
const BYTE *s = (const BYTE *)srcPtr;
BYTE *const e = (BYTE *)dstEnd;
do { memcpy(d,s,8); d+=8; s+=8; } while (d<e);
do { memcpy(d, s, 8); d += 8; s += 8; }
while (d < e);
}
static const unsigned inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
@ -385,8 +383,7 @@ static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
#if LZ4_FAST_DEC_LOOP
LZ4_FORCE_O2_INLINE_GCC_PPC64LE void
LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
{
LZ4_memcpy_using_offset_base(BYTE *dstPtr, const BYTE *srcPtr, BYTE *dstEnd, const size_t offset) {
if (offset < 8) {
dstPtr[0] = srcPtr[0];
dstPtr[1] = srcPtr[1];
@ -409,21 +406,20 @@ LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, con
* this version copies two times 16 bytes (instead of one time 32 bytes)
* because it must be compatible with offsets >= 16. */
LZ4_FORCE_O2_INLINE_GCC_PPC64LE void
LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
{
LZ4_wildCopy32(void *dstPtr, const void *srcPtr, void *dstEnd) {
BYTE *d = (BYTE *)dstPtr;
const BYTE *s = (const BYTE *)srcPtr;
BYTE *const e = (BYTE *)dstEnd;
do { memcpy(d,s,16); memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
do { memcpy(d, s, 16); memcpy(d + 16, s + 16, 16); d += 32; s += 32; }
while (d < e);
}
/* LZ4_memcpy_using_offset() presumes :
* - dstEnd >= dstPtr + MINMATCH
* - there is at least 8 bytes available to write after dstEnd */
LZ4_FORCE_O2_INLINE_GCC_PPC64LE void
LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
{
LZ4_memcpy_using_offset(BYTE *dstPtr, const BYTE *srcPtr, BYTE *dstEnd, const size_t offset) {
BYTE v[8];
assert(dstEnd >= dstPtr + MINMATCH);
@ -460,8 +456,7 @@ LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const si
/*-************************************
* Common functions
**************************************/
static unsigned LZ4_NbCommonBytes (reg_t val)
{
static unsigned LZ4_NbCommonBytes(reg_t val) {
if (LZ4_isLittleEndian()) {
if (sizeof(val) == 8) {
# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
@ -478,7 +473,8 @@ static unsigned LZ4_NbCommonBytes (reg_t val)
7, 0, 1, 2, 3, 3, 4, 6,
2, 6, 5, 5, 3, 4, 5, 6,
7, 1, 2, 4, 6, 4, 4, 5,
7, 2, 6, 5, 7, 6, 7, 7 };
7, 2, 6, 5, 7, 6, 7, 7
};
return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
# endif
} else /* 32 bits */ {
@ -492,11 +488,12 @@ static unsigned LZ4_NbCommonBytes (reg_t val)
static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
3, 2, 2, 1, 3, 2, 0, 1,
3, 3, 1, 2, 2, 2, 2, 0,
3, 1, 2, 0, 1, 0, 1, 1 };
3, 1, 2, 0, 1, 0, 1, 1
};
return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
# endif
}
} else /* Big Endian CPU */ {
} else { /* Big Endian CPU */
if (sizeof(val) == 8) { /* 64-bits */
# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
unsigned long r = 0;
@ -509,8 +506,10 @@ static unsigned LZ4_NbCommonBytes (reg_t val)
Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.
Note that this code path is never triggered in 32-bits mode. */
unsigned r;
if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }
if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
if (!(val >> by32)) { r = 4; }
else { r = 0; val >>= by32; }
if (!(val >> 16)) { r += 2; val >>= 8; }
else { val >>= 24; }
r += (!val);
return r;
# endif
@ -523,7 +522,8 @@ static unsigned LZ4_NbCommonBytes (reg_t val)
return (unsigned)__builtin_clz((U32)val) >> 3;
# else
unsigned r;
if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
if (!(val >> 16)) { r = 2; val >>= 8; }
else { r = 0; val >>= 24; }
r += (!val);
return r;
# endif
@ -533,17 +533,18 @@ static unsigned LZ4_NbCommonBytes (reg_t val)
#define STEPSIZE sizeof(reg_t)
LZ4_FORCE_INLINE
unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
{
unsigned LZ4_count(const BYTE *pIn, const BYTE *pMatch, const BYTE *pInLimit) {
const BYTE *const pStart = pIn;
if (likely(pIn < pInLimit - (STEPSIZE - 1))) {
reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
if (!diff) {
pIn+=STEPSIZE; pMatch+=STEPSIZE;
pIn += STEPSIZE;
pMatch += STEPSIZE;
} else {
return LZ4_NbCommonBytes(diff);
} }
}
}
while (likely(pIn < pInLimit - (STEPSIZE - 1))) {
reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
@ -628,16 +629,14 @@ int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
/*-******************************
* Compression functions
********************************/
static U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
{
static U32 LZ4_hash4(U32 sequence, tableType_t const tableType) {
if (tableType == byU16)
return ((sequence * 2654435761U) >> ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
else
return ((sequence * 2654435761U) >> ((MINMATCH * 8) - LZ4_HASHLOG));
}
static U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
{
static U32 LZ4_hash5(U64 sequence, tableType_t const tableType) {
const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG + 1 : LZ4_HASHLOG;
if (LZ4_isLittleEndian()) {
const U64 prime5bytes = 889523592379ULL;
@ -648,51 +647,51 @@ static U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
}
}
LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
{
LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void *const p, tableType_t const tableType) {
if ((sizeof(reg_t) == 8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
return LZ4_hash4(LZ4_read32(p), tableType);
}
static void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
{
switch (tableType)
{
static void LZ4_clearHash(U32 h, void *tableBase, tableType_t const tableType) {
switch (tableType) {
default: /* fallthrough */
case clearedTable: { /* illegal! */ assert(0); return; }
case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }
case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }
case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }
case byPtr:
{ const BYTE **hashTable = (const BYTE **)tableBase; hashTable[h] = NULL; return; }
case byU32:
{ U32 *hashTable = (U32 *) tableBase; hashTable[h] = 0; return; }
case byU16:
{ U16 *hashTable = (U16 *) tableBase; hashTable[h] = 0; return; }
}
}
static void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
{
switch (tableType)
{
static void LZ4_putIndexOnHash(U32 idx, U32 h, void *tableBase, tableType_t const tableType) {
switch (tableType) {
default: /* fallthrough */
case clearedTable: /* fallthrough */
case byPtr: { /* illegal! */ assert(0); return; }
case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }
case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }
case byU32:
{ U32 *hashTable = (U32 *) tableBase; hashTable[h] = idx; return; }
case byU16:
{ U16 *hashTable = (U16 *) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }
}
}
static void LZ4_putPositionOnHash(const BYTE *p, U32 h,
void *tableBase, tableType_t const tableType,
const BYTE* srcBase)
{
switch (tableType)
{
const BYTE *srcBase) {
switch (tableType) {
case clearedTable: { /* illegal! */ assert(0); return; }
case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
case byPtr:
{ const BYTE **hashTable = (const BYTE **)tableBase; hashTable[h] = p; return; }
case byU32:
{ U32 *hashTable = (U32 *) tableBase; hashTable[h] = (U32)(p - srcBase); return; }
case byU16:
{ U16 *hashTable = (U16 *) tableBase; hashTable[h] = (U16)(p - srcBase); return; }
}
}
LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE *p, void *tableBase, tableType_t tableType, const BYTE *srcBase) {
U32 const h = LZ4_hashPosition(p, tableType);
LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
}
@ -703,8 +702,7 @@ LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_
* Assumption 1 : only valid if tableType == byU32 or byU16.
* Assumption 2 : h is presumed valid (within limits of hash table)
*/
static U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
{
static U32 LZ4_getIndexOnHash(U32 h, const void *tableBase, tableType_t tableType) {
LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
if (tableType == byU32) {
const U32 *const hashTable = (const U32 *) tableBase;
@ -716,11 +714,11 @@ static U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableTyp
assert(h < (1U << (LZ4_MEMORY_USAGE - 1)));
return hashTable[h];
}
assert(0); return 0; /* forbidden case */
assert(0);
return 0; /* forbidden case */
}
static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
static const BYTE *LZ4_getPositionOnHash(U32 h, const void *tableBase, tableType_t tableType, const BYTE *srcBase) {
if (tableType == byPtr) { const BYTE *const *hashTable = (const BYTE * const *) tableBase; return hashTable[h]; }
if (tableType == byU32) { const U32 *const hashTable = (const U32 *) tableBase; return hashTable[h] + srcBase; }
{ const U16 *const hashTable = (const U16 *) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
@ -729,8 +727,7 @@ static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType
LZ4_FORCE_INLINE const BYTE *
LZ4_getPosition(const BYTE *p,
const void *tableBase, tableType_t tableType,
const BYTE* srcBase)
{
const BYTE *srcBase) {
U32 const h = LZ4_hashPosition(p, tableType);
return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}
@ -758,8 +755,7 @@ LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
|| ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)
|| ((tableType == byU32) && cctx->currentOffset > 1 GB)
|| tableType == byPtr
|| inputSize >= 4 KB)
{
|| inputSize >= 4 KB) {
DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
cctx->currentOffset = 0;
@ -797,8 +793,7 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
const tableType_t tableType,
const dict_directive dictDirective,
const dictIssue_directive dictIssue,
const int acceleration)
{
const int acceleration) {
int result;
const BYTE *ip = (const BYTE *) source;
@ -860,7 +855,8 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
/* First Byte */
LZ4_putPosition(ip, cctx->hashTable, tableType, base);
ip++; forwardH = LZ4_hashPosition(ip, tableType);
ip++;
forwardH = LZ4_hashPosition(ip, tableType);
/* Main Loop */
for (; ;) {
@ -957,7 +953,8 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
while (((ip > anchor) & (match > lowLimit)) && (unlikely(ip[-1] == match[-1]))) { ip--; match--; }
/* Encode Literals */
{ unsigned const litLength = (unsigned)(ip - anchor);
{
unsigned const litLength = (unsigned)(ip - anchor);
token = op++;
if ((outputDirective == limitedOutput) && /* Check output buffer overflow */
(unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength / 255) > olimit))) {
@ -973,8 +970,7 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
*token = (RUN_MASK << ML_BITS);
for (; len >= 255 ; len -= 255) * op++ = 255;
*op++ = (BYTE)len;
}
else *token = (BYTE)(litLength<<ML_BITS);
} else *token = (BYTE)(litLength << ML_BITS);
/* Copy Literals */
LZ4_wildCopy8(op, anchor, op + litLength);
@ -1003,15 +999,18 @@ _next_match:
if (maybe_extMem) { /* static test */
DEBUGLOG(6, " with offset=%u (ext if > %i)", offset, (int)(ip - (const BYTE *)source));
assert(offset <= LZ4_DISTANCE_MAX && offset > 0);
LZ4_writeLE16(op, (U16)offset); op+=2;
LZ4_writeLE16(op, (U16)offset);
op += 2;
} else {
DEBUGLOG(6, " with offset=%u (same segment)", (U32)(ip - match));
assert(ip - match <= LZ4_DISTANCE_MAX);
LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
LZ4_writeLE16(op, (U16)(ip - match));
op += 2;
}
/* Encode MatchLength */
{ unsigned matchCode;
{
unsigned matchCode;
if ((dictDirective == usingExtDict || dictDirective == usingDictCtx)
&& (lowLimit == dictionary) /* match within extDict */) {
@ -1141,7 +1140,8 @@ _next_match:
_last_literals:
/* Encode Last Literals */
{ size_t lastRun = (size_t)(iend - anchor);
{
size_t lastRun = (size_t)(iend - anchor);
if ((outputDirective) && /* Check output buffer overflow */
(op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) > olimit)) {
if (outputDirective == fillOutput) {
@ -1177,8 +1177,7 @@ _last_literals:
}
int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
int LZ4_compress_fast_extState(void *state, const char *source, char *dest, int inputSize, int maxOutputSize, int acceleration) {
LZ4_stream_t_internal *const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse;
assert(ctx != NULL);
if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
@ -1208,8 +1207,7 @@ int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int
* (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
* "correctly initialized").
*/
int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
{
int LZ4_compress_fast_extState_fastReset(void *state, const char *src, char *dst, int srcSize, int dstCapacity, int acceleration) {
LZ4_stream_t_internal *ctx = &((LZ4_stream_t *)state)->internal_donotuse;
if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
@ -1245,8 +1243,7 @@ int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst
}
int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
int LZ4_compress_fast(const char *source, char *dest, int inputSize, int maxOutputSize, int acceleration) {
int result;
#if (LZ4_HEAPMODE)
LZ4_stream_t *ctxPtr = ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
@ -1264,8 +1261,7 @@ int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutp
}
int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputSize)
{
int LZ4_compress_default(const char *src, char *dst, int srcSize, int maxOutputSize) {
return LZ4_compress_fast(src, dst, srcSize, maxOutputSize, 1);
}
@ -1273,8 +1269,7 @@ int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputS
int LZ4_compress_fast_force(const char *src, char *dst, int srcSize, int dstCapacity, int acceleration);
/* hidden debug function */
/* strangely enough, gcc generates faster code when this function is uncommented, even if unused */
int LZ4_compress_fast_force(const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
{
int LZ4_compress_fast_force(const char *src, char *dst, int srcSize, int dstCapacity, int acceleration) {
LZ4_stream_t ctx;
LZ4_initStream(&ctx, sizeof(ctx));
@ -1290,10 +1285,10 @@ int LZ4_compress_fast_force(const char* src, char* dst, int srcSize, int dstCapa
/* Note!: This function leaves the stream in an unclean/broken state!
* It is not safe to subsequently use the same state with a _fastReset() or
* _continue() call without resetting it. */
static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
static int LZ4_compress_destSize_extState(LZ4_stream_t *state, const char *src, char *dst, int *srcSizePtr, int targetDstSize) {
void *const s = LZ4_initStream(state, sizeof(*state));
assert(s != NULL); (void)s;
assert(s != NULL);
(void)s;
if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */
return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
@ -1303,12 +1298,12 @@ static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src,
} else {
tableType_t const addrMode = ((sizeof(void *) == 4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1);
} }
}
}
}
int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
int LZ4_compress_destSize(const char *src, char *dst, int *srcSizePtr, int targetDstSize) {
#if (LZ4_HEAPMODE)
LZ4_stream_t *ctx = (LZ4_stream_t *)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
if (ctx == NULL) return 0;
@ -1331,8 +1326,7 @@ int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targe
* Streaming functions
********************************/
LZ4_stream_t* LZ4_createStream(void)
{
LZ4_stream_t *LZ4_createStream(void) {
LZ4_stream_t *const lz4s = (LZ4_stream_t *)ALLOC(sizeof(LZ4_stream_t));
LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */
DEBUGLOG(4, "LZ4_createStream %p", lz4s);
@ -1344,15 +1338,13 @@ LZ4_stream_t* LZ4_createStream(void)
#ifndef _MSC_VER /* for some reason, Visual fails the alignment test on 32-bit x86 :
it reports an alignment of 8-bytes,
while actually aligning LZ4_stream_t on 4 bytes. */
static size_t LZ4_stream_t_alignment(void)
{
static size_t LZ4_stream_t_alignment(void) {
struct { char c; LZ4_stream_t t; } t_a;
return sizeof(t_a) - sizeof(t_a.t);
}
#endif
LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
{
LZ4_stream_t *LZ4_initStream(void *buffer, size_t size) {
DEBUGLOG(5, "LZ4_initStream");
if (buffer == NULL) { return NULL; }
if (size < sizeof(LZ4_stream_t)) { return NULL; }
@ -1367,8 +1359,7 @@ LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
/* resetStream is now deprecated,
* prefer initStream() which is more general */
void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
{
void LZ4_resetStream(LZ4_stream_t *LZ4_stream) {
DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
}
@ -1377,8 +1368,7 @@ void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
}
int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
{
int LZ4_freeStream(LZ4_stream_t *LZ4_stream) {
if (!LZ4_stream) return 0; /* support free on NULL */
DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
FREEMEM(LZ4_stream);
@ -1387,8 +1377,7 @@ int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
#define HASH_UNIT sizeof(reg_t)
int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
{
int LZ4_loadDict(LZ4_stream_t *LZ4_dict, const char *dictionary, int dictSize) {
LZ4_stream_t_internal *dict = &LZ4_dict->internal_donotuse;
const tableType_t tableType = byU32;
const BYTE *p = (const BYTE *)dictionary;
@ -1463,8 +1452,7 @@ void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dict
}
static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
{
static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict, int nextSize) {
assert(nextSize >= 0);
if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */
/* rescale hash table */
@ -1486,8 +1474,7 @@ static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream,
const char *source, char *dest,
int inputSize, int maxOutputSize,
int acceleration)
{
int acceleration) {
const tableType_t tableType = byU32;
LZ4_stream_t_internal *streamPtr = &LZ4_stream->internal_donotuse;
const BYTE *dictEnd = streamPtr->dictionary + streamPtr->dictSize;
@ -1508,7 +1495,8 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
}
/* Check overlapping input/dictionary space */
{ const BYTE* sourceEnd = (const BYTE*) source + inputSize;
{
const BYTE *sourceEnd = (const BYTE *) source + inputSize;
if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
@ -1526,7 +1514,8 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
}
/* external dictionary mode */
{ int result;
{
int result;
if (streamPtr->dictCtx) {
/* We depend here on the fact that dictCtx'es (produced by
* LZ4_loadDict) guarantee that their tables contain no references
@ -1559,8 +1548,7 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
/* Hidden debug function, to force-test external dictionary mode */
int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)
{
int LZ4_compress_forceExtDict(LZ4_stream_t *LZ4_dict, const char *source, char *dest, int srcSize) {
LZ4_stream_t_internal *streamPtr = &LZ4_dict->internal_donotuse;
int result;
@ -1586,8 +1574,7 @@ int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char*
* dictionary is immediately usable, you can therefore call LZ4_compress_fast_continue().
* Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
*/
int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
{
int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize) {
LZ4_stream_t_internal *const dict = &LZ4_dict->internal_donotuse;
const BYTE *const previousDictEnd = dict->dictionary + dict->dictSize;
@ -1624,8 +1611,7 @@ typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
*/
typedef enum { loop_error = -2, initial_error = -1, ok = 0 } variable_length_error;
LZ4_FORCE_INLINE unsigned
read_variable_length(const BYTE**ip, const BYTE* lencheck, int loop_check, int initial_check, variable_length_error* error)
{
read_variable_length(const BYTE **ip, const BYTE *lencheck, int loop_check, int initial_check, variable_length_error *error) {
unsigned length = 0;
unsigned s;
if (initial_check && unlikely((*ip) >= lencheck)) { /* overflow detection */
@ -1667,11 +1653,11 @@ LZ4_decompress_generic(
const BYTE *const lowPrefix, /* always <= dst, == dst when no prefix */
const BYTE *const dictStart, /* only if dict==usingExtDict */
const size_t dictSize /* note : = 0 if noDict */
)
{
) {
if (src == NULL) { return -1; }
{ const BYTE* ip = (const BYTE*) src;
{
const BYTE *ip = (const BYTE *) src;
const BYTE *const iend = ip + srcSize;
BYTE *op = (BYTE *) dst;
@ -1742,7 +1728,8 @@ LZ4_decompress_generic(
LZ4_wildCopy8(op, ip, cpy); /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time :
* it doesn't know input length, and only relies on end-of-block properties */
}
ip += length; op = cpy;
ip += length;
op = cpy;
} else {
cpy = op + length;
if (endOnInput) { /* LZ4_decompress_safe() */
@ -1757,11 +1744,13 @@ LZ4_decompress_generic(
memcpy(op, ip, 8);
if (length > 8) { memcpy(op + 8, ip + 8, 8); }
}
ip += length; op = cpy;
ip += length;
op = cpy;
}
/* get offset */
offset = LZ4_readLE16(ip); ip+=2;
offset = LZ4_readLE16(ip);
ip += 2;
match = op - offset;
assert(match <= op);
@ -1796,7 +1785,9 @@ LZ4_decompress_generic(
memcpy(op + 16, match + 16, 2);
op += length;
continue;
} } }
}
}
}
if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
/* match starting within external dictionary */
@ -1806,7 +1797,8 @@ LZ4_decompress_generic(
length = MIN(length, (size_t)(oend - op)); /* reach end of buffer */
} else {
goto _output_error; /* end-of-block condition violated */
} }
}
}
if (length <= (size_t)(lowPrefix - match)) {
/* match fits entirely within external dictionary : just copy */
@ -1825,7 +1817,8 @@ LZ4_decompress_generic(
} else {
memcpy(op, lowPrefix, restSize);
op += restSize;
} }
}
}
continue;
}
@ -1865,12 +1858,14 @@ LZ4_decompress_generic(
&& likely((endOnInput ? ip < shortiend : 1) & (op <= shortoend))) {
/* Copy the literals */
memcpy(op, ip, endOnInput ? 16 : 8);
op += length; ip += length;
op += length;
ip += length;
/* The second stage: prepare for match copying, decode full info.
* If it doesn't work out, the info won't be wasted. */
length = token & ML_MASK; /* match length */
offset = LZ4_readLE16(ip); ip += 2;
offset = LZ4_readLE16(ip);
ip += 2;
match = op - offset;
assert(match <= op); /* check overflow */
@ -1908,8 +1903,7 @@ LZ4_decompress_generic(
#endif
LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
if (((endOnInput) && ((cpy > oend - MFLIMIT) || (ip + length > iend - (2 + 1 + LASTLITERALS))))
|| ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
{
|| ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
/* We've either hit the input parsing restriction or the output parsing restriction.
* If we've hit the input parsing condition then this must be the last sequence.
* If we've hit the output parsing condition then we are either using partialDecoding
@ -1956,11 +1950,13 @@ LZ4_decompress_generic(
}
} else {
LZ4_wildCopy8(op, ip, cpy); /* may overwrite up to WILDCOPYLENGTH beyond cpy */
ip += length; op = cpy;
ip += length;
op = cpy;
}
/* get offset */
offset = LZ4_readLE16(ip); ip+=2;
offset = LZ4_readLE16(ip);
ip += 2;
match = op - offset;
/* get matchlength */
@ -2003,7 +1999,8 @@ LZ4_decompress_generic(
} else {
memcpy(op, lowPrefix, restSize);
op += restSize;
} }
}
}
continue;
}
assert(match >= lowPrefix);
@ -2075,16 +2072,14 @@ LZ4_decompress_generic(
/*===== Instantiate the API decoding functions. =====*/
LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
{
int LZ4_decompress_safe(const char *source, char *dest, int compressedSize, int maxDecompressedSize) {
return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
endOnInputSize, decode_full_block, noDict,
(BYTE *)dest, NULL, 0);
}
LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity)
{
int LZ4_decompress_safe_partial(const char *src, char *dst, int compressedSize, int targetOutputSize, int dstCapacity) {
dstCapacity = MIN(targetOutputSize, dstCapacity);
return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
endOnInputSize, partial_decode,
@ -2092,8 +2087,7 @@ int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize,
}
LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
{
int LZ4_decompress_fast(const char *source, char *dest, int originalSize) {
return LZ4_decompress_generic(source, dest, 0, originalSize,
endOnOutputSize, decode_full_block, withPrefix64k,
(BYTE *)dest - 64 KB, NULL, 0);
@ -2102,16 +2096,14 @@ int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
/*===== Instantiate a few more decoding cases, used more than once. =====*/
LZ4_FORCE_O2_GCC_PPC64LE /* Exported, an obsolete API function. */
int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
{
int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest, int compressedSize, int maxOutputSize) {
return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
endOnInputSize, decode_full_block, withPrefix64k,
(BYTE *)dest - 64 KB, NULL, 0);
}
/* Another obsolete API function, paired with the previous one. */
int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
{
int LZ4_decompress_fast_withPrefix64k(const char *source, char *dest, int originalSize) {
/* LZ4_decompress_fast doesn't validate match offsets,
* and thus serves well with any prefixed dictionary. */
return LZ4_decompress_fast(source, dest, originalSize);
@ -2119,8 +2111,7 @@ int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int origin
LZ4_FORCE_O2_GCC_PPC64LE
static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest, int compressedSize, int maxOutputSize,
size_t prefixSize)
{
size_t prefixSize) {
return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
endOnInputSize, decode_full_block, noDict,
(BYTE *)dest - prefixSize, NULL, 0);
@ -2129,8 +2120,7 @@ static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, i
LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
int compressedSize, int maxOutputSize,
const void* dictStart, size_t dictSize)
{
const void *dictStart, size_t dictSize) {
return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
endOnInputSize, decode_full_block, usingExtDict,
(BYTE *)dest, (const BYTE *)dictStart, dictSize);
@ -2138,8 +2128,7 @@ int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
LZ4_FORCE_O2_GCC_PPC64LE
static int LZ4_decompress_fast_extDict(const char *source, char *dest, int originalSize,
const void* dictStart, size_t dictSize)
{
const void *dictStart, size_t dictSize) {
return LZ4_decompress_generic(source, dest, 0, originalSize,
endOnOutputSize, decode_full_block, usingExtDict,
(BYTE *)dest, (const BYTE *)dictStart, dictSize);
@ -2151,8 +2140,7 @@ static int LZ4_decompress_fast_extDict(const char* source, char* dest, int origi
*/
LZ4_FORCE_INLINE
int LZ4_decompress_safe_doubleDict(const char *source, char *dest, int compressedSize, int maxOutputSize,
size_t prefixSize, const void* dictStart, size_t dictSize)
{
size_t prefixSize, const void *dictStart, size_t dictSize) {
return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
endOnInputSize, decode_full_block, usingExtDict,
(BYTE *)dest - prefixSize, (const BYTE *)dictStart, dictSize);
@ -2160,8 +2148,7 @@ int LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compresse
LZ4_FORCE_INLINE
int LZ4_decompress_fast_doubleDict(const char *source, char *dest, int originalSize,
size_t prefixSize, const void* dictStart, size_t dictSize)
{
size_t prefixSize, const void *dictStart, size_t dictSize) {
return LZ4_decompress_generic(source, dest, 0, originalSize,
endOnOutputSize, decode_full_block, usingExtDict,
(BYTE *)dest - prefixSize, (const BYTE *)dictStart, dictSize);
@ -2169,15 +2156,13 @@ int LZ4_decompress_fast_doubleDict(const char* source, char* dest, int originalS
/*===== streaming decompression functions =====*/
LZ4_streamDecode_t* LZ4_createStreamDecode(void)
{
LZ4_streamDecode_t *LZ4_createStreamDecode(void) {
LZ4_streamDecode_t *lz4s = (LZ4_streamDecode_t *) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));
LZ4_STATIC_ASSERT(LZ4_STREAMDECODESIZE >= sizeof(LZ4_streamDecode_t_internal)); /* A compilation error here means LZ4_STREAMDECODESIZE is not large enough */
return lz4s;
}
int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
{
int LZ4_freeStreamDecode(LZ4_streamDecode_t *LZ4_stream) {
if (LZ4_stream == NULL) { return 0; } /* support free on NULL */
FREEMEM(LZ4_stream);
return 0;
@ -2189,8 +2174,7 @@ int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
* Loading a size of 0 is allowed (same effect as no dictionary).
* @return : 1 if OK, 0 if error
*/
int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
{
int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, const char *dictionary, int dictSize) {
LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
lz4sd->prefixSize = (size_t) dictSize;
lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize;
@ -2210,8 +2194,7 @@ int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dicti
* @return : minimum ring buffer size,
* or 0 if there is an error (invalid maxBlockSize).
*/
int LZ4_decoderRingBufferSize(int maxBlockSize)
{
int LZ4_decoderRingBufferSize(int maxBlockSize) {
if (maxBlockSize < 0) return 0;
if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0;
if (maxBlockSize < 16) maxBlockSize = 16;
@ -2226,8 +2209,7 @@ int LZ4_decoderRingBufferSize(int maxBlockSize)
and indicate where it stands using LZ4_setStreamDecode()
*/
LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
{
int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode, const char *source, char *dest, int compressedSize, int maxOutputSize) {
LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
int result;
@ -2266,8 +2248,7 @@ int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const ch
}
LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
{
int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode, const char *source, char *dest, int originalSize) {
LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
int result;
assert(originalSize >= 0);
@ -2308,8 +2289,7 @@ Advanced decoding functions :
the dictionary must be explicitly provided within parameters
*/
int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
{
int LZ4_decompress_safe_usingDict(const char *source, char *dest, int compressedSize, int maxOutputSize, const char *dictStart, int dictSize) {
if (dictSize == 0)
return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
if (dictStart + dictSize == dest) {
@ -2323,8 +2303,7 @@ int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressed
return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);
}
int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
{
int LZ4_decompress_fast_usingDict(const char *source, char *dest, int originalSize, const char *dictStart, int dictSize) {
if (dictSize == 0 || dictStart + dictSize == dest)
return LZ4_decompress_fast(source, dest, originalSize);
assert(dictSize >= 0);
@ -2336,28 +2315,22 @@ int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSi
* Obsolete Functions
***************************************************/
/* obsolete compression functions */
int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
{
int LZ4_compress_limitedOutput(const char *source, char *dest, int inputSize, int maxOutputSize) {
return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
}
int LZ4_compress(const char* src, char* dest, int srcSize)
{
int LZ4_compress(const char *src, char *dest, int srcSize) {
return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize));
}
int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)
{
int LZ4_compress_limitedOutput_withState(void *state, const char *src, char *dst, int srcSize, int dstSize) {
return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);
}
int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)
{
int LZ4_compress_withState(void *state, const char *src, char *dst, int srcSize) {
return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1);
}
int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity)
{
int LZ4_compress_limitedOutput_continue(LZ4_stream_t *LZ4_stream, const char *src, char *dst, int srcSize, int dstCapacity) {
return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1);
}
int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)
{
int LZ4_compress_continue(LZ4_stream_t *LZ4_stream, const char *source, char *dest, int inputSize) {
return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1);
}
@ -2367,12 +2340,10 @@ They are only provided here for compatibility with older user programs.
- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
*/
int LZ4_uncompress (const char* source, char* dest, int outputSize)
{
int LZ4_uncompress(const char *source, char *dest, int outputSize) {
return LZ4_decompress_fast(source, dest, outputSize);
}
int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize)
{
int LZ4_uncompress_unknownOutputSize(const char *source, char *dest, int isize, int maxOutputSize) {
return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
}
@ -2380,21 +2351,18 @@ int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize,
int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; }
int LZ4_resetStreamState(void* state, char* inputBuffer)
{
int LZ4_resetStreamState(void *state, char *inputBuffer) {
(void)inputBuffer;
LZ4_resetStream((LZ4_stream_t *)state);
return 0;
}
void* LZ4_create (char* inputBuffer)
{
void *LZ4_create(char *inputBuffer) {
(void)inputBuffer;
return LZ4_createStream();
}
char* LZ4_slideInputBuffer (void* state)
{
char *LZ4_slideInputBuffer(void *state) {
/* avoid const char * -> char * conversion warning */
return (char *)(uptrval)((LZ4_stream_t *)state)->internal_donotuse.dictionary;
}

View file

@ -90,14 +90,12 @@ static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)
/**************************************
* HC Compression
**************************************/
static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4)
{
static void LZ4HC_clearTables(LZ4HC_CCtx_internal *hc4) {
MEM_INIT((void *)hc4->hashTable, 0, sizeof(hc4->hashTable));
MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
}
static void LZ4HC_init_internal (LZ4HC_CCtx_internal* hc4, const BYTE* start)
{
static void LZ4HC_init_internal(LZ4HC_CCtx_internal *hc4, const BYTE *start) {
uptrval startingOffset = (uptrval)(hc4->end - hc4->base);
if (startingOffset > 1 GB) {
LZ4HC_clearTables(hc4);
@ -114,8 +112,7 @@ static void LZ4HC_init_internal (LZ4HC_CCtx_internal* hc4, const BYTE* start)
/* Update chains up to ip (excluded) */
LZ4_FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip)
{
LZ4_FORCE_INLINE void LZ4HC_Insert(LZ4HC_CCtx_internal *hc4, const BYTE *ip) {
U16 *const chainTable = hc4->chainTable;
U32 *const hashTable = hc4->hashTable;
const BYTE *const base = hc4->base;
@ -138,13 +135,14 @@ LZ4_FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip)
* @return : negative value, nb of common bytes before ip/match */
LZ4_FORCE_INLINE
int LZ4HC_countBack(const BYTE *const ip, const BYTE *const match,
const BYTE* const iMin, const BYTE* const mMin)
{
const BYTE *const iMin, const BYTE *const mMin) {
int back = 0;
int const min = (int)MAX(iMin - ip, mMin - match);
assert(min <= 0);
assert(ip >= iMin); assert((size_t)(ip-iMin) < (1U<<31));
assert(match >= mMin); assert((size_t)(match - mMin) < (1U<<31));
assert(ip >= iMin);
assert((size_t)(ip - iMin) < (1U << 31));
assert(match >= mMin);
assert((size_t)(match - mMin) < (1U << 31));
while ((back > min)
&& (ip[back - 1] == match[back - 1]))
back--;
@ -158,8 +156,7 @@ int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match,
#endif
static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern)
{
static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern) {
size_t const bitsToRotate = (rotate & (sizeof(pattern) - 1)) << 3;
if (bitsToRotate == 0)
return pattern;
@ -169,8 +166,7 @@ static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern)
/* LZ4HC_countPattern() :
* pattern32 must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) */
static unsigned
LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
{
LZ4HC_countPattern(const BYTE *ip, const BYTE *const iEnd, U32 const pattern32) {
const BYTE *const iStart = ip;
reg_t const pattern = (sizeof(pattern) == 8) ? (reg_t)pattern32 + (((reg_t)pattern32) << 32) : pattern32;
@ -184,14 +180,16 @@ LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
if (LZ4_isLittleEndian()) {
reg_t patternByte = pattern;
while ((ip < iEnd) && (*ip == (BYTE)patternByte)) {
ip++; patternByte >>= 8;
ip++;
patternByte >>= 8;
}
} else { /* big endian */
U32 bitOffset = (sizeof(pattern) * 8) - 8;
while (ip < iEnd) {
BYTE const byte = (BYTE)(pattern >> bitOffset);
if (*ip != byte) break;
ip ++; bitOffset -= 8;
ip ++;
bitOffset -= 8;
}
}
@ -202,19 +200,21 @@ LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
* pattern must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!)
* read using natural platform endianess */
static unsigned
LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern)
{
LZ4HC_reverseCountPattern(const BYTE *ip, const BYTE *const iLow, U32 pattern) {
const BYTE *const iStart = ip;
while (likely(ip >= iLow + 4)) {
if (LZ4_read32(ip - 4) != pattern) break;
ip -= 4;
}
{ const BYTE* bytePtr = (const BYTE*)(&pattern) + 3; /* works for any endianess */
{
const BYTE *bytePtr = (const BYTE *)(&pattern) + 3; /* works for any endianess */
while (likely(ip > iLow)) {
if (ip[-1] != *bytePtr) break;
ip--; bytePtr--;
} }
ip--;
bytePtr--;
}
}
return (unsigned)(iStart - ip);
}
@ -223,8 +223,7 @@ LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern)
* 4 byte MINMATCH would overflow.
* @returns true if the match index is okay.
*/
static int LZ4HC_protectDictEnd(U32 const dictLimit, U32 const matchIndex)
{
static int LZ4HC_protectDictEnd(U32 const dictLimit, U32 const matchIndex) {
return ((U32)((dictLimit - 1) - matchIndex) >= 3);
}
@ -244,8 +243,7 @@ LZ4HC_InsertAndGetWiderMatch (
const int patternAnalysis,
const int chainSwap,
const dictCtx_directive dict,
const HCfavor_e favorDecSpeed)
{
const HCfavor_e favorDecSpeed) {
U16 *const chainTable = hc4->chainTable;
U32 *const HashTable = hc4->hashTable;
const LZ4HC_CCtx_internal *const dictCtx = hc4->dictCtx;
@ -290,7 +288,9 @@ LZ4HC_InsertAndGetWiderMatch (
longest = matchLength;
*matchpos = matchPtr + back;
*startpos = ip + back;
} } }
}
}
}
} else { /* lowestMatchIndex <= matchIndex < dictLimit */
const BYTE *const matchPtr = dictBase + matchIndex;
if (LZ4_read32(matchPtr) == pattern) {
@ -307,7 +307,9 @@ LZ4HC_InsertAndGetWiderMatch (
longest = matchLength;
*matchpos = base + matchIndex + back; /* virtual pos, relative to ip, to retrieve offset */
*startpos = ip + back;
} } }
}
}
}
if (chainSwap && matchLength == longest) { /* better match => select a better chain */
assert(lookBackLength == 0); /* search forward only */
@ -331,9 +333,12 @@ LZ4HC_InsertAndGetWiderMatch (
if (distanceToNextMatch > matchIndex) break; /* avoid overflow */
matchIndex -= distanceToNextMatch;
continue;
} } }
}
}
}
{ U32 const distNextMatch = DELTANEXTU16(chainTable, matchIndex);
{
U32 const distNextMatch = DELTANEXTU16(chainTable, matchIndex);
if (patternAnalysis && distNextMatch == 1 && matchChainPos == 0) {
U32 const matchCandidateIdx = matchIndex - 1;
/* may be a repeated pattern */
@ -344,7 +349,8 @@ LZ4HC_InsertAndGetWiderMatch (
srcPatternLength = LZ4HC_countPattern(ip + sizeof(pattern), iHighLimit, pattern) + sizeof(pattern);
} else {
repeat = rep_not;
} }
}
}
if ((repeat == rep_confirmed) && (matchCandidateIdx >= lowestMatchIndex)
&& LZ4HC_protectDictEnd(dictLimit, matchCandidateIdx)) {
const int extDict = matchCandidateIdx < dictLimit;
@ -357,7 +363,8 @@ LZ4HC_InsertAndGetWiderMatch (
U32 const rotatedPattern = LZ4HC_rotatePattern(forwardPatternLength, pattern);
forwardPatternLength += LZ4HC_countPattern(lowPrefixPtr, iHighLimit, rotatedPattern);
}
{ const BYTE* const lowestMatchPtr = extDict ? dictStart : lowPrefixPtr;
{
const BYTE *const lowestMatchPtr = extDict ? dictStart : lowPrefixPtr;
size_t backLength = LZ4HC_reverseCountPattern(matchPtr, lowestMatchPtr, pattern);
size_t currentSegmentLength;
if (!extDict && matchPtr - backLength == lowPrefixPtr && hc4->lowLimit < dictLimit) {
@ -396,13 +403,20 @@ LZ4HC_InsertAndGetWiderMatch (
*matchpos = base + matchIndex; /* virtual pos, relative to ip, to retrieve offset */
*startpos = ip;
}
{ U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex);
{
U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex);
if (distToNextPattern > matchIndex) break; /* avoid overflow */
matchIndex -= distToNextPattern;
} } } } }
}
}
}
}
}
continue;
} }
} } /* PA optimization */
}
}
}
} /* PA optimization */
/* follow current chain */
matchIndex -= DELTANEXTU16(chainTable, matchIndex + matchChainPos);
@ -431,12 +445,16 @@ LZ4HC_InsertAndGetWiderMatch (
longest = mlt;
*matchpos = base + matchIndex + back;
*startpos = ip + back;
} }
}
}
{ U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, dictMatchIndex);
{
U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, dictMatchIndex);
dictMatchIndex -= nextOffset;
matchIndex -= nextOffset;
} } }
}
}
}
return longest;
}
@ -447,8 +465,7 @@ int LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4, /* Index tabl
const BYTE **matchpos,
const int maxNbAttempts,
const int patternAnalysis,
const dictCtx_directive dict)
{
const dictCtx_directive dict) {
const BYTE *uselessPtr = ip;
/* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
* but this won't be the case here, as we define iLowLimit==ip,
@ -466,8 +483,7 @@ LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
int matchLength,
const BYTE *const match,
limitedOutput_directive limit,
BYTE* oend)
{
BYTE *oend) {
size_t length;
BYTE *const token = (*op)++;
@ -506,7 +522,8 @@ LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
/* Encode Offset */
assert((*ip - match) <= LZ4_DISTANCE_MAX); /* note : consider providing offset as a value, rather than as a pointer difference */
LZ4_writeLE16(*op, (U16)(*ip-match)); *op += 2;
LZ4_writeLE16(*op, (U16)(*ip - match));
*op += 2;
/* Encode MatchLength */
assert(matchLength >= MINMATCH);
@ -538,8 +555,7 @@ LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
unsigned maxNbAttempts,
const limitedOutput_directive limit,
const dictCtx_directive dict
)
{
) {
const int inputSize = *srcSizePtr;
const int patternAnalysis = (maxNbAttempts > 128); /* levels 9+ */
@ -573,7 +589,9 @@ LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
if (ml < MINMATCH) { ip++; continue; }
/* saved, in case we would skip too much */
start0 = ip; ref0 = ref; ml0 = ml;
start0 = ip;
ref0 = ref;
ml0 = ml;
_Search2:
if (ip + ml <= mflimit) {
@ -592,8 +610,11 @@ _Search2:
if (start0 < ip) { /* first match was skipped at least once */
if (start2 < ip + ml0) { /* squeezing ML1 between ML0(original ML1) and ML2 */
ip = start0; ref = ref0; ml = ml0; /* restore initial ML1 */
} }
ip = start0;
ref = ref0;
ml = ml0; /* restore initial ML1 */
}
}
/* Here, start0==ip */
if ((start2 - ip) < 3) { /* First Match too small : removed */
@ -697,10 +718,14 @@ _Search3:
if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
/* ML2 becomes ML1 */
ip = start2; ref = ref2; ml = ml2;
ip = start2;
ref = ref2;
ml = ml2;
/* ML3 becomes ML2 */
start2 = start3; ref2 = ref3; ml2 = ml3;
start2 = start3;
ref2 = ref3;
ml2 = ml3;
/* let's find a new ML3 */
goto _Search3;
@ -708,7 +733,8 @@ _Search3:
_last_literals:
/* Encode Last Literals */
{ size_t lastRunSize = (size_t)(iend - anchor); /* literals */
{
size_t lastRunSize = (size_t)(iend - anchor); /* literals */
size_t litLength = (lastRunSize + 255 - RUN_MASK) / 255;
size_t const totalSize = 1 + litLength + lastRunSize;
if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */
@ -764,8 +790,7 @@ LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
int cLevel,
const limitedOutput_directive limit,
const dictCtx_directive dict
)
{
) {
typedef enum { lz4hc, lz4opt } lz4hc_strat_e;
typedef struct {
lz4hc_strat_e strat;
@ -796,7 +821,8 @@ LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
ctx->end += *srcSizePtr;
if (cLevel < 1) cLevel = LZ4HC_CLEVEL_DEFAULT; /* note : convention is different from lz4frame, maybe something to review */
cLevel = MIN(LZ4HC_CLEVEL_MAX, cLevel);
{ cParams_t const cParam = clTable[cLevel];
{
cParams_t const cParam = clTable[cLevel];
HCfavor_e const favor = ctx->favorDecSpeed ? favorDecompressionSpeed : favorCompressionRatio;
int result;
@ -828,8 +854,7 @@ LZ4HC_compress_generic_noDictCtx (
int const dstCapacity,
int cLevel,
limitedOutput_directive limit
)
{
) {
assert(ctx->dictCtx == NULL);
return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, noDictCtx);
}
@ -843,8 +868,7 @@ LZ4HC_compress_generic_dictCtx (
int const dstCapacity,
int cLevel,
limitedOutput_directive limit
)
{
) {
const size_t position = (size_t)(ctx->end - ctx->base) - ctx->lowLimit;
assert(ctx->dictCtx != NULL);
if (position >= 64 KB) {
@ -869,8 +893,7 @@ LZ4HC_compress_generic (
int const dstCapacity,
int cLevel,
limitedOutput_directive limit
)
{
) {
if (ctx->dictCtx == NULL) {
return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
} else {
@ -884,8 +907,7 @@ int LZ4_sizeofStateHC(void) { return (int)sizeof(LZ4_streamHC_t); }
#ifndef _MSC_VER /* for some reason, Visual fails the aligment test on 32-bit x86 :
* it reports an aligment of 8-bytes,
* while actually aligning LZ4_streamHC_t on 4 bytes. */
static size_t LZ4_streamHC_t_alignment(void)
{
static size_t LZ4_streamHC_t_alignment(void) {
struct { char c; LZ4_streamHC_t t; } t_a;
return sizeof(t_a) - sizeof(t_a.t);
}
@ -893,8 +915,7 @@ static size_t LZ4_streamHC_t_alignment(void)
/* state is presumed correctly initialized,
* in which case its size and alignment have already been validate */
int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
{
int LZ4_compress_HC_extStateHC_fastReset(void *state, const char *src, char *dst, int srcSize, int dstCapacity, int compressionLevel) {
LZ4HC_CCtx_internal *const ctx = &((LZ4_streamHC_t *)state)->internal_donotuse;
#ifndef _MSC_VER /* for some reason, Visual fails the aligment test on 32-bit x86 :
* it reports an aligment of 8-bytes,
@ -910,15 +931,13 @@ int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* ds
return LZ4HC_compress_generic(ctx, src, dst, &srcSize, dstCapacity, compressionLevel, notLimited);
}
int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
{
int LZ4_compress_HC_extStateHC(void *state, const char *src, char *dst, int srcSize, int dstCapacity, int compressionLevel) {
LZ4_streamHC_t *const ctx = LZ4_initStreamHC(state, sizeof(*ctx));
if (ctx == NULL) return 0; /* init failure */
return LZ4_compress_HC_extStateHC_fastReset(state, src, dst, srcSize, dstCapacity, compressionLevel);
}
int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
{
int LZ4_compress_HC(const char *src, char *dst, int srcSize, int dstCapacity, int compressionLevel) {
#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
LZ4_streamHC_t *const statePtr = (LZ4_streamHC_t *)ALLOC(sizeof(LZ4_streamHC_t));
#else
@ -933,8 +952,7 @@ int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, in
}
/* state is presumed sized correctly (>= sizeof(LZ4_streamHC_t)) */
int LZ4_compress_HC_destSize(void* state, const char* source, char* dest, int* sourceSizePtr, int targetDestSize, int cLevel)
{
int LZ4_compress_HC_destSize(void *state, const char *source, char *dest, int *sourceSizePtr, int targetDestSize, int cLevel) {
LZ4_streamHC_t *const ctx = LZ4_initStreamHC(state, sizeof(*ctx));
if (ctx == NULL) return 0; /* init failure */
LZ4HC_init_internal(&ctx->internal_donotuse, (const BYTE *) source);
@ -948,16 +966,14 @@ int LZ4_compress_HC_destSize(void* state, const char* source, char* dest, int* s
* Streaming Functions
**************************************/
/* allocation */
LZ4_streamHC_t* LZ4_createStreamHC(void)
{
LZ4_streamHC_t *LZ4_createStreamHC(void) {
LZ4_streamHC_t *const LZ4_streamHCPtr = (LZ4_streamHC_t *)ALLOC(sizeof(LZ4_streamHC_t));
if (LZ4_streamHCPtr == NULL) return NULL;
LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr)); /* full initialization, malloc'ed buffer can be full of garbage */
return LZ4_streamHCPtr;
}
int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)
{
int LZ4_freeStreamHC(LZ4_streamHC_t *LZ4_streamHCPtr) {
DEBUGLOG(4, "LZ4_freeStreamHC(%p)", LZ4_streamHCPtr);
if (!LZ4_streamHCPtr) return 0; /* support free on NULL */
FREEMEM(LZ4_streamHCPtr);
@ -965,8 +981,7 @@ int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)
}
LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size)
{
LZ4_streamHC_t *LZ4_initStreamHC(void *buffer, size_t size) {
LZ4_streamHC_t *const LZ4_streamHCPtr = (LZ4_streamHC_t *)buffer;
if (buffer == NULL) return NULL;
if (size < sizeof(LZ4_streamHC_t)) return NULL;
@ -989,14 +1004,12 @@ LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size)
}
/* just a stub */
void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
{
void LZ4_resetStreamHC(LZ4_streamHC_t *LZ4_streamHCPtr, int compressionLevel) {
LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
}
void LZ4_resetStreamHC_fast (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
{
void LZ4_resetStreamHC_fast(LZ4_streamHC_t *LZ4_streamHCPtr, int compressionLevel) {
DEBUGLOG(4, "LZ4_resetStreamHC_fast(%p, %d)", LZ4_streamHCPtr, compressionLevel);
if (LZ4_streamHCPtr->internal_donotuse.dirty) {
LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
@ -1009,24 +1022,21 @@ void LZ4_resetStreamHC_fast (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLev
LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
}
void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
{
void LZ4_setCompressionLevel(LZ4_streamHC_t *LZ4_streamHCPtr, int compressionLevel) {
DEBUGLOG(5, "LZ4_setCompressionLevel(%p, %d)", LZ4_streamHCPtr, compressionLevel);
if (compressionLevel < 1) compressionLevel = LZ4HC_CLEVEL_DEFAULT;
if (compressionLevel > LZ4HC_CLEVEL_MAX) compressionLevel = LZ4HC_CLEVEL_MAX;
LZ4_streamHCPtr->internal_donotuse.compressionLevel = (short)compressionLevel;
}
void LZ4_favorDecompressionSpeed(LZ4_streamHC_t* LZ4_streamHCPtr, int favor)
{
void LZ4_favorDecompressionSpeed(LZ4_streamHC_t *LZ4_streamHCPtr, int favor) {
LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = (favor != 0);
}
/* LZ4_loadDictHC() :
* LZ4_streamHCPtr is presumed properly initialized */
int LZ4_loadDictHC(LZ4_streamHC_t *LZ4_streamHCPtr,
const char* dictionary, int dictSize)
{
const char *dictionary, int dictSize) {
LZ4HC_CCtx_internal *const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
DEBUGLOG(4, "LZ4_loadDictHC(%p, %p, %d)", LZ4_streamHCPtr, dictionary, dictSize);
assert(LZ4_streamHCPtr != NULL);
@ -1035,7 +1045,8 @@ int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr,
dictSize = 64 KB;
}
/* need a full initialization, there are bad side-effects when using resetFast() */
{ int const cLevel = ctxPtr->compressionLevel;
{
int const cLevel = ctxPtr->compressionLevel;
LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
LZ4_setCompressionLevel(LZ4_streamHCPtr, cLevel);
}
@ -1051,8 +1062,7 @@ void LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream, const LZ4_streamHC
/* compression */
static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock)
{
static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal *ctxPtr, const BYTE *newBlock) {
DEBUGLOG(4, "LZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock);
if (ctxPtr->end >= ctxPtr->base + ctxPtr->dictLimit + 4)
LZ4HC_Insert(ctxPtr, ctxPtr->end - 3); /* Referencing remaining dictionary content */
@ -1072,8 +1082,7 @@ static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBl
static int LZ4_compressHC_continue_generic(LZ4_streamHC_t *LZ4_streamHCPtr,
const char *src, char *dst,
int *srcSizePtr, int dstCapacity,
limitedOutput_directive limit)
{
limitedOutput_directive limit) {
LZ4HC_CCtx_internal *const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
DEBUGLOG(4, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d)",
LZ4_streamHCPtr, src, *srcSizePtr);
@ -1093,7 +1102,8 @@ static int LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
LZ4HC_setExternalDict(ctxPtr, (const BYTE *)src);
/* Check overlapping input/dictionary space */
{ const BYTE* sourceEnd = (const BYTE*) src + *srcSizePtr;
{
const BYTE *sourceEnd = (const BYTE *) src + *srcSizePtr;
const BYTE *const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
const BYTE *const dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit;
if ((sourceEnd > dictBegin) && ((const BYTE *)src < dictEnd)) {
@ -1106,16 +1116,14 @@ static int LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
return LZ4HC_compress_generic(ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit);
}
int LZ4_compress_HC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int srcSize, int dstCapacity)
{
int LZ4_compress_HC_continue(LZ4_streamHC_t *LZ4_streamHCPtr, const char *src, char *dst, int srcSize, int dstCapacity) {
if (dstCapacity < LZ4_compressBound(srcSize))
return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, limitedOutput);
else
return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, notLimited);
}
int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int* srcSizePtr, int targetDestSize)
{
int LZ4_compress_HC_continue_destSize(LZ4_streamHC_t *LZ4_streamHCPtr, const char *src, char *dst, int *srcSizePtr, int targetDestSize) {
return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, srcSizePtr, targetDestSize, fillOutput);
}
@ -1123,8 +1131,7 @@ int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const ch
/* dictionary saving */
int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize)
{
int LZ4_saveDictHC(LZ4_streamHC_t *LZ4_streamHCPtr, char *safeBuffer, int dictSize) {
LZ4HC_CCtx_internal *const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
int const prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit));
DEBUGLOG(4, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
@ -1132,7 +1139,8 @@ int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictS
if (dictSize < 4) dictSize = 0;
if (dictSize > prefixSize) dictSize = prefixSize;
memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
{ U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
{
U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
streamPtr->end = (const BYTE *)safeBuffer + dictSize;
streamPtr->base = streamPtr->end - endIndex;
streamPtr->dictLimit = endIndex - (U32)dictSize;
@ -1167,41 +1175,35 @@ int LZ4_sizeofStreamStateHC(void) { return LZ4_STREAMHCSIZE; }
/* state is presumed correctly sized, aka >= sizeof(LZ4_streamHC_t)
* @return : 0 on success, !=0 if error */
int LZ4_resetStreamStateHC(void* state, char* inputBuffer)
{
int LZ4_resetStreamStateHC(void *state, char *inputBuffer) {
LZ4_streamHC_t *const hc4 = LZ4_initStreamHC(state, sizeof(*hc4));
if (hc4 == NULL) return 1; /* init failed */
LZ4HC_init_internal(&hc4->internal_donotuse, (const BYTE *)inputBuffer);
return 0;
}
void* LZ4_createHC (const char* inputBuffer)
{
void *LZ4_createHC(const char *inputBuffer) {
LZ4_streamHC_t *const hc4 = LZ4_createStreamHC();
if (hc4 == NULL) return NULL; /* not enough memory */
LZ4HC_init_internal(&hc4->internal_donotuse, (const BYTE *)inputBuffer);
return hc4;
}
int LZ4_freeHC (void* LZ4HC_Data)
{
int LZ4_freeHC(void *LZ4HC_Data) {
if (!LZ4HC_Data) return 0; /* support free on NULL */
FREEMEM(LZ4HC_Data);
return 0;
}
int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int cLevel)
{
int LZ4_compressHC2_continue(void *LZ4HC_Data, const char *src, char *dst, int srcSize, int cLevel) {
return LZ4HC_compress_generic(&((LZ4_streamHC_t *)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, 0, cLevel, notLimited);
}
int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int dstCapacity, int cLevel)
{
int LZ4_compressHC2_limitedOutput_continue(void *LZ4HC_Data, const char *src, char *dst, int srcSize, int dstCapacity, int cLevel) {
return LZ4HC_compress_generic(&((LZ4_streamHC_t *)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, dstCapacity, cLevel, limitedOutput);
}
char* LZ4_slideInputBufferHC(void* LZ4HC_Data)
{
char *LZ4_slideInputBufferHC(void *LZ4HC_Data) {
LZ4_streamHC_t *ctx = (LZ4_streamHC_t *)LZ4HC_Data;
const BYTE *bufferStart = ctx->internal_donotuse.base + ctx->internal_donotuse.lowLimit;
LZ4_resetStreamHC_fast(ctx, ctx->internal_donotuse.compressionLevel);
@ -1221,8 +1223,7 @@ typedef struct {
} LZ4HC_optimal_t;
/* price in bytes */
LZ4_FORCE_INLINE int LZ4HC_literalsPrice(int const litlen)
{
LZ4_FORCE_INLINE int LZ4HC_literalsPrice(int const litlen) {
int price = litlen;
assert(litlen >= 0);
if (litlen >= (int)RUN_MASK)
@ -1232,8 +1233,7 @@ LZ4_FORCE_INLINE int LZ4HC_literalsPrice(int const litlen)
/* requires mlen >= MINMATCH */
LZ4_FORCE_INLINE int LZ4HC_sequencePrice(int litlen, int mlen)
{
LZ4_FORCE_INLINE int LZ4HC_sequencePrice(int litlen, int mlen) {
int price = 1 + 2 ; /* token + 16-bit offset */
assert(litlen >= 0);
assert(mlen >= MINMATCH);
@ -1257,8 +1257,7 @@ LZ4HC_FindLongerMatch(LZ4HC_CCtx_internal* const ctx,
const BYTE *ip, const BYTE *const iHighLimit,
int minLen, int nbSearches,
const dictCtx_directive dict,
const HCfavor_e favorDecSpeed)
{
const HCfavor_e favorDecSpeed) {
LZ4HC_match_t match = { 0, 0 };
const BYTE *matchPtr = NULL;
/* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
@ -1285,8 +1284,7 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
const limitedOutput_directive limit,
int const fullUpdate,
const dictCtx_directive dict,
const HCfavor_e favorDecSpeed)
{
const HCfavor_e favorDecSpeed) {
#define TRAILING_LITERALS 3
LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS]; /* ~64 KB, which is a bit large for stack... */
@ -1326,7 +1324,8 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
}
/* set prices for first positions (literals) */
{ int rPos;
{
int rPos;
for (rPos = 0 ; rPos < MINMATCH ; rPos++) {
int const cost = LZ4HC_literalsPrice(llen + rPos);
opt[rPos].mlen = 1;
@ -1335,9 +1334,11 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
opt[rPos].price = cost;
DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
rPos, cost, opt[rPos].litlen);
} }
}
}
/* set prices using initial match */
{ int mlen = MINMATCH;
{
int mlen = MINMATCH;
int const matchML = firstMatch.len; /* necessarily < sufficient_len < LZ4_OPT_NUM */
int const offset = firstMatch.off;
assert(matchML < LZ4_OPT_NUM);
@ -1349,9 +1350,11 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
opt[mlen].price = cost;
DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i) -- initial setup",
mlen, cost, mlen);
} }
}
}
last_match_pos = firstMatch.len;
{ int addLit;
{
int addLit;
for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) {
opt[last_match_pos + addLit].mlen = 1; /* literal */
opt[last_match_pos + addLit].off = 0;
@ -1359,7 +1362,8 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
opt[last_match_pos + addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit);
DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
last_match_pos + addLit, opt[last_match_pos + addLit].price, addLit);
} }
}
}
/* check further positions */
for (cur = 1; cur < last_match_pos; cur++) {
@ -1398,7 +1402,8 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
}
/* before match : set price with literals at beginning */
{ int const baseLitlen = opt[cur].litlen;
{
int const baseLitlen = opt[cur].litlen;
int litlen;
for (litlen = 1; litlen < MINMATCH; litlen++) {
int const price = opt[cur].price - LZ4HC_literalsPrice(baseLitlen) + LZ4HC_literalsPrice(baseLitlen + litlen);
@ -1410,10 +1415,13 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
opt[pos].price = price;
DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)",
pos, price, opt[pos].litlen);
} } }
}
}
}
/* set prices using match at position = cur */
{ int const matchML = newMatch.len;
{
int const matchML = newMatch.len;
int ml = MINMATCH;
assert(cur + newMatch.len < LZ4_OPT_NUM);
@ -1446,16 +1454,20 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
opt[pos].off = offset;
opt[pos].litlen = ll;
opt[pos].price = price;
} } }
}
}
}
/* complete following positions with literals */
{ int addLit;
{
int addLit;
for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) {
opt[last_match_pos + addLit].mlen = 1; /* literal */
opt[last_match_pos + addLit].off = 0;
opt[last_match_pos + addLit].litlen = addLit;
opt[last_match_pos + addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit);
DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", last_match_pos + addLit, opt[last_match_pos + addLit].price, addLit);
} }
}
}
} /* for (cur = 1; cur <= last_match_pos; cur++) */
assert(last_match_pos < LZ4_OPT_NUM + TRAILING_LITERALS);
@ -1467,7 +1479,8 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
assert(cur < LZ4_OPT_NUM);
assert(last_match_pos >= 1); /* == 1 when only one candidate */
DEBUGLOG(6, "reverse traversal, looking for shortest path (last_match_pos=%i)", last_match_pos);
{ int candidate_pos = cur;
{
int candidate_pos = cur;
int selected_matchLength = best_mlen;
int selected_offset = best_off;
while (1) { /* from end to beginning */
@ -1481,10 +1494,12 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
if (next_matchLength > candidate_pos) break; /* last match elected, first match to encode */
assert(next_matchLength > 0); /* can be 1, means literal */
candidate_pos -= next_matchLength;
} }
}
}
/* encode all recorded sequences in order */
{ int rPos = 0; /* relative position (to ip) */
{
int rPos = 0; /* relative position (to ip) */
while (rPos < last_match_pos) {
int const ml = opt[rPos].mlen;
int const offset = opt[rPos].off;
@ -1495,12 +1510,14 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
opSaved = op;
if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ip - offset, limit, oend)) /* updates ip, op and anchor */
goto _dest_overflow;
} }
}
}
} /* while (ip <= mflimit) */
_last_literals:
/* Encode Last Literals */
{ size_t lastRunSize = (size_t)(iend - anchor); /* literals */
{
size_t lastRunSize = (size_t)(iend - anchor); /* literals */
size_t litLength = (lastRunSize + 255 - RUN_MASK) / 255;
size_t const totalSize = 1 + litLength + lastRunSize;
if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */

View file

@ -202,8 +202,7 @@ LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, in
#include <stdint.h>
typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
struct LZ4HC_CCtx_internal
{
struct LZ4HC_CCtx_internal {
uint32_t hashTable[LZ4HC_HASHTABLESIZE];
uint16_t chainTable[LZ4HC_MAXD];
const uint8_t *end; /* next block here to continue on current prefix */
@ -222,8 +221,7 @@ struct LZ4HC_CCtx_internal
#else
typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
struct LZ4HC_CCtx_internal
{
struct LZ4HC_CCtx_internal {
unsigned int hashTable[LZ4HC_HASHTABLESIZE];
unsigned short chainTable[LZ4HC_MAXD];
const unsigned char *end; /* next block here to continue on current prefix */