Update to Zstandard 1.4.4

The full release notes can be found on GitHub:

  https://github.com/facebook/zstd/releases/tag/v1.4.4

Notable changes in this release include improved decompression speed (about
10%).  See the GitHub release notes for more details.

MFC after:	I'm not going to, but feel free
Relnotes:	yes
Conrad Meyer 2019-11-16 16:39:08 +00:00
commit 9cbefe25d4
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=354777
117 changed files with 4371 additions and 2334 deletions

View File

@ -1,3 +1,34 @@
v1.4.4
perf: Improved decompression speed, by > 10%, by @terrelln
perf: Better compression speed when re-using a context, by @felixhandte
perf: Fix compression ratio when compressing large files with small dictionary, by @senhuang42
perf: zstd reference encoder can generate RLE blocks, by @bimbashrestha
perf: minor generic speed optimization, by @davidbolvansky
api: new ability to extract sequences from the parser for analysis, by @bimbashrestha
api: fixed decoding of magic-less frames, by @terrelln
api: fixed ZSTD_initCStream_advanced() performance with fast modes, reported by @QrczakMK
cli: Named pipes support, by @bimbashrestha
cli: short tar's extension support, by @stokito
cli: command --output-dir-flat= , generates target files into requested directory, by @senhuang42
cli: commands --stream-size=# and --size-hint=#, by @nmagerko
cli: command --exclude-compressed, by @shashank0791
cli: faster `-t` test mode
cli: improved some error messages, by @vangyzen
cli: fixed rare deadlock condition within dictionary builder, by @terrelln
build: single-file decoder with emscripten compilation script, by @cwoffenden
build: fixed zlibWrapper compilation on Visual Studio, reported by @bluenlive
build: fixed deprecation warning for certain gcc version, reported by @jasonma163
build: fix compilation on old gcc versions, by @cemeyer
build: improved installation directories for cmake script, by Dmitri Shubin
pack: modified pkgconfig, for better integration into openwrt, requested by @neheb
misc: Improved documentation : ZSTD_CLEVEL, DYNAMIC_BMI2, ZSTD_CDict, function deprecation, zstd format
misc: fixed educational decoder : accept larger literals section, and removed UNALIGNED() macro
v1.4.3
bug: Fix Dictionary Compression Ratio Regression by @cyan4973 (#1709)
bug: Fix Buffer Overflow in legacy v0.3 decompression by @felixhandte (#1722)
build: Add support for IAR C/C++ Compiler for Arm by @joseph0918 (#1705)
v1.4.2
bug: Fix bug in zstd-0.5 decoder by @terrelln (#1696)
bug: Fix seekable decompression in-memory API by @iburinoc (#1695)
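
A downstream consumer can confirm at run time which libzstd it actually linked against, using the stable version API (`ZSTD_VERSION_NUMBER` encodes major*10000 + minor*100 + release, so 1.4.4 reports 10404). A minimal sketch:

```
#include <stdio.h>
#include <zstd.h>

int main(void)
{
    /* 1.4.4 reports 10404 (major*10000 + minor*100 + release) */
    printf("linked libzstd: %s (%u)\n",
           ZSTD_versionString(), ZSTD_versionNumber());
    return 0;
}
```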

View File

@ -69,6 +69,7 @@ test: MOREFLAGS += -g -DDEBUGLEVEL=$(DEBUGLEVEL) -Werror
test:
MOREFLAGS="$(MOREFLAGS)" $(MAKE) -j -C $(PRGDIR) allVariants
$(MAKE) -C $(TESTDIR) $@
ZSTD=../../programs/zstd $(MAKE) -C doc/educational_decoder test
## shortest: same as `make check`
.PHONY: shortest
@ -99,8 +100,8 @@ man:
contrib: lib
$(MAKE) -C contrib/pzstd all
$(MAKE) -C contrib/seekable_format/examples all
$(MAKE) -C contrib/adaptive-compression all
$(MAKE) -C contrib/largeNbDicts all
cd contrib/single_file_decoder/ ; ./build_test.sh
.PHONY: cleanTabs
cleanTabs:
@ -116,7 +117,6 @@ clean:
@$(MAKE) -C contrib/gen_html $@ > $(VOID)
@$(MAKE) -C contrib/pzstd $@ > $(VOID)
@$(MAKE) -C contrib/seekable_format/examples $@ > $(VOID)
@$(MAKE) -C contrib/adaptive-compression $@ > $(VOID)
@$(MAKE) -C contrib/largeNbDicts $@ > $(VOID)
@$(RM) zstd$(EXT) zstdmt$(EXT) tmp*
@$(RM) -r lz4

View File

@ -15,6 +15,7 @@ a list of known ports and bindings is provided on [Zstandard homepage](http://ww
[![Build status][AppveyorDevBadge]][AppveyorLink]
[![Build status][CircleDevBadge]][CircleLink]
[![Build status][CirrusDevBadge]][CirrusLink]
[![Fuzzing Status][OSSFuzzBadge]][OSSFuzzLink]
[travisDevBadge]: https://travis-ci.org/facebook/zstd.svg?branch=dev "Continuous Integration test suite"
[travisLink]: https://travis-ci.org/facebook/zstd
@ -24,6 +25,8 @@ a list of known ports and bindings is provided on [Zstandard homepage](http://ww
[CircleLink]: https://circleci.com/gh/facebook/zstd
[CirrusDevBadge]: https://api.cirrus-ci.com/github/facebook/zstd.svg?branch=dev
[CirrusLink]: https://cirrus-ci.com/github/facebook/zstd
[OSSFuzzBadge]: https://oss-fuzz-build-logs.storage.googleapis.com/badges/zstd.svg
[OSSFuzzLink]: https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:zstd
## Benchmarks

View File

@ -1,3 +1,7 @@
# Following tests are run _only_ on master branch
# To reproduce these tests, it's possible to push into a branch `appveyorTest`
# or a branch `visual*`, they will intentionally trigger `master` tests
-
version: 1.0.{build}
branches:
@ -169,13 +173,16 @@
sh -e playTests.sh --test-large-data &&
fullbench.exe -i1 &&
fullbench.exe -i1 -P0 &&
fuzzer_VS2008_%PLATFORM%_Release.exe %FUZZERTEST% &&
fuzzer_VS2010_%PLATFORM%_Release.exe %FUZZERTEST% &&
fuzzer_VS2012_%PLATFORM%_Release.exe %FUZZERTEST% &&
fuzzer_VS2013_%PLATFORM%_Release.exe %FUZZERTEST% &&
fuzzer_VS2015_%PLATFORM%_Release.exe %FUZZERTEST%
)
# The following tests are for regular pushes
# into `dev` or some feature branch
# They run fewer tests, for a shorter feedback loop
-
version: 1.0.{build}
environment:
@ -249,3 +256,11 @@
COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\fuzzer.exe tests\fuzzer_VS2015_%PLATFORM%_%CONFIGURATION%.exe &&
COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe tests\
)
test_script:
- ECHO Testing %COMPILER% %PLATFORM% %CONFIGURATION%
- if [%HOST%]==[mingw] (
set "CC=%COMPILER%" &&
make check
)

View File

@ -1,3 +0,0 @@
# make artefact
gen_html
zstd_manual.html

View File

@ -1,2 +0,0 @@
# compilation result
pzstd

View File

@ -1,5 +0,0 @@
seekable_compression
seekable_decompression
seekable_decompression_mem
parallel_processing
parallel_compression

View File

@ -1,15 +1,33 @@
# ################################################################
# Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under both the BSD-style license (found in the
# LICENSE file in the root directory of this source tree) and the GPLv2 (found
# in the COPYING file in the root directory of this source tree).
# ################################################################
ZSTD ?= zstd # note: requires zstd installation on local system
UNAME?= $(shell uname)
ifeq ($(UNAME), SunOS)
DIFF ?= gdiff
else
DIFF ?= diff
endif
HARNESS_FILES=*.c
MULTITHREAD_LDFLAGS = -pthread
DEBUGFLAGS= -g -DZSTD_DEBUG=1
CPPFLAGS += -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress \
-I$(ZSTDDIR)/dictBuilder -I$(ZSTDDIR)/deprecated -I$(PRGDIR)
CFLAGS ?= -O3
CFLAGS ?= -O2
CFLAGS += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
-Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
-Wstrict-prototypes -Wundef \
-Wstrict-aliasing=1 -Wswitch-enum \
-Wredundant-decls -Wstrict-prototypes -Wundef \
-Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \
-Wredundant-decls
-std=c99
CFLAGS += $(DEBUGFLAGS)
CFLAGS += $(MOREFLAGS)
FLAGS = $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) $(MULTITHREAD_LDFLAGS)
@ -18,17 +36,27 @@ harness: $(HARNESS_FILES)
$(CC) $(FLAGS) $^ -o $@
clean:
@$(RM) -f harness
@$(RM) -rf harness.dSYM
@$(RM) harness
@$(RM) -rf harness.dSYM # MacOS specific
test: harness
@zstd README.md -o tmp.zst
#
# Testing single-file decompression with educational decoder
#
@$(ZSTD) -f README.md -o tmp.zst
@./harness tmp.zst tmp
@diff -s tmp README.md
@$(RM) -f tmp*
@zstd --train harness.c zstd_decompress.c zstd_decompress.h README.md
@zstd -D dictionary README.md -o tmp.zst
@$(DIFF) -s tmp README.md
@$(RM) tmp*
#
# Testing dictionary decompression with educational decoder
#
# note : files are presented multiple times for training, to reach minimum threshold
@$(ZSTD) --train harness.c zstd_decompress.c zstd_decompress.h README.md \
harness.c zstd_decompress.c zstd_decompress.h README.md \
harness.c zstd_decompress.c zstd_decompress.h README.md \
-o dictionary
@$(ZSTD) -f README.md -D dictionary -o tmp.zst
@./harness tmp.zst tmp dictionary
@diff -s tmp README.md
@$(RM) -f tmp* dictionary
@make clean
@$(DIFF) -s tmp README.md
@$(RM) tmp* dictionary
@$(MAKE) clean

View File

@ -21,88 +21,90 @@ typedef unsigned char u8;
// Protect against allocating too much memory for output
#define MAX_OUTPUT_SIZE ((size_t)1024 * 1024 * 1024)
u8 *input;
u8 *output;
u8 *dict;
size_t read_file(const char *path, u8 **ptr) {
FILE *f = fopen(path, "rb");
static size_t read_file(const char *path, u8 **ptr)
{
FILE* const f = fopen(path, "rb");
if (!f) {
fprintf(stderr, "failed to open file %s\n", path);
fprintf(stderr, "failed to open file %s \n", path);
exit(1);
}
fseek(f, 0L, SEEK_END);
size_t size = ftell(f);
size_t const size = (size_t)ftell(f);
rewind(f);
*ptr = malloc(size);
if (!*ptr) {
fprintf(stderr, "failed to allocate memory to hold %s\n", path);
fprintf(stderr, "failed to allocate memory to hold %s \n", path);
exit(1);
}
size_t pos = 0;
while (!feof(f)) {
size_t read = fread(&(*ptr)[pos], 1, size, f);
if (ferror(f)) {
fprintf(stderr, "error while reading file %s\n", path);
exit(1);
}
pos += read;
size_t const read = fread(*ptr, 1, size, f);
if (read != size) { /* must read everything in one pass */
fprintf(stderr, "error while reading file %s \n", path);
exit(1);
}
fclose(f);
return pos;
return read;
}
void write_file(const char *path, const u8 *ptr, size_t size) {
FILE *f = fopen(path, "wb");
static void write_file(const char *path, const u8 *ptr, size_t size)
{
FILE* const f = fopen(path, "wb");
if (!f) {
fprintf(stderr, "failed to open file %s \n", path);
exit(1);
}
size_t written = 0;
while (written < size) {
written += fwrite(&ptr[written], 1, size, f);
written += fwrite(ptr+written, 1, size, f);
if (ferror(f)) {
fprintf(stderr, "error while writing file %s\n", path);
exit(1);
}
}
} }
fclose(f);
}
int main(int argc, char **argv) {
int main(int argc, char **argv)
{
if (argc < 3) {
fprintf(stderr, "usage: %s <file.zst> <out_path> [dictionary]\n",
fprintf(stderr, "usage: %s <file.zst> <out_path> [dictionary] \n",
argv[0]);
return 1;
}
size_t input_size = read_file(argv[1], &input);
u8* input;
size_t const input_size = read_file(argv[1], &input);
u8* dict = NULL;
size_t dict_size = 0;
if (argc >= 4) {
dict_size = read_file(argv[3], &dict);
}
size_t decompressed_size = ZSTD_get_decompressed_size(input, input_size);
if (decompressed_size == (size_t)-1) {
decompressed_size = MAX_COMPRESSION_RATIO * input_size;
size_t out_capacity = ZSTD_get_decompressed_size(input, input_size);
if (out_capacity == (size_t)-1) {
out_capacity = MAX_COMPRESSION_RATIO * input_size;
fprintf(stderr, "WARNING: Compressed data does not contain "
"decompressed size, going to assume the compression "
"ratio is at most %d (decompressed size of at most "
"%zu)\n",
MAX_COMPRESSION_RATIO, decompressed_size);
"%u) \n",
MAX_COMPRESSION_RATIO, (unsigned)out_capacity);
}
if (decompressed_size > MAX_OUTPUT_SIZE) {
if (out_capacity > MAX_OUTPUT_SIZE) {
fprintf(stderr,
"Required output size too large for this implementation\n");
"Required output size too large for this implementation \n");
return 1;
}
output = malloc(decompressed_size);
u8* const output = malloc(out_capacity);
if (!output) {
fprintf(stderr, "failed to allocate memory\n");
fprintf(stderr, "failed to allocate memory \n");
return 1;
}
@ -110,16 +112,17 @@ int main(int argc, char **argv) {
if (dict) {
parse_dictionary(parsed_dict, dict, dict_size);
}
size_t decompressed =
ZSTD_decompress_with_dict(output, decompressed_size,
input, input_size, parsed_dict);
size_t const decompressed_size =
ZSTD_decompress_with_dict(output, out_capacity,
input, input_size,
parsed_dict);
free_dictionary(parsed_dict);
write_file(argv[2], output, decompressed);
write_file(argv[2], output, decompressed_size);
free(input);
free(output);
free(dict);
input = output = dict = NULL;
return 0;
}

View File

@ -395,7 +395,7 @@ size_t ZSTD_decompress_with_dict(void *const dst, const size_t dst_len,
/* this decoder assumes decompression of a single frame */
decode_frame(&out, &in, parsed_dict);
return out.ptr - (u8 *)dst;
return (size_t)(out.ptr - (u8 *)dst);
}
/******* FRAME DECODING ******************************************************/
@ -416,7 +416,7 @@ static void decompress_data(frame_context_t *const ctx, ostream_t *const out,
static void decode_frame(ostream_t *const out, istream_t *const in,
const dictionary_t *const dict) {
const u32 magic_number = IO_read_bits(in, 32);
const u32 magic_number = (u32)IO_read_bits(in, 32);
// Zstandard frame
//
// "Magic_Number
@ -497,7 +497,7 @@ static void parse_frame_header(frame_header_t *const header,
// 3 Reserved_bit
// 2 Content_Checksum_flag
// 1-0 Dictionary_ID_flag"
const u8 descriptor = IO_read_bits(in, 8);
const u8 descriptor = (u8)IO_read_bits(in, 8);
// decode frame header descriptor into flags
const u8 frame_content_size_flag = descriptor >> 6;
@ -521,7 +521,7 @@ static void parse_frame_header(frame_header_t *const header,
//
// Bit numbers 7-3 2-0
// Field name Exponent Mantissa"
u8 window_descriptor = IO_read_bits(in, 8);
u8 window_descriptor = (u8)IO_read_bits(in, 8);
u8 exponent = window_descriptor >> 3;
u8 mantissa = window_descriptor & 7;
@ -541,7 +541,7 @@ static void parse_frame_header(frame_header_t *const header,
const int bytes_array[] = {0, 1, 2, 4};
const int bytes = bytes_array[dictionary_id_flag];
header->dictionary_id = IO_read_bits(in, bytes * 8);
header->dictionary_id = (u32)IO_read_bits(in, bytes * 8);
} else {
header->dictionary_id = 0;
}
@ -633,8 +633,8 @@ static void decompress_data(frame_context_t *const ctx, ostream_t *const out,
//
// The next 2 bits represent the Block_Type, while the remaining 21 bits
// represent the Block_Size. Format is little-endian."
last_block = IO_read_bits(in, 1);
const int block_type = IO_read_bits(in, 2);
last_block = (int)IO_read_bits(in, 1);
const int block_type = (int)IO_read_bits(in, 2);
const size_t block_len = IO_read_bits(in, 21);
switch (block_type) {
@ -748,8 +748,8 @@ static size_t decode_literals(frame_context_t *const ctx, istream_t *const in,
// types"
//
// size_format takes between 1 and 2 bits
int block_type = IO_read_bits(in, 2);
int size_format = IO_read_bits(in, 2);
int block_type = (int)IO_read_bits(in, 2);
int size_format = (int)IO_read_bits(in, 2);
if (block_type <= 1) {
// Raw or RLE literals block
@ -833,6 +833,7 @@ static size_t decode_literals_compressed(frame_context_t *const ctx,
// bits (0-1023)."
num_streams = 1;
// Fall through as it has the same size format
/* fallthrough */
case 1:
// "4 streams. Both Compressed_Size and Regenerated_Size use 10 bits
// (0-1023)."
@ -855,8 +856,7 @@ static size_t decode_literals_compressed(frame_context_t *const ctx,
// Impossible
IMPOSSIBLE();
}
if (regenerated_size > MAX_LITERALS_SIZE ||
compressed_size >= regenerated_size) {
if (regenerated_size > MAX_LITERALS_SIZE) {
CORRUPTION();
}
@ -1005,7 +1005,7 @@ static const i16 SEQ_MATCH_LENGTH_DEFAULT_DIST[53] = {
static const u32 SEQ_LITERAL_LENGTH_BASELINES[36] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 18, 20, 22, 24, 28, 32, 40,
48, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65538};
48, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536};
static const u8 SEQ_LITERAL_LENGTH_EXTRA_BITS[36] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
@ -1021,7 +1021,7 @@ static const u8 SEQ_MATCH_LENGTH_EXTRA_BITS[53] = {
2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
/// Offset decoding is simpler so we just need a maximum code value
static const u8 SEQ_MAX_CODES[3] = {35, -1, 52};
static const u8 SEQ_MAX_CODES[3] = {35, (u8)-1, 52};
static void decompress_sequences(frame_context_t *const ctx,
istream_t *const in,
@ -1132,7 +1132,7 @@ static void decompress_sequences(frame_context_t *const ctx, istream_t *in,
// a single 1-bit and then fills the byte with 0-7 0 bits of padding."
const int padding = 8 - highest_set_bit(src[len - 1]);
// The offset starts at the end because FSE streams are read backwards
i64 bit_offset = len * 8 - padding;
i64 bit_offset = (i64)(len * 8 - (size_t)padding);
// "The bitstream starts with initial state values, each using the required
// number of bits in their respective accuracy, decoded previously from
@ -1409,7 +1409,7 @@ size_t ZSTD_get_decompressed_size(const void *src, const size_t src_len) {
// get decompressed size from ZSTD frame header
{
const u32 magic_number = IO_read_bits(&in, 32);
const u32 magic_number = (u32)IO_read_bits(&in, 32);
if (magic_number == 0xFD2FB528U) {
// ZSTD frame
@ -1418,7 +1418,7 @@ size_t ZSTD_get_decompressed_size(const void *src, const size_t src_len) {
if (header.frame_content_size == 0 && !header.single_segment_flag) {
// Content size not provided, we can't tell
return -1;
return (size_t)-1;
}
return header.frame_content_size;
@ -1529,7 +1529,7 @@ void free_dictionary(dictionary_t *const dict) {
/******* END DICTIONARY PARSING ***********************************************/
/******* IO STREAM OPERATIONS *************************************************/
#define UNALIGNED() ERROR("Attempting to operate on a non-byte aligned stream")
/// Reads `num` bits from a bitstream, and updates the internal offset
static inline u64 IO_read_bits(istream_t *const in, const int num_bits) {
if (num_bits > 64 || num_bits <= 0) {
@ -1608,7 +1608,7 @@ static inline const u8 *IO_get_read_ptr(istream_t *const in, size_t len) {
INP_SIZE();
}
if (in->bit_offset != 0) {
UNALIGNED();
ERROR("Attempting to operate on a non-byte aligned stream");
}
const u8 *const ptr = in->ptr;
in->ptr += len;
@ -1634,7 +1634,7 @@ static inline void IO_advance_input(istream_t *const in, size_t len) {
INP_SIZE();
}
if (in->bit_offset != 0) {
UNALIGNED();
ERROR("Attempting to operate on a non-byte aligned stream");
}
in->ptr += len;

View File

@ -7,6 +7,8 @@
* in the COPYING file in the root directory of this source tree).
*/
#include <stddef.h> /* size_t */
/******* EXPOSED TYPES ********************************************************/
/*
* Contains the parsed contents of a dictionary
@ -39,7 +41,7 @@ size_t ZSTD_get_decompressed_size(const void *const src, const size_t src_len);
* Return a valid dictionary_t pointer for use with dictionary initialization
* or decompression
*/
dictionary_t* create_dictionary();
dictionary_t* create_dictionary(void);
/*
* Parse a provided dictionary blob for use in decompression

View File

@ -16,7 +16,7 @@ Distribution of this document is unlimited.
### Version
0.3.2 (17/07/19)
0.3.4 (16/08/19)
Introduction
@ -358,6 +358,7 @@ It may be followed by an optional `Content_Checksum`
__`Block_Type`__
The next 2 bits represent the `Block_Type`.
`Block_Type` influences the meaning of `Block_Size`.
There are 4 block types :
| Value | 0 | 1 | 2 | 3 |
@ -384,9 +385,12 @@ There are 4 block types :
__`Block_Size`__
The upper 21 bits of `Block_Header` represent the `Block_Size`.
`Block_Size` is the size of the block excluding the header.
A block can contain any number of bytes (even zero), up to
`Block_Maximum_Decompressed_Size`, which is the smallest of:
When `Block_Type` is `Compressed_Block` or `Raw_Block`,
`Block_Size` is the size of `Block_Content`, hence excluding `Block_Header`.
When `Block_Type` is `RLE_Block`, `Block_Content`'s size is always 1,
and `Block_Size` represents the number of times this byte must be repeated.
A block can contain and decompress into any number of bytes (even zero),
up to `Block_Maximum_Decompressed_Size`, which is the smallest of:
- Window_Size
- 128 KB
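
For illustration, the RLE rule above reduces to a one-byte fill; a minimal sketch (the helper name is hypothetical):

```
#include <string.h>

/* Block_Content carries a single byte; Block_Size is the repeat count.
 * Assumes dst has room for blockSize bytes. */
static void expand_rle_block(unsigned char *dst, size_t blockSize,
                             unsigned char content)
{
    memset(dst, content, blockSize);
}
```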
@ -1103,18 +1107,18 @@ It follows the following build rule :
The table has a size of `Table_Size = 1 << Accuracy_Log`.
Each cell describes the symbol decoded,
and instructions to get the next state.
and instructions to get the next state (`Number_of_Bits` and `Baseline`).
Symbols are scanned in their natural order for "less than 1" probabilities.
Symbols with this probability are each attributed a single cell,
starting from the end of the table and retreating.
These symbols define a full state reset, reading `Accuracy_Log` bits.
All remaining symbols are allocated in their natural order.
Starting from symbol `0` and table position `0`,
Then, all remaining symbols, sorted in natural order, are allocated cells.
Starting from symbol `0` (if it exists), and table position `0`,
each symbol gets allocated as many cells as its probability.
Cell allocation is spread out, not linear :
each successor position follow this rule :
each successor position follows this rule :
```
position += (tableSize>>1) + (tableSize>>3) + 3;
@ -1126,40 +1130,41 @@ A position is skipped if already occupied by a "less than 1" probability symbol.
each position in the table, switching to the next symbol when enough
states have been allocated to the current one.
The result is a list of state values.
Each state will decode the current symbol.
The process guarantees that the table is entirely filled.
Each cell corresponds to a state value, which contains the symbol being decoded.
To get the `Number_of_Bits` and `Baseline` required for next state,
it's first necessary to sort all states in their natural order.
The lower states will need 1 more bit than higher ones.
To add the `Number_of_Bits` and `Baseline` required to retrieve next state,
it's first necessary to sort all occurrences of each symbol in state order.
Lower states will need 1 more bit than higher ones.
The process is repeated for each symbol.
__Example__ :
Presuming a symbol has a probability of 5.
It receives 5 state values. States are sorted in natural order.
Presuming a symbol has a probability of 5,
it receives 5 cells, corresponding to 5 state values.
These state values are then sorted in natural order.
Next power of 2 is 8.
Space of probabilities is divided into 8 equal parts.
Presuming the `Accuracy_Log` is 7, it defines 128 states.
Next power of 2 after 5 is 8.
Space of probabilities must be divided into 8 equal parts.
Presuming the `Accuracy_Log` is 7, it defines a space of 128 states.
Divided by 8, each share is 16 large.
In order to reach 8, 8-5=3 lowest states will count "double",
doubling the number of shares (32 in width),
requiring one more bit in the process.
In order to reach 8 shares, 8-5=3 lowest states will count "double",
doubling their shares (32 in width), hence requiring one more bit.
Baseline is assigned starting from the higher states using fewer bits,
and proceeding naturally, then resuming at the first state,
each takes its allocated width from Baseline.
increasing at each state, then resuming at the first state,
each state takes its allocated width from Baseline.
| state order | 0 | 1 | 2 | 3 | 4 |
| ---------------- | ----- | ----- | ------ | ---- | ----- |
| width | 32 | 32 | 32 | 16 | 16 |
| `Number_of_Bits` | 5 | 5 | 5 | 4 | 4 |
| range number | 2 | 4 | 6 | 0 | 1 |
| `Baseline` | 32 | 64 | 96 | 0 | 16 |
| range | 32-63 | 64-95 | 96-127 | 0-15 | 16-31 |
| state value | 1 | 39 | 77 | 84 | 122 |
| state order | 0 | 1 | 2 | 3 | 4 |
| ---------------- | ----- | ----- | ------ | ---- | ------ |
| width | 32 | 32 | 32 | 16 | 16 |
| `Number_of_Bits` | 5 | 5 | 5 | 4 | 4 |
| range number | 2 | 4 | 6 | 0 | 1 |
| `Baseline` | 32 | 64 | 96 | 0 | 16 |
| range | 32-63 | 64-95 | 96-127 | 0-15 | 16-31 |
The next state is determined from current state
During decoding, the next state value is determined from current state value,
by reading the required `Number_of_Bits`, and adding the specified `Baseline`.
See [Appendix A] for the results of this process applied to the default distributions.
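
For illustration, a C sketch of the cell-spreading rule above (names are illustrative; it assumes the normalized counts are already decoded, and that cells above `highThreshold` already hold the "less than 1" probability symbols):

```
#include <stddef.h>

static void spread_symbols(unsigned char *symbolOfCell, size_t tableSize,
                           const short *normCount, unsigned maxSymbol,
                           size_t highThreshold)
{
    size_t const step = (tableSize >> 1) + (tableSize >> 3) + 3;
    size_t const mask = tableSize - 1;
    size_t position = 0;
    unsigned s;
    for (s = 0; s <= maxSymbol; s++) {
        int i;
        for (i = 0; i < normCount[s]; i++) {  /* skips "less than 1" symbols */
            symbolOfCell[position] = (unsigned char)s;
            do {
                position = (position + step) & mask;
            } while (position > highThreshold);  /* skip occupied tail cells */
        }
    }
    /* for valid table sizes, position wraps back to 0 : table entirely filled */
}
```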
@ -1653,6 +1658,8 @@ or at least provide a meaningful error code explaining for which reason it canno
Version changes
---------------
- 0.3.4 : clarifications for FSE decoding table
- 0.3.3 : clarifications for field Block_Size
- 0.3.2 : remove additional block size restriction on compressed blocks
- 0.3.1 : minor clarification regarding offset history update rules
- 0.3.0 : minor edits to match RFC8478

View File

@ -1,10 +1,10 @@
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>zstd 1.4.2 Manual</title>
<title>zstd 1.4.4 Manual</title>
</head>
<body>
<h1>zstd 1.4.2 Manual</h1>
<h1>zstd 1.4.4 Manual</h1>
<hr>
<a name="Contents"></a><h2>Contents</h2>
<ol>
@ -27,10 +27,16 @@
<li><a href="#Chapter17">Advanced compression functions</a></li>
<li><a href="#Chapter18">Advanced decompression functions</a></li>
<li><a href="#Chapter19">Advanced streaming functions</a></li>
<li><a href="#Chapter20">Buffer-less and synchronous inner streaming functions</a></li>
<li><a href="#Chapter21">Buffer-less streaming compression (synchronous mode)</a></li>
<li><a href="#Chapter22">Buffer-less streaming decompression (synchronous mode)</a></li>
<li><a href="#Chapter23">Block level API</a></li>
<li><a href="#Chapter20">! ZSTD_initCStream_usingDict() :</a></li>
<li><a href="#Chapter21">! ZSTD_initCStream_advanced() :</a></li>
<li><a href="#Chapter22">! ZSTD_initCStream_usingCDict() :</a></li>
<li><a href="#Chapter23">! ZSTD_initCStream_usingCDict_advanced() :</a></li>
<li><a href="#Chapter24">This function is deprecated, and is equivalent to:</a></li>
<li><a href="#Chapter25">This function is deprecated, and is equivalent to:</a></li>
<li><a href="#Chapter26">Buffer-less and synchronous inner streaming functions</a></li>
<li><a href="#Chapter27">Buffer-less streaming compression (synchronous mode)</a></li>
<li><a href="#Chapter28">Buffer-less streaming decompression (synchronous mode)</a></li>
<li><a href="#Chapter29">Block level API</a></li>
</ol>
<hr>
<a name="Chapter1"></a><h2>Introduction</h2><pre>
@ -157,9 +163,13 @@ size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx);
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel);
</b><p> Same as ZSTD_compress(), using an explicit ZSTD_CCtx
The function will compress at requested compression level,
ignoring any other parameter
</b><p> Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
Important : in order to behave similarly to `ZSTD_compress()`,
this function compresses at requested compression level,
__ignoring any other parameter__ .
If any advanced parameters were set using the advanced API,
they will all be reset. Only `compressionLevel` remains.
</p></pre><BR>
<h3>Decompression context</h3><pre> When decompressing many times,
@ -199,18 +209,26 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
</b>/* compression parameters<b>
* Note: When compressing with a ZSTD_CDict these parameters are superseded
* by the parameters used to construct the ZSTD_CDict. See ZSTD_CCtx_refCDict()
* for more info (superseded-by-cdict). */
ZSTD_c_compressionLevel=100, </b>/* Update all compression parameters according to pre-defined cLevel table<b>
* by the parameters used to construct the ZSTD_CDict.
* See ZSTD_CCtx_refCDict() for more info (superseded-by-cdict). */
ZSTD_c_compressionLevel=100, </b>/* Set compression parameters according to pre-defined cLevel table.<b>
* Note that exact compression parameters are dynamically determined,
* depending on both compression level and srcSize (when known).
* Default level is ZSTD_CLEVEL_DEFAULT==3.
* Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
* Note 1 : it's possible to pass a negative compression level.
* Note 2 : setting a level sets all default values of other compression parameters */
* Note 2 : setting a level resets all other compression parameters to default */
</b>/* Advanced compression parameters :<b>
* It's possible to pin down compression parameters to some specific values.
* In which case, these values are no longer dynamically selected by the compressor */
ZSTD_c_windowLog=101, </b>/* Maximum allowed back-reference distance, expressed as power of 2.<b>
* This will set a memory budget for streaming decompression,
* with larger values requiring more memory
* and typically compressing more.
* Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
* Special: value 0 means "use default windowLog".
* Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT
* requires explicitly allowing such window size at decompression stage if using streaming. */
* requires explicitly allowing such size at streaming decompression stage. */
ZSTD_c_hashLog=102, </b>/* Size of the initial probe table, as a power of 2.<b>
* Resulting memory usage is (1 << (hashLog+2)).
* Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
@ -221,13 +239,13 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
* Resulting memory usage is (1 << (chainLog+2)).
* Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX.
* Larger tables result in better and slower compression.
* This parameter is useless when using "fast" strategy.
* This parameter is useless for "fast" strategy.
* It's still useful when using "dfast" strategy,
* in which case it defines a secondary probe table.
* Special: value 0 means "use default chainLog". */
ZSTD_c_searchLog=104, </b>/* Number of search attempts, as a power of 2.<b>
* More attempts result in better and slower compression.
* This parameter is useless when using "fast" and "dFast" strategies.
* This parameter is useless for "fast" and "dFast" strategies.
* Special: value 0 means "use default searchLog". */
ZSTD_c_minMatch=105, </b>/* Minimum size of searched matches.<b>
* Note that Zstandard can still find matches of smaller size,
@ -282,7 +300,7 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
ZSTD_c_contentSizeFlag=200, </b>/* Content size will be written into frame header _whenever known_ (default:1)<b>
* Content size must be known at the beginning of compression.
* This is automatically the case when using ZSTD_compress2(),
* For streaming variants, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */
* For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */
ZSTD_c_checksumFlag=201, </b>/* A 32-bits checksum of content is written at end of frame (default:0) */<b>
ZSTD_c_dictIDFlag=202, </b>/* When applicable, dictionary's ID is written into frame header (default:1) */<b>
@ -301,7 +319,7 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
* Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads.
* 0 means default, which is dynamically determined based on compression parameters.
* Job size must be a minimum of overlap size, or 1 MB, whichever is largest.
* The minimum size is automatically and transparently enforced */
* The minimum size is automatically and transparently enforced. */
ZSTD_c_overlapLog=402, </b>/* Control the overlap size, as a fraction of window size.<b>
* The overlap size is an amount of data reloaded from previous job at the beginning of a new job.
* It helps preserve compression ratio, while each job is compressed in parallel.
@ -324,6 +342,7 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
* ZSTD_c_forceAttachDict
* ZSTD_c_literalCompressionMode
* ZSTD_c_targetCBlockSize
* ZSTD_c_srcSizeHint
* Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
* note : never ever use experimentalParam? names directly;
* also, the enums values themselves are unstable and can still change.
@ -334,6 +353,7 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
ZSTD_c_experimentalParam4=1001,
ZSTD_c_experimentalParam5=1002,
ZSTD_c_experimentalParam6=1003,
ZSTD_c_experimentalParam7=1004
} ZSTD_cParameter;
</b></pre><BR>
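
The parameters above are all driven through the same setter; a minimal sketch in C (error checks omitted):

```
#include <zstd.h>

size_t compress_with_params(void *dst, size_t dstCapacity,
                            const void *src, size_t srcSize)
{
    ZSTD_CCtx *const cctx = ZSTD_createCCtx();
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 25);   /* pin windowLog */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
    size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    ZSTD_freeCCtx(cctx);
    return cSize;   /* compressed size, or an error code */
}
```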
<pre><b>typedef struct {
@ -672,12 +692,17 @@ size_t ZSTD_freeDStream(ZSTD_DStream* zds);
<pre><b>ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
int compressionLevel);
</b><p> When compressing multiple messages / blocks using the same dictionary, it's recommended to load it only once.
ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup cost.
</b><p> When compressing multiple messages or blocks using the same dictionary,
it's recommended to digest the dictionary only once, since it's a costly operation.
ZSTD_createCDict() will create a state from digesting a dictionary.
The resulting state can be used for future compression operations with very limited startup cost.
ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
`dictBuffer` can be released after ZSTD_CDict creation, because its content is copied within CDict.
Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate `dictBuffer` content.
Note : A ZSTD_CDict can be created from an empty dictBuffer, but it is inefficient when used to compress small data.
@dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.
Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.
Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,
in which case the only thing that it transports is the @compressionLevel.
This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,
expecting a ZSTD_CDict parameter with any data, including those without a known dictionary.
</p></pre><BR>
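
A minimal sketch of the digest-once, reuse-many pattern described above (error checks omitted; in real code the CDict would be kept alive and shared across calls):

```
#include <zstd.h>

size_t compress_with_cdict(void *dst, size_t dstCapacity,
                           const void *src, size_t srcSize,
                           const void *dictBuffer, size_t dictSize)
{
    /* digest the dictionary once, up front */
    ZSTD_CDict *const cdict = ZSTD_createCDict(dictBuffer, dictSize, 3);
    ZSTD_CCtx  *const cctx  = ZSTD_createCCtx();
    size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
                                                  src, srcSize, cdict);
    ZSTD_freeCCtx(cctx);
    ZSTD_freeCDict(cdict);
    return cSize;
}
```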
<pre><b>size_t ZSTD_freeCDict(ZSTD_CDict* CDict);
@ -794,7 +819,7 @@ size_t ZSTD_freeDStream(ZSTD_DStream* zds);
Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.
It's a CPU consuming operation, with non-negligible impact on latency.
If there is a need to use the same prefix multiple times, consider loadDictionary instead.
Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dm_rawContent).
Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).
Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation.
</p></pre><BR>
@ -838,7 +863,7 @@ size_t ZSTD_freeDStream(ZSTD_DStream* zds);
Note 2 : Prefix buffer is referenced. It **must** outlive decompression.
Prefix buffer must remain unmodified up to the end of frame,
reached when ZSTD_decompressStream() returns 0.
Note 3 : By default, the prefix is treated as raw content (ZSTD_dm_rawContent).
Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).
Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section)
Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.
A full dictionary is more costly, as it requires building tables.
@ -864,6 +889,24 @@ size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
<BR></pre>
<pre><b>typedef struct {
unsigned int matchPos; </b>/* Match pos in dst */<b>
</b>/* If seqDef.offset > 3, then this is seqDef.offset - 3<b>
* If seqDef.offset < 3, then this is the corresponding repeat offset
* But if seqDef.offset < 3 and litLength == 0, this is the
* repeat offset before the corresponding repeat offset
* And if seqDef.offset == 3 and litLength == 0, this is the
* most recent repeat offset - 1
*/
unsigned int offset;
unsigned int litLength; </b>/* Literal length */<b>
unsigned int matchLength; </b>/* Match length */<b>
</b>/* 0 when seq not rep and seqDef.offset otherwise<b>
* when litLength == 0 this will be <= 4, otherwise <= 3 like normal
*/
unsigned int rep;
} ZSTD_Sequence;
</b></pre><BR>
<pre><b>typedef struct {
unsigned windowLog; </b>/**< largest match distance : larger == more compression, more memory needed during decompression */<b>
unsigned chainLog; </b>/**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */<b>
@ -893,21 +936,12 @@ size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
</b></pre><BR>
<pre><b>typedef enum {
ZSTD_dlm_byCopy = 0, </b>/**< Copy dictionary content internally */<b>
ZSTD_dlm_byRef = 1, </b>/**< Reference dictionary content -- the dictionary buffer must outlive its users. */<b>
ZSTD_dlm_byRef = 1 </b>/**< Reference dictionary content -- the dictionary buffer must outlive its users. */<b>
} ZSTD_dictLoadMethod_e;
</b></pre><BR>
<pre><b>typedef enum {
</b>/* Opened question : should we have a format ZSTD_f_auto ?<b>
* Today, it would mean exactly the same as ZSTD_f_zstd1.
* But, in the future, should several formats become supported,
* on the compression side, it would mean "default format".
* On the decompression side, it would mean "automatic format detection",
* so that ZSTD_f_zstd1 would mean "accept *only* zstd frames".
* Since meaning is a little different, another option could be to define different enums for compression and decompression.
* This question could be kept for later, when there are actually multiple formats to support,
* but there is also the question of pinning enum values, and pinning value `0` is especially important */
ZSTD_f_zstd1 = 0, </b>/* zstd frame format, specified in zstd_compression_format.md (default) */<b>
ZSTD_f_zstd1_magicless = 1, </b>/* Variant of zstd frame format, without initial 4-bytes magic number.<b>
ZSTD_f_zstd1_magicless = 1 </b>/* Variant of zstd frame format, without initial 4-bytes magic number.<b>
* Useful to save 4 bytes per generated frame.
* Decoder cannot recognise automatically this format, requiring this instruction. */
} ZSTD_format_e;
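
Since the decoder cannot auto-detect the magicless variant, both sides must opt in explicitly; a minimal sketch (experimental API, so ZSTD_STATIC_LINKING_ONLY is required):

```
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static void use_magicless(ZSTD_CCtx *cctx, ZSTD_DCtx *dctx)
{
    /* producer : emit frames without the 4-byte magic number */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_format, ZSTD_f_zstd1_magicless);
    /* consumer : must be told the format before decoding */
    ZSTD_DCtx_setFormat(dctx, ZSTD_f_zstd1_magicless);
}
```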
@ -918,7 +952,7 @@ size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
* to evolve and should be considered only in the context of extremely
* advanced performance tuning.
*
* Zstd currently supports the use of a CDict in two ways:
* Zstd currently supports the use of a CDict in three ways:
*
* - The contents of the CDict can be copied into the working context. This
* means that the compression can search both the dictionary and input
@ -934,6 +968,12 @@ size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
* working context's tables can be reused). For small inputs, this can be
* faster than copying the CDict's tables.
*
* - The CDict's tables are not used at all, and instead we use the working
* context alone to reload the dictionary and use params based on the source
* size. See ZSTD_compress_insertDictionary() and ZSTD_compress_usingDict().
* This method is effective when the dictionary sizes are very small relative
* to the input size, and the input size is fairly large to begin with.
*
* Zstd has a simple internal heuristic that selects which strategy to use
* at the beginning of a compression. However, if experimentation shows that
* Zstd is making poor choices, it is possible to override that choice with
@ -942,6 +982,7 @@ size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
ZSTD_dictDefaultAttach = 0, </b>/* Use the default heuristic. */<b>
ZSTD_dictForceAttach = 1, </b>/* Never copy the dictionary. */<b>
ZSTD_dictForceCopy = 2, </b>/* Always copy the dictionary. */<b>
ZSTD_dictForceLoad = 3 </b>/* Always reload the dictionary */<b>
} ZSTD_dictAttachPref_e;
</b></pre><BR>
<pre><b>typedef enum {
@ -950,7 +991,7 @@ size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
* levels will be compressed. */
ZSTD_lcm_huffman = 1, </b>/**< Always attempt Huffman compression. Uncompressed literals will still be<b>
* emitted if Huffman compression is not profitable. */
ZSTD_lcm_uncompressed = 2, </b>/**< Always emit uncompressed literals. */<b>
ZSTD_lcm_uncompressed = 2 </b>/**< Always emit uncompressed literals. */<b>
} ZSTD_literalCompressionMode_e;
</b></pre><BR>
<a name="Chapter15"></a><h2>Frame size functions</h2><pre></pre>
@ -999,20 +1040,38 @@ size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
or an error code (if srcSize is too small)
</p></pre><BR>
<pre><b>size_t ZSTD_getSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
size_t outSeqsSize, const void* src, size_t srcSize);
</b><p> Extract sequences from the sequence store
zc can be used to insert custom compression params.
This function invokes ZSTD_compress2
@return : number of sequences extracted
</p></pre><BR>
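
A minimal sketch of extracting sequences with this new entry point (the one-sequence-per-source-byte capacity is an assumption; error checks omitted):

```
#define ZSTD_STATIC_LINKING_ONLY
#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

static void dump_sequences(const void *src, size_t srcSize)
{
    ZSTD_CCtx *const zc = ZSTD_createCCtx();
    ZSTD_Sequence *const seqs = malloc(srcSize * sizeof(ZSTD_Sequence));
    size_t const nbSeqs = ZSTD_getSequences(zc, seqs, srcSize, src, srcSize);
    size_t i;
    for (i = 0; i < nbSeqs; i++) {
        printf("off=%u lit=%u match=%u rep=%u\n",
               seqs[i].offset, seqs[i].litLength,
               seqs[i].matchLength, seqs[i].rep);
    }
    free(seqs);
    ZSTD_freeCCtx(zc);
}
```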
<a name="Chapter16"></a><h2>Memory management</h2><pre></pre>
<pre><b>size_t ZSTD_estimateCCtxSize(int compressionLevel);
size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
size_t ZSTD_estimateDCtxSize(void);
</b><p> These functions make it possible to estimate memory usage
of a future {D,C}Ctx, before its creation.
ZSTD_estimateCCtxSize() will provide a budget large enough for any compression level up to selected one.
It will also consider src size to be arbitrarily "large", which is worst case.
If srcSize is known to always be small, ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
Note : CCtx size estimation is only correct for single-threaded compression.
</b><p> These functions make it possible to estimate memory usage of a future
{D,C}Ctx, before its creation.
ZSTD_estimateCCtxSize() will provide a budget large enough for any
compression level up to selected one. Unlike ZSTD_estimateCStreamSize*(),
this estimate does not include space for a window buffer, so this estimate
is guaranteed to be enough for single-shot compressions, but not streaming
compressions. It will however assume the input may be arbitrarily large,
which is the worst case. If srcSize is known to always be small,
ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with
ZSTD_getCParams() to create cParams from compressionLevel.
ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with
ZSTD_CCtxParams_setParameter().
Note: only single-threaded compression is supported. This function will
return an error code if ZSTD_c_nbWorkers is >= 1.
</p></pre><BR>
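
These estimates pair naturally with the experimental static-allocation entry points; a minimal sketch (single-threaded compression only, as noted above):

```
#define ZSTD_STATIC_LINKING_ONLY
#include <stdlib.h>
#include <zstd.h>

static ZSTD_CCtx *make_static_cctx(int level, void **mem)
{
    size_t const budget = ZSTD_estimateCCtxSize(level);
    *mem = malloc(budget);   /* caller owns and frees this workspace */
    return *mem ? ZSTD_initStaticCCtx(*mem, budget) : NULL;
}
```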
<pre><b>size_t ZSTD_estimateCStreamSize(int compressionLevel);
@ -1085,7 +1144,8 @@ static ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; </b>/**< t
</b><p> Create a digested dictionary for compression
Dictionary content is just referenced, not duplicated.
As a consequence, `dictBuffer` **must** outlive CDict,
and its content must remain unmodified throughout the lifetime of CDict.
and its content must remain unmodified throughout the lifetime of CDict.
note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef
</p></pre><BR>
<pre><b>ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
@ -1116,7 +1176,9 @@ static ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; </b>/**< t
const void* src, size_t srcSize,
const void* dict,size_t dictSize,
ZSTD_parameters params);
</b><p> Same as ZSTD_compress_usingDict(), with fine-tune control over compression parameters (by structure)
</b><p> Note : this function is now DEPRECATED.
It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
This prototype will be marked as deprecated and generate compilation warning on reaching v1.5.x
</p></pre><BR>
<pre><b>size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
@ -1124,7 +1186,9 @@ static ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; </b>/**< t
const void* src, size_t srcSize,
const ZSTD_CDict* cdict,
ZSTD_frameParameters fParams);
</b><p> Same as ZSTD_compress_usingCDict(), with fine-tune control over frame parameters
</b><p> Note : this function is now REDUNDANT.
It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.
This prototype will be marked as deprecated and generate compilation warning in some future version
</p></pre><BR>
<pre><b>size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
@ -1301,53 +1365,67 @@ size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);
* pledgedSrcSize must be correct. If it is not known at init time, use
* ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,
* "0" also disables frame content size field. It may be enabled in the future.
* Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
*/
size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);
</b>/**! ZSTD_initCStream_usingDict() :<b>
* This function is deprecated, and is equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
* ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
*
* Creates an internal CDict (incompatible with static CCtx), except if
* dict == NULL or dictSize < 8, in which case no dict is used.
* Note: dict is loaded with ZSTD_dm_auto (treated as a full zstd dictionary if
* it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
*/
size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel);
</b>/**! ZSTD_initCStream_advanced() :<b>
* This function is deprecated, and is approximately equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* ZSTD_CCtx_setZstdParams(zcs, params); // Set the zstd params and leave the rest as-is
* ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
* ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
*
* pledgedSrcSize must be correct. If srcSize is not known at init time, use
* value ZSTD_CONTENTSIZE_UNKNOWN. dict is loaded with ZSTD_dm_auto and ZSTD_dlm_byCopy.
*/
size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
ZSTD_parameters params, unsigned long long pledgedSrcSize);
</b>/**! ZSTD_initCStream_usingCDict() :<b>
* This function is deprecated, and equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* ZSTD_CCtx_refCDict(zcs, cdict);
*
* note : cdict will just be referenced, and must outlive compression session
*/
size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
</b>/**! ZSTD_initCStream_usingCDict_advanced() :<b>
* This function is deprecated, and is approximately equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* ZSTD_CCtx_setZstdFrameParams(zcs, fParams); // Set the zstd frame params and leave the rest as-is
* ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
* ZSTD_CCtx_refCDict(zcs, cdict);
*
* same as ZSTD_initCStream_usingCDict(), with control over frame parameters.
* pledgedSrcSize must be correct. If srcSize is not known at init time, use
* value ZSTD_CONTENTSIZE_UNKNOWN.
*/
size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize);
size_t
ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
int compressionLevel,
unsigned long long pledgedSrcSize);
</pre></b><BR>
<a name="Chapter20"></a><h2>! ZSTD_initCStream_usingDict() :</h2><pre> This function is deprecated, and is equivalent to:
ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
Creates an internal CDict (incompatible with static CCtx), except if
dict == NULL or dictSize < 8, in which case no dict is used.
Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if
it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
<BR></pre>
<a name="Chapter21"></a><h2>! ZSTD_initCStream_advanced() :</h2><pre> This function is deprecated, and is approximately equivalent to:
ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
// Pseudocode: Set each zstd parameter and leave the rest as-is.
for ((param, value) : params) {
ZSTD_CCtx_setParameter(zcs, param, value);
}
ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.
pledgedSrcSize must be correct.
If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
<BR></pre>
<a name="Chapter22"></a><h2>! ZSTD_initCStream_usingCDict() :</h2><pre> This function is deprecated, and equivalent to:
ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
ZSTD_CCtx_refCDict(zcs, cdict);
note : cdict will just be referenced, and must outlive compression session
Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
<BR></pre>
<a name="Chapter23"></a><h2>! ZSTD_initCStream_usingCDict_advanced() :</h2><pre> This function is DEPRECATED, and is approximately equivalent to:
ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
// Pseudocode: Set each zstd frame parameter and leave the rest as-is.
for ((fParam, value) : fParams) {
ZSTD_CCtx_setParameter(zcs, fParam, value);
}
ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
ZSTD_CCtx_refCDict(zcs, cdict);
same as ZSTD_initCStream_usingCDict(), with control over frame parameters.
pledgedSrcSize must be correct. If srcSize is not known at init time, use
value ZSTD_CONTENTSIZE_UNKNOWN.
Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
<BR></pre>
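
Taken together, the modern replacement for these deprecated initializers is a short, uniform sequence; a sketch for the ZSTD_initCStream_usingDict() case, following the equivalence spelled out above (error checks omitted):

```
#include <zstd.h>

static void init_cstream_modern(ZSTD_CCtx *zcs, int level,
                                const void *dict, size_t dictSize)
{
    ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
    ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, level);
    ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
    /* then drive the stream with ZSTD_compressStream2(),
     * using ZSTD_e_continue and a final ZSTD_e_end */
}
```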
<pre><b>size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
</b><p> This function is deprecated, and is equivalent to:
ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
@ -1361,6 +1439,7 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict*
For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
but it will change to mean "empty" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
@return : 0, or an error code (which can be tested using ZSTD_isError())
Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
</p></pre><BR>
@ -1395,34 +1474,35 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict*
* ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
*
* note: no dictionary will be used if dict == NULL or dictSize < 8
* Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
*/
size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
</b>/**<b>
* This function is deprecated, and is equivalent to:
*
* ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
* ZSTD_DCtx_refDDict(zds, ddict);
*
* note : ddict is referenced, it must outlive decompression session
*/
size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
</b>/**<b>
* This function is deprecated, and is equivalent to:
*
* ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
*
* re-use decompression parameters from previous init; saves dictionary loading
*/
size_t ZSTD_resetDStream(ZSTD_DStream* zds);
</pre></b><BR>
<a name="Chapter20"></a><h2>Buffer-less and synchronous inner streaming functions</h2><pre>
<a name="Chapter24"></a><h2>This function is deprecated, and is equivalent to:</h2><pre>
ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
ZSTD_DCtx_refDDict(zds, ddict);
note : ddict is referenced, it must outlive decompression session
Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
<BR></pre>
<a name="Chapter25"></a><h2>This function is deprecated, and is equivalent to:</h2><pre>
ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
re-use decompression parameters from previous init; saves dictionary loading
Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
<BR></pre>
<a name="Chapter26"></a><h2>Buffer-less and synchronous inner streaming functions</h2><pre>
This is an advanced API, giving full control over buffer management, for users who need direct control over memory.
But it's also a complex one, with several restrictions, documented below.
Prefer normal streaming API for an easier experience.
<BR></pre>
<a name="Chapter21"></a><h2>Buffer-less streaming compression (synchronous mode)</h2><pre>
<a name="Chapter27"></a><h2>Buffer-less streaming compression (synchronous mode)</h2><pre>
A ZSTD_CCtx object is required to track streaming operations.
Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.
ZSTD_CCtx object can be re-used multiple times within successive compression operations.
@ -1458,7 +1538,7 @@ size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); </b>/* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */<b>
size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); </b>/**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */<b>
</pre></b><BR>
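
A one-chunk sketch of the buffer-less flow (real callers loop over ZSTD_compressContinue() with dstCapacity >= ZSTD_compressBound(chunkSize); error checks omitted):

```
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static size_t bufferless_compress(ZSTD_CCtx *cctx,
                                  void *dst, size_t dstCapacity,
                                  const void *src, size_t srcSize)
{
    size_t pos = 0;
    ZSTD_compressBegin(cctx, 3);   /* compression level 3 */
    pos += ZSTD_compressContinue(cctx, dst, dstCapacity, src, srcSize);
    pos += ZSTD_compressEnd(cctx, (char *)dst + pos,
                            dstCapacity - pos, NULL, 0);   /* epilogue */
    return pos;   /* total frame size */
}
```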
<a name="Chapter22"></a><h2>Buffer-less streaming decompression (synchronous mode)</h2><pre>
<a name="Chapter28"></a><h2>Buffer-less streaming decompression (synchronous mode)</h2><pre>
A ZSTD_DCtx object is required to track streaming operations.
Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
A ZSTD_DCtx object can be re-used multiple times.
@ -1554,10 +1634,10 @@ size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long
<pre><b>typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
</b></pre><BR>
<a name="Chapter23"></a><h2>Block level API</h2><pre></pre>
<a name="Chapter29"></a><h2>Block level API</h2><pre></pre>
<pre><b></b><p> Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
User will have to take in charge required information to regenerate data, such as compressed and content sizes.
<pre><b></b><p> Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
But users will have to keep track of the metadata needed to regenerate data, such as compressed and content sizes.
A few rules to respect :
- Compressing and decompressing require a context structure
@ -1568,12 +1648,14 @@ size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long
+ copyCCtx() and copyDCtx() can be used too
- Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
+ If input is larger than a block size, it's necessary to split input data into multiple blocks
+ For inputs larger than a single block, really consider using regular ZSTD_compress() instead.
Frame metadata is not that costly, and quickly becomes negligible as source size grows larger.
- When a block is considered not compressible enough, ZSTD_compressBlock() result will be zero.
In which case, nothing is produced into `dst` !
+ User must test for such outcome and deal directly with uncompressed data
+ ZSTD_decompressBlock() doesn't accept uncompressed data as input !!!
+ For inputs larger than a single block, consider using regular ZSTD_compress() instead.
Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.
- When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) !
===> In which case, nothing is produced into `dst` !
+ User __must__ test for such outcome and deal directly with uncompressed data
+ A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0.
Doing so would mess up with statistics history, leading to potential data corruption.
+ ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!
+ In case of multiple successive blocks, should some of them be uncompressed,
decoder must be informed of their existence in order to follow proper history.
Use ZSTD_insertBlock() for such a case.
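
A sketch of the incompressible-block rule above (assumes dstCapacity >= srcSize so the raw copy always fits; error checks omitted):

```
#define ZSTD_STATIC_LINKING_ONLY
#include <string.h>
#include <zstd.h>

static size_t emit_block(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
                         const void *src, size_t srcSize, int *isRaw)
{
    size_t const cSize = ZSTD_compressBlock(cctx, dst, dstCapacity, src, srcSize);
    if (cSize == 0) {   /* not compressible : nothing was written into dst */
        memcpy(dst, src, srcSize);
        *isRaw = 1;     /* decoder must call ZSTD_insertBlock(dctx, dst, srcSize) */
        return srcSize;
    }
    *isRaw = 0;
    return cSize;
}
```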

View File

@ -1,15 +0,0 @@
#build
simple_compression
simple_decompression
multiple_simple_compression
dictionary_compression
dictionary_decompression
streaming_compression
streaming_decompression
multiple_streaming_compression
streaming_memory_usage
#test artefact
tmp*
test*
*.zst

View File

@ -44,8 +44,8 @@ static void compressFile_orDie(const char* fname, const char* outName, int cLeve
* and writes all output produced to the output file.
*/
size_t const toRead = buffInSize;
size_t read;
while ((read = fread_orDie(buffIn, toRead, fin))) {
for (;;) {
size_t read = fread_orDie(buffIn, toRead, fin);
/* Select the flush mode.
* If the read may not be finished (read == toRead) we use
* ZSTD_e_continue. If this is the last chunk, we use ZSTD_e_end.
@ -76,6 +76,10 @@ static void compressFile_orDie(const char* fname, const char* outName, int cLeve
} while (!finished);
CHECK(input.pos == input.size,
"Impossible: zstd only returns 0 when the input is completely consumed!");
if (lastChunk) {
break;
}
}
ZSTD_freeCCtx(cctx);
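The reshaped read loop reduces to the following pattern (a sketch, with the helpers and buffer names from this example assumed):

    for (;;) {
        size_t const read = fread_orDie(buffIn, toRead, fin);
        int const lastChunk = (read < toRead);
        ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
        ZSTD_inBuffer input = { buffIn, read, 0 };
        int finished;
        do {
            ZSTD_outBuffer output = { buffOut, buffOutSize, 0 };
            size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
            CHECK_ZSTD(remaining);
            fwrite_orDie(buffOut, output.pos, fout);
            /* done when the frame is fully flushed (last chunk)
             * or when all of this chunk's input was consumed */
            finished = lastChunk ? (remaining == 0) : (input.pos == input.size);
        } while (!finished);
        if (lastChunk) break;
    }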

View File

@ -34,7 +34,10 @@ static void decompressFile_orDie(const char* fname)
*/
size_t const toRead = buffInSize;
size_t read;
size_t lastRet = 0;
int isEmpty = 1;
while ( (read = fread_orDie(buffIn, toRead, fin)) ) {
isEmpty = 0;
ZSTD_inBuffer input = { buffIn, read, 0 };
/* Given a valid frame, zstd won't consume the last byte of the frame
* until it has flushed all of the decompressed data of the frame.
@ -53,9 +56,24 @@ static void decompressFile_orDie(const char* fname)
size_t const ret = ZSTD_decompressStream(dctx, &output , &input);
CHECK_ZSTD(ret);
fwrite_orDie(buffOut, output.pos, fout);
lastRet = ret;
}
}
if (isEmpty) {
fprintf(stderr, "input is empty\n");
exit(1);
}
if (lastRet != 0) {
/* The last return value from ZSTD_decompressStream did not end on a
* frame, but we reached the end of the file! We assume this is an
* error, and the input was truncated.
*/
fprintf(stderr, "EOF before end of stream: %zu\n", lastRet);
exit(1);
}
ZSTD_freeDCtx(dctx);
fclose_orDie(fin);
fclose_orDie(fout);
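The truncation check leans on an invariant of ZSTD_decompressStream(): it returns 0 exactly when a frame has been fully decoded and flushed. In sketch form:

    /* ret == 0        : a frame boundary was just completed ; EOF here is clean.
     * ret >  0 at EOF : the decoder still expects input (ret is a size hint),
     *                   so the final frame must have been truncated. */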

View File

@ -1,3 +0,0 @@
# make install artefact
libzstd.pc
libzstd-nomt

View File

@ -244,8 +244,6 @@ libzstd.pc:
libzstd.pc: libzstd.pc.in
@echo creating pkgconfig
@sed -e 's|@PREFIX@|$(PREFIX)|' \
-e 's|@LIBDIR@|$(LIBDIR)|' \
-e 's|@INCLUDEDIR@|$(INCLUDEDIR)|' \
-e 's|@VERSION@|$(VERSION)|' \
$< >$@

View File

@ -27,10 +27,10 @@ Enabling multithreading requires 2 conditions :
Both conditions are automatically applied when invoking `make lib-mt` target.
When linking a POSIX program with a multithreaded version of `libzstd`,
note that it's necessary to request the `-pthread` flag during link stage.
note that it's necessary to pass the `-pthread` flag during the link stage.
Multithreading capabilities are exposed
via the [advanced API defined in `lib/zstd.h`](https://github.com/facebook/zstd/blob/v1.3.8/lib/zstd.h#L592).
via the [advanced API defined in `lib/zstd.h`](https://github.com/facebook/zstd/blob/v1.4.3/lib/zstd.h#L351).
#### API
@ -112,6 +112,17 @@ The file structure is designed to make this selection manually achievable for an
will expose the deprecated `ZSTDMT` API exposed by `zstdmt_compress.h` in
the shared library, which is now hidden by default.
- The build macro `DYNAMIC_BMI2` can be set to 1 or 0 in order to generate binaries
which can detect at runtime the presence of BMI2 instructions, and use them only if present.
These instructions contribute to better performance, notably on the decoder side.
By default, this feature is automatically enabled on detecting
the right instruction set (x64) and compiler (clang or gcc >= 5).
It's automatically disabled on other CPU architectures,
or when the BMI2 instruction set is _required_ by the compiler command line
(in which case, only the BMI2 code path is generated).
Setting this macro explicitly either forces generation of the BMI2 dispatcher (1)
or prevents it (0), overriding automatic detection.
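For reference, the dispatcher this macro gates follows roughly this shape (a simplified sketch, not the library's exact code; `ZSTD_cpuSupportsBmi2`, `fn_bmi2` and `fn_default` are illustrative names):

    #if DYNAMIC_BMI2
    size_t fn(void* dst, size_t n) {
        /* runtime probe (cpuid-based) selects the code path */
        if (ZSTD_cpuSupportsBmi2()) return fn_bmi2(dst, n);  /* BMI2-targeted build of fn */
        return fn_default(dst, n);
    }
    #else
    size_t fn(void* dst, size_t n) { return fn_default(dst, n); }  /* single code path */
    #endif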
#### Windows : using MinGW+MSYS to create DLL

View File

@ -57,6 +57,8 @@ extern "C" {
=========================================*/
#if defined(__BMI__) && defined(__GNUC__)
# include <immintrin.h> /* support for bextr (experimental) */
#elif defined(__ICCARM__)
# include <intrinsics.h>
#endif
#define STREAM_ACCUMULATOR_MIN_32 25
@ -162,7 +164,9 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val)
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return 31 - __builtin_clz (val);
return __builtin_clz (val) ^ 31;
# elif defined(__ICCARM__) /* IAR Intrinsic */
return 31 - __CLZ(val);
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29,
11, 14, 16, 18, 22, 25, 3, 30,
@ -240,9 +244,9 @@ MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
{
size_t const nbBytes = bitC->bitPos >> 3;
assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
assert(bitC->ptr <= bitC->endPtr);
MEM_writeLEST(bitC->ptr, bitC->bitContainer);
bitC->ptr += nbBytes;
assert(bitC->ptr <= bitC->endPtr);
bitC->bitPos &= 7;
bitC->bitContainer >>= nbBytes*8;
}
@ -256,6 +260,7 @@ MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
{
size_t const nbBytes = bitC->bitPos >> 3;
assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
assert(bitC->ptr <= bitC->endPtr);
MEM_writeLEST(bitC->ptr, bitC->bitContainer);
bitC->ptr += nbBytes;
if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
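The `31 - __builtin_clz(val)` to `__builtin_clz(val) ^ 31` rewrite is behavior-preserving: for nonzero 32-bit `val`, `__builtin_clz(val)` lies in [0,31], and within that range XOR with 31 (all-ones in 5 bits) equals subtraction from 31; the XOR form is simply easier for compilers to fold into the `bsr`/`lzcnt` result. A standalone self-check of the identity (a sketch):

    #include <assert.h>
    int main(void)
    {
        unsigned x;
        for (x = 0; x < 32; x++)
            assert(31 - x == (x ^ 31));  /* holds because 31 is all-ones in 5 bits */
        return 0;
    }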

View File

@ -23,7 +23,7 @@
# define INLINE_KEYWORD
#endif
#if defined(__GNUC__)
#if defined(__GNUC__) || defined(__ICCARM__)
# define FORCE_INLINE_ATTR __attribute__((always_inline))
#elif defined(_MSC_VER)
# define FORCE_INLINE_ATTR __forceinline
@ -61,11 +61,18 @@
# define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
#endif
/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
#if defined(__GNUC__)
# define UNUSED_ATTR __attribute__((unused))
#else
# define UNUSED_ATTR
#endif
/* force no inlining */
#ifdef _MSC_VER
# define FORCE_NOINLINE static __declspec(noinline)
#else
# ifdef __GNUC__
# if defined(__GNUC__) || defined(__ICCARM__)
# define FORCE_NOINLINE static __attribute__((__noinline__))
# else
# define FORCE_NOINLINE static
@ -76,7 +83,7 @@
#ifndef __has_attribute
#define __has_attribute(x) 0 /* Compatibility with non-clang compilers. */
#endif
#if defined(__GNUC__)
#if defined(__GNUC__) || defined(__ICCARM__)
# define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
#else
# define TARGET_ATTRIBUTE(target)
@ -127,9 +134,14 @@
} \
}
/* vectorization */
/* vectorization
* older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax */
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ > 5
# define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
# if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
# define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
# else
# define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")")
# endif
#else
# define DONT_VECTORIZE
#endif
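Either expansion is applied at function granularity; a sketch of intended use follows (note that the pragma form, used on very old GCC, affects subsequent functions in the translation unit, not just the annotated one):

    DONT_VECTORIZE
    static void byteCopy(char* dst, const char* src, size_t n)
    {
        size_t i;
        for (i = 0; i < n; i++)
            dst[i] = src[i];   /* loop stays scalar : tree-vectorizer disabled */
    }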

View File

@ -308,7 +308,7 @@ If there is an error, the function will return an error code, which can be teste
*******************************************/
/* FSE buffer bounds */
#define FSE_NCOUNTBOUND 512
#define FSE_BLOCKBOUND(size) (size + (size>>7))
#define FSE_BLOCKBOUND(size) (size + (size>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */)
#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
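The enlarged bound covers the final flush of the FSE states (4 bytes) plus one full bitstream container, so worst-case destination buffers can still be sized statically, e.g. (a sketch; the 1556 figure assumes an 8-byte size_t):

    #define SRC_SIZE 1024
    /* 512 + (1024 + (1024>>7) + 4 + 8) = 1556 bytes when sizeof(size_t)==8 */
    static unsigned char cBuffer[FSE_COMPRESSBOUND(SRC_SIZE)];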

View File

@ -52,7 +52,9 @@
#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
/* check and forward error code */
#ifndef CHECK_F
#define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; }
#endif
/* **************************************************************

View File

@ -47,6 +47,79 @@ extern "C" {
#define MEM_STATIC_ASSERT(c) { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
/* detects whether we are being compiled under msan */
#if defined (__has_feature)
# if __has_feature(memory_sanitizer)
# define MEMORY_SANITIZER 1
# endif
#endif
#if defined (MEMORY_SANITIZER)
/* Not all platforms that support msan provide sanitizers/msan_interface.h.
* We therefore declare the functions we need ourselves, rather than trying to
* include the header file... */
#include <stdint.h> /* intptr_t */
/* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size);
/* Make memory region fully uninitialized (without changing its contents).
This is a legacy interface that does not update origin information. Use
__msan_allocated_memory() instead. */
void __msan_poison(const volatile void *a, size_t size);
/* Returns the offset of the first (at least partially) poisoned byte in the
memory range, or -1 if the whole range is good. */
intptr_t __msan_test_shadow(const volatile void *x, size_t size);
#endif
/* detects whether we are being compiled under asan */
#if defined (__has_feature)
# if __has_feature(address_sanitizer)
# define ADDRESS_SANITIZER 1
# endif
#elif defined(__SANITIZE_ADDRESS__)
# define ADDRESS_SANITIZER 1
#endif
#if defined (ADDRESS_SANITIZER)
/* Not all platforms that support asan provide sanitizers/asan_interface.h.
* We therefore declare the functions we need ourselves, rather than trying to
* include the header file... */
/**
* Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
*
* This memory must be previously allocated by your program. Instrumented
* code is forbidden from accessing addresses in this region until it is
* unpoisoned. This function is not guaranteed to poison the entire region -
* it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
* alignment restrictions.
*
* \note This function is not thread-safe because no two threads can poison or
* unpoison memory in the same memory region simultaneously.
*
* \param addr Start of memory region.
* \param size Size of memory region. */
void __asan_poison_memory_region(void const volatile *addr, size_t size);
/**
* Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
*
* This memory must be previously allocated by your program. Accessing
* addresses in this region is allowed until this region is poisoned again.
* This function could unpoison a super-region of <c>[addr, addr+size)</c> due
* to ASan alignment restrictions.
*
* \note This function is not thread-safe because no two threads can
* poison or unpoison memory in the same memory region simultaneously.
*
* \param addr Start of memory region.
* \param size Size of memory region. */
void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
#endif
/*-**************************************************************
* Basic Types
@ -102,7 +175,7 @@ MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (size
#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
# define MEM_FORCE_MEMORY_ACCESS 2
# elif defined(__INTEL_COMPILER) || defined(__GNUC__)
# elif defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)
# define MEM_FORCE_MEMORY_ACCESS 1
# endif
#endif
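These declarations let zstd bracket its internal suballocations with redzones (see `zstd_cwksp.h` further down); the usage pattern is symmetric (a sketch, only effective under `-fsanitize=address`):

    __asan_poison_memory_region(buf, size);    /* any access to buf now reports */
    /* ... region parked / quarantined ... */
    __asan_unpoison_memory_region(buf, size);  /* accesses become legal again */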

View File

@ -127,9 +127,13 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
ctx->queueTail = 0;
ctx->numThreadsBusy = 0;
ctx->queueEmpty = 1;
(void)ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);
(void)ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL);
(void)ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);
{
int error = 0;
error |= ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);
error |= ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL);
error |= ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);
if (error) { POOL_free(ctx); return NULL; }
}
ctx->shutdown = 0;
/* Allocate space for the thread handles */
ctx->threads = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
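The grouped form works because POSIX init functions (and these wrappers) return 0 on success and a nonzero error number on failure, so the results can be OR-accumulated and tested once. A sketch of the convention, with hypothetical `init_a`/`init_b` helpers:

    int error = 0;
    error |= init_a();   /* each returns 0 on success, nonzero on failure */
    error |= init_b();
    if (error) { /* at least one init failed : release everything and bail out */ }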

View File

@ -14,6 +14,8 @@
* This file will hold wrapper for systems, which do not support pthreads
*/
#include "threading.h"
/* create fake symbol to avoid empty translation unit warning */
int g_ZSTD_threading_useless_symbol;
@ -28,7 +30,6 @@ int g_ZSTD_threading_useless_symbol;
/* === Dependencies === */
#include <process.h>
#include <errno.h>
#include "threading.h"
/* === Implementation === */
@ -73,3 +74,47 @@ int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr)
}
#endif /* ZSTD_MULTITHREAD */
#if defined(ZSTD_MULTITHREAD) && DEBUGLEVEL >= 1 && !defined(_WIN32)
#include <stdlib.h>
int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr)
{
*mutex = (pthread_mutex_t*)malloc(sizeof(pthread_mutex_t));
if (!*mutex)
return 1;
return pthread_mutex_init(*mutex, attr);
}
int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex)
{
if (!*mutex)
return 0;
{
int const ret = pthread_mutex_destroy(*mutex);
free(*mutex);
return ret;
}
}
int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr)
{
*cond = (pthread_cond_t*)malloc(sizeof(pthread_cond_t));
if (!*cond)
return 1;
return pthread_cond_init(*cond, attr);
}
int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond)
{
if (!*cond)
return 0;
{
int const ret = pthread_cond_destroy(*cond);
free(*cond);
return ret;
}
}
#endif

View File

@ -13,6 +13,8 @@
#ifndef THREADING_H_938743
#define THREADING_H_938743
#include "debug.h"
#if defined (__cplusplus)
extern "C" {
#endif
@ -75,10 +77,12 @@ int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr);
*/
#elif defined(ZSTD_MULTITHREAD) /* posix assumed ; need a better detection method */
#elif defined(ZSTD_MULTITHREAD) /* posix assumed ; need a better detection method */
/* === POSIX Systems === */
# include <pthread.h>
#if DEBUGLEVEL < 1
#define ZSTD_pthread_mutex_t pthread_mutex_t
#define ZSTD_pthread_mutex_init(a, b) pthread_mutex_init((a), (b))
#define ZSTD_pthread_mutex_destroy(a) pthread_mutex_destroy((a))
@ -96,6 +100,33 @@ int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr);
#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
#define ZSTD_pthread_join(a, b) pthread_join((a),(b))
#else /* DEBUGLEVEL >= 1 */
/* Debug implementation of threading.
* In this implementation we use pointers for mutexes and condition variables.
* This way, if we forget to init/destroy them the program will crash or ASAN
* will report leaks.
*/
#define ZSTD_pthread_mutex_t pthread_mutex_t*
int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr);
int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex);
#define ZSTD_pthread_mutex_lock(a) pthread_mutex_lock(*(a))
#define ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock(*(a))
#define ZSTD_pthread_cond_t pthread_cond_t*
int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr);
int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond);
#define ZSTD_pthread_cond_wait(a, b) pthread_cond_wait(*(a), *(b))
#define ZSTD_pthread_cond_signal(a) pthread_cond_signal(*(a))
#define ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast(*(a))
#define ZSTD_pthread_t pthread_t
#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
#define ZSTD_pthread_join(a, b) pthread_join((a),(b))
#endif
#else /* ZSTD_MULTITHREAD not defined */
/* No multithreading support */
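Under `DEBUGLEVEL >= 1` the pointer-based types make forgotten init/destroy calls loud rather than silent (a sketch of the usage the macros above expand around):

    ZSTD_pthread_mutex_t m;                 /* really a pthread_mutex_t* here */
    ZSTD_pthread_mutex_init(&m, NULL);      /* malloc + pthread_mutex_init */
    ZSTD_pthread_mutex_lock(&m);            /* expands to pthread_mutex_lock(*(&m)) */
    ZSTD_pthread_mutex_unlock(&m);
    ZSTD_pthread_mutex_destroy(&m);         /* skipping this leaks -> ASAN reports it */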

View File

@ -53,7 +53,8 @@
# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
# define XXH_FORCE_MEMORY_ACCESS 2
# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
(defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
(defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) || \
defined(__ICCARM__)
# define XXH_FORCE_MEMORY_ACCESS 1
# endif
#endif
@ -120,7 +121,7 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp
# define INLINE_KEYWORD
#endif
#if defined(__GNUC__)
#if defined(__GNUC__) || defined(__ICCARM__)
# define FORCE_INLINE_ATTR __attribute__((always_inline))
#elif defined(_MSC_VER)
# define FORCE_INLINE_ATTR __forceinline
@ -206,7 +207,12 @@ static U64 XXH_read64(const void* memPtr)
# define XXH_rotl32(x,r) _rotl(x,r)
# define XXH_rotl64(x,r) _rotl64(x,r)
#else
#if defined(__ICCARM__)
# include <intrinsics.h>
# define XXH_rotl32(x,r) __ROR(x,(32 - r))
#else
# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
#endif
# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
#endif

View File

@ -56,9 +56,9 @@ extern "C" {
/**
* Return the specified error if the condition evaluates to true.
*
* In debug modes, prints additional information. In order to do that
* (particularly, printing the conditional that failed), this can't just wrap
* RETURN_ERROR().
* In debug modes, prints additional information.
* In order to do that (particularly, printing the conditional that failed),
* this can't just wrap RETURN_ERROR().
*/
#define RETURN_ERROR_IF(cond, err, ...) \
if (cond) { \
@ -197,79 +197,56 @@ static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
static void ZSTD_copy16(void* dst, const void* src) { memcpy(dst, src, 16); }
#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }
#define WILDCOPY_OVERLENGTH 8
#define VECLEN 16
#define WILDCOPY_OVERLENGTH 32
#define WILDCOPY_VECLEN 16
typedef enum {
ZSTD_no_overlap,
ZSTD_overlap_src_before_dst,
ZSTD_overlap_src_before_dst
/* ZSTD_overlap_dst_before_src, */
} ZSTD_overlap_e;
/*! ZSTD_wildcopy() :
* custom version of memcpy(), can overwrite up to WILDCOPY_OVERLENGTH bytes (if length==0) */
* Custom version of memcpy(), can over-read/write up to WILDCOPY_OVERLENGTH bytes (if length==0)
* @param ovtype controls the overlap detection
* - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
* - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
* The src buffer must be before the dst buffer.
*/
MEM_STATIC FORCE_INLINE_ATTR DONT_VECTORIZE
void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e ovtype)
void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
{
ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
BYTE* const oend = op + length;
assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff < -8));
if (length < VECLEN || (ovtype == ZSTD_overlap_src_before_dst && diff < VECLEN)) {
do
COPY8(op, ip)
while (op < oend);
}
else {
if ((length & 8) == 0)
COPY8(op, ip);
do {
assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff <= -WILDCOPY_VECLEN));
if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
/* Handle short offset copies. */
do {
COPY8(op, ip)
} while (op < oend);
} else {
assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
/* Separate out the first two COPY16() calls because the copy length is
* almost certain to be short, so the branches have different
* probabilities.
* On gcc-9 unrolling once is +1.6%, twice is +2%, thrice is +1.8%.
* On clang-8 unrolling once is +1.4%, twice is +3.3%, thrice is +3%.
*/
COPY16(op, ip);
}
while (op < oend);
}
}
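/* Example : if dst == src + 8 with ovtype == ZSTD_overlap_src_before_dst,
 * then diff == 8 < WILDCOPY_VECLEN and the byte-wise COPY8 path runs :
 * each 8-byte copy reads only bytes already written, whereas a COPY16
 * here would read 8 bytes that have not been written yet. */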
/*! ZSTD_wildcopy_16min() :
* same semantics as ZSTD_wilcopy() except guaranteed to be able to copy 16 bytes at the start */
MEM_STATIC FORCE_INLINE_ATTR DONT_VECTORIZE
void ZSTD_wildcopy_16min(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e ovtype)
{
ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
BYTE* const oend = op + length;
assert(length >= 8);
assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff < -8));
if (ovtype == ZSTD_overlap_src_before_dst && diff < VECLEN) {
do
COPY8(op, ip)
while (op < oend);
}
else {
if ((length & 8) == 0)
COPY8(op, ip);
do {
COPY16(op, ip);
}
while (op < oend);
if (op >= oend) return;
do {
COPY16(op, ip);
COPY16(op, ip);
}
while (op < oend);
}
}
MEM_STATIC void ZSTD_wildcopy_e(void* dst, const void* src, void* dstEnd) /* should be faster for decoding, but strangely, not verified on all platform */
{
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
BYTE* const oend = (BYTE*)dstEnd;
do
COPY8(op, ip)
while (op < oend);
}
/*-*******************************************
* Private declarations
@ -323,7 +300,9 @@ MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus
_BitScanReverse(&r, val);
return (unsigned)r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */
return 31 - __builtin_clz(val);
return __builtin_clz (val) ^ 31;
# elif defined(__ICCARM__) /* IAR Intrinsic */
return 31 - __CLZ(val);
# else /* Software version */
static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;

File diff suppressed because it is too large Load Diff

View File

@ -19,6 +19,7 @@
* Dependencies
***************************************/
#include "zstd_internal.h"
#include "zstd_cwksp.h"
#ifdef ZSTD_MULTITHREAD
# include "zstdmt_compress.h"
#endif
@ -134,9 +135,15 @@ typedef struct {
typedef struct ZSTD_matchState_t ZSTD_matchState_t;
struct ZSTD_matchState_t {
ZSTD_window_t window; /* State for window round buffer management */
U32 loadedDictEnd; /* index of end of dictionary, within context's referential. When dict referential is copied into active context (i.e. not attached), effectively same value as dictSize, since referential starts from zero */
U32 loadedDictEnd; /* index of end of dictionary, within context's referential.
* When loadedDictEnd != 0, a dictionary is in use, and still valid.
* This relies on a mechanism to set loadedDictEnd=0 when dictionary is no longer within distance.
* Such mechanism is provided within ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity().
* When dict referential is copied into active context (i.e. not attached),
* loadedDictEnd == dictSize, since referential starts from zero.
*/
U32 nextToUpdate; /* index from which to continue table update */
U32 hashLog3; /* dispatch table : larger == faster, more memory */
U32 hashLog3; /* dispatch table for matches of len==3 : larger == faster, more memory */
U32* hashTable;
U32* hashTable3;
U32* chainTable;
@ -186,6 +193,13 @@ typedef struct {
size_t capacity; /* The capacity starting from `seq` pointer */
} rawSeqStore_t;
typedef struct {
int collectSequences;
ZSTD_Sequence* seqStart;
size_t seqIndex;
size_t maxSequences;
} SeqCollector;
struct ZSTD_CCtx_params_s {
ZSTD_format_e format;
ZSTD_compressionParameters cParams;
@ -197,6 +211,9 @@ struct ZSTD_CCtx_params_s {
size_t targetCBlockSize; /* Tries to fit compressed block size to be around targetCBlockSize.
* No target when targetCBlockSize == 0.
* There is no guarantee on compressed block size */
int srcSizeHint; /* User's best guess of source size.
* Hint is not valid when srcSizeHint == 0.
* There is no guarantee that hint is close to actual source size */
ZSTD_dictAttachPref_e attachDictPref;
ZSTD_literalCompressionMode_e literalCompressionMode;
@ -222,9 +239,7 @@ struct ZSTD_CCtx_s {
ZSTD_CCtx_params appliedParams;
U32 dictID;
int workSpaceOversizedDuration;
void* workSpace;
size_t workSpaceSize;
ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
size_t blockSize;
unsigned long long pledgedSrcSizePlusOne; /* this way, 0 (default) == unknown */
unsigned long long consumedSrcSize;
@ -232,6 +247,8 @@ struct ZSTD_CCtx_s {
XXH64_state_t xxhState;
ZSTD_customMem customMem;
size_t staticSize;
SeqCollector seqCollector;
int isFirstBlock;
seqStore_t seqStore; /* sequences storage ptrs */
ldmState_t ldmState; /* long distance matching state */
@ -331,26 +348,57 @@ MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
return (srcSize >> minlog) + 2;
}
/*! ZSTD_safecopyLiterals() :
* memcpy() function that won't read more than WILDCOPY_OVERLENGTH bytes past ilimit_w.
* Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single
* large copies.
*/
static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) {
assert(iend > ilimit_w);
if (ip <= ilimit_w) {
ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);
op += ilimit_w - ip;
ip = ilimit_w;
}
while (ip < iend) *op++ = *ip++;
}
/*! ZSTD_storeSeq() :
* Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
* `offsetCode` : distance to match + 3 (values 1-3 are repCodes).
* Store a sequence (litlen, litPtr, offCode and mlBase) into seqStore_t.
* `offCode` : distance to match + ZSTD_REP_MOVE (values <= ZSTD_REP_MOVE are repCodes).
* `mlBase` : matchLength - MINMATCH
* Allowed to overread literals up to litLimit.
*/
MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t mlBase)
HINT_INLINE UNUSED_ATTR
void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offCode, size_t mlBase)
{
BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
BYTE const* const litEnd = literals + litLength;
#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)
static const BYTE* g_start = NULL;
if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */
{ U32 const pos = (U32)((const BYTE*)literals - g_start);
DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offsetCode);
pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offCode);
}
#endif
assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
/* copy Literals */
assert(seqStorePtr->maxNbLit <= 128 KB);
assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
ZSTD_wildcopy(seqStorePtr->lit, literals, litLength, ZSTD_no_overlap);
assert(literals + litLength <= litLimit);
if (litEnd <= litLimit_w) {
/* Common case we can use wildcopy.
* First copy 16 bytes, because literals are likely short.
*/
assert(WILDCOPY_OVERLENGTH >= 16);
ZSTD_copy16(seqStorePtr->lit, literals);
if (litLength > 16) {
ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);
}
} else {
ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w);
}
seqStorePtr->lit += litLength;
/* literal Length */
@ -362,7 +410,7 @@ MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const v
seqStorePtr->sequences[0].litLength = (U16)litLength;
/* match offset */
seqStorePtr->sequences[0].offset = offsetCode + 1;
seqStorePtr->sequences[0].offset = offCode + 1;
/* match Length */
if (mlBase>0xFFFF) {
@ -763,24 +811,37 @@ ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
/* Similar to ZSTD_window_enforceMaxDist(),
* but only invalidates dictionary
* when input progresses beyond window size. */
* when input progresses beyond window size.
* assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL)
* loadedDictEnd uses same referential as window->base
* maxDist is the window size */
MEM_STATIC void
ZSTD_checkDictValidity(ZSTD_window_t* window,
ZSTD_checkDictValidity(const ZSTD_window_t* window,
const void* blockEnd,
U32 maxDist,
U32* loadedDictEndPtr,
const ZSTD_matchState_t** dictMatchStatePtr)
{
U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
(unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
assert(loadedDictEndPtr != NULL);
assert(dictMatchStatePtr != NULL);
{ U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
U32 const loadedDictEnd = *loadedDictEndPtr;
DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
(unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
assert(blockEndIdx >= loadedDictEnd);
if (loadedDictEnd && (blockEndIdx > maxDist + loadedDictEnd)) {
/* On reaching window size, dictionaries are invalidated */
if (loadedDictEndPtr) *loadedDictEndPtr = 0;
if (dictMatchStatePtr) *dictMatchStatePtr = NULL;
}
if (blockEndIdx > loadedDictEnd + maxDist) {
/* On reaching window size, dictionaries are invalidated.
* For simplification, if window size is reached anywhere within next block,
* the dictionary is invalidated for the full block.
*/
DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
*loadedDictEndPtr = 0;
*dictMatchStatePtr = NULL;
} else {
if (*loadedDictEndPtr != 0) {
DEBUGLOG(6, "dictionary considered valid for current block");
} } }
}
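/* Example : with maxDist == 1<<17 == 131072 and loadedDictEnd == 4096, the
 * dictionary remains valid while blockEndIdx <= 135168 ; the first block
 * ending past that index clears both *loadedDictEndPtr and *dictMatchStatePtr
 * for the whole block. */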
/**
@ -822,6 +883,17 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
return contiguous;
}
MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 current, unsigned windowLog)
{
U32 const maxDistance = 1U << windowLog;
U32 const lowestValid = ms->window.lowLimit;
U32 const withinWindow = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid;
U32 const isDictionary = (ms->loadedDictEnd != 0);
U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
return matchLowest;
}
/* debug functions */
#if (DEBUGLEVEL>=2)
@ -880,7 +952,7 @@ ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
const void* dict, size_t dictSize,
const ZSTD_CDict* cdict,
ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);
const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize);
void ZSTD_resetSeqStore(seqStore_t* ssPtr);
@ -895,7 +967,7 @@ size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
ZSTD_dictContentType_e dictContentType,
ZSTD_dictTableLoadMethod_e dtlm,
const ZSTD_CDict* cdict,
ZSTD_CCtx_params params,
const ZSTD_CCtx_params* params,
unsigned long long pledgedSrcSize);
/* ZSTD_compress_advanced_internal() :
@ -904,7 +976,7 @@ size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize,
ZSTD_CCtx_params params);
const ZSTD_CCtx_params* params);
/* ZSTD_writeLastEmptyBlock() :

View File

@ -70,7 +70,7 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
ZSTD_strategy strategy, int disableLiteralCompression,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
void* workspace, size_t wkspSize,
void* entropyWorkspace, size_t entropyWorkspaceSize,
const int bmi2)
{
size_t const minGain = ZSTD_minGain(srcSize, strategy);
@ -99,10 +99,15 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
{ HUF_repeat repeat = prevHuf->repeatMode;
int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2)
: HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
cLitSize = singleStream ?
HUF_compress1X_repeat(
ostart+lhSize, dstCapacity-lhSize, src, srcSize,
255, 11, entropyWorkspace, entropyWorkspaceSize,
(HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) :
HUF_compress4X_repeat(
ostart+lhSize, dstCapacity-lhSize, src, srcSize,
255, 11, entropyWorkspace, entropyWorkspaceSize,
(HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
if (repeat != HUF_repeat_none) {
/* reused the existing table */
hType = set_repeat;

View File

@ -23,7 +23,7 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
ZSTD_strategy strategy, int disableLiteralCompression,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
void* workspace, size_t wkspSize,
void* entropyWorkspace, size_t entropyWorkspaceSize,
const int bmi2);
#endif /* ZSTD_COMPRESS_LITERALS_H */

View File

@ -222,7 +222,7 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
const BYTE* codeTable, size_t nbSeq,
const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
const FSE_CTable* prevCTable, size_t prevCTableSize,
void* workspace, size_t workspaceSize)
void* entropyWorkspace, size_t entropyWorkspaceSize)
{
BYTE* op = (BYTE*)dst;
const BYTE* const oend = op + dstCapacity;
@ -238,7 +238,7 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
memcpy(nextCTable, prevCTable, prevCTableSize);
return 0;
case set_basic:
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize)); /* note : could be pre-calculated */
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize)); /* note : could be pre-calculated */
return 0;
case set_compressed: {
S16 norm[MaxSeq + 1];
@ -252,7 +252,7 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
{ size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
FORWARD_IF_ERROR(NCountSize);
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize));
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, entropyWorkspace, entropyWorkspaceSize));
return NCountSize;
}
}

View File

@ -35,7 +35,7 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
const BYTE* codeTable, size_t nbSeq,
const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
const FSE_CTable* prevCTable, size_t prevCTableSize,
void* workspace, size_t workspaceSize);
void* entropyWorkspace, size_t entropyWorkspaceSize);
size_t ZSTD_encodeSequences(
void* dst, size_t dstCapacity,

View File

@ -0,0 +1,535 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_CWKSP_H
#define ZSTD_CWKSP_H
/*-*************************************
* Dependencies
***************************************/
#include "zstd_internal.h"
#if defined (__cplusplus)
extern "C" {
#endif
/*-*************************************
* Constants
***************************************/
/* define "workspace is too large" as this number of times larger than needed */
#define ZSTD_WORKSPACETOOLARGE_FACTOR 3
/* when workspace is continuously too large
* during at least this number of times,
* context's memory usage is considered wasteful,
* because it's sized to handle a worst case scenario which rarely happens.
* In which case, resize it down to free some memory */
#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128
/* Since the workspace is effectively its own little malloc implementation /
* arena, when we run under ASAN, we should similarly insert redzones between
* each internal element of the workspace, so ASAN will catch overruns that
* reach outside an object but that stay inside the workspace.
*
* This defines the size of that redzone.
*/
#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif
/*-*************************************
* Structures
***************************************/
typedef enum {
ZSTD_cwksp_alloc_objects,
ZSTD_cwksp_alloc_buffers,
ZSTD_cwksp_alloc_aligned
} ZSTD_cwksp_alloc_phase_e;
/**
* Zstd fits all its internal datastructures into a single contiguous buffer,
* so that it only needs to perform a single OS allocation (or so that a buffer
* can be provided to it and it can perform no allocations at all). This buffer
* is called the workspace.
*
* Several optimizations complicate that process of allocating memory ranges
* from this workspace for each internal datastructure:
*
* - These different internal datastructures have different setup requirements:
*
* - The static objects need to be cleared once and can then be trivially
* reused for each compression.
*
* - Various buffers don't need to be initialized at all--they are always
* written into before they're read.
*
* - The matchstate tables have a unique requirement that they don't need
* their memory to be totally cleared, but they do need the memory to have
* some bound, i.e., a guarantee that all values in the memory they've been
* allocated are less than some maximum value (which is the starting value
* for the indices that they will then use for compression). When this
* guarantee is provided to them, they can use the memory without any setup
* work. When it can't, they have to clear the area.
*
* - These buffers also have different alignment requirements.
*
* - We would like to reuse the objects in the workspace for multiple
* compressions without having to perform any expensive reallocation or
* reinitialization work.
*
* - We would like to be able to efficiently reuse the workspace across
* multiple compressions **even when the compression parameters change** and
* we need to resize some of the objects (where possible).
*
* To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
* abstraction was created. It works as follows:
*
* Workspace Layout:
*
* [ ... workspace ... ]
* [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
*
* The various objects that live in the workspace are divided into the
* following categories, and are allocated separately:
*
* - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
* so that literally everything fits in a single buffer. Note: if present,
* this must be the first object in the workspace, since ZSTD_free{CCtx,
* CDict}() rely on a pointer comparison to see whether one or two frees are
* required.
*
* - Fixed size objects: these are fixed-size, fixed-count objects that are
* nonetheless "dynamically" allocated in the workspace so that we can
* control how they're initialized separately from the broader ZSTD_CCtx.
* Examples:
* - Entropy Workspace
* - 2 x ZSTD_compressedBlockState_t
* - CDict dictionary contents
*
* - Tables: these are any of several different datastructures (hash tables,
* chain tables, binary trees) that all respect a common format: they are
* uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
* Their sizes depend on the cparams.
*
* - Aligned: these buffers are used for various purposes that require 4 byte
* alignment, but don't require any initialization before they're used.
*
* - Buffers: these buffers are used for various purposes that don't require
* any alignment or initialization before they're used. This means they can
* be moved around at no cost for a new compression.
*
* Allocating Memory:
*
* The various types of objects must be allocated in order, so they can be
* correctly packed into the workspace buffer. That order is:
*
* 1. Objects
* 2. Buffers
* 3. Aligned
* 4. Tables
*
* Attempts to reserve objects of different types out of order will fail.
*/
typedef struct {
void* workspace;
void* workspaceEnd;
void* objectEnd;
void* tableEnd;
void* tableValidEnd;
void* allocStart;
int allocFailed;
int workspaceOversizedDuration;
ZSTD_cwksp_alloc_phase_e phase;
} ZSTD_cwksp;
/*-*************************************
* Functions
***************************************/
MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
(void)ws;
assert(ws->workspace <= ws->objectEnd);
assert(ws->objectEnd <= ws->tableEnd);
assert(ws->objectEnd <= ws->tableValidEnd);
assert(ws->tableEnd <= ws->allocStart);
assert(ws->tableValidEnd <= ws->allocStart);
assert(ws->allocStart <= ws->workspaceEnd);
}
/**
* Align must be a power of 2.
*/
MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
size_t const mask = align - 1;
assert((align & mask) == 0);
return (size + mask) & ~mask;
}
/**
* Use this to determine how much space in the workspace we will consume to
* allocate this object. (Normally it should be exactly the size of the object,
* but under special conditions, like ASAN, where we pad each object, it might
* be larger.)
*
* Since tables aren't currently redzoned, you don't need to call through this
* to figure out how much space you need for the matchState tables. Everything
* else is though.
*/
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#else
return size;
#endif
}
MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
assert(phase >= ws->phase);
if (phase > ws->phase) {
if (ws->phase < ZSTD_cwksp_alloc_buffers &&
phase >= ZSTD_cwksp_alloc_buffers) {
ws->tableValidEnd = ws->objectEnd;
}
if (ws->phase < ZSTD_cwksp_alloc_aligned &&
phase >= ZSTD_cwksp_alloc_aligned) {
/* If unaligned allocations down from a too-large top have left us
* unaligned, we need to realign our alloc ptr. Technically, this
* can consume space that is unaccounted for in the neededSpace
* calculation. However, I believe this can only happen when the
* workspace is too large, and specifically when it is too large
* by a larger margin than the space that will be consumed. */
/* TODO: cleaner, compiler warning friendly way to do this??? */
ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
if (ws->allocStart < ws->tableValidEnd) {
ws->tableValidEnd = ws->allocStart;
}
}
ws->phase = phase;
}
}
/**
* Returns whether this object/buffer/etc was allocated in this workspace.
*/
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
}
/**
* Internal function. Do not use directly.
*/
MEM_STATIC void* ZSTD_cwksp_reserve_internal(
ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
void* alloc;
void* bottom = ws->tableEnd;
ZSTD_cwksp_internal_advance_phase(ws, phase);
alloc = (BYTE *)ws->allocStart - bytes;
#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
/* over-reserve space */
alloc = (BYTE *)alloc - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif
DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
ZSTD_cwksp_assert_internal_consistency(ws);
assert(alloc >= bottom);
if (alloc < bottom) {
DEBUGLOG(4, "cwksp: alloc failed!");
ws->allocFailed = 1;
return NULL;
}
if (alloc < ws->tableValidEnd) {
ws->tableValidEnd = alloc;
}
ws->allocStart = alloc;
#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
/* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
* either side. */
alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
__asan_unpoison_memory_region(alloc, bytes);
#endif
return alloc;
}
/**
* Reserves and returns unaligned memory.
*/
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}
/**
* Reserves and returns memory sized on and aligned on sizeof(unsigned).
*/
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
assert((bytes & (sizeof(U32)-1)) == 0);
return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
}
/**
* Aligned on sizeof(unsigned). These buffers have the special property that
* their values remain constrained, allowing us to re-use them without
* memset()-ing them.
*/
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
void* alloc = ws->tableEnd;
void* end = (BYTE *)alloc + bytes;
void* top = ws->allocStart;
DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
assert((bytes & (sizeof(U32)-1)) == 0);
ZSTD_cwksp_internal_advance_phase(ws, phase);
ZSTD_cwksp_assert_internal_consistency(ws);
assert(end <= top);
if (end > top) {
DEBUGLOG(4, "cwksp: table alloc failed!");
ws->allocFailed = 1;
return NULL;
}
ws->tableEnd = end;
#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
__asan_unpoison_memory_region(alloc, bytes);
#endif
return alloc;
}
/**
* Aligned on sizeof(void*).
*/
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
void* alloc = ws->objectEnd;
void* end = (BYTE*)alloc + roundedBytes;
#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
/* over-reserve space */
end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif
DEBUGLOG(5,
"cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
assert((bytes & (sizeof(void*)-1)) == 0);
ZSTD_cwksp_assert_internal_consistency(ws);
/* we must be in the first phase, no advance is possible */
if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
DEBUGLOG(4, "cwksp: object alloc failed!");
ws->allocFailed = 1;
return NULL;
}
ws->objectEnd = end;
ws->tableEnd = end;
ws->tableValidEnd = end;
#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
/* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
* either side. */
alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
__asan_unpoison_memory_region(alloc, bytes);
#endif
return alloc;
}
MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
/* To validate that the table re-use logic is sound, and that we don't
* access table space that we haven't cleaned, we re-"poison" the table
* space every time we mark it dirty. */
{
size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
assert(__msan_test_shadow(ws->objectEnd, size) == -1);
__msan_poison(ws->objectEnd, size);
}
#endif
assert(ws->tableValidEnd >= ws->objectEnd);
assert(ws->tableValidEnd <= ws->allocStart);
ws->tableValidEnd = ws->objectEnd;
ZSTD_cwksp_assert_internal_consistency(ws);
}
MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
assert(ws->tableValidEnd >= ws->objectEnd);
assert(ws->tableValidEnd <= ws->allocStart);
if (ws->tableValidEnd < ws->tableEnd) {
ws->tableValidEnd = ws->tableEnd;
}
ZSTD_cwksp_assert_internal_consistency(ws);
}
/**
* Zero the part of the allocated tables not already marked clean.
*/
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
assert(ws->tableValidEnd >= ws->objectEnd);
assert(ws->tableValidEnd <= ws->allocStart);
if (ws->tableValidEnd < ws->tableEnd) {
memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
}
ZSTD_cwksp_mark_tables_clean(ws);
}
/**
* Invalidates table allocations.
* All other allocations remain valid.
*/
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
DEBUGLOG(4, "cwksp: clearing tables!");
#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
{
size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
__asan_poison_memory_region(ws->objectEnd, size);
}
#endif
ws->tableEnd = ws->objectEnd;
ZSTD_cwksp_assert_internal_consistency(ws);
}
/**
* Invalidates all buffer, aligned, and table allocations.
* Object allocations remain valid.
*/
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
DEBUGLOG(4, "cwksp: clearing!");
#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
/* To validate that the context re-use logic is sound, and that we don't
* access stuff that this compression hasn't initialized, we re-"poison"
* the workspace (or at least the non-static, non-table parts of it)
* every time we start a new compression. */
{
size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd;
__msan_poison(ws->tableValidEnd, size);
}
#endif
#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
{
size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
__asan_poison_memory_region(ws->objectEnd, size);
}
#endif
ws->tableEnd = ws->objectEnd;
ws->allocStart = ws->workspaceEnd;
ws->allocFailed = 0;
if (ws->phase > ZSTD_cwksp_alloc_buffers) {
ws->phase = ZSTD_cwksp_alloc_buffers;
}
ZSTD_cwksp_assert_internal_consistency(ws);
}
/**
* The provided workspace takes ownership of the buffer [start, start+size).
* Any existing values in the workspace are ignored (the previously managed
* buffer, if present, must be separately freed).
*/
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
ws->workspace = start;
ws->workspaceEnd = (BYTE*)start + size;
ws->objectEnd = ws->workspace;
ws->tableValidEnd = ws->objectEnd;
ws->phase = ZSTD_cwksp_alloc_objects;
ZSTD_cwksp_clear(ws);
ws->workspaceOversizedDuration = 0;
ZSTD_cwksp_assert_internal_consistency(ws);
}
MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
void* workspace = ZSTD_malloc(size, customMem);
DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
RETURN_ERROR_IF(workspace == NULL, memory_allocation);
ZSTD_cwksp_init(ws, workspace, size);
return 0;
}
MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
void *ptr = ws->workspace;
DEBUGLOG(4, "cwksp: freeing workspace");
memset(ws, 0, sizeof(ZSTD_cwksp));
ZSTD_free(ptr, customMem);
}
/**
* Moves the management of a workspace from one cwksp to another. The src cwksp
* is left in an invalid state (src must be re-init()'ed before it's used again).
*/
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
*dst = *src;
memset(src, 0, sizeof(ZSTD_cwksp));
}
MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
}
MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
return ws->allocFailed;
}
/*-*************************************
* Functions Checking Free Space
***************************************/
MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}
MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
}
MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
return ZSTD_cwksp_check_available(
ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
}
MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
&& ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}
MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
ZSTD_cwksp* ws, size_t additionalNeededSpace) {
if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
ws->workspaceOversizedDuration++;
} else {
ws->workspaceOversizedDuration = 0;
}
}
#if defined (__cplusplus)
}
#endif
#endif /* ZSTD_CWKSP_H */
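Putting the pieces together, a consumer drives the workspace in the phase order described above (a sketch with illustrative sizes; real callers derive them from the compression parameters):

    ZSTD_cwksp ws;
    ZSTD_cwksp_init(&ws, buffer, bufferSize);  /* caller keeps ownership of buffer */
    {   /* phase order is enforced : objects, then buffers, then aligned, then tables */
        void* const obj   = ZSTD_cwksp_reserve_object(&ws, 64);               /* 1. objects */
        BYTE* const inBuf = ZSTD_cwksp_reserve_buffer(&ws, 128 * 1024);       /* 2. buffers */
        U32*  const seqs  = (U32*)ZSTD_cwksp_reserve_aligned(&ws, 1024);      /* 3. aligned */
        U32*  const hTbl  = (U32*)ZSTD_cwksp_reserve_table(&ws, (1u<<17) * sizeof(U32)); /* 4. tables */
        if (ZSTD_cwksp_reserve_failed(&ws)) { /* buffer too small : grow and re-init */ }
        (void)obj; (void)inBuf; (void)seqs; (void)hTbl;
    }
    ZSTD_cwksp_clear(&ws);  /* invalidates buffers/aligned/tables ; objects survive */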

View File

@ -65,6 +65,7 @@ size_t ZSTD_compressBlock_doubleFast_generic(
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
const U32 lowestValid = ms->window.dictLimit;
const U32 maxDistance = 1U << cParams->windowLog;
/* presumes that, if there is a dictionary, it must be using Attach mode */
const U32 prefixLowestIndex = (endIndex - lowestValid > maxDistance) ? endIndex - maxDistance : lowestValid;
const BYTE* const prefixLowest = base + prefixLowestIndex;
const BYTE* const iend = istart + srcSize;
@ -147,7 +148,7 @@ size_t ZSTD_compressBlock_doubleFast_generic(
const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
ip++;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
goto _match_stored;
}
@ -156,7 +157,7 @@ size_t ZSTD_compressBlock_doubleFast_generic(
&& ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
ip++;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
goto _match_stored;
}
@ -246,7 +247,7 @@ size_t ZSTD_compressBlock_doubleFast_generic(
offset_2 = offset_1;
offset_1 = offset;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
_match_stored:
/* match found */
@ -277,7 +278,7 @@ size_t ZSTD_compressBlock_doubleFast_generic(
const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
ip += repLength2;
@ -296,7 +297,7 @@ size_t ZSTD_compressBlock_doubleFast_generic(
U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH);
ip += rLength;
anchor = ip;
continue; /* faster when present ... (?) */
@ -369,9 +370,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
const BYTE* const ilimit = iend - 8;
const BYTE* const base = ms->window.base;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
const U32 maxDistance = 1U << cParams->windowLog;
const U32 lowestValid = ms->window.lowLimit;
const U32 lowLimit = (endIndex - lowestValid > maxDistance) ? endIndex - maxDistance : lowestValid;
const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
const U32 dictStartIndex = lowLimit;
const U32 dictLimit = ms->window.dictLimit;
const U32 prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit;
@ -412,7 +411,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
ip++;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
} else {
if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
@ -423,7 +422,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
offset_2 = offset_1;
offset_1 = offset;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
} else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
@ -448,7 +447,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
}
offset_2 = offset_1;
offset_1 = offset;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
} else {
ip += ((ip-anchor) >> kSearchStrength) + 1;
@ -480,7 +479,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
ip += repLength2;
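A note on the ZSTD_storeSeq() hunks above (and in the match-finder files that follow): every call site now passes the input end (`iend`) as an extra litLimit argument. A plausible reading, consistent with these call sites, is that the sequence store's fast path over-copies literals in fixed-size chunks and needs to know where the input buffer ends so it can fall back to an exact copy near the tail. A minimal standalone sketch of that guarded-copy idea, with hypothetical names rather than the verbatim zstd internals:

#include <string.h>

/* store_literals_sketch(): the fast path over-copies in 16-byte chunks, so it
 * needs litLimit to prove the over-read stays inside the input buffer
 * (dst is assumed to have matching slack, as the sequence store does). */
static void store_literals_sketch(unsigned char* dst, const unsigned char* lit,
                                  size_t litLength, const unsigned char* litLimit)
{
    if (litLength > 0 && lit + litLength + 16 <= litLimit) {
        size_t n = 0;
        do {                                  /* may copy up to 15 extra bytes */
            memcpy(dst + n, lit + n, 16);
            n += 16;
        } while (n < litLength);
    } else {
        memcpy(dst, lit, litLength);          /* exact copy near end of input */
    }
}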

View File

@ -8,7 +8,7 @@
* You may select, at your option, one of the above-listed licenses.
*/
#include "zstd_compress_internal.h"
#include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
#include "zstd_fast.h"
@ -43,8 +43,8 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
}
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_fast_generic(
FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_fast_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize,
U32 const mls)
@ -71,10 +71,10 @@ size_t ZSTD_compressBlock_fast_generic(
U32 offsetSaved = 0;
/* init */
DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
ip0 += (ip0 == prefixStart);
ip1 = ip0 + 1;
{
U32 const maxRep = (U32)(ip0 - prefixStart);
{ U32 const maxRep = (U32)(ip0 - prefixStart);
if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
}
@ -117,8 +117,7 @@ size_t ZSTD_compressBlock_fast_generic(
match0 = match1;
goto _offset;
}
{
size_t const step = ((ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
{ size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
assert(step >= 2);
ip0 += step;
ip1 += step;
@ -137,7 +136,7 @@ size_t ZSTD_compressBlock_fast_generic(
_match: /* Requires: ip0, match0, offcode */
/* Count the forward length */
mLength += ZSTD_count(ip0+mLength+4, match0+mLength+4, iend) + 4;
ZSTD_storeSeq(seqStore, ip0-anchor, anchor, offcode, mLength-MINMATCH);
ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH);
/* match found */
ip0 += mLength;
anchor = ip0;
@ -149,16 +148,15 @@ size_t ZSTD_compressBlock_fast_generic(
hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
while ( (ip0 <= ilimit)
&& ( (offset_2>0)
& (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) )) {
while ( ((ip0 <= ilimit) & (offset_2>0)) /* offset_2==0 means offset_2 is invalidated */
&& (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) {
/* store sequence */
size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
{ U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
ip0 += rLength;
ip1 = ip0 + 1;
ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH);
anchor = ip0;
continue; /* faster when present (confirmed on gcc-8) ... (?) */
}
@ -178,8 +176,7 @@ size_t ZSTD_compressBlock_fast(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
ZSTD_compressionParameters const* cParams = &ms->cParams;
U32 const mls = cParams->minMatch;
U32 const mls = ms->cParams.minMatch;
assert(ms->dictMatchState == NULL);
switch(mls)
{
@ -239,6 +236,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
/* init */
DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
ip += (dictAndPrefixLength == 0);
/* dictMatchState repCode checks don't currently handle repCode == 0
* disabling. */
@ -263,7 +261,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
ip++;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
} else if ( (matchIndex <= prefixStartIndex) ) {
size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
U32 const dictMatchIndex = dictHashTable[dictHash];
@ -283,7 +281,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
} /* catch up */
offset_2 = offset_1;
offset_1 = offset;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
}
} else if (MEM_read32(match) != MEM_read32(ip)) {
/* it's not a match, and we're not going to check the dictionary */
@ -298,7 +296,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
&& (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
offset_2 = offset_1;
offset_1 = offset;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
}
/* match found */
@ -323,7 +321,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
ip += repLength2;
anchor = ip;
@ -346,8 +344,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
ZSTD_compressionParameters const* cParams = &ms->cParams;
U32 const mls = cParams->minMatch;
U32 const mls = ms->cParams.minMatch;
assert(ms->dictMatchState != NULL);
switch(mls)
{
@ -379,9 +376,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
const BYTE* ip = istart;
const BYTE* anchor = istart;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
const U32 maxDistance = 1U << cParams->windowLog;
const U32 validLow = ms->window.lowLimit;
const U32 lowLimit = (endIndex - validLow > maxDistance) ? endIndex - maxDistance : validLow;
const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
const U32 dictStartIndex = lowLimit;
const BYTE* const dictStart = dictBase + dictStartIndex;
const U32 dictLimit = ms->window.dictLimit;
@ -392,6 +387,8 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
const BYTE* const ilimit = iend - 8;
U32 offset_1=rep[0], offset_2=rep[1];
DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic");
/* switch to "regular" variant if extDict is invalidated due to maxDistance */
if (prefixStartIndex == dictStartIndex)
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls);
@ -406,16 +403,17 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
const U32 repIndex = current + 1 - offset_1;
const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
size_t mLength;
hashTable[h] = current; /* update hash table */
assert(offset_1 <= current +1); /* check repIndex */
if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
ip++;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH);
ip += rLength;
anchor = ip;
} else {
if ( (matchIndex < dictStartIndex) ||
(MEM_read32(match) != MEM_read32(ip)) ) {
@ -423,21 +421,17 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
ip += ((ip-anchor) >> kSearchStrength) + stepSize;
continue;
}
{ const BYTE* matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
const BYTE* lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
U32 offset;
mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
{ const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
U32 const offset = current - matchIndex;
size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
offset = current - matchIndex;
offset_2 = offset_1;
offset_1 = offset;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
offset_2 = offset_1; offset_1 = offset; /* update offset history */
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
ip += mLength;
anchor = ip;
} }
/* found a match : store it */
ip += mLength;
anchor = ip;
if (ip <= ilimit) {
/* Fill Table */
hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;
@ -446,13 +440,13 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
while (ip <= ilimit) {
U32 const current2 = (U32)(ip-base);
U32 const repIndex2 = current2 - offset_2;
const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex)) /* intentional underflow */
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
{ U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH);
hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
ip += repLength2;
anchor = ip;
@ -474,8 +468,7 @@ size_t ZSTD_compressBlock_fast_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
ZSTD_compressionParameters const* cParams = &ms->cParams;
U32 const mls = cParams->minMatch;
U32 const mls = ms->cParams.minMatch;
switch(mls)
{
default: /* includes case 3 */
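Several hunks in this commit (zstd_double_fast.c above, this file, and zstd_lazy.c / zstd_opt.c below) replace the same three-line window clamp with a shared ZSTD_getLowestMatchIndex() helper. Reconstructing it from the inline code it replaces, plus the dictionary special case spelled out inline in ZSTD_HcFindBestMatch_generic further down, the helper plausibly looks like this sketch (not the verbatim header definition):

/* Lowest index still searchable: the valid window clamped to at most
 * (1 << windowLog) behind `curr`, except when a dictionary is loaded,
 * in which case the whole valid range stays searchable. */
static unsigned getLowestMatchIndex_sketch(unsigned lowestValid, unsigned curr,
                                           unsigned windowLog, int isDictionary)
{
    unsigned const maxDistance  = 1u << windowLog;
    unsigned const withinWindow =
        (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
    return isDictionary ? lowestValid : withinWindow;
}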

View File

@ -242,9 +242,7 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
const BYTE* const base = ms->window.base;
U32 const current = (U32)(ip-base);
U32 const maxDistance = 1U << cParams->windowLog;
U32 const windowValid = ms->window.lowLimit;
U32 const windowLow = (current - windowValid > maxDistance) ? current - maxDistance : windowValid;
U32 const windowLow = ZSTD_getLowestMatchIndex(ms, current, cParams->windowLog);
U32* const bt = ms->chainTable;
U32 const btLog = cParams->chainLog - 1;
@ -497,8 +495,10 @@ size_t ZSTD_HcFindBestMatch_generic (
const BYTE* const dictEnd = dictBase + dictLimit;
const U32 current = (U32)(ip-base);
const U32 maxDistance = 1U << cParams->windowLog;
const U32 lowValid = ms->window.lowLimit;
const U32 lowLimit = (current - lowValid > maxDistance) ? current - maxDistance : lowValid;
const U32 lowestValid = ms->window.lowLimit;
const U32 withinMaxDistance = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid;
const U32 isDictionary = (ms->loadedDictEnd != 0);
const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
const U32 minChain = current > chainSize ? current - chainSize : 0;
U32 nbAttempts = 1U << cParams->searchLog;
size_t ml=4-1;
@ -619,12 +619,14 @@ FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
/* *******************************
* Common parser - lazy strategy
*********************************/
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_lazy_generic(
typedef enum { search_hashChain, search_binaryTree } searchMethod_e;
FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_lazy_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize,
const U32 searchMethod, const U32 depth,
const searchMethod_e searchMethod, const U32 depth,
ZSTD_dictMode_e const dictMode)
{
const BYTE* const istart = (const BYTE*)src;
@ -640,8 +642,10 @@ size_t ZSTD_compressBlock_lazy_generic(
ZSTD_matchState_t* ms,
const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
searchMax_f const searchMax = dictMode == ZSTD_dictMatchState ?
(searchMethod ? ZSTD_BtFindBestMatch_dictMatchState_selectMLS : ZSTD_HcFindBestMatch_dictMatchState_selectMLS) :
(searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS);
(searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_dictMatchState_selectMLS
: ZSTD_HcFindBestMatch_dictMatchState_selectMLS) :
(searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_selectMLS
: ZSTD_HcFindBestMatch_selectMLS);
U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
const ZSTD_matchState_t* const dms = ms->dictMatchState;
@ -806,7 +810,7 @@ size_t ZSTD_compressBlock_lazy_generic(
/* store sequence */
_storeSequence:
{ size_t const litLength = start - anchor;
ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
anchor = ip = start + matchLength;
}
@ -824,7 +828,7 @@ size_t ZSTD_compressBlock_lazy_generic(
const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
ip += matchLength;
anchor = ip;
continue;
@ -839,7 +843,7 @@ size_t ZSTD_compressBlock_lazy_generic(
/* store sequence */
matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
ip += matchLength;
anchor = ip;
continue; /* faster when present ... (?) */
@ -850,7 +854,7 @@ size_t ZSTD_compressBlock_lazy_generic(
rep[1] = offset_2 ? offset_2 : savedOffset;
/* Return the last literals size */
return iend - anchor;
return (size_t)(iend - anchor);
}
@ -858,56 +862,56 @@ size_t ZSTD_compressBlock_btlazy2(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_noDict);
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
}
size_t ZSTD_compressBlock_lazy2(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_noDict);
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
}
size_t ZSTD_compressBlock_lazy(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_noDict);
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
}
size_t ZSTD_compressBlock_greedy(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_noDict);
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
}
size_t ZSTD_compressBlock_btlazy2_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_dictMatchState);
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_lazy2_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_dictMatchState);
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_lazy_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_dictMatchState);
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_greedy_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_dictMatchState);
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
}
@ -916,7 +920,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize,
const U32 searchMethod, const U32 depth)
const searchMethod_e searchMethod, const U32 depth)
{
const BYTE* const istart = (const BYTE*)src;
const BYTE* ip = istart;
@ -934,7 +938,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
typedef size_t (*searchMax_f)(
ZSTD_matchState_t* ms,
const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
searchMax_f searchMax = searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
U32 offset_1 = rep[0], offset_2 = rep[1];
@ -1047,7 +1051,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
/* store sequence */
_storeSequence:
{ size_t const litLength = start - anchor;
ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
anchor = ip = start + matchLength;
}
@ -1062,7 +1066,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */
ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
ip += matchLength;
anchor = ip;
continue; /* faster when present ... (?) */
@ -1075,7 +1079,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
rep[1] = offset_2;
/* Return the last literals size */
return iend - anchor;
return (size_t)(iend - anchor);
}
@ -1083,7 +1087,7 @@ size_t ZSTD_compressBlock_greedy_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 0);
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
}
size_t ZSTD_compressBlock_lazy_extDict(
@ -1091,7 +1095,7 @@ size_t ZSTD_compressBlock_lazy_extDict(
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 1);
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
}
size_t ZSTD_compressBlock_lazy2_extDict(
@ -1099,7 +1103,7 @@ size_t ZSTD_compressBlock_lazy2_extDict(
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 2);
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
}
size_t ZSTD_compressBlock_btlazy2_extDict(
@ -1107,5 +1111,5 @@ size_t ZSTD_compressBlock_btlazy2_extDict(
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 1, 2);
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
}
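The search-method selector in this file changes from a bare U32 flag to a named searchMethod_e enum, so call sites read as search_hashChain / search_binaryTree instead of 0 / 1. Because the parameter is a compile-time constant of a FORCE_INLINE_TEMPLATE function, each variant is still specialized; the searcher is picked once, up front, keeping the hot loop branch-free. A compact sketch of the dispatch pattern, with hypothetical function names:

#include <stddef.h>

typedef enum { search_hashChain, search_binaryTree } searchMethod_sketch_e;

typedef size_t (*searchMax_sketch_f)(const void* ip);

static size_t hcSearch_sketch(const void* ip) { (void)ip; return 0; }
static size_t btSearch_sketch(const void* ip) { (void)ip; return 0; }

/* Select the search function once; the loop then calls through the pointer. */
static searchMax_sketch_f selectSearch_sketch(searchMethod_sketch_e m)
{
    return (m == search_binaryTree) ? btSearch_sketch : hcSearch_sketch;
}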

View File

@ -49,9 +49,9 @@ size_t ZSTD_ldm_getTableSize(ldmParams_t params)
{
size_t const ldmHSize = ((size_t)1) << params.hashLog;
size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
size_t const ldmBucketSize =
((size_t)1) << (params.hashLog - ldmBucketSizeLog);
size_t const totalSize = ldmBucketSize + ldmHSize * sizeof(ldmEntry_t);
size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
+ ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
return params.enableLdm ? totalSize : 0;
}
@ -583,7 +583,7 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
rep[i] = rep[i-1];
rep[0] = sequence.offset;
/* Store the sequence */
ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength,
ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
sequence.offset + ZSTD_REP_MOVE,
sequence.matchLength - MINMATCH);
ip += sequence.matchLength;
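ZSTD_ldm_getTableSize() now wraps each table size in ZSTD_cwksp_alloc_size() before summing. The point is that the workspace arena introduced by this release can add per-allocation overhead (alignment padding, or sanitizer redzones in instrumented builds), so a size estimate has to sum the adjusted sizes rather than the raw byte counts. A hedged sketch of the idea only; the real helper's overhead model may differ:

#include <stddef.h>

/* cwksp_alloc_size_sketch(): what one allocation really costs inside the
 * arena. The overhead shown here is hypothetical alignment padding; the
 * real helper may instead account for ASAN redzones, or nothing at all. */
static size_t cwksp_alloc_size_sketch(size_t bytes)
{
    size_t const align = sizeof(void*);
    return (bytes + align - 1) & ~(align - 1);
}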

View File

@ -552,7 +552,6 @@ U32 ZSTD_insertBtAndGetAllMatches (
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
U32 const maxDistance = 1U << cParams->windowLog;
const BYTE* const base = ms->window.base;
U32 const current = (U32)(ip-base);
U32 const hashLog = cParams->hashLog;
@ -569,8 +568,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
const BYTE* const dictEnd = dictBase + dictLimit;
const BYTE* const prefixStart = base + dictLimit;
U32 const btLow = (btMask >= current) ? 0 : current - btMask;
U32 const windowValid = ms->window.lowLimit;
U32 const windowLow = ((current - windowValid) > maxDistance) ? current - maxDistance : windowValid;
U32 const windowLow = ZSTD_getLowestMatchIndex(ms, current, cParams->windowLog);
U32 const matchLow = windowLow ? windowLow : 1;
U32* smallerPtr = bt + 2*(current&btMask);
U32* largerPtr = bt + 2*(current&btMask) + 1;
@ -674,19 +672,21 @@ U32 ZSTD_insertBtAndGetAllMatches (
while (nbCompares-- && (matchIndex >= matchLow)) {
U32* const nextPtr = bt + 2*(matchIndex & btMask);
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
const BYTE* match;
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
assert(current > matchIndex);
if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
assert(matchIndex+matchLength >= dictLimit); /* ensure the condition is correct when !extDict */
match = base + matchIndex;
if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
} else {
match = dictBase + matchIndex;
assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
if (matchIndex+matchLength >= dictLimit)
match = base + matchIndex; /* prepare for match[matchLength] */
match = base + matchIndex; /* prepare for match[matchLength] read */
}
if (matchLength > bestLength) {
@ -1098,7 +1098,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
assert(anchor + llen <= iend);
ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
ZSTD_storeSeq(seqStore, llen, anchor, offCode, mlen-MINMATCH);
ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen-MINMATCH);
anchor += advance;
ip = anchor;
} }

View File

@ -668,7 +668,7 @@ static void ZSTDMT_compressionJob(void* jobDescription)
/* init */
if (job->cdict) {
size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, jobParams, job->fullFrameSize);
size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize);
assert(job->firstJob); /* only allowed for first job */
if (ZSTD_isError(initError)) JOB_ERROR(initError);
} else { /* srcStart points at reloaded section */
@ -680,7 +680,7 @@ static void ZSTDMT_compressionJob(void* jobDescription)
job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */
ZSTD_dtlm_fast,
NULL, /*cdict*/
jobParams, pledgedSrcSize);
&jobParams, pledgedSrcSize);
if (ZSTD_isError(initError)) JOB_ERROR(initError);
} }
@ -927,12 +927,18 @@ static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
unsigned jobID;
DEBUGLOG(3, "ZSTDMT_releaseAllJobResources");
for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
/* Copy the mutex/cond out */
ZSTD_pthread_mutex_t const mutex = mtctx->jobs[jobID].job_mutex;
ZSTD_pthread_cond_t const cond = mtctx->jobs[jobID].job_cond;
DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start);
ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
mtctx->jobs[jobID].dstBuff = g_nullBuffer;
mtctx->jobs[jobID].cSize = 0;
/* Clear the job description, but keep the mutex/cond */
memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID]));
mtctx->jobs[jobID].job_mutex = mutex;
mtctx->jobs[jobID].job_cond = cond;
}
memset(mtctx->jobs, 0, (mtctx->jobIDMask+1)*sizeof(ZSTDMT_jobDescription));
mtctx->inBuff.buffer = g_nullBuffer;
mtctx->inBuff.filled = 0;
mtctx->allJobsCompleted = 1;
@ -1028,9 +1034,9 @@ size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter,
/* Sets parameters relevant to the compression job,
* initializing others to default values. */
static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(const ZSTD_CCtx_params* params)
{
ZSTD_CCtx_params jobParams = params;
ZSTD_CCtx_params jobParams = *params;
/* Clear parameters related to multithreading */
jobParams.forceWindow = 0;
jobParams.nbWorkers = 0;
@ -1151,16 +1157,16 @@ size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx)
/* ===== Multi-threaded compression ===== */
/* ------------------------------------------ */
static unsigned ZSTDMT_computeTargetJobLog(ZSTD_CCtx_params const params)
static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params)
{
unsigned jobLog;
if (params.ldmParams.enableLdm) {
if (params->ldmParams.enableLdm) {
/* In Long Range Mode, the windowLog is typically oversized.
* In which case, it's preferable to determine the jobSize
* based on chainLog instead. */
jobLog = MAX(21, params.cParams.chainLog + 4);
jobLog = MAX(21, params->cParams.chainLog + 4);
} else {
jobLog = MAX(20, params.cParams.windowLog + 2);
jobLog = MAX(20, params->cParams.windowLog + 2);
}
return MIN(jobLog, (unsigned)ZSTDMT_JOBLOG_MAX);
}
@ -1193,27 +1199,27 @@ static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat)
return ovlog;
}
static size_t ZSTDMT_computeOverlapSize(ZSTD_CCtx_params const params)
static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params)
{
int const overlapRLog = 9 - ZSTDMT_overlapLog(params.overlapLog, params.cParams.strategy);
int ovLog = (overlapRLog >= 8) ? 0 : (params.cParams.windowLog - overlapRLog);
int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy);
int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog);
assert(0 <= overlapRLog && overlapRLog <= 8);
if (params.ldmParams.enableLdm) {
if (params->ldmParams.enableLdm) {
/* In Long Range Mode, the windowLog is typically oversized.
* In which case, it's preferable to determine the jobSize
* based on chainLog instead.
* Then, ovLog becomes a fraction of the jobSize, rather than windowSize */
ovLog = MIN(params.cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
ovLog = MIN(params->cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
- overlapRLog;
}
assert(0 <= ovLog && ovLog <= ZSTD_WINDOWLOG_MAX);
DEBUGLOG(4, "overlapLog : %i", params.overlapLog);
DEBUGLOG(4, "overlapLog : %i", params->overlapLog);
DEBUGLOG(4, "overlap size : %i", 1 << ovLog);
return (ovLog==0) ? 0 : (size_t)1 << ovLog;
}
static unsigned
ZSTDMT_computeNbJobs(ZSTD_CCtx_params params, size_t srcSize, unsigned nbWorkers)
ZSTDMT_computeNbJobs(const ZSTD_CCtx_params* params, size_t srcSize, unsigned nbWorkers)
{
assert(nbWorkers>0);
{ size_t const jobSizeTarget = (size_t)1 << ZSTDMT_computeTargetJobLog(params);
@ -1236,9 +1242,9 @@ static size_t ZSTDMT_compress_advanced_internal(
const ZSTD_CDict* cdict,
ZSTD_CCtx_params params)
{
ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(params);
size_t const overlapSize = ZSTDMT_computeOverlapSize(params);
unsigned const nbJobs = ZSTDMT_computeNbJobs(params, srcSize, params.nbWorkers);
ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(&params);
size_t const overlapSize = ZSTDMT_computeOverlapSize(&params);
unsigned const nbJobs = ZSTDMT_computeNbJobs(&params, srcSize, params.nbWorkers);
size_t const proposedJobSize = (srcSize + (nbJobs-1)) / nbJobs;
size_t const avgJobSize = (((proposedJobSize-1) & 0x1FFFF) < 0x7FFF) ? proposedJobSize + 0xFFFF : proposedJobSize; /* avoid too small last block */
const char* const srcStart = (const char*)src;
@ -1256,7 +1262,7 @@ static size_t ZSTDMT_compress_advanced_internal(
ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0];
DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: fallback to single-thread mode");
if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, jobParams.fParams);
return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, jobParams);
return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, &jobParams);
}
assert(avgJobSize >= 256 KB); /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */
@ -1404,12 +1410,12 @@ size_t ZSTDMT_initCStream_internal(
mtctx->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN); /* do not trigger multi-threading when srcSize is too small */
if (mtctx->singleBlockingThread) {
ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(params);
ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(&params);
DEBUGLOG(5, "ZSTDMT_initCStream_internal: switch to single blocking thread mode");
assert(singleThreadParams.nbWorkers == 0);
return ZSTD_initCStream_internal(mtctx->cctxPool->cctx[0],
dict, dictSize, cdict,
singleThreadParams, pledgedSrcSize);
&singleThreadParams, pledgedSrcSize);
}
DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers);
@ -1435,11 +1441,11 @@ size_t ZSTDMT_initCStream_internal(
mtctx->cdict = cdict;
}
mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(params);
mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(&params);
DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10));
mtctx->targetSectionSize = params.jobSize;
if (mtctx->targetSectionSize == 0) {
mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(params);
mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(&params);
}
assert(mtctx->targetSectionSize <= (size_t)ZSTDMT_JOBSIZE_MAX);
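A recurring change in this file: helpers that took ZSTD_CCtx_params by value (a large struct) now take const ZSTD_CCtx_params*, avoiding a full struct copy at every call while keeping the callee read-only. The same hunk set also shows ZSTDMT_releaseAllJobResources() copying each job's mutex/cond out before the memset() and restoring them after, so only the job description is cleared, never its synchronization primitives. A minimal sketch of the by-pointer convention, using a hypothetical struct:

typedef struct { unsigned windowLog, chainLog, jobSize; /* ...many more... */ } params_sketch;

/* Before: unsigned f(params_sketch p)  -- copies the whole struct per call.
 * After : by const pointer -- one register, and the callee still cannot
 * mutate the caller's parameters. */
static unsigned targetJobLog_sketch(const params_sketch* p)
{
    return p->chainLog ? p->chainLog + 4 : p->windowLog + 2;
}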

View File

@ -61,7 +61,9 @@
* Error Management
****************************************************************/
#define HUF_isError ERR_isError
#ifndef CHECK_F
#define CHECK_F(f) { size_t const err_ = (f); if (HUF_isError(err_)) return err_; }
#endif
/* **************************************************************

View File

@ -88,10 +88,7 @@ size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }
static size_t ZSTD_startingInputLength(ZSTD_format_e format)
{
size_t const startingInputLength = (format==ZSTD_f_zstd1_magicless) ?
ZSTD_FRAMEHEADERSIZE_PREFIX - ZSTD_FRAMEIDSIZE :
ZSTD_FRAMEHEADERSIZE_PREFIX;
ZSTD_STATIC_ASSERT(ZSTD_FRAMEHEADERSIZE_PREFIX >= ZSTD_FRAMEIDSIZE);
size_t const startingInputLength = ZSTD_FRAMEHEADERSIZE_PREFIX(format);
/* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
return startingInputLength;
@ -376,7 +373,7 @@ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
{
unsigned long long totalDstSize = 0;
while (srcSize >= ZSTD_FRAMEHEADERSIZE_PREFIX) {
while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) {
U32 const magicNumber = MEM_readLE32(src);
if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
@ -574,9 +571,10 @@ void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
}
/** ZSTD_insertBlock() :
insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
* insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
{
DEBUGLOG(5, "ZSTD_insertBlock: %u bytes", (unsigned)blockSize);
ZSTD_checkContinuity(dctx, blockStart);
dctx->previousDstEnd = (const char*)blockStart + blockSize;
return blockSize;
@ -628,11 +626,12 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
/* check */
RETURN_ERROR_IF(
remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN+ZSTD_blockHeaderSize,
remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN(dctx->format)+ZSTD_blockHeaderSize,
srcSize_wrong);
/* Frame Header */
{ size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_FRAMEHEADERSIZE_PREFIX);
{ size_t const frameHeaderSize = ZSTD_frameHeaderSize_internal(
ip, ZSTD_FRAMEHEADERSIZE_PREFIX(dctx->format), dctx->format);
if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize,
srcSize_wrong);
@ -713,7 +712,7 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
dictSize = ZSTD_DDict_dictSize(ddict);
}
while (srcSize >= ZSTD_FRAMEHEADERSIZE_PREFIX) {
while (srcSize >= ZSTD_startingInputLength(dctx->format)) {
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
if (ZSTD_isLegacy(src, srcSize)) {
@ -1097,7 +1096,7 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
for (i=0; i<3; i++) {
U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
RETURN_ERROR_IF(rep==0 || rep >= dictContentSize,
RETURN_ERROR_IF(rep==0 || rep > dictContentSize,
dictionary_corrupted);
entropy->rep[i] = rep;
} }
@ -1266,7 +1265,7 @@ size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
{
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
ZSTD_clearDict(dctx);
if (dict && dictSize >= 8) {
if (dict && dictSize != 0) {
dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation);
dctx->ddict = dctx->ddictLocal;
@ -1299,14 +1298,14 @@ size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSiz
/* ZSTD_initDStream_usingDict() :
* return : expected size, aka ZSTD_FRAMEHEADERSIZE_PREFIX.
* return : expected size, aka ZSTD_startingInputLength().
* this function cannot fail */
size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
{
DEBUGLOG(4, "ZSTD_initDStream_usingDict");
FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) );
FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );
return ZSTD_FRAMEHEADERSIZE_PREFIX;
return ZSTD_startingInputLength(zds->format);
}
/* note : this variant can't fail */
@ -1323,16 +1322,16 @@ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
{
FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) );
FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) );
return ZSTD_FRAMEHEADERSIZE_PREFIX;
return ZSTD_startingInputLength(dctx->format);
}
/* ZSTD_resetDStream() :
* return : expected size, aka ZSTD_FRAMEHEADERSIZE_PREFIX.
* return : expected size, aka ZSTD_startingInputLength().
* this function cannot fail */
size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
{
FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only));
return ZSTD_FRAMEHEADERSIZE_PREFIX;
return ZSTD_startingInputLength(dctx->format);
}
@ -1563,7 +1562,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
zds->lhSize += remainingInput;
}
input->pos = input->size;
return (MAX(ZSTD_FRAMEHEADERSIZE_MIN, hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
}
assert(ip != NULL);
memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
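The decompressor hunks above replace the fixed ZSTD_FRAMEHEADERSIZE_PREFIX / ZSTD_FRAMEHEADERSIZE_MIN constants with format-parameterized macros, because a magicless frame (ZSTD_f_zstd1_magicless) is ZSTD_FRAMEIDSIZE bytes shorter than a standard one. The values are derivable from the old ZSTD_startingInputLength() body shown above; a self-contained sketch:

#include <stddef.h>

typedef enum { f_zstd1_sketch, f_zstd1_magicless_sketch } format_sketch_e;

#define FRAMEIDSIZE_SKETCH 4  /* 4-byte magic number */

/* Standard frames need magic (4) + frame-header descriptor (1) = 5 bytes
 * before the full header size is knowable; magicless frames need only the
 * descriptor. Reconstructed from the replaced code above; a sketch. */
static size_t frameHeaderSizePrefix_sketch(format_sketch_e f)
{
    return (f == f_zstd1_magicless_sketch) ? 5 - FRAMEIDSIZE_SKETCH : 5;
}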

View File

@ -79,6 +79,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
{
DEBUGLOG(5, "ZSTD_decodeLiteralsBlock");
RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected);
{ const BYTE* const istart = (const BYTE*) src;
@ -87,6 +88,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
switch(litEncType)
{
case set_repeat:
DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block");
RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted);
/* fall-through */
@ -116,7 +118,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
/* 2 - 2 - 18 - 18 */
lhSize = 5;
litSize = (lhc >> 4) & 0x3FFFF;
litCSize = (lhc >> 22) + (istart[4] << 10);
litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);
break;
}
RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);
@ -391,7 +393,8 @@ ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
symbolNext[s] = 1;
} else {
if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
symbolNext[s] = normalizedCounter[s];
assert(normalizedCounter[s]>=0);
symbolNext[s] = (U16)normalizedCounter[s];
} } }
memcpy(dt, &DTableH, sizeof(DTableH));
}
@ -570,38 +573,118 @@ typedef struct {
size_t pos;
} seqState_t;
/*! ZSTD_overlapCopy8() :
* Copies 8 bytes from ip to op and updates op and ip where ip <= op.
* If the offset is < 8 then the offset is spread to at least 8 bytes.
*
* Precondition: *ip <= *op
* Postcondition: *op - *ip >= 8
*/
static void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) {
assert(*ip <= *op);
if (offset < 8) {
/* close range match, overlap */
static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
int const sub2 = dec64table[offset];
(*op)[0] = (*ip)[0];
(*op)[1] = (*ip)[1];
(*op)[2] = (*ip)[2];
(*op)[3] = (*ip)[3];
*ip += dec32table[offset];
ZSTD_copy4(*op+4, *ip);
*ip -= sub2;
} else {
ZSTD_copy8(*op, *ip);
}
*ip += 8;
*op += 8;
assert(*op - *ip >= 8);
}
/* ZSTD_execSequenceLast7():
* exceptional case : decompress a match starting within last 7 bytes of output buffer.
* requires more careful checks, to ensure there is no overflow.
* performance does not matter though.
* note : this case is supposed to be never generated "naturally" by reference encoder,
* since in most cases it needs at least 8 bytes to look for a match.
* but it's allowed by the specification. */
/*! ZSTD_safecopy() :
* Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer
* and write up to 16 bytes past oend_w (op >= oend_w is allowed).
* This function is only called in the uncommon case where the sequence is near the end of the block. It
* should be fast for a single long sequence, but can be slow for several short sequences.
*
* @param ovtype controls the overlap detection
* - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
* - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart.
* The src buffer must be before the dst buffer.
*/
static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) {
ptrdiff_t const diff = op - ip;
BYTE* const oend = op + length;
assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) ||
(ovtype == ZSTD_overlap_src_before_dst && diff >= 0));
if (length < 8) {
/* Handle short lengths. */
while (op < oend) *op++ = *ip++;
return;
}
if (ovtype == ZSTD_overlap_src_before_dst) {
/* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */
assert(length >= 8);
ZSTD_overlapCopy8(&op, &ip, diff);
assert(op - ip >= 8);
assert(op <= oend);
}
if (oend <= oend_w) {
/* No risk of overwrite. */
ZSTD_wildcopy(op, ip, length, ovtype);
return;
}
if (op <= oend_w) {
/* Wildcopy until we get close to the end. */
assert(oend > oend_w);
ZSTD_wildcopy(op, ip, oend_w - op, ovtype);
ip += oend_w - op;
op = oend_w;
}
/* Handle the leftovers. */
while (op < oend) *op++ = *ip++;
}
/* ZSTD_execSequenceEnd():
* This version handles cases that are near the end of the output buffer. It requires
* more careful checks to make sure there is no overflow. By separating out these hard
* and unlikely cases, we can speed up the common cases.
*
* NOTE: This function needs to be fast for a single long sequence, but doesn't need
* to be optimized for many small sequences, since those fall into ZSTD_execSequence().
*/
FORCE_NOINLINE
size_t ZSTD_execSequenceLast7(BYTE* op,
BYTE* const oend, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
size_t ZSTD_execSequenceEnd(BYTE* op,
BYTE* const oend, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
{
BYTE* const oLitEnd = op + sequence.litLength;
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
const BYTE* const iLitEnd = *litPtr + sequence.litLength;
const BYTE* match = oLitEnd - sequence.offset;
BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
/* check */
RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must fit within dstBuffer");
/* bounds checks */
assert(oLitEnd < oMatchEnd);
RETURN_ERROR_IF(oMatchEnd > oend, dstSize_tooSmall, "last match must fit within dstBuffer");
RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "try to read beyond literal buffer");
/* copy literals */
while (op < oLitEnd) *op++ = *(*litPtr)++;
ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap);
op = oLitEnd;
*litPtr = iLitEnd;
/* copy Match */
if (sequence.offset > (size_t)(oLitEnd - base)) {
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
/* offset beyond prefix */
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - vBase),corruption_detected);
match = dictEnd - (base-match);
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected);
match = dictEnd - (prefixStart-match);
if (match + sequence.matchLength <= dictEnd) {
memmove(oLitEnd, match, sequence.matchLength);
return sequenceLength;
@ -611,13 +694,12 @@ size_t ZSTD_execSequenceLast7(BYTE* op,
memmove(oLitEnd, match, length1);
op = oLitEnd + length1;
sequence.matchLength -= length1;
match = base;
match = prefixStart;
} }
while (op < oMatchEnd) *op++ = *match++;
ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
return sequenceLength;
}
HINT_INLINE
size_t ZSTD_execSequence(BYTE* op,
BYTE* const oend, seq_t sequence,
@ -631,20 +713,29 @@ size_t ZSTD_execSequence(BYTE* op,
const BYTE* const iLitEnd = *litPtr + sequence.litLength;
const BYTE* match = oLitEnd - sequence.offset;
/* check */
RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
/* Errors and uncommon cases handled here. */
assert(oLitEnd < oMatchEnd);
if (iLitEnd > litLimit || oMatchEnd > oend_w)
return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
/* copy Literals */
if (sequence.litLength > 8)
ZSTD_wildcopy_16min(op, (*litPtr), sequence.litLength, ZSTD_no_overlap); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
else
ZSTD_copy8(op, *litPtr);
/* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */
assert(iLitEnd <= litLimit /* Literal length is in bounds */);
assert(oLitEnd <= oend_w /* Can wildcopy literals */);
assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
/* Copy Literals:
* Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9.
* We likely don't need the full 32-byte wildcopy.
*/
assert(WILDCOPY_OVERLENGTH >= 16);
ZSTD_copy16(op, (*litPtr));
if (sequence.litLength > 16) {
ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap);
}
op = oLitEnd;
*litPtr = iLitEnd; /* update for next sequence */
/* copy Match */
/* Copy Match */
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
/* offset beyond prefix -> go into extDict */
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected);
@ -659,123 +750,33 @@ size_t ZSTD_execSequence(BYTE* op,
op = oLitEnd + length1;
sequence.matchLength -= length1;
match = prefixStart;
if (op > oend_w || sequence.matchLength < MINMATCH) {
U32 i;
for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
return sequenceLength;
}
} }
/* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
/* Match within prefix of 1 or more bytes */
assert(op <= oMatchEnd);
assert(oMatchEnd <= oend_w);
assert(match >= prefixStart);
assert(sequence.matchLength >= 1);
/* match within prefix */
if (sequence.offset < 8) {
/* close range match, overlap */
static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
int const sub2 = dec64table[sequence.offset];
op[0] = match[0];
op[1] = match[1];
op[2] = match[2];
op[3] = match[3];
match += dec32table[sequence.offset];
ZSTD_copy4(op+4, match);
match -= sub2;
} else {
ZSTD_copy8(op, match);
/* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
* without overlap checking.
*/
if (sequence.offset >= WILDCOPY_VECLEN) {
/* We bet on a full wildcopy for matches, since we expect matches to be
* longer than literals (in general). In silesia, ~10% of matches are longer
* than 16 bytes.
*/
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
return sequenceLength;
}
op += 8; match += 8;
assert(sequence.offset < WILDCOPY_VECLEN);
if (oMatchEnd > oend-(16-MINMATCH)) {
if (op < oend_w) {
ZSTD_wildcopy(op, match, oend_w - op, ZSTD_overlap_src_before_dst);
match += oend_w - op;
op = oend_w;
}
while (op < oMatchEnd) *op++ = *match++;
} else {
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst); /* works even if matchLength < 8 */
}
return sequenceLength;
}
/* Copy 8 bytes and spread the offset to be >= 8. */
ZSTD_overlapCopy8(&op, &match, sequence.offset);
HINT_INLINE
size_t ZSTD_execSequenceLong(BYTE* op,
BYTE* const oend, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
const BYTE* const prefixStart, const BYTE* const dictStart, const BYTE* const dictEnd)
{
BYTE* const oLitEnd = op + sequence.litLength;
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
const BYTE* const iLitEnd = *litPtr + sequence.litLength;
const BYTE* match = sequence.match;
/* check */
RETURN_ERROR_IF(oMatchEnd > oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
if (oLitEnd > oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, dictStart, dictEnd);
/* copy Literals */
if (sequence.litLength > 8)
ZSTD_wildcopy_16min(op, *litPtr, sequence.litLength, ZSTD_no_overlap); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
else
ZSTD_copy8(op, *litPtr); /* note : op <= oLitEnd <= oend_w == oend - 8 */
op = oLitEnd;
*litPtr = iLitEnd; /* update for next sequence */
/* copy Match */
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
/* offset beyond prefix */
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - dictStart), corruption_detected);
if (match + sequence.matchLength <= dictEnd) {
memmove(oLitEnd, match, sequence.matchLength);
return sequenceLength;
}
/* span extDict & currentPrefixSegment */
{ size_t const length1 = dictEnd - match;
memmove(oLitEnd, match, length1);
op = oLitEnd + length1;
sequence.matchLength -= length1;
match = prefixStart;
if (op > oend_w || sequence.matchLength < MINMATCH) {
U32 i;
for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
return sequenceLength;
}
} }
assert(op <= oend_w);
assert(sequence.matchLength >= MINMATCH);
/* match within prefix */
if (sequence.offset < 8) {
/* close range match, overlap */
static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
int const sub2 = dec64table[sequence.offset];
op[0] = match[0];
op[1] = match[1];
op[2] = match[2];
op[3] = match[3];
match += dec32table[sequence.offset];
ZSTD_copy4(op+4, match);
match -= sub2;
} else {
ZSTD_copy8(op, match);
}
op += 8; match += 8;
if (oMatchEnd > oend-(16-MINMATCH)) {
if (op < oend_w) {
ZSTD_wildcopy(op, match, oend_w - op, ZSTD_overlap_src_before_dst);
match += oend_w - op;
op = oend_w;
}
while (op < oMatchEnd) *op++ = *match++;
} else {
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst); /* works even if matchLength < 8 */
/* If the match length is > 8 bytes, then continue with the wildcopy. */
if (sequence.matchLength > 8) {
assert(op < oMatchEnd);
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);
}
return sequenceLength;
}
@ -1095,7 +1096,7 @@ ZSTD_decompressSequencesLong_body(
/* decode and decompress */
for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
PREFETCH_L1(sequence.match); PREFETCH_L1(sequence.match + sequence.matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
sequences[seqNb & STORED_SEQS_MASK] = sequence;
@ -1106,7 +1107,7 @@ ZSTD_decompressSequencesLong_body(
/* finish queue */
seqNb -= seqAdvance;
for ( ; seqNb<nbSeq ; seqNb++) {
size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
op += oneSeqSize;
}
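The rewrite above consolidates ZSTD_execSequence() / ZSTD_execSequenceLong() and factors the close-range overlap trick into ZSTD_overlapCopy8(): for offsets < 8 it copies 4+4 bytes with table-driven source adjustments so that afterwards the effective offset is >= 8 and the rest of the match can be copied in wide chunks. A standalone trace of that table trick for offset 3 (safe here because the match starts 8 bytes into the buffer, keeping the transiently rewound pointer in range):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

int main(void)
{
    static const unsigned dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };  /* added */
    static const int      dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };  /* subtracted */
    unsigned char buf[32] = "01234567abc";      /* "abc" at index 8, zeros after */
    unsigned char*       op = buf + 11;         /* extend the match from here    */
    const unsigned char* ip = buf + 8;          /* offset = op - ip = 3          */
    size_t const offset = (size_t)(op - ip);

    op[0] = ip[0]; op[1] = ip[1]; op[2] = ip[2]; op[3] = ip[3]; /* byte-wise: regions overlap */
    ip += dec32table[offset];
    op[4] = ip[0]; op[5] = ip[1]; op[6] = ip[2]; op[7] = ip[3]; /* the ZSTD_copy4 step */
    ip -= dec64table[offset];
    op += 8; ip += 8;
    assert((size_t)(op - ip) >= 8);             /* offset spread to >= 8 */
    printf("%.11s\n", buf + 8);                 /* prints: abcabcabcab */
    return 0;
}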

View File

@ -36,16 +36,17 @@ extern "C" {
*****************************************************************/
/* Deprecation warnings */
/* Should these warnings be a problem,
it is generally possible to disable them,
typically with -Wno-deprecated-declarations for gcc
or _CRT_SECURE_NO_WARNINGS in Visual.
Otherwise, it's also possible to define ZBUFF_DISABLE_DEPRECATE_WARNINGS */
* it is generally possible to disable them,
* typically with -Wno-deprecated-declarations for gcc
* or _CRT_SECURE_NO_WARNINGS in Visual.
* Otherwise, it's also possible to define ZBUFF_DISABLE_DEPRECATE_WARNINGS
*/
#ifdef ZBUFF_DISABLE_DEPRECATE_WARNINGS
# define ZBUFF_DEPRECATED(message) ZSTDLIB_API /* disable deprecation warnings */
#else
# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
# define ZBUFF_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_API
# elif (defined(__GNUC__) && (__GNUC__ >= 5)) || defined(__clang__)
# elif (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) || defined(__clang__)
# define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated(message)))
# elif defined(__GNUC__) && (__GNUC__ >= 3)
# define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated))
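One detail worth noting in the hunk above: the gate for __attribute__((deprecated(message))) now requires gcc > 4.5 (or clang), since older gcc only supports the attribute without a message string. A self-contained sketch of such a version-gated macro and its use on a hypothetical symbol:

#include <stddef.h>

#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
#  define DEPRECATED_SKETCH(msg) __attribute__((deprecated(msg)))
#elif defined(__GNUC__) && (__GNUC__ >= 3)
#  define DEPRECATED_SKETCH(msg) __attribute__((deprecated))  /* no message pre-4.5 */
#else
#  define DEPRECATED_SKETCH(msg)
#endif

/* Hypothetical symbol: any caller gets a deprecation warning at compile time. */
DEPRECATED_SKETCH("use the streaming API instead")
static size_t old_api_sketch(void) { return 0; }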

View File

@ -638,8 +638,8 @@ void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLeve
"compared to the source size %u! "
"size(source)/size(dictionary) = %f, but it should be >= "
"10! This may lead to a subpar dictionary! We recommend "
"training on sources at least 10x, and up to 100x the "
"size of the dictionary!\n", (U32)maxDictSize,
"training on sources at least 10x, and preferably 100x "
"the size of the dictionary! \n", (U32)maxDictSize,
(U32)nbDmers, ratio);
}
@ -919,13 +919,12 @@ void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters,
}
}
/* Save the dictionary, parameters, and size */
if (!dict) {
return;
if (dict) {
memcpy(best->dict, dict, dictSize);
best->dictSize = dictSize;
best->parameters = parameters;
best->compressedSize = compressedSize;
}
memcpy(best->dict, dict, dictSize);
best->dictSize = dictSize;
best->parameters = parameters;
best->compressedSize = compressedSize;
}
if (liveJobs == 0) {
ZSTD_pthread_cond_broadcast(&best->cond);
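The COVER_best_finish() change above inverts an early return into `if (dict) { ... }` so the liveJobs bookkeeping and the broadcast below run on every path; with the old early return, a job that finished without a dictionary could plausibly leave waiters on best->cond blocked forever. A minimal self-contained sketch of the invariant, with a hypothetical state type:

#include <pthread.h>

/* Every exit path must reach the broadcast, or waiters on `cond` can hang. */
typedef struct {
    pthread_mutex_t mutex;
    pthread_cond_t  cond;
    unsigned        liveJobs;
} best_state_sketch;

static void job_finish_sketch(best_state_sketch* s, const void* result)
{
    pthread_mutex_lock(&s->mutex);
    s->liveJobs--;
    if (result) {
        /* save the result ... (only when the job produced one) */
    }
    /* unconditionally: wake waiters once the last job retires */
    if (s->liveJobs == 0) pthread_cond_broadcast(&s->cond);
    pthread_mutex_unlock(&s->mutex);
}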

View File

@ -571,7 +571,7 @@ static void ZDICT_fillNoise(void* buffer, size_t length)
unsigned const prime1 = 2654435761U;
unsigned const prime2 = 2246822519U;
unsigned acc = prime1;
size_t p=0;;
size_t p=0;
for (p=0; p<length; p++) {
acc *= prime2;
((unsigned char*)buffer)[p] = (unsigned char)(acc >> 21);

View File

@ -346,7 +346,7 @@ FORCE_INLINE unsigned FSE_highbit32 (U32 val)
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (GCC_VERSION >= 304) /* GCC Intrinsic */
return 31 - __builtin_clz (val);
return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
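The `31 - __builtin_clz(val)` to `__builtin_clz(val) ^ 31` rewrite above recurs across all the legacy decoders that follow. For nonzero 32-bit val, clz(val) lies in [0,31], and for any x in that range 31 - x == x ^ 31 (31 is all-ones in the low 5 bits), so the forms are equivalent; the XOR form tends to let x86 compilers emit a bare BSR instead of BSR plus a subtract. A quick self-checking demo (assumes 32-bit unsigned int and a GCC/Clang builtin):

#include <assert.h>

/* highbit32_sketch(): index of the highest set bit; val must be nonzero. */
static unsigned highbit32_sketch(unsigned val)
{
    assert(val != 0);
    return (unsigned)(__builtin_clz(val) ^ 31);   /* == 31 - clz(val) */
}

int main(void)
{
    unsigned x;
    for (x = 0; x < 32; x++) assert(31 - x == (x ^ 31));  /* identity on [0,31] */
    assert(highbit32_sketch(1) == 0);
    assert(highbit32_sketch(0x80000000u) == 31);
    return 0;
}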

View File

@ -353,7 +353,7 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val)
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return 31 - __builtin_clz (val);
return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
@ -2889,6 +2889,7 @@ static size_t ZSTD_decodeLiteralsBlock(void* ctx,
const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
if (litSize > srcSize-11) /* risk of reading too far with wildcopy */
{
if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
if (litSize > srcSize-3) return ERROR(corruption_detected);
memcpy(dctx->litBuffer, istart, litSize);
dctx->litPtr = dctx->litBuffer;
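The two-line guard added to each legacy decoder here (and in the two files below) bounds litSize against both the fixed literal buffer (BLOCKSIZE) and the bytes actually remaining in the source before the memcpy; without it, a corrupted literals header could drive a buffer overflow. A standalone sketch of the hardened path, where the BLOCKSIZE value is an assumption for illustration:

#include <string.h>

#define BLOCKSIZE_SKETCH (128 * 1024)   /* assumed legacy block size */

/* Copy a raw-literals section only after validating both bounds. */
static int copy_raw_literals_sketch(unsigned char* litBuffer,
                                    const unsigned char* istart,
                                    size_t litSize, size_t srcSize)
{
    if (litSize > BLOCKSIZE_SKETCH) return -1;           /* dst would overflow  */
    if (srcSize < 3 || litSize > srcSize - 3) return -1; /* src would over-read */
    memcpy(litBuffer, istart, litSize);
    return 0;
}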

View File

@ -356,7 +356,7 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val)
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return 31 - __builtin_clz (val);
return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
@ -2530,6 +2530,7 @@ static size_t ZSTD_decodeLiteralsBlock(void* ctx,
const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
if (litSize > srcSize-11) /* risk of reading too far with wildcopy */
{
if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
if (litSize > srcSize-3) return ERROR(corruption_detected);
memcpy(dctx->litBuffer, istart, litSize);
dctx->litPtr = dctx->litBuffer;


@ -627,7 +627,7 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val)
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return 31 - __builtin_clz (val);
return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
@ -2655,6 +2655,7 @@ static size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
if (litSize > srcSize-11) /* risk of reading too far with wildcopy */
{
if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
if (litSize > srcSize-3) return ERROR(corruption_detected);
memcpy(dctx->litBuffer, istart, litSize);
dctx->litPtr = dctx->litBuffer;
@ -3034,9 +3035,12 @@ static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
{
/* blockType == blockCompressed */
const BYTE* ip = (const BYTE*)src;
size_t litCSize;
if (srcSize > BLOCKSIZE) return ERROR(corruption_detected);
/* Decode literals sub-block */
size_t litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
if (ZSTD_isError(litCSize)) return litCSize;
ip += litCSize;
srcSize -= litCSize;


@ -756,7 +756,7 @@ MEM_STATIC unsigned BITv05_highbit32 (U32 val)
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return 31 - __builtin_clz (val);
return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;


@ -860,7 +860,7 @@ MEM_STATIC unsigned BITv06_highbit32 ( U32 val)
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return 31 - __builtin_clz (val);
return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;


@ -530,7 +530,7 @@ MEM_STATIC unsigned BITv07_highbit32 (U32 val)
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return 31 - __builtin_clz (val);
return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;


@ -3,8 +3,9 @@
# BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
prefix=@PREFIX@
libdir=@LIBDIR@
includedir=@INCLUDEDIR@
exec_prefix=${prefix}
includedir=${prefix}/include
libdir=${exec_prefix}/lib
Name: zstd
Description: fast lossless compression algorithm library
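
Since `libdir` and `includedir` are now derived from `${prefix}` rather than baked in at build time, the package can be relocated with a single variable override; for instance, `pkg-config --define-variable=prefix=/staging/usr --cflags libzstd` should then print `-I/staging/usr/include`.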


@ -15,6 +15,7 @@ extern "C" {
#define ZSTD_H_235446
/* ====== Dependency ======*/
#include <limits.h> /* INT_MAX */
#include <stddef.h> /* size_t */
@ -71,7 +72,7 @@ extern "C" {
/*------ Version ------*/
#define ZSTD_VERSION_MAJOR 1
#define ZSTD_VERSION_MINOR 4
#define ZSTD_VERSION_RELEASE 2
#define ZSTD_VERSION_RELEASE 4
#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
ZSTDLIB_API unsigned ZSTD_versionNumber(void); /**< to check runtime library version */
@ -196,9 +197,13 @@ ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx);
/*! ZSTD_compressCCtx() :
* Same as ZSTD_compress(), using an explicit ZSTD_CCtx
* The function will compress at requested compression level,
* ignoring any other parameter */
* Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
* Important : in order to behave similarly to `ZSTD_compress()`,
* this function compresses at requested compression level,
* __ignoring any other parameter__ .
* If any advanced parameter was set using the advanced API,
* they will all be reset. Only `compressionLevel` remains.
*/
ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
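
A minimal usage sketch of the clarified contract (helper name and level are illustrative; error handling abridged):

```c
#include <stdio.h>
#include <zstd.h>

/* Compresses at the requested level, resetting any advanced parameters
 * previously set on cctx, as the documentation above states. */
static size_t compress_one_shot(ZSTD_CCtx* cctx,
                                void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize)
{
    size_t const csize = ZSTD_compressCCtx(cctx, dst, dstCapacity,
                                           src, srcSize, 3 /* level */);
    if (ZSTD_isError(csize))
        fprintf(stderr, "compression failed: %s\n", ZSTD_getErrorName(csize));
    return csize;
}
```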
@ -233,7 +238,7 @@ ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
* using ZSTD_CCtx_set*() functions.
* Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame.
* "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` !
* They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()
* __They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()__ .
*
* It's possible to reset all parameters to "default" using ZSTD_CCtx_reset().
*
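
A sketch of the sticky behaviour described above (buffer management and return-code checks omitted; names illustrative):

```c
#include <zstd.h>

static void two_frames(ZSTD_CCtx* cctx,
                       void* dst, size_t dstCap,
                       const void* src1, size_t src1Size,
                       const void* src2, size_t src2Size)
{
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
    ZSTD_compress2(cctx, dst, dstCap, src1, src1Size); /* level 19 + checksum */
    ZSTD_compress2(cctx, dst, dstCap, src2, src2Size); /* parameters stick */
    ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);      /* back to defaults */
}
```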
@ -261,18 +266,26 @@ typedef enum {
/* compression parameters
* Note: When compressing with a ZSTD_CDict these parameters are superseded
* by the parameters used to construct the ZSTD_CDict. See ZSTD_CCtx_refCDict()
* for more info (superseded-by-cdict). */
ZSTD_c_compressionLevel=100, /* Update all compression parameters according to pre-defined cLevel table
* by the parameters used to construct the ZSTD_CDict.
* See ZSTD_CCtx_refCDict() for more info (superseded-by-cdict). */
ZSTD_c_compressionLevel=100, /* Set compression parameters according to pre-defined cLevel table.
* Note that exact compression parameters are dynamically determined,
* depending on both compression level and srcSize (when known).
* Default level is ZSTD_CLEVEL_DEFAULT==3.
* Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
* Note 1 : it's possible to pass a negative compression level.
* Note 2 : setting a level sets all default values of other compression parameters */
* Note 2 : setting a level resets all other compression parameters to default */
/* Advanced compression parameters :
* It's possible to pin down compression parameters to some specific values.
* In which case, these values are no longer dynamically selected by the compressor */
ZSTD_c_windowLog=101, /* Maximum allowed back-reference distance, expressed as power of 2.
* This will set a memory budget for streaming decompression,
* with larger values requiring more memory
* and typically compressing more.
* Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
* Special: value 0 means "use default windowLog".
* Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT
* requires explicitly allowing such window size at decompression stage if using streaming. */
* requires explicitly allowing such size at streaming decompression stage. */
ZSTD_c_hashLog=102, /* Size of the initial probe table, as a power of 2.
* Resulting memory usage is (1 << (hashLog+2)).
* Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
@ -283,13 +296,13 @@ typedef enum {
* Resulting memory usage is (1 << (chainLog+2)).
* Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX.
* Larger tables result in better and slower compression.
* This parameter is useless when using "fast" strategy.
* This parameter is useless for "fast" strategy.
* It's still useful when using "dfast" strategy,
* in which case it defines a secondary probe table.
* Special: value 0 means "use default chainLog". */
ZSTD_c_searchLog=104, /* Number of search attempts, as a power of 2.
* More attempts result in better and slower compression.
* This parameter is useless when using "fast" and "dFast" strategies.
* This parameter is useless for "fast" and "dFast" strategies.
* Special: value 0 means "use default searchLog". */
ZSTD_c_minMatch=105, /* Minimum size of searched matches.
* Note that Zstandard can still find matches of smaller size,
@ -344,7 +357,7 @@ typedef enum {
ZSTD_c_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1)
* Content size must be known at the beginning of compression.
* This is automatically the case when using ZSTD_compress2(),
* For streaming variants, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */
* For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */
ZSTD_c_checksumFlag=201, /* A 32-bits checksum of content is written at end of frame (default:0) */
ZSTD_c_dictIDFlag=202, /* When applicable, dictionary's ID is written into frame header (default:1) */
@ -363,7 +376,7 @@ typedef enum {
* Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads.
* 0 means default, which is dynamically determined based on compression parameters.
* Job size must be a minimum of overlap size, or 1 MB, whichever is largest.
* The minimum size is automatically and transparently enforced */
* The minimum size is automatically and transparently enforced. */
ZSTD_c_overlapLog=402, /* Control the overlap size, as a fraction of window size.
* The overlap size is an amount of data reloaded from previous job at the beginning of a new job.
* It helps preserve compression ratio, while each job is compressed in parallel.
@ -386,6 +399,7 @@ typedef enum {
* ZSTD_c_forceAttachDict
* ZSTD_c_literalCompressionMode
* ZSTD_c_targetCBlockSize
* ZSTD_c_srcSizeHint
* Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
* note : never ever use experimentalParam? names directly;
* also, the enums values themselves are unstable and can still change.
@ -396,6 +410,7 @@ typedef enum {
ZSTD_c_experimentalParam4=1001,
ZSTD_c_experimentalParam5=1002,
ZSTD_c_experimentalParam6=1003,
ZSTD_c_experimentalParam7=1004
} ZSTD_cParameter;
typedef struct {
@ -793,12 +808,17 @@ ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
typedef struct ZSTD_CDict_s ZSTD_CDict;
/*! ZSTD_createCDict() :
* When compressing multiple messages / blocks using the same dictionary, it's recommended to load it only once.
* ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup cost.
* When compressing multiple messages or blocks using the same dictionary,
* it's recommended to digest the dictionary only once, since it's a costly operation.
* ZSTD_createCDict() will create a state from digesting a dictionary.
* The resulting state can be used for future compression operations with very limited startup cost.
* ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
* `dictBuffer` can be released after ZSTD_CDict creation, because its content is copied within CDict.
* Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate `dictBuffer` content.
* Note : A ZSTD_CDict can be created from an empty dictBuffer, but it is inefficient when used to compress small data. */
* @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.
* Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.
* Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,
* in which case the only thing that it transports is the @compressionLevel.
* This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,
* expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. */
ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
int compressionLevel);
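
The recommended pattern, sketched (compression level and helper shape are illustrative; return codes unchecked):

```c
#include <zstd.h>

static void compress_all(ZSTD_CCtx* cctx,
                         const void* dictBuf, size_t dictSize,
                         const void* const srcs[], const size_t srcSizes[],
                         void* const dsts[], const size_t dstCaps[],
                         size_t nbMessages)
{
    /* digest once; dictBuf could be freed right after this call */
    ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3);
    size_t i;
    if (cdict == NULL) return;
    for (i = 0; i < nbMessages; i++)
        ZSTD_compress_usingCDict(cctx, dsts[i], dstCaps[i],
                                 srcs[i], srcSizes[i], cdict);
    ZSTD_freeCDict(cdict);
}
```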
@ -925,7 +945,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
* Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.
* It's a CPU consuming operation, with non-negligible impact on latency.
* If there is a need to use the same prefix multiple times, consider loadDictionary instead.
* Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dm_rawContent).
* Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).
* Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */
ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
const void* prefix, size_t prefixSize);
@ -969,7 +989,7 @@ ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
* Note 2 : Prefix buffer is referenced. It **must** outlive decompression.
* Prefix buffer must remain unmodified up to the end of frame,
* reached when ZSTD_decompressStream() returns 0.
* Note 3 : By default, the prefix is treated as raw content (ZSTD_dm_rawContent).
* Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).
* Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section)
* Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.
* A full dictionary is more costly, as it requires building tables.
@ -1014,8 +1034,8 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
* Some of them might be removed in the future (especially when redundant with existing stable functions)
* ***************************************************************************************/
#define ZSTD_FRAMEHEADERSIZE_PREFIX 5 /* minimum input size required to query frame header size */
#define ZSTD_FRAMEHEADERSIZE_MIN 6
#define ZSTD_FRAMEHEADERSIZE_PREFIX(format) ((format) == ZSTD_f_zstd1 ? 5 : 1) /* minimum input size required to query frame header size */
#define ZSTD_FRAMEHEADERSIZE_MIN(format) ((format) == ZSTD_f_zstd1 ? 6 : 2)
#define ZSTD_FRAMEHEADERSIZE_MAX 18 /* can be useful for static allocation */
#define ZSTD_SKIPPABLEHEADERSIZE 8
@ -1063,6 +1083,8 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
/* Advanced parameter bounds */
#define ZSTD_TARGETCBLOCKSIZE_MIN 64
#define ZSTD_TARGETCBLOCKSIZE_MAX ZSTD_BLOCKSIZE_MAX
#define ZSTD_SRCSIZEHINT_MIN 0
#define ZSTD_SRCSIZEHINT_MAX INT_MAX
/* internal */
#define ZSTD_HASHLOG3_MAX 17
@ -1072,6 +1094,24 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params;
typedef struct {
unsigned int matchPos; /* Match pos in dst */
/* If seqDef.offset > 3, then this is seqDef.offset - 3
* If seqDef.offset < 3, then this is the corresponding repeat offset
* But if seqDef.offset < 3 and litLength == 0, this is the
* repeat offset before the corresponding repeat offset
* And if seqDef.offset == 3 and litLength == 0, this is the
* most recent repeat offset - 1
*/
unsigned int offset;
unsigned int litLength; /* Literal length */
unsigned int matchLength; /* Match length */
/* 0 when seq not rep and seqDef.offset otherwise
* when litLength == 0 this will be <= 4, otherwise <= 3 like normal
*/
unsigned int rep;
} ZSTD_Sequence;
typedef struct {
unsigned windowLog; /**< largest match distance : larger == more compression, more memory needed during decompression */
unsigned chainLog; /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
@ -1101,21 +1141,12 @@ typedef enum {
typedef enum {
ZSTD_dlm_byCopy = 0, /**< Copy dictionary content internally */
ZSTD_dlm_byRef = 1, /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
ZSTD_dlm_byRef = 1 /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
} ZSTD_dictLoadMethod_e;
typedef enum {
/* Opened question : should we have a format ZSTD_f_auto ?
* Today, it would mean exactly the same as ZSTD_f_zstd1.
* But, in the future, should several formats become supported,
* on the compression side, it would mean "default format".
* On the decompression side, it would mean "automatic format detection",
* so that ZSTD_f_zstd1 would mean "accept *only* zstd frames".
* Since meaning is a little different, another option could be to define different enums for compression and decompression.
* This question could be kept for later, when there are actually multiple formats to support,
* but there is also the question of pinning enum values, and pinning value `0` is especially important */
ZSTD_f_zstd1 = 0, /* zstd frame format, specified in zstd_compression_format.md (default) */
ZSTD_f_zstd1_magicless = 1, /* Variant of zstd frame format, without initial 4-bytes magic number.
ZSTD_f_zstd1_magicless = 1 /* Variant of zstd frame format, without initial 4-bytes magic number.
* Useful to save 4 bytes per generated frame.
* Decoder cannot recognise automatically this format, requiring this instruction. */
} ZSTD_format_e;
@ -1126,7 +1157,7 @@ typedef enum {
* to evolve and should be considered only in the context of extremely
* advanced performance tuning.
*
* Zstd currently supports the use of a CDict in two ways:
* Zstd currently supports the use of a CDict in three ways:
*
* - The contents of the CDict can be copied into the working context. This
* means that the compression can search both the dictionary and input
@ -1142,6 +1173,12 @@ typedef enum {
* working context's tables can be reused). For small inputs, this can be
* faster than copying the CDict's tables.
*
* - The CDict's tables are not used at all, and instead we use the working
* context alone to reload the dictionary and use params based on the source
* size. See ZSTD_compress_insertDictionary() and ZSTD_compress_usingDict().
* This method is effective when the dictionary sizes are very small relative
* to the input size, and the input size is fairly large to begin with.
*
* Zstd has a simple internal heuristic that selects which strategy to use
* at the beginning of a compression. However, if experimentation shows that
* Zstd is making poor choices, it is possible to override that choice with
@ -1150,6 +1187,7 @@ typedef enum {
ZSTD_dictDefaultAttach = 0, /* Use the default heuristic. */
ZSTD_dictForceAttach = 1, /* Never copy the dictionary. */
ZSTD_dictForceCopy = 2, /* Always copy the dictionary. */
ZSTD_dictForceLoad = 3 /* Always reload the dictionary */
} ZSTD_dictAttachPref_e;
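
When profiling shows the heuristic choosing poorly, the preference can be pinned; a sketch (experimental API, so `ZSTD_STATIC_LINKING_ONLY` is required):

```c
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* e.g. force "attach" when many tiny inputs share one large CDict */
static void prefer_attach(ZSTD_CCtx* cctx)
{
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_forceAttachDict, ZSTD_dictForceAttach);
}
```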
typedef enum {
@ -1158,7 +1196,7 @@ typedef enum {
* levels will be compressed. */
ZSTD_lcm_huffman = 1, /**< Always attempt Huffman compression. Uncompressed literals will still be
* emitted if Huffman compression is not profitable. */
ZSTD_lcm_uncompressed = 2, /**< Always emit uncompressed literals. */
ZSTD_lcm_uncompressed = 2 /**< Always emit uncompressed literals. */
} ZSTD_literalCompressionMode_e;
@ -1210,20 +1248,38 @@ ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcS
* or an error code (if srcSize is too small) */
ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
/*! ZSTD_getSequences() :
* Extract sequences from the sequence store
* zc can be used to insert custom compression params.
* This function invokes ZSTD_compress2
* @return : number of sequences extracted
*/
ZSTDLIB_API size_t ZSTD_getSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
size_t outSeqsSize, const void* src, size_t srcSize);
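
A sketch of pulling sequences out for analysis (experimental API; the one-sequence-per-byte sizing is a deliberately pessimistic assumption):

```c
#define ZSTD_STATIC_LINKING_ONLY
#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

static void dump_sequences(const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_Sequence* const seqs = malloc(srcSize * sizeof(ZSTD_Sequence));
    if (cctx && seqs) {
        size_t const nbSeqs = ZSTD_getSequences(cctx, seqs, srcSize, src, srcSize);
        size_t i;
        for (i = 0; i < nbSeqs; i++)
            printf("off=%u lit=%u match=%u\n",
                   seqs[i].offset, seqs[i].litLength, seqs[i].matchLength);
    }
    free(seqs);
    ZSTD_freeCCtx(cctx);   /* both free functions accept NULL */
}
```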
/***************************************
* Memory management
***************************************/
/*! ZSTD_estimate*() :
* These functions make it possible to estimate memory usage
* of a future {D,C}Ctx, before its creation.
* ZSTD_estimateCCtxSize() will provide a budget large enough for any compression level up to selected one.
* It will also consider src size to be arbitrarily "large", which is worst case.
* If srcSize is known to always be small, ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
* ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
* ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
* Note : CCtx size estimation is only correct for single-threaded compression. */
* These functions make it possible to estimate memory usage of a future
* {D,C}Ctx, before its creation.
*
* ZSTD_estimateCCtxSize() will provide a budget large enough for any
* compression level up to selected one. Unlike ZSTD_estimateCStreamSize*(),
* this estimate does not include space for a window buffer, so this estimate
* is guaranteed to be enough for single-shot compressions, but not streaming
* compressions. It will however assume the input may be arbitrarily large,
* which is the worst case. If srcSize is known to always be small,
* ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
* ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with
* ZSTD_getCParams() to create cParams from compressionLevel.
* ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with
* ZSTD_CCtxParams_setParameter().
*
* Note: only single-threaded compression is supported. This function will
* return an error code if ZSTD_c_nbWorkers is >= 1. */
ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
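
One way to put the estimate to work is a statically-allocated context (ZSTD_initStaticCCtx() is experimental; malloc stands in for any arena):

```c
#define ZSTD_STATIC_LINKING_ONLY
#include <stdlib.h>
#include <zstd.h>

static ZSTD_CCtx* make_sized_cctx(int level, void** workspaceOut)
{
    size_t const budget = ZSTD_estimateCCtxSize(level);  /* single-shot budget */
    void* const workspace = malloc(budget);
    *workspaceOut = workspace;   /* caller frees after the CCtx is done */
    return workspace ? ZSTD_initStaticCCtx(workspace, budget) : NULL;
}
```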
@ -1334,7 +1390,8 @@ ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictS
* Create a digested dictionary for compression
* Dictionary content is just referenced, not duplicated.
* As a consequence, `dictBuffer` **must** outlive CDict,
* and its content must remain unmodified throughout the lifetime of CDict. */
* and its content must remain unmodified throughout the lifetime of CDict.
* note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */
ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
/*! ZSTD_getCParams() :
@ -1361,7 +1418,9 @@ ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
/*! ZSTD_compress_advanced() :
* Same as ZSTD_compress_usingDict(), with fine-tune control over compression parameters (by structure) */
* Note : this function is now DEPRECATED.
* It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
* This prototype will be marked as deprecated and generate compilation warning on reaching v1.5.x */
ZSTDLIB_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
@ -1369,7 +1428,9 @@ ZSTDLIB_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
ZSTD_parameters params);
/*! ZSTD_compress_usingCDict_advanced() :
* Same as ZSTD_compress_usingCDict(), with fine-tune control over frame parameters */
* Note : this function is now REDUNDANT.
* It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.
* This prototype will be marked as deprecated and generate compilation warning in some future version */
ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
@ -1441,6 +1502,12 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* pre
* There is no guarantee on compressed block size (default:0) */
#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6
/* User's best guess of source size.
* Hint is not valid when srcSizeHint == 0.
* There is no guarantee that hint is close to actual source size,
* but compression ratio may regress significantly if guess considerably underestimates */
#define ZSTD_c_srcSizeHint ZSTD_c_experimentalParam7
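
Setting the hint looks like any other advanced parameter (experimental; the ~100 KB figure is just an example):

```c
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static void hint_source_size(ZSTD_CCtx* cctx)
{
    /* tune parameters for an input expected to be around 100 KB */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_srcSizeHint, 100 * 1024);
}
```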
/*! ZSTD_CCtx_getParameter() :
* Get the requested compression parameter value, selected by enum ZSTD_cParameter,
* and store it into int* value.
@ -1613,8 +1680,13 @@ ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs (
* pledgedSrcSize must be correct. If it is not known at init time, use
* ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,
* "0" also disables frame content size field. It may be enabled in the future.
* Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
*/
ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);
ZSTDLIB_API size_t
ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
int compressionLevel,
unsigned long long pledgedSrcSize);
/**! ZSTD_initCStream_usingDict() :
* This function is deprecated, and is equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
@ -1623,42 +1695,66 @@ ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLe
*
* Creates of an internal CDict (incompatible with static CCtx), except if
* dict == NULL or dictSize < 8, in which case no dict is used.
* Note: dict is loaded with ZSTD_dm_auto (treated as a full zstd dictionary if
* Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if
* it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
* Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
*/
ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel);
ZSTDLIB_API size_t
ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
const void* dict, size_t dictSize,
int compressionLevel);
/**! ZSTD_initCStream_advanced() :
* This function is deprecated, and is approximately equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* ZSTD_CCtx_setZstdParams(zcs, params); // Set the zstd params and leave the rest as-is
* // Pseudocode: Set each zstd parameter and leave the rest as-is.
* for ((param, value) : params) {
* ZSTD_CCtx_setParameter(zcs, param, value);
* }
* ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
* ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
*
* pledgedSrcSize must be correct. If srcSize is not known at init time, use
* value ZSTD_CONTENTSIZE_UNKNOWN. dict is loaded with ZSTD_dm_auto and ZSTD_dlm_byCopy.
* dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.
* pledgedSrcSize must be correct.
* If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
* Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
*/
ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
ZSTD_parameters params, unsigned long long pledgedSrcSize);
ZSTDLIB_API size_t
ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
const void* dict, size_t dictSize,
ZSTD_parameters params,
unsigned long long pledgedSrcSize);
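
Spelled out as real code, the replacement sequence above becomes (level/dict mapping illustrative; drive the stream afterwards with ZSTD_compressStream2()):

```c
#include <zstd.h>

static void init_stream_modern(ZSTD_CStream* zcs,
                               const void* dict, size_t dictSize,
                               int level, unsigned long long pledgedSrcSize)
{
    ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
    ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, level);
    ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
    ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
}
```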
/**! ZSTD_initCStream_usingCDict() :
* This function is deprecated, and equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* ZSTD_CCtx_refCDict(zcs, cdict);
*
* note : cdict will just be referenced, and must outlive compression session
* Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
*/
ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
/**! ZSTD_initCStream_usingCDict_advanced() :
* This function is deprecated, and is approximately equivalent to:
* This function is DEPRECATED, and is approximately equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* ZSTD_CCtx_setZstdFrameParams(zcs, fParams); // Set the zstd frame params and leave the rest as-is
* // Pseudocode: Set each zstd frame parameter and leave the rest as-is.
* for ((fParam, value) : fParams) {
* ZSTD_CCtx_setParameter(zcs, fParam, value);
* }
* ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
* ZSTD_CCtx_refCDict(zcs, cdict);
*
* same as ZSTD_initCStream_usingCDict(), with control over frame parameters.
* pledgedSrcSize must be correct. If srcSize is not known at init time, use
* value ZSTD_CONTENTSIZE_UNKNOWN.
* Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
*/
ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize);
ZSTDLIB_API size_t
ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
const ZSTD_CDict* cdict,
ZSTD_frameParameters fParams,
unsigned long long pledgedSrcSize);
/*! ZSTD_resetCStream() :
* This function is deprecated, and is equivalent to:
@ -1673,6 +1769,7 @@ ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const
* For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
* but it will change to mean "empty" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
* @return : 0, or an error code (which can be tested using ZSTD_isError())
* Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
*/
ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
@ -1718,8 +1815,10 @@ ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
* ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
*
* note: no dictionary will be used if dict == NULL or dictSize < 8
* Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
*/
ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
/**
* This function is deprecated, and is equivalent to:
*
@ -1727,14 +1826,17 @@ ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dic
* ZSTD_DCtx_refDDict(zds, ddict);
*
* note : ddict is referenced, it must outlive decompression session
* Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
*/
ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
/**
* This function is deprecated, and is equivalent to:
*
* ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
*
* re-use decompression parameters from previous init; saves dictionary loading
* Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
*/
ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
@ -1908,8 +2010,8 @@ ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
/*!
Block functions produce and decode raw zstd blocks, without frame metadata.
Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
User will have to take in charge required information to regenerate data, such as compressed and content sizes.
Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
But users will have to take in charge needed metadata to regenerate data, such as compressed and content sizes.
A few rules to respect :
- Compressing and decompressing require a context structure
@ -1920,12 +2022,14 @@ ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
+ copyCCtx() and copyDCtx() can be used too
- Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
+ If input is larger than a block size, it's necessary to split input data into multiple blocks
+ For inputs larger than a single block, really consider using regular ZSTD_compress() instead.
Frame metadata is not that costly, and quickly becomes negligible as source size grows larger.
- When a block is considered not compressible enough, ZSTD_compressBlock() result will be zero.
In which case, nothing is produced into `dst` !
+ User must test for such outcome and deal directly with uncompressed data
+ ZSTD_decompressBlock() doesn't accept uncompressed data as input !!!
+ For inputs larger than a single block, consider using regular ZSTD_compress() instead.
Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.
- When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) !
===> In which case, nothing is produced into `dst` !
+ User __must__ test for such outcome and deal directly with uncompressed data
+ A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0.
Doing so would mess up with statistics history, leading to potential data corruption.
+ ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!
+ In case of multiple successive blocks, should some of them be uncompressed,
decoder must be informed of their existence in order to follow proper history.
Use ZSTD_insertBlock() for such a case.
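
A round-trip sketch of the zero-return rule (contexts are assumed already initialized with ZSTD_compressBegin()/ZSTD_decompressBegin(); experimental API):

```c
#define ZSTD_STATIC_LINKING_ONLY
#include <string.h>
#include <zstd.h>

static size_t roundtrip_block(ZSTD_CCtx* cctx, ZSTD_DCtx* dctx,
                              void* tmp, size_t tmpCap,
                              void* out, size_t outCap,
                              const void* src, size_t srcSize)
{
    size_t const csize = ZSTD_compressBlock(cctx, tmp, tmpCap, src, srcSize);
    if (ZSTD_isError(csize)) return csize;
    if (csize == 0) {                         /* block judged incompressible */
        memcpy(out, src, srcSize);            /* outCap >= srcSize assumed */
        ZSTD_insertBlock(dctx, out, srcSize); /* keep decoder history valid */
        return srcSize;
    }
    return ZSTD_decompressBlock(dctx, out, outCap, tmp, csize);
}
```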


@ -1,37 +0,0 @@
# local binary (Makefile)
zstd
zstd32
zstd4
zstd-compress
zstd-decompress
zstd-frugal
zstd-small
zstd-nolegacy
# Object files
*.o
*.ko
default.profraw
have_zlib
# Executables
*.exe
*.out
*.app
# Default result files
dictionary
grillResults.txt
_*
tmp*
*.zst
result
out
# fuzzer
afl
# Misc files
*.bat
!windres/generate_res.bat
dirTest*


@ -173,10 +173,13 @@ Benchmark arguments :
```
#### Restricted usage of Environment Variables
Using environment variables to set compression/decompression parameters has security implications. Therefore,
we intentionally restrict its usage. Currently, only `ZSTD_CLEVEL` is supported for setting compression level.
Using environment variables to set parameters has security implications.
Therefore, this avenue is intentionally restricted.
Only `ZSTD_CLEVEL` is supported currently, for setting compression level.
`ZSTD_CLEVEL` can be used to set the level between 1 and 19 (the "normal" range).
If the value of `ZSTD_CLEVEL` is not a valid integer, it will be ignored with a warning message.
Note that command line options will override corresponding environment variable settings.
`ZSTD_CLEVEL` just replaces the default compression level (`3`).
It can be overridden by corresponding command line arguments.
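For example, `ZSTD_CLEVEL=19 zstd file.txt` compresses at level 19, while
`ZSTD_CLEVEL=19 zstd -7 file.txt` compresses at level 7, since the command line wins.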
#### Long distance matching mode
The long distance matching mode, enabled with `--long`, is designed to improve


@ -88,7 +88,7 @@ static UTIL_time_t g_displayClock = UTIL_TIME_INITIALIZER;
#endif
#define DEBUGOUTPUT(...) { if (DEBUG) DISPLAY(__VA_ARGS__); }
#define EXM_THROW_INT(errorNum, ...) { \
#define RETURN_ERROR_INT(errorNum, ...) { \
DEBUGOUTPUT("%s: %i: \n", __FILE__, __LINE__); \
DISPLAYLEVEL(1, "Error %i : ", errorNum); \
DISPLAYLEVEL(1, __VA_ARGS__); \
@ -401,9 +401,9 @@ BMK_benchMemAdvancedNoAlloc(
BMK_initCCtxArgs cctxprep;
BMK_initDCtxArgs dctxprep;
cbp.benchFn = local_defaultCompress;
cbp.benchFn = local_defaultCompress; /* ZSTD_compress2 */
cbp.benchPayload = cctx;
cbp.initFn = local_initCCtx;
cbp.initFn = local_initCCtx; /* BMK_initCCtx */
cbp.initPayload = &cctxprep;
cbp.errorFn = ZSTD_isError;
cbp.blockCount = nbBlocks;
@ -534,8 +534,8 @@ BMK_benchMemAdvancedNoAlloc(
if (u==srcSize-1) { /* should never happen */
DISPLAY("no difference detected\n");
}
}
}
} /* for (u=0; u<srcSize; u++) */
} /* if ((adv->mode == BMK_both) && (crcOrig!=crcCheck)) */
} /* CRC Checking */
if (displayLevel == 1) { /* hidden display mode -q, used by python speed benchmark */
@ -754,8 +754,7 @@ static int BMK_loadFiles(void* buffer, size_t bufferSize,
size_t pos = 0, totalSize = 0;
unsigned n;
for (n=0; n<nbFiles; n++) {
FILE* f;
U64 fileSize = UTIL_getFileSize(fileNamesTable[n]);
U64 fileSize = UTIL_getFileSize(fileNamesTable[n]); /* last file may be shortened */
if (UTIL_isDirectory(fileNamesTable[n])) {
DISPLAYLEVEL(2, "Ignoring %s directory... \n", fileNamesTable[n]);
fileSizes[n] = 0;
@ -766,20 +765,20 @@ static int BMK_loadFiles(void* buffer, size_t bufferSize,
fileSizes[n] = 0;
continue;
}
f = fopen(fileNamesTable[n], "rb");
if (f==NULL) EXM_THROW_INT(10, "impossible to open file %s", fileNamesTable[n]);
DISPLAYUPDATE(2, "Loading %s... \r", fileNamesTable[n]);
if (fileSize > bufferSize-pos) fileSize = bufferSize-pos, nbFiles=n; /* buffer too small - stop after this file */
{ size_t const readSize = fread(((char*)buffer)+pos, 1, (size_t)fileSize, f);
if (readSize != (size_t)fileSize) EXM_THROW_INT(11, "could not read %s", fileNamesTable[n]);
pos += readSize;
}
fileSizes[n] = (size_t)fileSize;
totalSize += (size_t)fileSize;
fclose(f);
}
{ FILE* const f = fopen(fileNamesTable[n], "rb");
if (f==NULL) RETURN_ERROR_INT(10, "impossible to open file %s", fileNamesTable[n]);
DISPLAYUPDATE(2, "Loading %s... \r", fileNamesTable[n]);
if (fileSize > bufferSize-pos) fileSize = bufferSize-pos, nbFiles=n; /* buffer too small - stop after this file */
{ size_t const readSize = fread(((char*)buffer)+pos, 1, (size_t)fileSize, f);
if (readSize != (size_t)fileSize) RETURN_ERROR_INT(11, "could not read %s", fileNamesTable[n]);
pos += readSize;
}
fileSizes[n] = (size_t)fileSize;
totalSize += (size_t)fileSize;
fclose(f);
} }
if (totalSize == 0) EXM_THROW_INT(12, "no data to bench");
if (totalSize == 0) RETURN_ERROR_INT(12, "no data to bench");
return 0;
}


@ -205,7 +205,6 @@ BMK_benchOutcome_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize,
#endif /* BENCH_ZSTD_H_3242387 */
#if defined (__cplusplus)


@ -55,17 +55,18 @@ static U32 RDG_rand(U32* src)
return rand32 >> 5;
}
typedef U32 fixedPoint_24_8;
static void RDG_fillLiteralDistrib(BYTE* ldt, double ld)
static void RDG_fillLiteralDistrib(BYTE* ldt, fixedPoint_24_8 ld)
{
BYTE const firstChar = (ld<=0.0) ? 0 : '(';
BYTE const lastChar = (ld<=0.0) ? 255 : '}';
BYTE character = (ld<=0.0) ? 0 : '0';
U32 u;
if (ld<=0.0) ld = 0.0;
if (ld<=0) ld = 0;
for (u=0; u<LTSIZE; ) {
U32 const weight = (U32)((double)(LTSIZE - u) * ld) + 1;
U32 const weight = (((LTSIZE - u) * ld) >> 8) + 1;
U32 const end = MIN ( u + weight , LTSIZE);
while (u < end) ldt[u++] = character;
character++;
@ -92,7 +93,8 @@ static U32 RDG_randLength(U32* seedPtr)
return (RDG_rand(seedPtr) & 0x1FF) + 0xF;
}
static void RDG_genBlock(void* buffer, size_t buffSize, size_t prefixSize, double matchProba, const BYTE* ldt, U32* seedPtr)
static void RDG_genBlock(void* buffer, size_t buffSize, size_t prefixSize,
double matchProba, const BYTE* ldt, U32* seedPtr)
{
BYTE* const buffPtr = (BYTE*)buffer;
U32 const matchProba32 = (U32)(32768 * matchProba);
@ -128,13 +130,13 @@ static void RDG_genBlock(void* buffer, size_t buffSize, size_t prefixSize, doubl
U32 const randOffset = RDG_rand15Bits(seedPtr) + 1;
U32 const offset = repeatOffset ? prevOffset : (U32) MIN(randOffset , pos);
size_t match = pos - offset;
while (pos < d) buffPtr[pos++] = buffPtr[match++]; /* correctly manages overlaps */
while (pos < d) { buffPtr[pos++] = buffPtr[match++]; /* correctly manages overlaps */ }
prevOffset = offset;
} else {
/* Literal (noise) */
U32 const length = RDG_randLength(seedPtr);
U32 const d = (U32) MIN(pos + length, buffSize);
while (pos < d) buffPtr[pos++] = RDG_genChar(seedPtr, ldt);
while (pos < d) { buffPtr[pos++] = RDG_genChar(seedPtr, ldt); }
} }
}
@ -145,7 +147,7 @@ void RDG_genBuffer(void* buffer, size_t size, double matchProba, double litProba
BYTE ldt[LTSIZE];
memset(ldt, '0', sizeof(ldt)); /* yes, character '0', this is intentional */
if (litProba<=0.0) litProba = matchProba / 4.5;
RDG_fillLiteralDistrib(ldt, litProba);
RDG_fillLiteralDistrib(ldt, (fixedPoint_24_8)(litProba * 256 + 0.001));
RDG_genBlock(buffer, size, 0, matchProba, ldt, &seed32);
}
@ -163,7 +165,7 @@ void RDG_genStdout(unsigned long long size, double matchProba, double litProba,
if (buff==NULL) { perror("datagen"); exit(1); }
if (litProba<=0.0) litProba = matchProba / 4.5;
memset(ldt, '0', sizeof(ldt)); /* yes, character '0', this is intentional */
RDG_fillLiteralDistrib(ldt, litProba);
RDG_fillLiteralDistrib(ldt, (fixedPoint_24_8)(litProba * 256 + 0.001));
SET_BINARY_MODE(stdout);
/* Generate initial dict */


@ -201,7 +201,7 @@ static void DiB_fillNoise(void* buffer, size_t length)
unsigned const prime1 = 2654435761U;
unsigned const prime2 = 2246822519U;
unsigned acc = prime1;
size_t p=0;;
size_t p=0;
for (p=0; p<length; p++) {
acc *= prime2;

(file diff suppressed because it is too large)


@ -26,15 +26,27 @@ extern "C" {
#define stdinmark "/*stdin*\\"
#define stdoutmark "/*stdout*\\"
#ifdef _WIN32
# define nulmark "nul"
# define nulmark "NUL"
#else
# define nulmark "/dev/null"
#endif
/**
* We test whether the extension we found starts with 't', and if so, we append
* ".tar" to the end of the output name.
*/
#define LZMA_EXTENSION ".lzma"
#define XZ_EXTENSION ".xz"
#define TXZ_EXTENSION ".txz"
#define GZ_EXTENSION ".gz"
#define TGZ_EXTENSION ".tgz"
#define ZSTD_EXTENSION ".zst"
#define TZSTD_EXTENSION ".tzst"
#define LZ4_EXTENSION ".lz4"
#define TLZ4_EXTENSION ".tlz4"
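
So, for instance, decompressing `archive.tzst` is expected to produce `archive.tar`, mirroring the established `.tgz` to `.tar` convention.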
/*-*************************************
@ -71,25 +83,30 @@ void FIO_setOverlapLog(FIO_prefs_t* const prefs, int overlapLog);
void FIO_setRemoveSrcFile(FIO_prefs_t* const prefs, unsigned flag);
void FIO_setSparseWrite(FIO_prefs_t* const prefs, unsigned sparse); /**< 0: no sparse; 1: disable on stdout; 2: always enabled */
void FIO_setRsyncable(FIO_prefs_t* const prefs, int rsyncable);
void FIO_setStreamSrcSize(FIO_prefs_t* const prefs, size_t streamSrcSize);
void FIO_setTargetCBlockSize(FIO_prefs_t* const prefs, size_t targetCBlockSize);
void FIO_setSrcSizeHint(FIO_prefs_t* const prefs, size_t srcSizeHint);
void FIO_setTestMode(FIO_prefs_t* const prefs, int testMode);
void FIO_setLiteralCompressionMode(
FIO_prefs_t* const prefs,
ZSTD_literalCompressionMode_e mode);
void FIO_setNoProgress(unsigned noProgress);
void FIO_setNotificationLevel(int level);
void FIO_setExcludeCompressedFile(FIO_prefs_t* const prefs, int excludeCompressedFiles);
/*-*************************************
* Single File functions
***************************************/
/** FIO_compressFilename() :
@return : 0 == ok; 1 == pb with src file. */
* @return : 0 == ok; 1 == pb with src file. */
int FIO_compressFilename (FIO_prefs_t* const prefs,
const char* outfilename, const char* infilename, const char* dictFileName,
int compressionLevel, ZSTD_compressionParameters comprParams);
const char* outfilename, const char* infilename,
const char* dictFileName, int compressionLevel,
ZSTD_compressionParameters comprParams);
/** FIO_decompressFilename() :
@return : 0 == ok; 1 == pb with src file. */
* @return : 0 == ok; 1 == pb with src file. */
int FIO_decompressFilename (FIO_prefs_t* const prefs,
const char* outfilename, const char* infilename, const char* dictFileName);
@ -100,20 +117,28 @@ int FIO_listMultipleFiles(unsigned numFiles, const char** filenameTable, int dis
* Multiple File functions
***************************************/
/** FIO_compressMultipleFilenames() :
@return : nb of missing files */
* @return : nb of missing files */
int FIO_compressMultipleFilenames(FIO_prefs_t* const prefs,
const char** srcNamesTable, unsigned nbFiles,
const char** inFileNamesTable, unsigned nbFiles,
const char* outDirName,
const char* outFileName, const char* suffix,
const char* dictFileName, int compressionLevel,
ZSTD_compressionParameters comprParams);
/** FIO_decompressMultipleFilenames() :
@return : nb of missing or skipped files */
* @return : nb of missing or skipped files */
int FIO_decompressMultipleFilenames(FIO_prefs_t* const prefs,
const char** srcNamesTable, unsigned nbFiles,
const char* outDirName,
const char* outFileName,
const char* dictFileName);
/* FIO_checkFilenameCollisions() :
* Checks for and warns if there are any files that would have the same output path
*/
int FIO_checkFilenameCollisions(const char** filenameTable, unsigned nbFiles);
/*-*************************************
* Advanced stuff (should actually be hosted elsewhere)


@ -92,7 +92,7 @@ extern "C" {
# if defined(__linux__) || defined(__linux)
# ifndef _POSIX_C_SOURCE
# define _POSIX_C_SOURCE 200112L /* feature test macro : https://www.gnu.org/software/libc/manual/html_node/Feature-Test-Macros.html */
# define _POSIX_C_SOURCE 200809L /* feature test macro : https://www.gnu.org/software/libc/manual/html_node/Feature-Test-Macros.html */
# endif
# endif
# include <unistd.h> /* declares _POSIX_VERSION */


@ -19,12 +19,6 @@ extern "C" {
/*-****************************************
* Dependencies
******************************************/
#include <sys/types.h> /* utime */
#if defined(_MSC_VER)
# include <sys/utime.h> /* utime */
#else
# include <utime.h> /* utime */
#endif
#include <time.h> /* clock_t, clock, CLOCKS_PER_SEC */


@ -20,6 +20,9 @@ extern "C" {
#include <errno.h>
#include <assert.h>
#if defined(_MSC_VER) || defined(__MINGW32__) || defined (__MSVCRT__)
#include <direct.h> /* needed for _mkdir in windows */
#endif
int UTIL_fileExist(const char* filename)
{
@ -54,14 +57,26 @@ int UTIL_getFileStat(const char* infilename, stat_t *statbuf)
int UTIL_setFileStat(const char *filename, stat_t *statbuf)
{
int res = 0;
struct utimbuf timebuf;
if (!UTIL_isRegularFile(filename))
return -1;
timebuf.actime = time(NULL);
timebuf.modtime = statbuf->st_mtime;
res += utime(filename, &timebuf); /* set access and modification times */
/* set access and modification times */
#if defined(_WIN32) || (PLATFORM_POSIX_VERSION < 200809L)
{
struct utimbuf timebuf;
timebuf.actime = time(NULL);
timebuf.modtime = statbuf->st_mtime;
res += utime(filename, &timebuf);
}
#else
{
/* (atime, mtime) */
struct timespec timebuf[2] = { {0, UTIME_NOW} };
timebuf[1] = statbuf->st_mtim;
res += utimensat(AT_FDCWD, filename, timebuf, 0);
}
#endif
#if !defined(_WIN32)
res += chown(filename, statbuf->st_uid, statbuf->st_gid); /* Copy ownership */
@ -87,23 +102,45 @@ U32 UTIL_isDirectory(const char* infilename)
return 0;
}
int UTIL_isSameFile(const char* file1, const char* file2)
int UTIL_compareStr(const void *p1, const void *p2) {
return strcmp(* (char * const *) p1, * (char * const *) p2);
}
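
`UTIL_compareStr()` has exactly the shape `qsort()` expects for an array of C strings; a minimal sketch of the intended use (assuming `util.h` is in scope):

```c
#include <stdlib.h>   /* qsort */

static void sort_filenames(const char* names[], size_t count)
{
    /* each element is a char*, so the comparator dereferences twice */
    qsort(names, count, sizeof(names[0]), UTIL_compareStr);
}
```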
int UTIL_isSameFile(const char* fName1, const char* fName2)
{
#if defined(_MSC_VER)
assert(fName1 != NULL); assert(fName2 != NULL);
#if defined(_MSC_VER) || defined(_WIN32)
/* note : Visual does not support file identification by inode.
* inode does not work on Windows, even with a posix layer, like msys2.
* The following work-around is limited to detecting exact name repetition only,
* aka `filename` is considered different from `subdir/../filename` */
return !strcmp(file1, file2);
return !strcmp(fName1, fName2);
#else
stat_t file1Stat;
stat_t file2Stat;
return UTIL_getFileStat(file1, &file1Stat)
&& UTIL_getFileStat(file2, &file2Stat)
&& (file1Stat.st_dev == file2Stat.st_dev)
&& (file1Stat.st_ino == file2Stat.st_ino);
{ stat_t file1Stat;
stat_t file2Stat;
return UTIL_getFileStat(fName1, &file1Stat)
&& UTIL_getFileStat(fName2, &file2Stat)
&& (file1Stat.st_dev == file2Stat.st_dev)
&& (file1Stat.st_ino == file2Stat.st_ino);
}
#endif
}
#ifndef _MSC_VER
/* Using this to distinguish named pipes */
U32 UTIL_isFIFO(const char* infilename)
{
/* macro guards, as defined in : https://linux.die.net/man/2/lstat */
#if PLATFORM_POSIX_VERSION >= 200112L
stat_t statbuf;
int r = UTIL_getFileStat(infilename, &statbuf);
if (!r && S_ISFIFO(statbuf.st_mode)) return 1;
#endif
(void)infilename;
return 0;
}
#endif
U32 UTIL_isLink(const char* infilename)
{
/* macro guards, as defined in : https://linux.die.net/man/2/lstat */
@ -221,19 +258,20 @@ int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char
DIR *dir;
struct dirent *entry;
char* path;
int dirLength, fnameLength, pathLength, nbFiles = 0;
size_t dirLength, fnameLength, pathLength;
int nbFiles = 0;
if (!(dir = opendir(dirName))) {
UTIL_DISPLAYLEVEL(1, "Cannot open directory '%s': %s\n", dirName, strerror(errno));
return 0;
}
dirLength = (int)strlen(dirName);
dirLength = strlen(dirName);
errno = 0;
while ((entry = readdir(dir)) != NULL) {
if (strcmp (entry->d_name, "..") == 0 ||
strcmp (entry->d_name, ".") == 0) continue;
fnameLength = (int)strlen(entry->d_name);
fnameLength = strlen(entry->d_name);
path = (char*) malloc(dirLength + fnameLength + 2);
if (!path) { closedir(dir); return 0; }
memcpy(path, dirName, dirLength);
@ -255,7 +293,8 @@ int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char
} else {
if (*bufStart + *pos + pathLength >= *bufEnd) {
ptrdiff_t newListSize = (*bufEnd - *bufStart) + LIST_SIZE_INCREASE;
*bufStart = (char*)UTIL_realloc(*bufStart, newListSize);
assert(newListSize >= 0);
*bufStart = (char*)UTIL_realloc(*bufStart, (size_t)newListSize);
*bufEnd = *bufStart + newListSize;
if (*bufStart == NULL) { free(path); closedir(dir); return 0; }
}
@ -289,6 +328,27 @@ int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char
#endif /* #ifdef _WIN32 */
int UTIL_isCompressedFile(const char *inputName, const char *extensionList[])
{
const char* ext = UTIL_getFileExtension(inputName);
while(*extensionList!=NULL)
{
const int isCompressedExtension = strcmp(ext,*extensionList);
if(isCompressedExtension==0)
return 1;
++extensionList;
}
return 0;
}
/*Utility function to get file extension from file */
const char* UTIL_getFileExtension(const char* infilename)
{
const char* extension = strrchr(infilename, '.');
if(!extension || extension==infilename) return "";
return extension;
}
/*
* UTIL_createFileList - takes a list of files and directories (params: inputNames, inputNamesNb), scans directories,
* and returns a new list of files (params: return value, allocatedBuffer, allocatedNamesNb).
@ -304,7 +364,6 @@ UTIL_createFileList(const char **inputNames, unsigned inputNamesNb,
unsigned i, nbFiles;
char* buf = (char*)malloc(LIST_SIZE_INCREASE);
char* bufend = buf + LIST_SIZE_INCREASE;
const char** fileTable;
if (!buf) return NULL;
@ -313,36 +372,37 @@ UTIL_createFileList(const char **inputNames, unsigned inputNamesNb,
size_t const len = strlen(inputNames[i]);
if (buf + pos + len >= bufend) {
ptrdiff_t newListSize = (bufend - buf) + LIST_SIZE_INCREASE;
buf = (char*)UTIL_realloc(buf, newListSize);
assert(newListSize >= 0);
buf = (char*)UTIL_realloc(buf, (size_t)newListSize);
bufend = buf + newListSize;
if (!buf) return NULL;
}
if (buf + pos + len < bufend) {
memcpy(buf+pos, inputNames[i], len+1); /* with final \0 */
memcpy(buf+pos, inputNames[i], len+1); /* including final \0 */
pos += len + 1;
nbFiles++;
}
} else {
nbFiles += UTIL_prepareFileList(inputNames[i], &buf, &pos, &bufend, followLinks);
nbFiles += (unsigned)UTIL_prepareFileList(inputNames[i], &buf, &pos, &bufend, followLinks);
if (buf == NULL) return NULL;
} }
if (nbFiles == 0) { free(buf); return NULL; }
fileTable = (const char**)malloc((nbFiles+1) * sizeof(const char*));
if (!fileTable) { free(buf); return NULL; }
{ const char** const fileTable = (const char**)malloc((nbFiles + 1) * sizeof(*fileTable));
if (!fileTable) { free(buf); return NULL; }
for (i=0, pos=0; i<nbFiles; i++) {
fileTable[i] = buf + pos;
pos += strlen(fileTable[i]) + 1;
for (i = 0, pos = 0; i < nbFiles; i++) {
fileTable[i] = buf + pos;
if (buf + pos > bufend) { free(buf); free((void*)fileTable); return NULL; }
pos += strlen(fileTable[i]) + 1;
}
*allocatedBuffer = buf;
*allocatedNamesNb = nbFiles;
return fileTable;
}
if (buf + pos > bufend) { free(buf); free((void*)fileTable); return NULL; }
*allocatedBuffer = buf;
*allocatedNamesNb = nbFiles;
return fileTable;
}
@ -375,8 +435,13 @@ int UTIL_countPhysicalCores(void)
DWORD returnLength = 0;
size_t byteOffset = 0;
glpi = (LPFN_GLPI)GetProcAddress(GetModuleHandle(TEXT("kernel32")),
"GetLogicalProcessorInformation");
#if defined(_MSC_VER)
/* Visual Studio does not like the following cast */
# pragma warning( disable : 4054 ) /* conversion from function ptr to data ptr */
# pragma warning( disable : 4055 ) /* conversion from data ptr to function ptr */
#endif
glpi = (LPFN_GLPI)(void*)GetProcAddress(GetModuleHandle(TEXT("kernel32")),
"GetLogicalProcessorInformation");
if (glpi == NULL) {
goto failed;
@ -494,7 +559,7 @@ int UTIL_countPhysicalCores(void)
if (fgets(buff, BUF_SIZE, cpuinfo) != NULL) {
if (strncmp(buff, "siblings", 8) == 0) {
const char* const sep = strchr(buff, ':');
if (*sep == '\0') {
if (sep == NULL || *sep == '\0') {
/* formatting was broken? */
goto failed;
}
@ -503,7 +568,7 @@ int UTIL_countPhysicalCores(void)
}
if (strncmp(buff, "cpu cores", 9) == 0) {
const char* const sep = strchr(buff, ':');
if (*sep == '\0') {
if (sep == NULL || *sep == '\0') {
/* formatting was broken? */
goto failed;
}


@ -25,17 +25,21 @@ extern "C" {
#include <stdio.h> /* fprintf */
#include <sys/types.h> /* stat, utime */
#include <sys/stat.h> /* stat, chmod */
#if defined(_MSC_VER)
#if defined(_WIN32)
# include <sys/utime.h> /* utime */
# include <io.h> /* _chmod */
#else
# include <unistd.h> /* chown, stat */
#if PLATFORM_POSIX_VERSION < 200809L
# include <utime.h> /* utime */
#else
# include <fcntl.h> /* AT_FDCWD */
# include <sys/stat.h> /* utimensat */
#endif
#endif
#include <time.h> /* clock_t, clock, CLOCKS_PER_SEC, nanosleep */
#include "mem.h" /* U32, U64 */
/*-************************************************************
* Avoid fseek()'s 2GiB barrier with MSVC, macOS, *BSD, MinGW
***************************************************************/
@ -129,7 +133,13 @@ int UTIL_setFileStat(const char* filename, stat_t* statbuf);
U32 UTIL_isDirectory(const char* infilename);
int UTIL_getFileStat(const char* infilename, stat_t* statbuf);
int UTIL_isSameFile(const char* file1, const char* file2);
int UTIL_compareStr(const void *p1, const void *p2);
int UTIL_isCompressedFile(const char* infilename, const char *extensionList[]);
const char* UTIL_getFileExtension(const char* infilename);
#ifndef _MSC_VER
U32 UTIL_isFIFO(const char* infilename);
#endif
U32 UTIL_isLink(const char* infilename);
#define UTIL_FILESIZE_UNKNOWN ((U64)(-1))
U64 UTIL_getFileSize(const char* infilename);


@ -1,5 +1,5 @@
.
.TH "ZSTD" "1" "July 2019" "zstd 1.4.2" "User Commands"
.TH "ZSTD" "1" "October 2019" "zstd 1.4.4" "User Commands"
.
.SH "NAME"
\fBzstd\fR \- zstd, zstdmt, unzstd, zstdcat \- Compress or decompress \.zst files
@ -127,6 +127,14 @@ Does not spawn a thread for compression, use a single thread for both I/O and co
\fBzstd\fR will dynamically adapt compression level to perceived I/O conditions\. Compression level adaptation can be observed live by using command \fB\-v\fR\. Adaptation can be constrained between supplied \fBmin\fR and \fBmax\fR levels\. The feature works when combined with multi\-threading and \fB\-\-long\fR mode\. It does not work with \fB\-\-single\-thread\fR\. It sets window size to 8 MB by default (can be changed manually, see \fBwlog\fR)\. Due to the chaotic nature of dynamic adaptation, compressed result is not reproducible\. \fInote\fR : at the time of this writing, \fB\-\-adapt\fR can remain stuck at low speed when combined with multiple worker threads (>=2)\.
.
.TP
\fB\-\-stream\-size=#\fR
Sets the pledged source size of input coming from a stream\. This value must be exact, as it will be included in the produced frame header\. Incorrect stream sizes will cause an error\. This information will be used to better optimize compression parameters, resulting in better and potentially faster compression, especially for smaller source sizes\.
.
.TP
\fB\-\-size\-hint=#\fR
When handling input from a stream, \fBzstd\fR must guess how large the source size will be when optimizing compression parameters\. If the stream size is relatively small, this guess may be a poor one, resulting in a higher compression ratio than expected\. This feature allows for controlling the guess when needed\. Exact guesses result in better compression ratios\. Overestimates result in slightly degraded compression ratios, while underestimates may result in significant degradation\.
.
.TP
\fB\-\-rsyncable\fR
\fBzstd\fR will periodically synchronize the compression state to make the compressed file more rsync\-friendly\. There is a negligible impact to compression ratio, and the faster compression levels will see a small compression speed hit\. This feature does not work with \fB\-\-single\-thread\fR\. You probably don\'t want to use it with long range mode, since it will decrease the effectiveness of the synchronization points, but your mileage may vary\.
.
@ -167,6 +175,10 @@ keep source file(s) after successful compression or decompression\. This is the
operate recursively on directories
.
.TP
\fB\-\-output\-dir\-flat[=dir]\fR
resulting files are stored into the target \fBdir\fR directory, instead of the same directory as the origin file\. Be aware that this command can introduce name collisions if multiple files, from different directories, end up having the same name\. Collision resolution ensures the first file with a given name will be present in \fBdir\fR; in combination with \fB\-f\fR, the last file will be present instead\.
.
.TP
\fB\-\-format=FORMAT\fR
compress and decompress in other formats\. If compiled with support, zstd can compress to or decompress from other compression algorithm formats\. Possibly available options are \fBzstd\fR, \fBgzip\fR, \fBxz\fR, \fBlzma\fR, and \fBlz4\fR\. If no such format is provided, \fBzstd\fR is the default\.
.
@ -198,6 +210,9 @@ add integrity check computed from uncompressed data (default: enabled)
\fB\-\-\fR
All arguments after \fB\-\-\fR are treated as files
.
.SS "Restricted usage of Environment Variables"
Using environment variables to set parameters has security implications\. Therefore, this avenue is intentionally restricted\. Only \fBZSTD_CLEVEL\fR is supported currently, for setting compression level\. \fBZSTD_CLEVEL\fR can be used to set the level between 1 and 19 (the "normal" range)\. If the value of \fBZSTD_CLEVEL\fR is not a valid integer, it will be ignored with a warning message\. \fBZSTD_CLEVEL\fR just replaces the default compression level (\fB3\fR)\. It can be overridden by corresponding command line arguments\.
.
.SH "DICTIONARY BUILDER"
\fBzstd\fR offers \fIdictionary\fR compression, which greatly improves efficiency on small files and messages\. It\'s possible to train \fBzstd\fR with a set of samples, the result of which is saved into a file called a \fBdictionary\fR\. Then during compression and decompression, reference the same dictionary, using command \fB\-D dictionaryFileName\fR\. Compression of small files similar to the sample set will be greatly improved\.
.
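As a library-level illustration of the same mechanism, here is a minimal sketch (not part of this patch) using the stable one-shot dictionary API; the buffer names are hypothetical placeholders:
```
#include <zstd.h>

/* Sketch: one-shot compression with a trained dictionary already loaded
 * into memory (dictBuf/dictSize are hypothetical placeholders). */
size_t compress_with_dict(void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize,
                          const void* dictBuf, size_t dictSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t const cSize = ZSTD_compress_usingDict(cctx, dst, dstCapacity,
                                                 src, srcSize,
                                                 dictBuf, dictSize,
                                                 3 /* default level */);
    ZSTD_freeCCtx(cctx);
    return cSize;
}
```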

View File

@ -144,6 +144,18 @@ the last one takes effect.
Due to the chaotic nature of dynamic adaptation, compressed result is not reproducible.
_note_ : at the time of this writing, `--adapt` can remain stuck at low speed
when combined with multiple worker threads (>=2).
* `--stream-size=#` :
Sets the pledged source size of input coming from a stream. This value must be exact, as it
will be included in the produced frame header. Incorrect stream sizes will cause an error.
This information will be used to better optimize compression parameters, resulting in
better and potentially faster compression, especially for smaller source sizes.
* `--size-hint=#`:
When handling input from a stream, `zstd` must guess how large the source size
will be when optimizing compression parameters. If the stream size is relatively
small, this guess may be a poor one, resulting in a higher compression ratio than
expected. This feature allows for controlling the guess when needed.
Exact guesses result in better compression ratios. Overestimates result in slightly
degraded compression ratios, while underestimates may result in significant degradation.
* `--rsyncable` :
`zstd` will periodically synchronize the compression state to make the
compressed file more rsync-friendly. There is a negligible impact to
@ -179,6 +191,13 @@ the last one takes effect.
This is the default behavior.
* `-r`:
operate recursively on directories
* `--output-dir-flat[=dir]`:
resulting files are stored into the target `dir` directory,
instead of the same directory as the origin file.
Be aware that this command can introduce name collisions
if multiple files from different directories end up having the same name.
Collision resolution ensures the first file with a given name will be present in `dir`;
in combination with `-f`, the last file will be present instead.
* `--format=FORMAT`:
compress and decompress in other formats. If compiled with
support, zstd can compress to or decompress from other compression algorithm
@ -202,6 +221,16 @@ the last one takes effect.
* `--`:
All arguments after `--` are treated as files
### Restricted usage of Environment Variables
Using environment variables to set parameters has security implications.
Therefore, this avenue is intentionally restricted.
Only `ZSTD_CLEVEL` is supported currently, for setting compression level.
`ZSTD_CLEVEL` can be used to set the level between 1 and 19 (the "normal" range).
If the value of `ZSTD_CLEVEL` is not a valid integer, it will be ignored with a warning message.
`ZSTD_CLEVEL` just replaces the default compression level (`3`).
It can be overridden by corresponding command line arguments.
DICTIONARY BUILDER
------------------

View File

@ -136,16 +136,19 @@ static int usage_advanced(const char* programName)
DISPLAY( " -q : suppress warnings; specify twice to suppress errors too\n");
DISPLAY( " -c : force write to standard output, even if it is the console\n");
DISPLAY( " -l : print information about zstd compressed files \n");
DISPLAY( "--exclude-compressed: only compress files that are not previously compressed \n");
#ifndef ZSTD_NOCOMPRESS
DISPLAY( "--ultra : enable levels beyond %i, up to %i (requires more memory)\n", ZSTDCLI_CLEVEL_MAX, ZSTD_maxCLevel());
DISPLAY( "--long[=#]: enable long distance matching with given window log (default: %u)\n", g_defaultMaxWindowLog);
DISPLAY( "--fast[=#]: switch to ultra fast compression level (default: %u)\n", 1);
DISPLAY( "--fast[=#]: switch to very fast compression levels (default: %u)\n", 1);
DISPLAY( "--adapt : dynamically adapt compression level to I/O conditions \n");
DISPLAY( "--stream-size=# : optimize compression parameters for streaming input of given number of bytes \n");
DISPLAY( "--size-hint=# optimize compression parameters for streaming input of approximately this size\n");
DISPLAY( "--target-compressed-block-size=# : make compressed block near targeted size \n");
#ifdef ZSTD_MULTITHREAD
DISPLAY( " -T# : spawns # compression threads (default: 1, 0==# cores) \n");
DISPLAY( " -B# : select size of each job (default: 0==automatic) \n");
DISPLAY( " --rsyncable : compress using a rsync-friendly method (-B sets block size) \n");
DISPLAY( "--rsyncable : compress using a rsync-friendly method (-B sets block size) \n");
#endif
DISPLAY( "--no-dictID : don't write dictID into header (dictionary compression)\n");
DISPLAY( "--[no-]check : integrity check (default: enabled) \n");
@ -153,6 +156,7 @@ static int usage_advanced(const char* programName)
#endif
#ifdef UTIL_HAS_CREATEFILELIST
DISPLAY( " -r : operate recursively on directories \n");
DISPLAY( "--output-dir-flat[=directory]: all resulting files stored into `directory`. \n");
#endif
DISPLAY( "--format=zstd : compress files to the .zst format (default) \n");
#ifdef ZSTD_GZCOMPRESS
@ -284,7 +288,7 @@ static unsigned readU32FromChar(const char** stringPtr) {
* If yes, @return 1 and advances *stringPtr to the position which immediately follows longCommand.
* @return 0 and doesn't modify *stringPtr otherwise.
*/
static unsigned longCommandWArg(const char** stringPtr, const char* longCommand)
static int longCommandWArg(const char** stringPtr, const char* longCommand)
{
size_t const comSize = strlen(longCommand);
int const result = !strncmp(*stringPtr, longCommand, comSize);
@ -427,8 +431,8 @@ static ZDICT_fastCover_params_t defaultFastCoverParams(void)
static unsigned parseAdaptParameters(const char* stringPtr, int* adaptMinPtr, int* adaptMaxPtr)
{
for ( ; ;) {
if (longCommandWArg(&stringPtr, "min=")) { *adaptMinPtr = readU32FromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; }
if (longCommandWArg(&stringPtr, "max=")) { *adaptMaxPtr = readU32FromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; }
if (longCommandWArg(&stringPtr, "min=")) { *adaptMinPtr = (int)readU32FromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; }
if (longCommandWArg(&stringPtr, "max=")) { *adaptMaxPtr = (int)readU32FromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; }
DISPLAYLEVEL(4, "invalid compression parameter \n");
return 0;
}
@ -523,7 +527,7 @@ static int init_cLevel(void) {
DISPLAYLEVEL(2, "Ignore environment variable setting %s=%s: numeric value too large\n", ENV_CLEVEL, env);
return ZSTDCLI_CLEVEL_DEFAULT;
} else if (*ptr == 0) {
return sign * absLevel;
return sign * (int)absLevel;
}
}
@ -560,6 +564,7 @@ int main(int argCount, const char* argv[])
adaptMax = MAXCLEVEL,
rsyncable = 0,
nextArgumentIsOutFileName = 0,
nextArgumentIsOutDirName = 0,
nextArgumentIsMaxDict = 0,
nextArgumentIsDictID = 0,
nextArgumentsAreFiles = 0,
@ -580,15 +585,18 @@ int main(int argCount, const char* argv[])
int cLevelLast = -1000000000;
unsigned recursive = 0;
unsigned memLimit = 0;
const char** filenameTable = (const char**)malloc(argCount * sizeof(const char*)); /* argCount >= 1 */
const char** filenameTable = (const char**)malloc((size_t)argCount * sizeof(const char*)); /* argCount >= 1 */
unsigned filenameIdx = 0;
const char* programName = argv[0];
const char* outFileName = NULL;
const char* outDirName = NULL;
const char* dictFileName = NULL;
const char* suffix = ZSTD_EXTENSION;
unsigned maxDictSize = g_defaultMaxDictSize;
unsigned dictID = 0;
size_t streamSrcSize = 0;
size_t targetCBlockSize = 0;
size_t srcSizeHint = 0;
int dictCLevel = g_defaultDictCLevel;
unsigned dictSelect = g_defaultSelectivityLevel;
#ifdef UTIL_HAS_CREATEFILELIST
@ -682,6 +690,7 @@ int main(int argCount, const char* argv[])
if (!strcmp(argument, "--keep")) { FIO_setRemoveSrcFile(prefs, 0); continue; }
if (!strcmp(argument, "--rm")) { FIO_setRemoveSrcFile(prefs, 1); continue; }
if (!strcmp(argument, "--priority=rt")) { setRealTimePrio = 1; continue; }
if (!strcmp(argument, "--output-dir-flat")) {nextArgumentIsOutDirName=1; lastCommand=1; continue; }
if (!strcmp(argument, "--adapt")) { adapt = 1; continue; }
if (longCommandWArg(&argument, "--adapt=")) { adapt = 1; if (!parseAdaptParameters(argument, &adaptMin, &adaptMax)) CLEAN_RETURN(badusage(programName)); continue; }
if (!strcmp(argument, "--single-thread")) { nbWorkers = 0; singleThread = 1; continue; }
@ -700,7 +709,7 @@ int main(int argCount, const char* argv[])
if (!strcmp(argument, "--compress-literals")) { literalCompressionMode = ZSTD_lcm_huffman; continue; }
if (!strcmp(argument, "--no-compress-literals")) { literalCompressionMode = ZSTD_lcm_uncompressed; continue; }
if (!strcmp(argument, "--no-progress")) { FIO_setNoProgress(1); continue; }
if (!strcmp(argument, "--exclude-compressed")) { FIO_setExcludeCompressedFile(prefs, 1); continue; }
/* long commands with arguments */
#ifndef ZSTD_NODICT
if (longCommandWArg(&argument, "--train-cover")) {
@ -737,7 +746,7 @@ int main(int argCount, const char* argv[])
continue;
}
#endif
if (longCommandWArg(&argument, "--threads=")) { nbWorkers = readU32FromChar(&argument); continue; }
if (longCommandWArg(&argument, "--threads=")) { nbWorkers = (int)readU32FromChar(&argument); continue; }
if (longCommandWArg(&argument, "--memlimit=")) { memLimit = readU32FromChar(&argument); continue; }
if (longCommandWArg(&argument, "--memory=")) { memLimit = readU32FromChar(&argument); continue; }
if (longCommandWArg(&argument, "--memlimit-decompress=")) { memLimit = readU32FromChar(&argument); continue; }
@ -745,7 +754,10 @@ int main(int argCount, const char* argv[])
if (longCommandWArg(&argument, "--maxdict=")) { maxDictSize = readU32FromChar(&argument); continue; }
if (longCommandWArg(&argument, "--dictID=")) { dictID = readU32FromChar(&argument); continue; }
if (longCommandWArg(&argument, "--zstd=")) { if (!parseCompressionParameters(argument, &compressionParams)) CLEAN_RETURN(badusage(programName)); continue; }
if (longCommandWArg(&argument, "--stream-size=")) { streamSrcSize = readU32FromChar(&argument); continue; }
if (longCommandWArg(&argument, "--target-compressed-block-size=")) { targetCBlockSize = readU32FromChar(&argument); continue; }
if (longCommandWArg(&argument, "--size-hint=")) { srcSizeHint = readU32FromChar(&argument); continue; }
if (longCommandWArg(&argument, "--output-dir-flat=")) { outDirName = argument; continue; }
if (longCommandWArg(&argument, "--long")) {
unsigned ldmWindowLog = 0;
ldmFlag = 1;
@ -797,7 +809,7 @@ int main(int argCount, const char* argv[])
#ifndef ZSTD_NOCOMPRESS
/* compression Level */
if ((*argument>='0') && (*argument<='9')) {
dictCLevel = cLevel = readU32FromChar(&argument);
dictCLevel = cLevel = (int)readU32FromChar(&argument);
continue;
}
#endif
@ -869,7 +881,7 @@ int main(int argCount, const char* argv[])
case 'e':
/* compression Level */
argument++;
cLevelLast = readU32FromChar(&argument);
cLevelLast = (int)readU32FromChar(&argument);
break;
/* Modify Nb Iterations (benchmark only) */
@ -895,7 +907,7 @@ int main(int argCount, const char* argv[])
/* nb of threads (hidden option) */
case 'T':
argument++;
nbWorkers = readU32FromChar(&argument);
nbWorkers = (int)readU32FromChar(&argument);
break;
/* Dictionary Selection level */
@ -959,6 +971,13 @@ int main(int argCount, const char* argv[])
continue;
}
if (nextArgumentIsOutDirName) {
nextArgumentIsOutDirName = 0;
lastCommand = 0;
outDirName = argument;
continue;
}
/* add filename to list */
filenameTable[filenameIdx++] = argument;
}
@ -986,7 +1005,11 @@ int main(int argCount, const char* argv[])
if (!followLinks) {
unsigned u;
for (u=0, fileNamesNb=0; u<filenameIdx; u++) {
if (UTIL_isLink(filenameTable[u])) {
if (UTIL_isLink(filenameTable[u])
#ifndef _MSC_VER
&& !UTIL_isFIFO(filenameTable[u])
#endif /* _MSC_VER */
) {
DISPLAYLEVEL(2, "Warning : %s is a symbolic link, ignoring\n", filenameTable[u]);
} else {
filenameTable[fileNamesNb++] = filenameTable[u];
@ -1025,16 +1048,16 @@ int main(int argCount, const char* argv[])
#ifndef ZSTD_NOBENCH
benchParams.blockSize = blockSize;
benchParams.nbWorkers = nbWorkers;
benchParams.realTime = setRealTimePrio;
benchParams.realTime = (unsigned)setRealTimePrio;
benchParams.nbSeconds = bench_nbSeconds;
benchParams.ldmFlag = ldmFlag;
benchParams.ldmMinMatch = g_ldmMinMatch;
benchParams.ldmHashLog = g_ldmHashLog;
benchParams.ldmMinMatch = (int)g_ldmMinMatch;
benchParams.ldmHashLog = (int)g_ldmHashLog;
if (g_ldmBucketSizeLog != LDM_PARAM_DEFAULT) {
benchParams.ldmBucketSizeLog = g_ldmBucketSizeLog;
benchParams.ldmBucketSizeLog = (int)g_ldmBucketSizeLog;
}
if (g_ldmHashRateLog != LDM_PARAM_DEFAULT) {
benchParams.ldmHashRateLog = g_ldmHashRateLog;
benchParams.ldmHashRateLog = (int)g_ldmHashRateLog;
}
benchParams.literalCompressionMode = literalCompressionMode;
@ -1075,16 +1098,16 @@ int main(int argCount, const char* argv[])
#ifndef ZSTD_NODICT
ZDICT_params_t zParams;
zParams.compressionLevel = dictCLevel;
zParams.notificationLevel = g_displayLevel;
zParams.notificationLevel = (unsigned)g_displayLevel;
zParams.dictID = dictID;
if (dict == cover) {
int const optimize = !coverParams.k || !coverParams.d;
coverParams.nbThreads = nbWorkers;
coverParams.nbThreads = (unsigned)nbWorkers;
coverParams.zParams = zParams;
operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, blockSize, NULL, &coverParams, NULL, optimize);
} else if (dict == fastCover) {
int const optimize = !fastCoverParams.k || !fastCoverParams.d;
fastCoverParams.nbThreads = nbWorkers;
fastCoverParams.nbThreads = (unsigned)nbWorkers;
fastCoverParams.zParams = zParams;
operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, blockSize, NULL, NULL, &fastCoverParams, optimize);
} else {
@ -1103,7 +1126,7 @@ int main(int argCount, const char* argv[])
}
#ifndef ZSTD_NODECOMPRESS
if (operation==zom_test) { outFileName=nulmark; FIO_setRemoveSrcFile(prefs, 0); } /* test mode */
if (operation==zom_test) { FIO_setTestMode(prefs, 1); outFileName=nulmark; FIO_setRemoveSrcFile(prefs, 0); } /* test mode */
#endif
/* No input filename ==> use stdin and stdout */
@ -1139,18 +1162,20 @@ int main(int argCount, const char* argv[])
if (operation==zom_compress) {
#ifndef ZSTD_NOCOMPRESS
FIO_setNbWorkers(prefs, nbWorkers);
FIO_setBlockSize(prefs, (U32)blockSize);
if (g_overlapLog!=OVERLAP_LOG_DEFAULT) FIO_setOverlapLog(prefs, g_overlapLog);
FIO_setLdmFlag(prefs, ldmFlag);
FIO_setLdmHashLog(prefs, g_ldmHashLog);
FIO_setLdmMinMatch(prefs, g_ldmMinMatch);
if (g_ldmBucketSizeLog != LDM_PARAM_DEFAULT) FIO_setLdmBucketSizeLog(prefs, g_ldmBucketSizeLog);
if (g_ldmHashRateLog != LDM_PARAM_DEFAULT) FIO_setLdmHashRateLog(prefs, g_ldmHashRateLog);
FIO_setAdaptiveMode(prefs, adapt);
FIO_setBlockSize(prefs, (int)blockSize);
if (g_overlapLog!=OVERLAP_LOG_DEFAULT) FIO_setOverlapLog(prefs, (int)g_overlapLog);
FIO_setLdmFlag(prefs, (unsigned)ldmFlag);
FIO_setLdmHashLog(prefs, (int)g_ldmHashLog);
FIO_setLdmMinMatch(prefs, (int)g_ldmMinMatch);
if (g_ldmBucketSizeLog != LDM_PARAM_DEFAULT) FIO_setLdmBucketSizeLog(prefs, (int)g_ldmBucketSizeLog);
if (g_ldmHashRateLog != LDM_PARAM_DEFAULT) FIO_setLdmHashRateLog(prefs, (int)g_ldmHashRateLog);
FIO_setAdaptiveMode(prefs, (unsigned)adapt);
FIO_setAdaptMin(prefs, adaptMin);
FIO_setAdaptMax(prefs, adaptMax);
FIO_setRsyncable(prefs, rsyncable);
FIO_setStreamSrcSize(prefs, streamSrcSize);
FIO_setTargetCBlockSize(prefs, targetCBlockSize);
FIO_setSrcSizeHint(prefs, srcSizeHint);
FIO_setLiteralCompressionMode(prefs, literalCompressionMode);
if (adaptMin > cLevel) cLevel = adaptMin;
if (adaptMax < cLevel) cLevel = adaptMax;
@ -1158,9 +1183,9 @@ int main(int argCount, const char* argv[])
if ((filenameIdx==1) && outFileName)
operationResult = FIO_compressFilename(prefs, outFileName, filenameTable[0], dictFileName, cLevel, compressionParams);
else
operationResult = FIO_compressMultipleFilenames(prefs, filenameTable, filenameIdx, outFileName, suffix, dictFileName, cLevel, compressionParams);
operationResult = FIO_compressMultipleFilenames(prefs, filenameTable, filenameIdx, outDirName, outFileName, suffix, dictFileName, cLevel, compressionParams);
#else
(void)suffix; (void)adapt; (void)rsyncable; (void)ultra; (void)cLevel; (void)ldmFlag; (void)literalCompressionMode; (void)targetCBlockSize; /* not used when ZSTD_NOCOMPRESS set */
(void)suffix; (void)adapt; (void)rsyncable; (void)ultra; (void)cLevel; (void)ldmFlag; (void)literalCompressionMode; (void)targetCBlockSize; (void)streamSrcSize; (void)srcSizeHint; /* not used when ZSTD_NOCOMPRESS set */
DISPLAY("Compression not supported \n");
#endif
} else { /* decompression or test */
@ -1176,7 +1201,7 @@ int main(int argCount, const char* argv[])
if (filenameIdx==1 && outFileName)
operationResult = FIO_decompressFilename(prefs, outFileName, filenameTable[0], dictFileName);
else
operationResult = FIO_decompressMultipleFilenames(prefs, filenameTable, filenameIdx, outFileName, dictFileName);
operationResult = FIO_decompressMultipleFilenames(prefs, filenameTable, filenameIdx, outDirName, outFileName, dictFileName);
#else
DISPLAY("Decompression not supported \n");
#endif
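For context on the two streaming-size flags parsed above: `--stream-size=#` sets an exact pledged source size, while `--size-hint=#` only steers parameter selection. A minimal sketch of the library-level equivalents (not part of this patch), assuming the stable ZSTD_CCtx_setPledgedSrcSize() API and the experimental ZSTD_c_srcSizeHint parameter introduced in this release; the helper name is hypothetical:
```
#define ZSTD_STATIC_LINKING_ONLY   /* needed for the experimental ZSTD_c_srcSizeHint */
#include <zstd.h>

/* Hypothetical helper: compress with either an exact size or a rough hint. */
static size_t compressWithSizeInfo(void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize,
                                   unsigned long long exactSize, /* --stream-size=# */
                                   int approxSize)               /* --size-hint=# */
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t cSize;
    if (exactSize > 0) {
        /* Pledged size is written into the frame header; it must be exact,
         * otherwise compression ends with an error. */
        ZSTD_CCtx_setPledgedSrcSize(cctx, exactSize);
    } else if (approxSize > 0) {
        /* A hint only tunes parameter selection; it may be inexact. */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_srcSizeHint, approxSize);
    }
    cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    ZSTD_freeCCtx(cctx);
    return cSize;
}
```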

View File

@ -1,5 +1,5 @@
.
.TH "ZSTDGREP" "1" "July 2019" "zstd 1.4.2" "User Commands"
.TH "ZSTDGREP" "1" "October 2019" "zstd 1.4.4" "User Commands"
.
.SH "NAME"
\fBzstdgrep\fR \- print lines matching a pattern in zstandard\-compressed files

View File

@ -1,5 +1,5 @@
.
.TH "ZSTDLESS" "1" "July 2019" "zstd 1.4.2" "User Commands"
.TH "ZSTDLESS" "1" "October 2019" "zstd 1.4.4" "User Commands"
.
.SH "NAME"
\fBzstdless\fR \- view zstandard\-compressed files

View File

@ -1,68 +0,0 @@
# local binary (Makefile)
fullbench
fullbench32
fullbench-lib
fuzzer
fuzzer32
fuzzer-dll
zbufftest
zbufftest32
zbufftest-dll
zstreamtest
zstreamtest32
zstreamtest_asan
zstreamtest_tsan
zstreamtest-dll
datagen
paramgrill
paramgrill32
roundTripCrash
longmatch
symbols
legacy
decodecorpus
pool
poolTests
invalidDictionaries
checkTag
zcat
zstdcat
tm
# Tmp test directory
zstdtest
speedTest
versionsTest
namespaceTest
# Local script
startSpeedTest
speedTest.pid
# Object files
*.o
*.ko
# Executables
*.exe
*.out
*.app
# Default result files
dictionary
grillResults.txt
_*
tmp*
*.zst
*.gz
!gzip/hufts-segv.gz
result
out
*.zstd
# fuzzer
afl
# Misc files
*.bat
dirTest*

View File

@ -160,19 +160,13 @@ fuzzer-dll : LDFLAGS+= -L$(ZSTDDIR) -lzstd
fuzzer-dll : $(ZSTDDIR)/common/xxhash.c $(PRGDIR)/util.c $(PRGDIR)/timefn.c $(PRGDIR)/datagen.c fuzzer.c
$(CC) $(CPPFLAGS) $(CFLAGS) $(filter %.c,$^) $(LDFLAGS) -o $@$(EXT)
zbufftest : CPPFLAGS += -I$(ZSTDDIR)/deprecated
zbufftest : CFLAGS += -Wno-deprecated-declarations # required to silence deprecation warnings
zbufftest : $(ZSTD_OBJECTS) $(ZBUFF_FILES) $(PRGDIR)/util.c $(PRGDIR)/timefn.c $(PRGDIR)/datagen.c zbufftest.c
$(CC) $(FLAGS) $^ -o $@$(EXT)
zbufftest32 : CPPFLAGS += -I$(ZSTDDIR)/deprecated
zbufftest32 : CFLAGS += -Wno-deprecated-declarations -m32
zbufftest32 : $(ZSTD_FILES) $(ZBUFF_FILES) $(PRGDIR)/util.c $(PRGDIR)/timefn.c $(PRGDIR)/datagen.c zbufftest.c
zbufftest zbufftest32 zbufftest-dll : CPPFLAGS += -I$(ZSTDDIR)/deprecated
zbufftest zbufftest32 zbufftest-dll : CFLAGS += -Wno-deprecated-declarations # required to silence deprecation warnings
zbufftest32 : CFLAGS += -m32
zbufftest zbufftest32 : $(ZSTD_OBJECTS) $(ZBUFF_FILES) $(PRGDIR)/util.c $(PRGDIR)/timefn.c $(PRGDIR)/datagen.c zbufftest.c
$(CC) $(FLAGS) $^ -o $@$(EXT)
zbufftest-dll : zstd-dll
zbufftest-dll : CPPFLAGS += -I$(ZSTDDIR)/deprecated
zbufftest-dll : CFLAGS += -Wno-deprecated-declarations # required to silence deprecation warnings
zbufftest-dll : LDFLAGS+= -L$(ZSTDDIR) -lzstd
zbufftest-dll : $(ZSTDDIR)/common/xxhash.c $(PRGDIR)/util.c $(PRGDIR)/timefn.c $(PRGDIR)/datagen.c zbufftest.c
$(CC) $(CPPFLAGS) $(CFLAGS) $(filter %.c,$^) $(LDFLAGS) -o $@$(EXT)
@ -250,7 +244,8 @@ clean:
$(MAKE) -C $(ZSTDDIR) clean
$(MAKE) -C $(PRGDIR) clean
@$(RM) -fR $(TESTARTEFACT)
@$(RM) -f core *.o tmp* *.tmp result* *.gcda dictionary *.zst \
@$(RM) -rf tmp* # some test directories are named tmp*
@$(RM) core *.o *.tmp result* *.gcda dictionary *.zst \
$(PRGDIR)/zstd$(EXT) $(PRGDIR)/zstd32$(EXT) \
fullbench$(EXT) fullbench32$(EXT) \
fullbench-lib$(EXT) fullbench-dll$(EXT) \
@ -264,7 +259,7 @@ clean:
#----------------------------------------------------------------------------------
#make valgrindTest is validated only for Linux, macOS, BSD, Hurd and Solaris targets
# valgrind tests are validated only for some posix platforms
#----------------------------------------------------------------------------------
ifneq (,$(filter $(shell uname),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS))
HOST_OS = POSIX
@ -286,7 +281,7 @@ valgrindTest: zstd datagen fuzzer fullbench
endif
ifneq (,$(filter MSYS%,$(shell uname)))
ifneq (,$(filter MINGW% MSYS%,$(shell uname)))
HOST_OS = MSYS
endif

View File

@ -758,8 +758,8 @@ static U32 generateSequences(U32* seed, frame_t* frame, seqStore_t* seqStore,
DISPLAYLEVEL(7, " repeat offset: %d\n", (int)repIndex);
}
/* use libzstd sequence handling */
ZSTD_storeSeq(seqStore, literalLen, literals, offsetCode,
matchLen - MINMATCH);
ZSTD_storeSeq(seqStore, literalLen, literals, literals + literalLen,
offsetCode, matchLen - MINMATCH);
literalsSize -= literalLen;
excessMatch -= (matchLen - MIN_SEQ_LEN);
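The call above reflects an internal API change shipped with this update: ZSTD_storeSeq() now also receives the end of the literal buffer. A hedged sketch of the new shape (parameter names approximate, from the internal compression header):
```
/* Sketch: the extra litLimit pointer lets the encoder copy literals with
 * fast wildcopy while staying within the bounds of the source buffer. */
void ZSTD_storeSeq(seqStore_t* seqStorePtr,
                   size_t litLength,
                   const BYTE* literals,
                   const BYTE* litLimit,  /* new: one past the last valid literal byte */
                   U32 offCode,
                   size_t mlBase);
```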

View File

@ -45,7 +45,6 @@
#define NBLOOPS 6
#define TIMELOOP_S 2
#define KNUTH 2654435761U
#define MAX_MEM (1984 MB)
#define DEFAULT_CLEVEL 1
@ -450,7 +449,7 @@ static int benchMem(unsigned benchNb,
case 31: /* ZSTD_decodeLiteralsBlock : starts literals block in dstBuff2 */
{ size_t frameHeaderSize;
g_cSize = ZSTD_compress(dstBuff, dstBuffSize, src, srcSize, cLevel);
frameHeaderSize = ZSTD_frameHeaderSize(dstBuff, ZSTD_FRAMEHEADERSIZE_PREFIX);
frameHeaderSize = ZSTD_frameHeaderSize(dstBuff, ZSTD_FRAMEHEADERSIZE_PREFIX(ZSTD_f_zstd1));
CONTROL(!ZSTD_isError(frameHeaderSize));
/* check block is compressible, hence contains a literals section */
{ blockProperties_t bp;
@ -471,10 +470,10 @@ static int benchMem(unsigned benchNb,
const BYTE* ip = dstBuff;
const BYTE* iend;
{ size_t const cSize = ZSTD_compress(dstBuff, dstBuffSize, src, srcSize, cLevel);
CONTROL(cSize > ZSTD_FRAMEHEADERSIZE_PREFIX);
CONTROL(cSize > ZSTD_FRAMEHEADERSIZE_PREFIX(ZSTD_f_zstd1));
}
/* Skip frame Header */
{ size_t const frameHeaderSize = ZSTD_frameHeaderSize(dstBuff, ZSTD_FRAMEHEADERSIZE_PREFIX);
{ size_t const frameHeaderSize = ZSTD_frameHeaderSize(dstBuff, ZSTD_FRAMEHEADERSIZE_PREFIX(ZSTD_f_zstd1));
CONTROL(!ZSTD_isError(frameHeaderSize));
ip += frameHeaderSize;
}
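The macro rewrite exercised in this hunk follows from the magicless-frame decoding fix in this release: the minimal prefix needed to determine a frame header's size now depends on the frame format. A sketch of the assumed definition:
```
/* Sketch: a zstd1 frame begins with a 4-byte magic number plus the frame
 * header descriptor byte; a magicless frame begins at the descriptor byte. */
#define ZSTD_FRAMEHEADERSIZE_PREFIX(format) \
    ((format) == ZSTD_f_zstd1 ? 5 : 1)
```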
@ -722,7 +721,7 @@ static int usage_advanced(const char* exename)
DISPLAY( "\nAdvanced options :\n");
DISPLAY( " -b# : test only function # \n");
DISPLAY( " -l# : benchmark functions at that compression level (default : %i)\n", DEFAULT_CLEVEL);
DISPLAY( " --zstd : custom parameter selection. Format same as zstdcli \n");
DISPLAY( "--zstd= : custom parameter selection. Format same as zstdcli \n");
DISPLAY( " -P# : sample compressibility (default : %.1f%%)\n", COMPRESSIBILITY_DEFAULT * 100);
DISPLAY( " -B# : sample size (default : %u)\n", (unsigned)kSampleSizeDefault);
DISPLAY( " -i# : iteration loops [1-9](default : %i)\n", NBLOOPS);

View File

@ -40,8 +40,8 @@ FUZZ_LDFLAGS := -pthread $(LDFLAGS)
FUZZ_ARFLAGS := $(ARFLAGS)
FUZZ_TARGET_FLAGS = $(FUZZ_CPPFLAGS) $(FUZZ_CXXFLAGS) $(FUZZ_LDFLAGS)
FUZZ_HEADERS := fuzz_helpers.h fuzz.h zstd_helpers.h
FUZZ_SRC := $(PRGDIR)/util.c zstd_helpers.c
FUZZ_HEADERS := fuzz_helpers.h fuzz.h zstd_helpers.h fuzz_data_producer.h
FUZZ_SRC := $(PRGDIR)/util.c zstd_helpers.c fuzz_data_producer.c
ZSTDCOMMON_SRC := $(ZSTDDIR)/common/*.c
ZSTDCOMP_SRC := $(ZSTDDIR)/compress/*.c
@ -73,7 +73,8 @@ FUZZ_TARGETS := \
dictionary_round_trip \
dictionary_decompress \
zstd_frame_info \
simple_compress
simple_compress \
dictionary_loader
all: $(FUZZ_TARGETS)
@ -110,18 +111,12 @@ simple_compress: $(FUZZ_HEADERS) $(FUZZ_OBJ) simple_compress.o
zstd_frame_info: $(FUZZ_HEADERS) $(FUZZ_OBJ) zstd_frame_info.o
$(CXX) $(FUZZ_TARGET_FLAGS) $(FUZZ_OBJ) zstd_frame_info.o $(LIB_FUZZING_ENGINE) -o $@
dictionary_loader: $(FUZZ_HEADERS) $(FUZZ_OBJ) dictionary_loader.o
$(CXX) $(FUZZ_TARGET_FLAGS) $(FUZZ_OBJ) dictionary_loader.o $(LIB_FUZZING_ENGINE) -o $@
libregression.a: $(FUZZ_HEADERS) $(PRGDIR)/util.h $(PRGDIR)/util.c regression_driver.o
$(AR) $(FUZZ_ARFLAGS) $@ regression_driver.o
# Install libfuzzer (not usable for MSAN testing)
# Provided for convenience. To use this library run make libFuzzer and
# set LDFLAGS=-L.
.PHONY: libFuzzer
libFuzzer:
@$(RM) -rf Fuzzer
@git clone https://chromium.googlesource.com/chromium/llvm-project/compiler-rt/lib/fuzzer Fuzzer
@cd Fuzzer && ./build.sh
corpora/%_seed_corpus.zip:
@mkdir -p corpora
$(DOWNLOAD) $@ $(CORPORA_URL_PREFIX)$*_seed_corpus.zip

View File

@ -35,6 +35,8 @@ The environment variables can be overridden with the corresponding flags
`--cc`, `--cflags`, etc.
The specific fuzzing engine is selected with `LIB_FUZZING_ENGINE` or
`--lib-fuzzing-engine`, the default is `libregression.a`.
Alternatively, you can use Clang's built in fuzzing engine with
`--enable-fuzzer`.
It has flags that can easily set up sanitizers `--enable-{a,ub,m}san`, and
coverage instrumentation `--enable-coverage`.
It sets sane defaults which can be overridden with flags `--debug`,
@ -51,22 +53,25 @@ The command used to run the fuzzer is printed for debugging.
## LibFuzzer
```
# Build libfuzzer if necessary
make libFuzzer
# Build the fuzz targets
./fuzz.py build all --enable-coverage --enable-asan --enable-ubsan --lib-fuzzing-engine Fuzzer/libFuzzer.a --cc clang --cxx clang++
./fuzz.py build all --enable-fuzzer --enable-asan --enable-ubsan --cc clang --cxx clang++
# OR equivalently
CC=clang CXX=clang++ LIB_FUZZING_ENGINE=Fuzzer/libFuzzer.a ./fuzz.py build all --enable-coverage --enable-asan --enable-ubsan
CC=clang CXX=clang++ ./fuzz.py build all --enable-fuzzer --enable-asan --enable-ubsan
# Run the fuzzer
./fuzz.py libfuzzer TARGET -max_len=8192 -jobs=4
./fuzz.py libfuzzer TARGET <libfuzzer args like -jobs=4>
```
where `TARGET` could be `simple_decompress`, `stream_round_trip`, etc.
### MSAN
Fuzzing with `libFuzzer` and `MSAN` will require building a C++ standard library
and libFuzzer with MSAN.
Fuzzing with `libFuzzer` and `MSAN` is as easy as:
```
CC=clang CXX=clang++ ./fuzz.py build all --enable-fuzzer --enable-msan
./fuzz.py libfuzzer TARGET <libfuzzer args>
```
`fuzz.py` respects the environment variables / flags `MSAN_EXTRA_CPPFLAGS`,
`MSAN_EXTRA_CFLAGS`, `MSAN_EXTRA_CXXFLAGS`, `MSAN_EXTRA_LDFLAGS` to easily pass
the extra parameters only for MSAN.
@ -85,7 +90,7 @@ CC=afl-clang CXX=afl-clang++ ./fuzz.py build all --enable-asan --enable-ubsan
## Regression Testing
The regression rest supports the `all` target to run all the fuzzers in one
The regression test supports the `all` target to run all the fuzzers in one
command.
```

View File

@ -28,8 +28,6 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
size_t const neededBufSize = ZSTD_BLOCKSIZE_MAX;
FUZZ_seed(&src, &size);
/* Allocate all buffers and contexts if not already allocated */
if (neededBufSize > bufSize) {
free(rBuf);

View File

@ -20,21 +20,20 @@
#include <string.h>
#include "fuzz_helpers.h"
#include "zstd.h"
static const int kMaxClevel = 19;
#include "zstd_helpers.h"
#include "fuzz_data_producer.h"
static ZSTD_CCtx *cctx = NULL;
static ZSTD_DCtx *dctx = NULL;
static void* cBuf = NULL;
static void* rBuf = NULL;
static size_t bufSize = 0;
static uint32_t seed;
static size_t roundTripTest(void *result, size_t resultCapacity,
void *compressed, size_t compressedCapacity,
const void *src, size_t srcSize)
const void *src, size_t srcSize,
int cLevel)
{
int const cLevel = FUZZ_rand(&seed) % kMaxClevel;
ZSTD_parameters const params = ZSTD_getParams(cLevel, srcSize, 0);
size_t ret = ZSTD_compressBegin_advanced(cctx, NULL, 0, params, srcSize);
FUZZ_ZASSERT(ret);
@ -52,12 +51,16 @@ static size_t roundTripTest(void *result, size_t resultCapacity,
int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
size_t neededBufSize;
/* Give a random portion of src data to the producer, to use for
parameter generation. The rest will be used for (de)compression */
FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
size = FUZZ_dataProducer_reserveDataPrefix(producer);
seed = FUZZ_seed(&src, &size);
neededBufSize = size;
int const cLevel = FUZZ_dataProducer_int32Range(producer, kMinClevel, kMaxClevel);
size_t neededBufSize = size;
if (size > ZSTD_BLOCKSIZE_MAX)
return 0;
size = ZSTD_BLOCKSIZE_MAX;
/* Allocate all buffers and contexts if not already allocated */
if (neededBufSize > bufSize || !cBuf || !rBuf) {
@ -79,11 +82,13 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
size_t const result =
roundTripTest(rBuf, neededBufSize, cBuf, neededBufSize, src, size);
roundTripTest(rBuf, neededBufSize, cBuf, neededBufSize, src, size,
cLevel);
FUZZ_ZASSERT(result);
FUZZ_ASSERT_MSG(result == size, "Incorrect regenerated size");
FUZZ_ASSERT_MSG(!memcmp(src, rBuf, size), "Corruption!");
}
FUZZ_dataProducer_free(producer);
#ifndef STATEFUL_FUZZING
ZSTD_freeCCtx(cctx); cctx = NULL;
ZSTD_freeDCtx(dctx); dctx = NULL;

View File

@ -18,33 +18,37 @@
#include <stdio.h>
#include "fuzz_helpers.h"
#include "zstd_helpers.h"
#include "fuzz_data_producer.h"
static ZSTD_DCtx *dctx = NULL;
int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
uint32_t seed = FUZZ_seed(&src, &size);
/* Give a random portion of src data to the producer, to use for
parameter generation. The rest will be used for (de)compression */
FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
size = FUZZ_dataProducer_reserveDataPrefix(producer);
FUZZ_dict_t dict;
ZSTD_DDict* ddict = NULL;
int i;
if (!dctx) {
dctx = ZSTD_createDCtx();
FUZZ_ASSERT(dctx);
}
dict = FUZZ_train(src, size, &seed);
if (FUZZ_rand32(&seed, 0, 1) == 0) {
dict = FUZZ_train(src, size, producer);
if (FUZZ_dataProducer_uint32Range(producer, 0, 1) == 0) {
ddict = ZSTD_createDDict(dict.buff, dict.size);
FUZZ_ASSERT(ddict);
} else {
FUZZ_ZASSERT(ZSTD_DCtx_loadDictionary_advanced(
dctx, dict.buff, dict.size,
(ZSTD_dictLoadMethod_e)FUZZ_rand32(&seed, 0, 1),
(ZSTD_dictContentType_e)FUZZ_rand32(&seed, 0, 2)));
(ZSTD_dictLoadMethod_e)FUZZ_dataProducer_uint32Range(producer, 0, 1),
(ZSTD_dictContentType_e)FUZZ_dataProducer_uint32Range(producer, 0, 2)));
}
/* Run it 10 times over 10 output sizes. Reuse the context and dict. */
for (i = 0; i < 10; ++i) {
size_t const bufSize = FUZZ_rand32(&seed, 0, 2 * size);
{
size_t const bufSize = FUZZ_dataProducer_uint32Range(producer, 0, 10 * size);
void* rBuf = malloc(bufSize);
FUZZ_ASSERT(rBuf);
if (ddict) {
@ -55,6 +59,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
free(rBuf);
}
free(dict.buff);
FUZZ_dataProducer_free(producer);
ZSTD_freeDDict(ddict);
#ifndef STATEFUL_FUZZING
ZSTD_freeDCtx(dctx); dctx = NULL;

View File

@ -0,0 +1,93 @@
/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
*/
/**
* This fuzz target makes sure that whenever a compression dictionary can be
* loaded, the data can be round tripped.
*/
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "fuzz_helpers.h"
#include "zstd_helpers.h"
#include "fuzz_data_producer.h"
/**
* Compresses the data and returns the compressed size or an error.
*/
static size_t compress(void* compressed, size_t compressedCapacity,
void const* source, size_t sourceSize,
void const* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType)
{
ZSTD_CCtx* cctx = ZSTD_createCCtx();
FUZZ_ZASSERT(ZSTD_CCtx_loadDictionary_advanced(
cctx, dict, dictSize, dictLoadMethod, dictContentType));
size_t const compressedSize = ZSTD_compress2(
cctx, compressed, compressedCapacity, source, sourceSize);
ZSTD_freeCCtx(cctx);
return compressedSize;
}
static size_t decompress(void* result, size_t resultCapacity,
void const* compressed, size_t compressedSize,
void const* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType)
{
ZSTD_DCtx* dctx = ZSTD_createDCtx();
FUZZ_ZASSERT(ZSTD_DCtx_loadDictionary_advanced(
dctx, dict, dictSize, dictLoadMethod, dictContentType));
size_t const resultSize = ZSTD_decompressDCtx(
dctx, result, resultCapacity, compressed, compressedSize);
FUZZ_ZASSERT(resultSize);
ZSTD_freeDCtx(dctx);
return resultSize;
}
int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
ZSTD_dictLoadMethod_e const dlm =
FUZZ_dataProducer_uint32Range(producer, 0, 1);
ZSTD_dictContentType_e const dct =
FUZZ_dataProducer_uint32Range(producer, 0, 2);
size = FUZZ_dataProducer_remainingBytes(producer);
DEBUGLOG(2, "Dict load method %d", dlm);
DEBUGLOG(2, "Dict content type %d", dct);
DEBUGLOG(2, "Dict size %u", (unsigned)size);
void* const rBuf = malloc(size);
FUZZ_ASSERT(rBuf);
size_t const cBufSize = ZSTD_compressBound(size);
void* const cBuf = malloc(cBufSize);
FUZZ_ASSERT(cBuf);
size_t const cSize =
compress(cBuf, cBufSize, src, size, src, size, dlm, dct);
/* compression failing is okay */
if (ZSTD_isError(cSize)) {
FUZZ_ASSERT_MSG(dct != ZSTD_dct_rawContent, "Raw must always succeed!");
goto out;
}
size_t const rSize =
decompress(rBuf, size, cBuf, cSize, src, size, dlm, dct);
FUZZ_ASSERT_MSG(rSize == size, "Incorrect regenerated size");
FUZZ_ASSERT_MSG(!memcmp(src, rBuf, size), "Corruption!");
out:
free(cBuf);
free(rBuf);
FUZZ_dataProducer_free(producer);
return 0;
}

View File

@ -19,22 +19,21 @@
#include <string.h>
#include "fuzz_helpers.h"
#include "zstd_helpers.h"
static const int kMaxClevel = 19;
#include "fuzz_data_producer.h"
static ZSTD_CCtx *cctx = NULL;
static ZSTD_DCtx *dctx = NULL;
static uint32_t seed;
static size_t roundTripTest(void *result, size_t resultCapacity,
void *compressed, size_t compressedCapacity,
const void *src, size_t srcSize)
const void *src, size_t srcSize,
FUZZ_dataProducer_t *producer)
{
ZSTD_dictContentType_e dictContentType = ZSTD_dct_auto;
FUZZ_dict_t dict = FUZZ_train(src, srcSize, &seed);
FUZZ_dict_t dict = FUZZ_train(src, srcSize, producer);
size_t cSize;
if ((FUZZ_rand(&seed) & 15) == 0) {
int const cLevel = FUZZ_rand(&seed) % kMaxClevel;
if (FUZZ_dataProducer_uint32Range(producer, 0, 15) == 0) {
int const cLevel = FUZZ_dataProducer_int32Range(producer, kMinClevel, kMaxClevel);
cSize = ZSTD_compress_usingDict(cctx,
compressed, compressedCapacity,
@ -42,20 +41,20 @@ static size_t roundTripTest(void *result, size_t resultCapacity,
dict.buff, dict.size,
cLevel);
} else {
dictContentType = FUZZ_rand32(&seed, 0, 2);
FUZZ_setRandomParameters(cctx, srcSize, &seed);
dictContentType = FUZZ_dataProducer_uint32Range(producer, 0, 2);
FUZZ_setRandomParameters(cctx, srcSize, producer);
/* Disable checksum so we can use sizes smaller than compress bound. */
FUZZ_ZASSERT(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 0));
FUZZ_ZASSERT(ZSTD_CCtx_loadDictionary_advanced(
cctx, dict.buff, dict.size,
(ZSTD_dictLoadMethod_e)FUZZ_rand32(&seed, 0, 1),
(ZSTD_dictLoadMethod_e)FUZZ_dataProducer_uint32Range(producer, 0, 1),
dictContentType));
cSize = ZSTD_compress2(cctx, compressed, compressedCapacity, src, srcSize);
}
FUZZ_ZASSERT(cSize);
FUZZ_ZASSERT(ZSTD_DCtx_loadDictionary_advanced(
dctx, dict.buff, dict.size,
(ZSTD_dictLoadMethod_e)FUZZ_rand32(&seed, 0, 1),
(ZSTD_dictLoadMethod_e)FUZZ_dataProducer_uint32Range(producer, 0, 1),
dictContentType));
{
size_t const ret = ZSTD_decompressDCtx(
@ -67,17 +66,20 @@ static size_t roundTripTest(void *result, size_t resultCapacity,
int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
/* Give a random portion of src data to the producer, to use for
parameter generation. The rest will be used for (de)compression */
FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
size = FUZZ_dataProducer_reserveDataPrefix(producer);
size_t const rBufSize = size;
void* rBuf = malloc(rBufSize);
size_t cBufSize = ZSTD_compressBound(size);
void* cBuf;
seed = FUZZ_seed(&src, &size);
void *cBuf;
/* Half of the time fuzz with a 1 byte smaller output size.
* This will still succeed because we force the checksum to be disabled,
* giving us 4 bytes of overhead.
*/
cBufSize -= FUZZ_rand32(&seed, 0, 1);
cBufSize -= FUZZ_dataProducer_uint32Range(producer, 0, 1);
cBuf = malloc(cBufSize);
if (!cctx) {
@ -91,13 +93,14 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
size_t const result =
roundTripTest(rBuf, rBufSize, cBuf, cBufSize, src, size);
roundTripTest(rBuf, rBufSize, cBuf, cBufSize, src, size, producer);
FUZZ_ZASSERT(result);
FUZZ_ASSERT_MSG(result == size, "Incorrect regenerated size");
FUZZ_ASSERT_MSG(!memcmp(src, rBuf, size), "Corruption!");
}
free(rBuf);
free(cBuf);
FUZZ_dataProducer_free(producer);
#ifndef STATEFUL_FUZZING
ZSTD_freeCCtx(cctx); cctx = NULL;
ZSTD_freeDCtx(dctx); dctx = NULL;

View File

@ -17,12 +17,6 @@
* test code paths which are only executed when contexts are reused.
* WARNING: Makes reproducing crashes much harder.
* Default: Not defined.
* @param FUZZ_RNG_SEED_SIZE:
* The number of bytes of the source to look at when constructing a seed
* for the deterministic RNG. These bytes are discarded before passing
* the data to zstd functions. Every fuzzer initializes the RNG exactly
* once before doing anything else, even if it is unused.
* Default: 4.
* @param DEBUGLEVEL:
* This is a parameter for the zstd library. Defining `DEBUGLEVEL=1`
* enables assert() statements in the zstd library. Higher levels enable
@ -42,10 +36,6 @@
#ifndef FUZZ_H
#define FUZZ_H
#ifndef FUZZ_RNG_SEED_SIZE
# define FUZZ_RNG_SEED_SIZE 4
#endif
#include <stddef.h>
#include <stdint.h>

View File

@ -24,21 +24,40 @@ def abs_join(a, *p):
return os.path.abspath(os.path.join(a, *p))
class InputType(object):
RAW_DATA = 1
COMPRESSED_DATA = 2
DICTIONARY_DATA = 3
class FrameType(object):
ZSTD = 1
BLOCK = 2
class TargetInfo(object):
def __init__(self, input_type, frame_type=FrameType.ZSTD):
self.input_type = input_type
self.frame_type = frame_type
# Constants
FUZZ_DIR = os.path.abspath(os.path.dirname(__file__))
CORPORA_DIR = abs_join(FUZZ_DIR, 'corpora')
TARGETS = [
'simple_round_trip',
'stream_round_trip',
'block_round_trip',
'simple_decompress',
'stream_decompress',
'block_decompress',
'dictionary_round_trip',
'dictionary_decompress',
'zstd_frame_info',
'simple_compress',
]
TARGET_INFO = {
'simple_round_trip': TargetInfo(InputType.RAW_DATA),
'stream_round_trip': TargetInfo(InputType.RAW_DATA),
'block_round_trip': TargetInfo(InputType.RAW_DATA, FrameType.BLOCK),
'simple_decompress': TargetInfo(InputType.COMPRESSED_DATA),
'stream_decompress': TargetInfo(InputType.COMPRESSED_DATA),
'block_decompress': TargetInfo(InputType.COMPRESSED_DATA, FrameType.BLOCK),
'dictionary_round_trip': TargetInfo(InputType.RAW_DATA),
'dictionary_decompress': TargetInfo(InputType.COMPRESSED_DATA),
'zstd_frame_info': TargetInfo(InputType.COMPRESSED_DATA),
'simple_compress': TargetInfo(InputType.RAW_DATA),
'dictionary_loader': TargetInfo(InputType.DICTIONARY_DATA),
}
TARGETS = list(TARGET_INFO.keys())
ALL_TARGETS = TARGETS + ['all']
FUZZ_RNG_SEED_SIZE = 4
@ -56,6 +75,7 @@ def abs_join(a, *p):
AFL_FUZZ = os.environ.get('AFL_FUZZ', 'afl-fuzz')
DECODECORPUS = os.environ.get('DECODECORPUS',
abs_join(FUZZ_DIR, '..', 'decodecorpus'))
ZSTD = os.environ.get('ZSTD', abs_join(FUZZ_DIR, '..', '..', 'zstd'))
# Sanitizer environment variables
MSAN_EXTRA_CPPFLAGS = os.environ.get('MSAN_EXTRA_CPPFLAGS', '')
@ -67,7 +87,7 @@ def abs_join(a, *p):
def create(r):
d = os.path.abspath(r)
if not os.path.isdir(d):
os.mkdir(d)
os.makedirs(d)
return d
@ -158,7 +178,7 @@ def compiler_version(cc, cxx):
assert(b'clang' in cxx_version_bytes)
compiler = 'clang'
elif b'gcc' in cc_version_bytes:
assert(b'gcc' in cxx_version_bytes)
assert(b'gcc' in cxx_version_bytes or b'g++' in cxx_version_bytes)
compiler = 'gcc'
if compiler is not None:
version_regex = b'([0-9])+\.([0-9])+\.([0-9])+'
@ -643,7 +663,7 @@ def gen_parser(args):
parser.add_argument(
'--max-size-log',
type=int,
default=13,
default=18,
help='Maximum sample size to generate')
parser.add_argument(
'--seed',
@ -656,6 +676,11 @@ def gen_parser(args):
default=DECODECORPUS,
help="decodecorpus binary (default: $DECODECORPUS='{}')".format(
DECODECORPUS))
parser.add_argument(
'--zstd',
type=str,
default=ZSTD,
help="zstd binary (default: $ZSTD='{}')".format(ZSTD))
parser.add_argument(
'--fuzz-rng-seed-size',
type=int,
@ -690,46 +715,66 @@ def gen(args):
return 1
seed = create(args.seed)
with tmpdir() as compressed:
with tmpdir() as decompressed:
cmd = [
args.decodecorpus,
'-n{}'.format(args.number),
'-p{}/'.format(compressed),
'-o{}'.format(decompressed),
with tmpdir() as compressed, tmpdir() as decompressed, tmpdir() as dict:
info = TARGET_INFO[args.TARGET]
if info.input_type == InputType.DICTIONARY_DATA:
number = max(args.number, 1000)
else:
number = args.number
cmd = [
args.decodecorpus,
'-n{}'.format(number),
'-p{}/'.format(compressed),
'-o{}'.format(decompressed),
]
if info.frame_type == FrameType.BLOCK:
cmd += [
'--gen-blocks',
'--max-block-size-log={}'.format(min(args.max_size_log, 17))
]
else:
cmd += ['--max-content-size-log={}'.format(args.max_size_log)]
if 'block_' in args.TARGET:
cmd += [
'--gen-blocks',
'--max-block-size-log={}'.format(args.max_size_log)
print(' '.join(cmd))
subprocess.check_call(cmd)
if info.input_type == InputType.RAW_DATA:
print('using decompressed data in {}'.format(decompressed))
samples = decompressed
elif info.input_type == InputType.COMPRESSED_DATA:
print('using compressed data in {}'.format(compressed))
samples = compressed
else:
assert info.input_type == InputType.DICTIONARY_DATA
print('making dictionary data from {}'.format(decompressed))
samples = dict
min_dict_size_log = 9
max_dict_size_log = max(min_dict_size_log + 1, args.max_size_log)
for dict_size_log in range(min_dict_size_log, max_dict_size_log):
dict_size = 1 << dict_size_log
cmd = [
args.zstd,
'--train',
'-r', decompressed,
'--maxdict={}'.format(dict_size),
'-o', abs_join(dict, '{}.zstd-dict'.format(dict_size))
]
else:
cmd += ['--max-content-size-log={}'.format(args.max_size_log)]
print(' '.join(cmd))
subprocess.check_call(cmd)
print(' '.join(cmd))
subprocess.check_call(cmd)
if '_round_trip' in args.TARGET:
print('using decompressed data in {}'.format(decompressed))
samples = decompressed
elif '_decompress' in args.TARGET:
print('using compressed data in {}'.format(compressed))
samples = compressed
# Copy the samples over and prepend the RNG seeds
for name in os.listdir(samples):
samplename = abs_join(samples, name)
outname = abs_join(seed, name)
rng_seed = os.urandom(args.fuzz_rng_seed_size)
with open(samplename, 'rb') as sample:
with open(outname, 'wb') as out:
out.write(rng_seed)
CHUNK_SIZE = 131072
# Copy the samples over and prepend the RNG seeds
for name in os.listdir(samples):
samplename = abs_join(samples, name)
outname = abs_join(seed, name)
with open(samplename, 'rb') as sample:
with open(outname, 'wb') as out:
CHUNK_SIZE = 131072
chunk = sample.read(CHUNK_SIZE)
while len(chunk) > 0:
out.write(chunk)
chunk = sample.read(CHUNK_SIZE)
while len(chunk) > 0:
out.write(chunk)
chunk = sample.read(CHUNK_SIZE)
return 0

View File

@ -0,0 +1,85 @@
/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
*/
#include "fuzz_data_producer.h"
struct FUZZ_dataProducer_s{
const uint8_t *data;
size_t size;
};
FUZZ_dataProducer_t *FUZZ_dataProducer_create(const uint8_t *data, size_t size) {
FUZZ_dataProducer_t *producer = malloc(sizeof(FUZZ_dataProducer_t));
FUZZ_ASSERT(producer != NULL);
producer->data = data;
producer->size = size;
return producer;
}
void FUZZ_dataProducer_free(FUZZ_dataProducer_t *producer) { free(producer); }
uint32_t FUZZ_dataProducer_uint32Range(FUZZ_dataProducer_t *producer, uint32_t min,
uint32_t max) {
FUZZ_ASSERT(min <= max);
uint32_t range = max - min;
uint32_t rolling = range;
uint32_t result = 0;
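/* Pull bytes one at a time from the tail of the input until enough
 * bits have been accumulated to cover the requested range; if the
 * input runs dry first, the value collapses toward min. */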
while (rolling > 0 && producer->size > 0) {
uint8_t next = *(producer->data + producer->size - 1);
producer->size -= 1;
result = (result << 8) | next;
rolling >>= 8;
}
if (range == 0xffffffff) {
return result;
}
return min + result % (range + 1);
}
uint32_t FUZZ_dataProducer_uint32(FUZZ_dataProducer_t *producer) {
return FUZZ_dataProducer_uint32Range(producer, 0, 0xffffffff);
}
int32_t FUZZ_dataProducer_int32Range(FUZZ_dataProducer_t *producer,
int32_t min, int32_t max)
{
FUZZ_ASSERT(min <= max);
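/* Note: assumes max - min does not overflow int32_t; the fuzzers only
 * request modest ranges, e.g. compression levels. */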
if (min < 0)
return (int)FUZZ_dataProducer_uint32Range(producer, 0, max - min) + min;
return FUZZ_dataProducer_uint32Range(producer, min, max);
}
size_t FUZZ_dataProducer_remainingBytes(FUZZ_dataProducer_t *producer){
return producer->size;
}
size_t FUZZ_dataProducer_contract(FUZZ_dataProducer_t *producer, size_t newSize)
{
newSize = newSize > producer->size ? producer->size : newSize;
size_t remaining = producer->size - newSize;
producer->data = producer->data + remaining;
producer->size = newSize;
return remaining;
}
size_t FUZZ_dataProducer_reserveDataPrefix(FUZZ_dataProducer_t *producer)
{
size_t producerSliceSize = FUZZ_dataProducer_uint32Range(
producer, 0, producer->size);
return FUZZ_dataProducer_contract(producer, producerSliceSize);
}

View File

@ -0,0 +1,60 @@
/*
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
*/
/**
* Helper APIs for generating random data from an input data stream.
* The producer reads bytes from the end of the input and appends them together
* to generate a random number in the requested range. If it runs out of input
* data, it will keep returning the same value (min) over and over again.
*/
#ifndef FUZZ_DATA_PRODUCER_H
#define FUZZ_DATA_PRODUCER_H
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "fuzz_helpers.h"
/* Struct used for maintaining the state of the data */
typedef struct FUZZ_dataProducer_s FUZZ_dataProducer_t;
/* Returns a data producer state struct. Use for producer initialization. */
FUZZ_dataProducer_t *FUZZ_dataProducer_create(const uint8_t *data, size_t size);
/* Frees the data producer */
void FUZZ_dataProducer_free(FUZZ_dataProducer_t *producer);
/* Returns value between [min, max] */
uint32_t FUZZ_dataProducer_uint32Range(FUZZ_dataProducer_t *producer, uint32_t min,
uint32_t max);
/* Returns a uint32 value */
uint32_t FUZZ_dataProducer_uint32(FUZZ_dataProducer_t *producer);
/* Returns a signed value between [min, max] */
int32_t FUZZ_dataProducer_int32Range(FUZZ_dataProducer_t *producer,
int32_t min, int32_t max);
/* Returns the size of the remaining bytes of data in the producer */
size_t FUZZ_dataProducer_remainingBytes(FUZZ_dataProducer_t *producer);
/* Restricts the producer to only the last newSize bytes of data.
If newSize > current data size, nothing happens. Returns the number of bytes
the producer won't use anymore, after contracting. */
size_t FUZZ_dataProducer_contract(FUZZ_dataProducer_t *producer, size_t newSize);
/* Restricts the producer to use only the last X bytes of data, where X is
a random number in the interval [0, data_size]. Returns the size of the
remaining data the producer won't use anymore (the prefix). */
size_t FUZZ_dataProducer_reserveDataPrefix(FUZZ_dataProducer_t *producer);
#endif // FUZZ_DATA_PRODUCER_H
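To make the intended call pattern concrete, here is a minimal sketch of a fuzz target built on this producer (the target body is hypothetical; the producer calls mirror the real fuzzers in this patch):
```
#include "fuzz_data_producer.h"

int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
    /* Reserve a random-length suffix of src for parameter generation;
     * size shrinks to the data prefix handed to the API under test. */
    FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
    size = FUZZ_dataProducer_reserveDataPrefix(producer);

    /* Deterministic "random" parameters, derived from the reserved suffix. */
    int32_t const cLevel = FUZZ_dataProducer_int32Range(producer, -5, 19);
    uint32_t const flag  = FUZZ_dataProducer_uint32Range(producer, 0, 1);
    (void)cLevel; (void)flag; (void)src;

    /* ... exercise the API under test on (src, size) here ... */

    FUZZ_dataProducer_free(producer);
    return 0;
}
```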

View File

@ -14,6 +14,7 @@
#ifndef FUZZ_HELPERS_H
#define FUZZ_HELPERS_H
#include "debug.h"
#include "fuzz.h"
#include "xxhash.h"
#include "zstd.h"
@ -54,37 +55,6 @@ extern "C" {
#define FUZZ_STATIC static
#endif
/**
* Deterministically constructs a seed based on the fuzz input.
* Consumes up to the first FUZZ_RNG_SEED_SIZE bytes of the input.
*/
FUZZ_STATIC uint32_t FUZZ_seed(uint8_t const **src, size_t* size) {
uint8_t const *data = *src;
size_t const toHash = MIN(FUZZ_RNG_SEED_SIZE, *size);
*size -= toHash;
*src += toHash;
return XXH32(data, toHash, 0);
}
#define FUZZ_rotl32(x, r) (((x) << (r)) | ((x) >> (32 - (r))))
FUZZ_STATIC uint32_t FUZZ_rand(uint32_t *state) {
static const uint32_t prime1 = 2654435761U;
static const uint32_t prime2 = 2246822519U;
uint32_t rand32 = *state;
rand32 *= prime1;
rand32 += prime2;
rand32 = FUZZ_rotl32(rand32, 13);
*state = rand32;
return rand32 >> 5;
}
/* Returns a random number in the range [min, max]. */
FUZZ_STATIC uint32_t FUZZ_rand32(uint32_t *state, uint32_t min, uint32_t max) {
uint32_t random = FUZZ_rand(state);
return min + (random % (max - min + 1));
}
#ifdef __cplusplus
}
#endif

View File

@ -36,6 +36,7 @@ int main(int argc, char const **argv) {
fprintf(stderr, "WARNING: No files passed to %s\n", argv[0]);
for (i = 0; i < numFiles; ++i) {
char const *fileName = files[i];
DEBUGLOG(3, "Running %s", fileName);
size_t const fileSize = UTIL_getFileSize(fileName);
size_t readSize;
FILE *file;

View File

@ -18,28 +18,33 @@
#include <stdio.h>
#include "fuzz_helpers.h"
#include "zstd.h"
#include "zstd_helpers.h"
#include "fuzz_data_producer.h"
static ZSTD_CCtx *cctx = NULL;
int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
uint32_t seed = FUZZ_seed(&src, &size);
/* Give a random portion of src data to the producer, to use for
parameter generation. The rest will be used for (de)compression */
FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
size = FUZZ_dataProducer_reserveDataPrefix(producer);
size_t const maxSize = ZSTD_compressBound(size);
int i;
size_t const bufSize = FUZZ_dataProducer_uint32Range(producer, 0, maxSize);
int const cLevel = FUZZ_dataProducer_int32Range(producer, kMinClevel, kMaxClevel);
if (!cctx) {
cctx = ZSTD_createCCtx();
FUZZ_ASSERT(cctx);
}
/* Run it 10 times over 10 output sizes. Reuse the context. */
for (i = 0; i < 10; ++i) {
int const level = (int)FUZZ_rand32(&seed, 0, 19 + 3) - 3; /* [-3, 19] */
size_t const bufSize = FUZZ_rand32(&seed, 0, maxSize);
void* rBuf = malloc(bufSize);
FUZZ_ASSERT(rBuf);
ZSTD_compressCCtx(cctx, rBuf, bufSize, src, size, level);
free(rBuf);
}
void *rBuf = malloc(bufSize);
FUZZ_ASSERT(rBuf);
ZSTD_compressCCtx(cctx, rBuf, bufSize, src, size, cLevel);
free(rBuf);
FUZZ_dataProducer_free(producer);
#ifndef STATEFUL_FUZZING
ZSTD_freeCCtx(cctx); cctx = NULL;
#endif

View File

@ -17,26 +17,30 @@
#include <stdio.h>
#include "fuzz_helpers.h"
#include "zstd.h"
#include "fuzz_data_producer.h"
static ZSTD_DCtx *dctx = NULL;
int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
/* Give a random portion of src data to the producer, to use for
parameter generation. The rest will be used for (de)compression */
FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
size = FUZZ_dataProducer_reserveDataPrefix(producer);
uint32_t seed = FUZZ_seed(&src, &size);
int i;
if (!dctx) {
dctx = ZSTD_createDCtx();
FUZZ_ASSERT(dctx);
}
/* Run it 10 times over 10 output sizes. Reuse the context. */
for (i = 0; i < 10; ++i) {
size_t const bufSize = FUZZ_rand32(&seed, 0, 2 * size);
void* rBuf = malloc(bufSize);
FUZZ_ASSERT(rBuf);
ZSTD_decompressDCtx(dctx, rBuf, bufSize, src, size);
free(rBuf);
}
size_t const bufSize = FUZZ_dataProducer_uint32Range(producer, 0, 10 * size);
void *rBuf = malloc(bufSize);
FUZZ_ASSERT(rBuf);
ZSTD_decompressDCtx(dctx, rBuf, bufSize, src, size);
free(rBuf);
FUZZ_dataProducer_free(producer);
#ifndef STATEFUL_FUZZING
ZSTD_freeDCtx(dctx); dctx = NULL;

View File

@ -20,23 +20,23 @@
#include <string.h>
#include "fuzz_helpers.h"
#include "zstd_helpers.h"
static const int kMaxClevel = 19;
#include "fuzz_data_producer.h"
static ZSTD_CCtx *cctx = NULL;
static ZSTD_DCtx *dctx = NULL;
static uint32_t seed;
static size_t roundTripTest(void *result, size_t resultCapacity,
void *compressed, size_t compressedCapacity,
const void *src, size_t srcSize)
const void *src, size_t srcSize,
FUZZ_dataProducer_t *producer)
{
size_t cSize;
if (FUZZ_rand(&seed) & 1) {
FUZZ_setRandomParameters(cctx, srcSize, &seed);
if (FUZZ_dataProducer_uint32Range(producer, 0, 1)) {
FUZZ_setRandomParameters(cctx, srcSize, producer);
cSize = ZSTD_compress2(cctx, compressed, compressedCapacity, src, srcSize);
} else {
int const cLevel = FUZZ_rand(&seed) % kMaxClevel;
int const cLevel = FUZZ_dataProducer_int32Range(producer, kMinClevel, kMaxClevel);
cSize = ZSTD_compressCCtx(
cctx, compressed, compressedCapacity, src, srcSize, cLevel);
}
@ -51,12 +51,17 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
size_t cBufSize = ZSTD_compressBound(size);
void* cBuf;
seed = FUZZ_seed(&src, &size);
/* Give a random portion of src data to the producer, to use for
parameter generation. The rest will be used for (de)compression */
FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
size = FUZZ_dataProducer_reserveDataPrefix(producer);
/* Half of the time fuzz with a 1 byte smaller output size.
* This will still succeed because we don't use a dictionary, so the dictID
* field is empty, giving us 4 bytes of overhead.
*/
cBufSize -= FUZZ_rand32(&seed, 0, 1);
cBufSize -= FUZZ_dataProducer_uint32Range(producer, 0, 1);
cBuf = malloc(cBufSize);
FUZZ_ASSERT(cBuf && rBuf);
@ -72,13 +77,14 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
size_t const result =
roundTripTest(rBuf, rBufSize, cBuf, cBufSize, src, size);
roundTripTest(rBuf, rBufSize, cBuf, cBufSize, src, size, producer);
FUZZ_ZASSERT(result);
FUZZ_ASSERT_MSG(result == size, "Incorrect regenerated size");
FUZZ_ASSERT_MSG(!memcmp(src, rBuf, size), "Corruption!");
}
free(rBuf);
free(cBuf);
FUZZ_dataProducer_free(producer);
#ifndef STATEFUL_FUZZING
ZSTD_freeCCtx(cctx); cctx = NULL;
ZSTD_freeDCtx(dctx); dctx = NULL;

View File

@ -19,6 +19,7 @@
#include <stdio.h>
#include "fuzz_helpers.h"
#include "zstd.h"
#include "fuzz_data_producer.h"
static size_t const kBufSize = ZSTD_BLOCKSIZE_MAX;
@ -26,22 +27,23 @@ static ZSTD_DStream *dstream = NULL;
static void* buf = NULL;
uint32_t seed;
static ZSTD_outBuffer makeOutBuffer(void)
static ZSTD_outBuffer makeOutBuffer(FUZZ_dataProducer_t *producer)
{
ZSTD_outBuffer buffer = { buf, 0, 0 };
buffer.size = (FUZZ_rand(&seed) % kBufSize) + 1;
buffer.size = (FUZZ_dataProducer_uint32Range(producer, 1, kBufSize));
FUZZ_ASSERT(buffer.size <= kBufSize);
return buffer;
}
static ZSTD_inBuffer makeInBuffer(const uint8_t **src, size_t *size)
static ZSTD_inBuffer makeInBuffer(const uint8_t **src, size_t *size,
FUZZ_dataProducer_t *producer)
{
ZSTD_inBuffer buffer = { *src, 0, 0 };
FUZZ_ASSERT(*size > 0);
buffer.size = (FUZZ_rand(&seed) % *size) + 1;
buffer.size = (FUZZ_dataProducer_uint32Range(producer, 1, *size));
FUZZ_ASSERT(buffer.size <= *size);
*src += buffer.size;
*size -= buffer.size;
@ -51,13 +53,16 @@ static ZSTD_inBuffer makeInBuffer(const uint8_t **src, size_t *size)
int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
seed = FUZZ_seed(&src, &size);
/* Give a random portion of src data to the producer, to use for
parameter generation. The rest will be used for (de)compression */
FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
size = FUZZ_dataProducer_reserveDataPrefix(producer);
/* Allocate all buffers and contexts if not already allocated */
if (!buf) {
buf = malloc(kBufSize);
FUZZ_ASSERT(buf);
}
FUZZ_ASSERT(buf);
}
if (!dstream) {
dstream = ZSTD_createDStream();
@ -67,9 +72,9 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
}
while (size > 0) {
ZSTD_inBuffer in = makeInBuffer(&src, &size);
ZSTD_inBuffer in = makeInBuffer(&src, &size, producer);
while (in.pos != in.size) {
ZSTD_outBuffer out = makeOutBuffer();
ZSTD_outBuffer out = makeOutBuffer(producer);
size_t const rc = ZSTD_decompressStream(dstream, &out, &in);
if (ZSTD_isError(rc)) goto error;
}
@ -79,5 +84,6 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
#ifndef STATEFUL_FUZZING
ZSTD_freeDStream(dstream); dstream = NULL;
#endif
FUZZ_dataProducer_free(producer);
return 0;
}
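Stripped of the producer-driven buffer slicing, the loop above is the standard ZSTD streaming decompression pattern. A self-contained sketch using only stable zstd API:

    #include "zstd.h"

    /* Decompress one frame from (src, srcSize), discarding the output.
     * Returns 0 on success, 1 on malformed input. */
    static int drainFrame(const void* src, size_t srcSize)
    {
        ZSTD_DStream* const ds = ZSTD_createDStream();
        char outBuf[4096];
        ZSTD_inBuffer in = { src, srcSize, 0 };
        int err = 0;
        if (ds == NULL) return 1;
        while (in.pos < in.size) {
            ZSTD_outBuffer out = { outBuf, sizeof(outBuf), 0 };
            size_t const rc = ZSTD_decompressStream(ds, &out, &in);
            if (ZSTD_isError(rc)) { err = 1; break; }
            if (rc == 0) break;          /* frame fully decoded */
        }
        ZSTD_freeDStream(ds);
        return err;
    }

The fuzzer's makeInBuffer()/makeOutBuffer() helpers merely randomize how the same data is sliced across calls, which is exactly what a streaming decoder must tolerate.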

View File

@ -20,31 +20,33 @@
#include <string.h>
#include "fuzz_helpers.h"
#include "zstd_helpers.h"
#include "fuzz_data_producer.h"
ZSTD_CCtx *cctx = NULL;
static ZSTD_DCtx *dctx = NULL;
static uint8_t* cBuf = NULL;
static uint8_t* rBuf = NULL;
static size_t bufSize = 0;
static uint32_t seed;
static ZSTD_outBuffer makeOutBuffer(uint8_t *dst, size_t capacity)
static ZSTD_outBuffer makeOutBuffer(uint8_t *dst, size_t capacity,
FUZZ_dataProducer_t *producer)
{
ZSTD_outBuffer buffer = { dst, 0, 0 };
FUZZ_ASSERT(capacity > 0);
buffer.size = (FUZZ_rand(&seed) % capacity) + 1;
buffer.size = (FUZZ_dataProducer_uint32Range(producer, 1, capacity));
FUZZ_ASSERT(buffer.size <= capacity);
return buffer;
}
static ZSTD_inBuffer makeInBuffer(const uint8_t **src, size_t *size)
static ZSTD_inBuffer makeInBuffer(const uint8_t **src, size_t *size,
FUZZ_dataProducer_t *producer)
{
ZSTD_inBuffer buffer = { *src, 0, 0 };
FUZZ_ASSERT(*size > 0);
buffer.size = (FUZZ_rand(&seed) % *size) + 1;
buffer.size = (FUZZ_dataProducer_uint32Range(producer, 1, *size));
FUZZ_ASSERT(buffer.size <= *size);
*src += buffer.size;
*size -= buffer.size;
@ -53,23 +55,24 @@ static ZSTD_inBuffer makeInBuffer(const uint8_t **src, size_t *size)
}
static size_t compress(uint8_t *dst, size_t capacity,
const uint8_t *src, size_t srcSize)
const uint8_t *src, size_t srcSize,
FUZZ_dataProducer_t *producer)
{
size_t dstSize = 0;
ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
FUZZ_setRandomParameters(cctx, srcSize, &seed);
FUZZ_setRandomParameters(cctx, srcSize, producer);
while (srcSize > 0) {
ZSTD_inBuffer in = makeInBuffer(&src, &srcSize);
ZSTD_inBuffer in = makeInBuffer(&src, &srcSize, producer);
/* Mode controls the action. If mode == -1 we pick a new mode */
int mode = -1;
while (in.pos < in.size || mode != -1) {
ZSTD_outBuffer out = makeOutBuffer(dst, capacity);
ZSTD_outBuffer out = makeOutBuffer(dst, capacity, producer);
/* Previous action finished, pick a new mode. */
if (mode == -1) mode = FUZZ_rand(&seed) % 10;
if (mode == -1) mode = FUZZ_dataProducer_uint32Range(producer, 0, 9);
switch (mode) {
case 0: /* fall-though */
case 1: /* fall-though */
case 0: /* fall-through */
case 1: /* fall-through */
case 2: {
size_t const ret =
ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_flush);
@ -85,9 +88,9 @@ static size_t compress(uint8_t *dst, size_t capacity,
/* Reset the compressor when the frame is finished */
if (ret == 0) {
ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
if ((FUZZ_rand(&seed) & 7) == 0) {
if (FUZZ_dataProducer_uint32Range(producer, 0, 7) == 0) {
size_t const remaining = in.size - in.pos;
FUZZ_setRandomParameters(cctx, remaining, &seed);
FUZZ_setRandomParameters(cctx, remaining, producer);
}
mode = -1;
}
@ -107,7 +110,7 @@ static size_t compress(uint8_t *dst, size_t capacity,
}
for (;;) {
ZSTD_inBuffer in = {NULL, 0, 0};
ZSTD_outBuffer out = makeOutBuffer(dst, capacity);
ZSTD_outBuffer out = makeOutBuffer(dst, capacity, producer);
size_t const ret = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
FUZZ_ZASSERT(ret);
@ -124,8 +127,12 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
size_t neededBufSize;
seed = FUZZ_seed(&src, &size);
neededBufSize = ZSTD_compressBound(size) * 2;
/* Give a random portion of src data to the producer, to use for
parameter generation. The rest will be used for (de)compression */
FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
size = FUZZ_dataProducer_reserveDataPrefix(producer);
neededBufSize = ZSTD_compressBound(size) * 15;
/* Allocate all buffers and contexts if not already allocated */
if (neededBufSize > bufSize) {
@ -146,7 +153,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
}
{
size_t const cSize = compress(cBuf, neededBufSize, src, size);
size_t const cSize = compress(cBuf, neededBufSize, src, size, producer);
size_t const rSize =
ZSTD_decompressDCtx(dctx, rBuf, neededBufSize, cBuf, cSize);
FUZZ_ZASSERT(rSize);
@ -154,6 +161,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
FUZZ_ASSERT_MSG(!memcmp(src, rBuf, size), "Corruption!");
}
FUZZ_dataProducer_free(producer);
#ifndef STATEFUL_FUZZING
ZSTD_freeCCtx(cctx); cctx = NULL;
ZSTD_freeDCtx(dctx); dctx = NULL;
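The mode-driven state machine above exercises ZSTD_e_continue, ZSTD_e_flush and ZSTD_e_end in arbitrary interleavings. Reduced to the common case, the same API collapses to a short loop; this sketch assumes dstCapacity is at least ZSTD_compressBound(srcSize), so the drain loop always makes progress and terminates:

    /* Compress (src, srcSize) into dst in one frame; returns the compressed
     * size, or an error code (caller tests with ZSTD_isError()). */
    static size_t compressAll(ZSTD_CCtx* cctx,
                              void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize)
    {
        ZSTD_inBuffer in = { src, srcSize, 0 };
        ZSTD_outBuffer out = { dst, dstCapacity, 0 };
        size_t remaining;
        do {
            remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
            if (ZSTD_isError(remaining)) return remaining;
        } while (remaining != 0);   /* 0 means the frame is complete */
        return out.pos;
    }

The `* 15` output-buffer sizing above exists precisely because the fuzzer, unlike this sketch, flushes at random points, and repeated flushes can cost well more than ZSTD_compressBound() of a single-shot frame.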

View File

@ -21,10 +21,6 @@
int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
ZSTD_frameHeader zfh;
/* Consume the seed to be compatible with the corpora of other decompression
* fuzzers.
*/
FUZZ_seed(&src, &size);
/* You can fuzz any helper functions here that are fast, and take zstd
* compressed data as input. E.g. don't expect the input to be a dictionary,
* so don't fuzz ZSTD_getDictID_fromDict().

View File

@ -17,53 +17,56 @@
#include "zstd.h"
#include "zdict.h"
const int kMinClevel = -3;
const int kMaxClevel = 19;
static void set(ZSTD_CCtx *cctx, ZSTD_cParameter param, int value)
{
FUZZ_ZASSERT(ZSTD_CCtx_setParameter(cctx, param, value));
}
static void setRand(ZSTD_CCtx *cctx, ZSTD_cParameter param, unsigned min,
unsigned max, uint32_t *state) {
unsigned const value = FUZZ_rand32(state, min, max);
unsigned max, FUZZ_dataProducer_t *producer) {
unsigned const value = FUZZ_dataProducer_uint32Range(producer, min, max);
set(cctx, param, value);
}
ZSTD_compressionParameters FUZZ_randomCParams(size_t srcSize, uint32_t *state)
ZSTD_compressionParameters FUZZ_randomCParams(size_t srcSize, FUZZ_dataProducer_t *producer)
{
/* Select compression parameters */
ZSTD_compressionParameters cParams;
cParams.windowLog = FUZZ_rand32(state, ZSTD_WINDOWLOG_MIN, 15);
cParams.hashLog = FUZZ_rand32(state, ZSTD_HASHLOG_MIN, 15);
cParams.chainLog = FUZZ_rand32(state, ZSTD_CHAINLOG_MIN, 16);
cParams.searchLog = FUZZ_rand32(state, ZSTD_SEARCHLOG_MIN, 9);
cParams.minMatch = FUZZ_rand32(state, ZSTD_MINMATCH_MIN,
cParams.windowLog = FUZZ_dataProducer_uint32Range(producer, ZSTD_WINDOWLOG_MIN, 15);
cParams.hashLog = FUZZ_dataProducer_uint32Range(producer, ZSTD_HASHLOG_MIN, 15);
cParams.chainLog = FUZZ_dataProducer_uint32Range(producer, ZSTD_CHAINLOG_MIN, 16);
cParams.searchLog = FUZZ_dataProducer_uint32Range(producer, ZSTD_SEARCHLOG_MIN, 9);
cParams.minMatch = FUZZ_dataProducer_uint32Range(producer, ZSTD_MINMATCH_MIN,
ZSTD_MINMATCH_MAX);
cParams.targetLength = FUZZ_rand32(state, 0, 512);
cParams.strategy = FUZZ_rand32(state, ZSTD_STRATEGY_MIN, ZSTD_STRATEGY_MAX);
cParams.targetLength = FUZZ_dataProducer_uint32Range(producer, 0, 512);
cParams.strategy = FUZZ_dataProducer_uint32Range(producer, ZSTD_STRATEGY_MIN, ZSTD_STRATEGY_MAX);
return ZSTD_adjustCParams(cParams, srcSize, 0);
}
ZSTD_frameParameters FUZZ_randomFParams(uint32_t *state)
ZSTD_frameParameters FUZZ_randomFParams(FUZZ_dataProducer_t *producer)
{
/* Select frame parameters */
ZSTD_frameParameters fParams;
fParams.contentSizeFlag = FUZZ_rand32(state, 0, 1);
fParams.checksumFlag = FUZZ_rand32(state, 0, 1);
fParams.noDictIDFlag = FUZZ_rand32(state, 0, 1);
fParams.contentSizeFlag = FUZZ_dataProducer_uint32Range(producer, 0, 1);
fParams.checksumFlag = FUZZ_dataProducer_uint32Range(producer, 0, 1);
fParams.noDictIDFlag = FUZZ_dataProducer_uint32Range(producer, 0, 1);
return fParams;
}
ZSTD_parameters FUZZ_randomParams(size_t srcSize, uint32_t *state)
ZSTD_parameters FUZZ_randomParams(size_t srcSize, FUZZ_dataProducer_t *producer)
{
ZSTD_parameters params;
params.cParams = FUZZ_randomCParams(srcSize, state);
params.fParams = FUZZ_randomFParams(state);
params.cParams = FUZZ_randomCParams(srcSize, producer);
params.fParams = FUZZ_randomFParams(producer);
return params;
}
void FUZZ_setRandomParameters(ZSTD_CCtx *cctx, size_t srcSize, uint32_t *state)
void FUZZ_setRandomParameters(ZSTD_CCtx *cctx, size_t srcSize, FUZZ_dataProducer_t *producer)
{
ZSTD_compressionParameters cParams = FUZZ_randomCParams(srcSize, state);
ZSTD_compressionParameters cParams = FUZZ_randomCParams(srcSize, producer);
set(cctx, ZSTD_c_windowLog, cParams.windowLog);
set(cctx, ZSTD_c_hashLog, cParams.hashLog);
set(cctx, ZSTD_c_chainLog, cParams.chainLog);
@ -72,27 +75,30 @@ void FUZZ_setRandomParameters(ZSTD_CCtx *cctx, size_t srcSize, uint32_t *state)
set(cctx, ZSTD_c_targetLength, cParams.targetLength);
set(cctx, ZSTD_c_strategy, cParams.strategy);
/* Select frame parameters */
setRand(cctx, ZSTD_c_contentSizeFlag, 0, 1, state);
setRand(cctx, ZSTD_c_checksumFlag, 0, 1, state);
setRand(cctx, ZSTD_c_dictIDFlag, 0, 1, state);
setRand(cctx, ZSTD_c_contentSizeFlag, 0, 1, producer);
setRand(cctx, ZSTD_c_checksumFlag, 0, 1, producer);
setRand(cctx, ZSTD_c_dictIDFlag, 0, 1, producer);
/* Select long distance matching parameters */
setRand(cctx, ZSTD_c_enableLongDistanceMatching, 0, 1, state);
setRand(cctx, ZSTD_c_ldmHashLog, ZSTD_HASHLOG_MIN, 16, state);
setRand(cctx, ZSTD_c_enableLongDistanceMatching, 0, 1, producer);
setRand(cctx, ZSTD_c_ldmHashLog, ZSTD_HASHLOG_MIN, 16, producer);
setRand(cctx, ZSTD_c_ldmMinMatch, ZSTD_LDM_MINMATCH_MIN,
ZSTD_LDM_MINMATCH_MAX, state);
ZSTD_LDM_MINMATCH_MAX, producer);
setRand(cctx, ZSTD_c_ldmBucketSizeLog, 0, ZSTD_LDM_BUCKETSIZELOG_MAX,
state);
producer);
setRand(cctx, ZSTD_c_ldmHashRateLog, ZSTD_LDM_HASHRATELOG_MIN,
ZSTD_LDM_HASHRATELOG_MAX, state);
ZSTD_LDM_HASHRATELOG_MAX, producer);
/* Set misc parameters */
setRand(cctx, ZSTD_c_nbWorkers, 0, 2, state);
setRand(cctx, ZSTD_c_rsyncable, 0, 1, state);
setRand(cctx, ZSTD_c_forceMaxWindow, 0, 1, state);
setRand(cctx, ZSTD_c_literalCompressionMode, 0, 2, state);
setRand(cctx, ZSTD_c_forceAttachDict, 0, 2, state);
setRand(cctx, ZSTD_c_nbWorkers, 0, 2, producer);
setRand(cctx, ZSTD_c_rsyncable, 0, 1, producer);
setRand(cctx, ZSTD_c_forceMaxWindow, 0, 1, producer);
setRand(cctx, ZSTD_c_literalCompressionMode, 0, 2, producer);
setRand(cctx, ZSTD_c_forceAttachDict, 0, 2, producer);
if (FUZZ_dataProducer_uint32Range(producer, 0, 1) == 0) {
setRand(cctx, ZSTD_c_srcSizeHint, ZSTD_SRCSIZEHINT_MIN, 2 * srcSize, producer);
}
}
FUZZ_dict_t FUZZ_train(void const* src, size_t srcSize, uint32_t *state)
FUZZ_dict_t FUZZ_train(void const* src, size_t srcSize, FUZZ_dataProducer_t *producer)
{
size_t const dictSize = MAX(srcSize / 8, 1024);
size_t const totalSampleSize = dictSize * 11;
@ -107,7 +113,7 @@ FUZZ_dict_t FUZZ_train(void const* src, size_t srcSize, uint32_t *state)
for (sample = 0; sample < nbSamples; ++sample) {
size_t const remaining = totalSampleSize - pos;
size_t const offset = FUZZ_rand32(state, 0, MAX(srcSize, 1) - 1);
size_t const offset = FUZZ_dataProducer_uint32Range(producer, 0, MAX(srcSize, 1) - 1);
size_t const limit = MIN(srcSize - offset, remaining);
size_t const toCopy = MIN(limit, remaining / (nbSamples - sample));
memcpy(samples + pos, src + offset, toCopy);
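The fixed caps in FUZZ_randomCParams (windowLog &lt;= 15, chainLog &lt;= 16, and so on) bound fuzzing memory, and ZSTD_adjustCParams() then reconciles the combination with srcSize. Where hard-coded caps are undesirable, the library's advertised limits can be queried per parameter instead; a small helper along these lines (illustrative, not part of the fuzzer):

    /* Clamp a candidate value into the library's advertised range before
     * handing it to ZSTD_CCtx_setParameter(). */
    static int clampParam(ZSTD_cParameter param, int value)
    {
        ZSTD_bounds const b = ZSTD_cParam_getBounds(param);
        if (ZSTD_isError(b.error)) return value;   /* unknown param: pass through */
        if (value < b.lowerBound) return b.lowerBound;
        if (value > b.upperBound) return b.upperBound;
        return value;
    }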

View File

@ -17,17 +17,21 @@
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"
#include "fuzz_data_producer.h"
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
void FUZZ_setRandomParameters(ZSTD_CCtx *cctx, size_t srcSize, uint32_t *state);
extern const int kMinClevel;
extern const int kMaxClevel;
ZSTD_compressionParameters FUZZ_randomCParams(size_t srcSize, uint32_t *state);
ZSTD_frameParameters FUZZ_randomFParams(uint32_t *state);
ZSTD_parameters FUZZ_randomParams(size_t srcSize, uint32_t *state);
void FUZZ_setRandomParameters(ZSTD_CCtx *cctx, size_t srcSize, FUZZ_dataProducer_t *producer);
ZSTD_compressionParameters FUZZ_randomCParams(size_t srcSize, FUZZ_dataProducer_t *producer);
ZSTD_frameParameters FUZZ_randomFParams(FUZZ_dataProducer_t *producer);
ZSTD_parameters FUZZ_randomParams(size_t srcSize, FUZZ_dataProducer_t *producer);
typedef struct {
void* buff;
@ -38,8 +42,7 @@ typedef struct {
* NOTE: Don't use this to train production dictionaries, it is only optimized
* for speed, and doesn't care about dictionary quality.
*/
FUZZ_dict_t FUZZ_train(void const* src, size_t srcSize, uint32_t *state);
FUZZ_dict_t FUZZ_train(void const* src, size_t srcSize, FUZZ_dataProducer_t *producer);
#ifdef __cplusplus
}

View File

@ -123,9 +123,10 @@ static U32 FUZ_highbit32(U32 v32)
exit(1); \
} }
#define CHECK_V(var, fn) size_t const var = fn; if (ZSTD_isError(var)) goto _output_error
#define CHECK(fn) { CHECK_V(err, fn); }
#define CHECKPLUS(var, fn, more) { CHECK_V(var, fn); more; }
#define CHECK_VAR(var, fn) var = fn; if (ZSTD_isError(var)) { DISPLAYLEVEL(1, "%s : fails : %s \n", #fn, ZSTD_getErrorName(var)); goto _output_error; }
#define CHECK_NEWV(var, fn) size_t const CHECK_VAR(var, fn)
#define CHECK(fn) { CHECK_NEWV(err, fn); }
#define CHECKPLUS(var, fn, more) { CHECK_NEWV(var, fn); more; }
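The distinction between the two new macros: CHECK_VAR assigns into a variable that already exists, while CHECK_NEWV declares a fresh `size_t const`; both jump to _output_error on any zstd error code. Typical usage, mirroring the call sites updated throughout this file:

    size_t cSize;   /* reused across several tests */
    CHECK_VAR(cSize, ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize));
    {   CHECK_NEWV(r, ZSTD_decompress(out, outCapacity, dst, cSize));
        if (r != srcSize) goto _output_error;
    }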
#define CHECK_OP(op, lhs, rhs) { \
if (!((lhs) op (rhs))) { \
@ -231,7 +232,7 @@ static int FUZ_mallocTests_internal(unsigned seed, double compressibility, unsig
/* advanced MT API test */
if (part <= 3)
{ unsigned nbThreads;
{ int nbThreads;
for (nbThreads=1; nbThreads<=4; nbThreads++) {
int compressionLevel;
for (compressionLevel=1; compressionLevel<=6; compressionLevel++) {
@ -242,7 +243,7 @@ static int FUZ_mallocTests_internal(unsigned seed, double compressibility, unsig
CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, nbThreads) );
CHECK_Z( ZSTD_compress2(cctx, outBuffer, outSize, inBuffer, inSize) );
ZSTD_freeCCtx(cctx);
DISPLAYLEVEL(3, "compress_generic,-T%u,end level %i : ",
DISPLAYLEVEL(3, "compress_generic,-T%i,end level %i : ",
nbThreads, compressionLevel);
FUZ_displayMallocStats(malcount);
} } }
@ -303,11 +304,33 @@ static int FUZ_mallocTests(unsigned seed, double compressibility, unsigned part)
#endif
static void FUZ_decodeSequences(BYTE* dst, ZSTD_Sequence* seqs, size_t seqsSize, BYTE* src, size_t size)
{
size_t i;
size_t j;
for(i = 0; i < seqsSize - 1; ++i) {
assert(dst + seqs[i].litLength + seqs[i].matchLength < dst + size);
assert(src + seqs[i].litLength + seqs[i].matchLength < src + size);
memcpy(dst, src, seqs[i].litLength);
dst += seqs[i].litLength;
src += seqs[i].litLength;
size -= seqs[i].litLength;
for (j = 0; j < seqs[i].matchLength; ++j)
dst[j] = dst[j - seqs[i].offset];
dst += seqs[i].matchLength;
src += seqs[i].matchLength;
size -= seqs[i].matchLength;
}
memcpy(dst, src, size);
}
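Each ZSTD_Sequence is a (litLength, matchLength, offset) triple: the decoder first copies litLength literal bytes through unchanged, then copies matchLength bytes from `offset` positions back in the output it has already produced. The byte-by-byte inner loop is deliberate, since a match may overlap its own output. For example, the sequence {litLength=2, matchLength=3, offset=1} applied to source "ab" emits "ab" followed by "bbb", because each match byte copies the byte written immediately before it. The trailing memcpy flushes whatever literals remain after the last sequence.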
/*=============================================
* Unit tests
=============================================*/
static int basicUnitTests(U32 seed, double compressibility)
static int basicUnitTests(U32 const seed, double compressibility)
{
size_t const CNBuffSize = 5 MB;
void* const CNBuffer = malloc(CNBuffSize);
@ -345,10 +368,9 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(3, "test%3u : compress %u bytes : ", testNb++, (unsigned)CNBuffSize);
{ ZSTD_CCtx* const cctx = ZSTD_createCCtx();
if (cctx==NULL) goto _output_error;
CHECKPLUS(r, ZSTD_compressCCtx(cctx,
CHECK_VAR(cSize, ZSTD_compressCCtx(cctx,
compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize, 1),
cSize=r );
CNBuffer, CNBuffSize, 1) );
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : size of cctx for level 1 : ", testNb++);
@ -401,7 +423,7 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(3, "test%3i : check decompressed result : ", testNb++);
{ size_t u;
for (u=0; u<CNBuffSize; u++) {
if (((BYTE*)decodedBuffer)[u] != ((BYTE*)CNBuffer)[u]) goto _output_error;;
if (((BYTE*)decodedBuffer)[u] != ((BYTE*)CNBuffer)[u]) goto _output_error;
} }
DISPLAYLEVEL(3, "OK \n");
@ -450,12 +472,11 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(3, "test%3i : ZSTD_decompressBound test with content size missing : ", testNb++);
{ /* create compressed buffer with content size missing */
ZSTD_CCtx* cctx = ZSTD_createCCtx();
ZSTD_CCtx* const cctx = ZSTD_createCCtx();
CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_c_contentSizeFlag, 0) );
CHECKPLUS(r, ZSTD_compress2(cctx,
CHECK_VAR(cSize, ZSTD_compress2(cctx,
compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize),
cSize=r );
CNBuffer, CNBuffSize) );
ZSTD_freeCCtx(cctx);
}
{ /* ensure frame content size is missing */
@ -742,10 +763,9 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(3, "OK \n");
DISPLAYLEVEL(3, "test%3i : simple compression test with static CCtx : ", testNb++);
CHECKPLUS(r, ZSTD_compressCCtx(staticCCtx,
compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize, STATIC_CCTX_LEVEL),
cSize=r );
CHECK_VAR(cSize, ZSTD_compressCCtx(staticCCtx,
compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize, STATIC_CCTX_LEVEL) );
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n",
(unsigned)cSize, (double)cSize/CNBuffSize*100);
@ -760,7 +780,7 @@ static int basicUnitTests(U32 seed, double compressibility)
{ size_t u;
for (u=0; u<CNBuffSize; u++) {
if (((BYTE*)decodedBuffer)[u] != ((BYTE*)CNBuffer)[u])
goto _output_error;;
goto _output_error;
} }
DISPLAYLEVEL(3, "OK \n");
@ -777,7 +797,7 @@ static int basicUnitTests(U32 seed, double compressibility)
CHECK( ZSTD_initCStream(staticCCtx, 1) );
DISPLAYLEVEL(3, "OK \n");
DISPLAYLEVEL(3, "test%3i : init CStream with dictionary (should fail) : ", testNb++);
DISPLAYLEVEL(3, "test%3i : init static CStream with dictionary (should fail) : ", testNb++);
{ size_t const r = ZSTD_initCStream_usingDict(staticCCtx, CNBuffer, 64 KB, 1);
if (!ZSTD_isError(r)) goto _output_error; }
DISPLAYLEVEL(3, "OK \n");
@ -810,20 +830,19 @@ static int basicUnitTests(U32 seed, double compressibility)
/* ZSTDMT simple MT compression test */
DISPLAYLEVEL(3, "test%3i : create ZSTDMT CCtx : ", testNb++);
{ ZSTDMT_CCtx* mtctx = ZSTDMT_createCCtx(2);
{ ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(2);
if (mtctx==NULL) {
DISPLAY("mtctx : mot enough memory, aborting \n");
DISPLAY("mtctx : not enough memory, aborting \n");
testResult = 1;
goto _end;
}
DISPLAYLEVEL(3, "OK \n");
DISPLAYLEVEL(3, "test%3u : compress %u bytes with 2 threads : ", testNb++, (unsigned)CNBuffSize);
CHECKPLUS(r, ZSTDMT_compressCCtx(mtctx,
CHECK_VAR(cSize, ZSTDMT_compressCCtx(mtctx,
compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize,
1),
cSize=r );
1) );
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : decompressed size test : ", testNb++);
@ -842,7 +861,7 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(3, "test%3i : check decompressed result : ", testNb++);
{ size_t u;
for (u=0; u<CNBuffSize; u++) {
if (((BYTE*)decodedBuffer)[u] != ((BYTE*)CNBuffer)[u]) goto _output_error;;
if (((BYTE*)decodedBuffer)[u] != ((BYTE*)CNBuffer)[u]) goto _output_error;
} }
DISPLAYLEVEL(3, "OK \n");
@ -850,11 +869,10 @@ static int basicUnitTests(U32 seed, double compressibility)
{ ZSTD_parameters params = ZSTD_getParams(1, CNBuffSize, 0);
params.fParams.checksumFlag = 1;
params.fParams.contentSizeFlag = 1;
CHECKPLUS(r, ZSTDMT_compress_advanced(mtctx,
CHECK_VAR(cSize, ZSTDMT_compress_advanced(mtctx,
compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize,
NULL, params, 3 /*overlapRLog*/),
cSize=r );
NULL, params, 3 /*overlapRLog*/) );
}
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
@ -928,10 +946,10 @@ static int basicUnitTests(U32 seed, double compressibility)
/* only use the first half so we don't push against size limit of compressedBuffer */
size_t const segSize = (CNBuffSize / 2) / segs;
for (i = 0; i < segs; i++) {
CHECK_V(r, ZSTD_compress(
(BYTE *)compressedBuffer + off, CNBuffSize - off,
(BYTE *)CNBuffer + segSize * i,
segSize, 5));
CHECK_NEWV(r, ZSTD_compress(
(BYTE*)compressedBuffer + off, CNBuffSize - off,
(BYTE*)CNBuffer + segSize * (size_t)i, segSize,
5) );
off += r;
if (i == segs/2) {
/* insert skippable frame */
@ -956,7 +974,7 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(3, "OK \n");
DISPLAYLEVEL(3, "test%3i : decompress multiple frames : ", testNb++);
{ CHECK_V(r, ZSTD_decompress(decodedBuffer, CNBuffSize, compressedBuffer, cSize));
{ CHECK_NEWV(r, ZSTD_decompress(decodedBuffer, CNBuffSize, compressedBuffer, cSize));
if (r != CNBuffSize / 2) goto _output_error; }
DISPLAYLEVEL(3, "OK \n");
@ -983,8 +1001,9 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(3, "test%3i : compress with flat dictionary : ", testNb++);
cSize = 0;
CHECKPLUS(r, ZSTD_compressEnd(ctxOrig, compressedBuffer, compressedBufferSize,
(const char*)CNBuffer + dictSize, CNBuffSize - dictSize),
CHECKPLUS(r, ZSTD_compressEnd(ctxOrig,
compressedBuffer, compressedBufferSize,
(const char*)CNBuffer + dictSize, CNBuffSize - dictSize),
cSize += r);
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
@ -999,8 +1018,9 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(3, "test%3i : compress with duplicated context : ", testNb++);
{ size_t const cSizeOrig = cSize;
cSize = 0;
CHECKPLUS(r, ZSTD_compressEnd(ctxDuplicated, compressedBuffer, compressedBufferSize,
(const char*)CNBuffer + dictSize, CNBuffSize - dictSize),
CHECKPLUS(r, ZSTD_compressEnd(ctxDuplicated,
compressedBuffer, compressedBufferSize,
(const char*)CNBuffer + dictSize, CNBuffSize - dictSize),
cSize += r);
if (cSize != cSizeOrig) goto _output_error; /* should be identical ==> same size */
}
@ -1024,7 +1044,7 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(3, "test%3i : decompress with static DDict : ", testNb++);
{ size_t const ddictBufferSize = ZSTD_estimateDDictSize(dictSize, ZSTD_dlm_byCopy);
void* ddictBuffer = malloc(ddictBufferSize);
void* const ddictBuffer = malloc(ddictBufferSize);
if (ddictBuffer == NULL) goto _output_error;
{ const ZSTD_DDict* const ddict = ZSTD_initStaticDDict(ddictBuffer, ddictBufferSize, CNBuffer, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
size_t const r = ZSTD_decompress_usingDDict(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize, ddict);
@ -1042,15 +1062,66 @@ static int basicUnitTests(U32 seed, double compressibility)
}
CHECK( ZSTD_copyCCtx(ctxDuplicated, ctxOrig, testSize) );
CHECKPLUS(r, ZSTD_compressEnd(ctxDuplicated, compressedBuffer, ZSTD_compressBound(testSize),
(const char*)CNBuffer + dictSize, testSize),
cSize = r);
CHECK_VAR(cSize, ZSTD_compressEnd(ctxDuplicated, compressedBuffer, ZSTD_compressBound(testSize),
(const char*)CNBuffer + dictSize, testSize) );
{ ZSTD_frameHeader zfh;
if (ZSTD_getFrameHeader(&zfh, compressedBuffer, cSize)) goto _output_error;
if ((zfh.frameContentSize != testSize) && (zfh.frameContentSize != 0)) goto _output_error;
} }
DISPLAYLEVEL(3, "OK \n");
if ((int)(compressibility * 100 + 0.1) == FUZ_compressibility_default) { /* test only valid with known input */
size_t const flatdictSize = 22 KB;
size_t const contentSize = 9 KB;
const void* const dict = (const char*)CNBuffer;
const void* const contentStart = (const char*)dict + flatdictSize;
size_t const target_nodict_cSize[22+1] = { 3840, 3770, 3870, 3830, 3770,
3770, 3770, 3770, 3750, 3750,
3740, 3670, 3670, 3660, 3660,
3660, 3660, 3660, 3660, 3660,
3660, 3660, 3660 };
size_t const target_wdict_cSize[22+1] = { 2830, 2890, 2890, 2820, 2940,
2950, 2950, 2920, 2900, 2890,
2910, 2910, 2910, 2770, 2760,
2750, 2750, 2750, 2750, 2750,
2750, 2750, 2750 };
int l = 1;
int const maxLevel = ZSTD_maxCLevel();
DISPLAYLEVEL(3, "test%3i : flat-dictionary efficiency test : \n", testNb++);
assert(maxLevel == 22);
RDG_genBuffer(CNBuffer, flatdictSize + contentSize, compressibility, 0., seed);
DISPLAYLEVEL(4, "content hash : %016llx; dict hash : %016llx \n", XXH64(contentStart, contentSize, 0), XXH64(dict, flatdictSize, 0));
for ( ; l <= maxLevel; l++) {
size_t const nodict_cSize = ZSTD_compress(compressedBuffer, compressedBufferSize,
contentStart, contentSize, l);
if (nodict_cSize > target_nodict_cSize[l]) {
DISPLAYLEVEL(1, "error : compression at level %i worse than expected (%u > %u) \n",
l, (unsigned)nodict_cSize, (unsigned)target_nodict_cSize[l]);
goto _output_error;
}
DISPLAYLEVEL(4, "level %i : max expected %u >= reached %u \n",
l, (unsigned)target_nodict_cSize[l], (unsigned)nodict_cSize);
}
for ( l=1 ; l <= maxLevel; l++) {
size_t const wdict_cSize = ZSTD_compress_usingDict(ctxOrig,
compressedBuffer, compressedBufferSize,
contentStart, contentSize,
dict, flatdictSize,
l);
if (wdict_cSize > target_wdict_cSize[l]) {
DISPLAYLEVEL(1, "error : compression with dictionary at level %i worse than expected (%u > %u) \n",
l, (unsigned)wdict_cSize, (unsigned)target_wdict_cSize[l]);
goto _output_error;
}
DISPLAYLEVEL(4, "level %i with dictionary : max expected %u >= reached %u \n",
l, (unsigned)target_wdict_cSize[l], (unsigned)wdict_cSize);
}
DISPLAYLEVEL(4, "compression efficiency tests OK \n");
}
ZSTD_freeCCtx(ctxOrig);
ZSTD_freeCCtx(ctxDuplicated);
ZSTD_freeDCtx(dctx);
@ -1316,7 +1387,7 @@ static int basicUnitTests(U32 seed, double compressibility)
}
DISPLAYLEVEL(3, "OK \n");
DISPLAYLEVEL(3, "test%3i : Building cdict w/ ZSTD_dm_fullDict on a good dictionary : ", testNb++);
DISPLAYLEVEL(3, "test%3i : Building cdict w/ ZSTD_dct_fullDict on a good dictionary : ", testNb++);
{ ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBuffSize, dictSize);
ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_fullDict, cParams, ZSTD_defaultCMem);
if (cdict==NULL) goto _output_error;
@ -1324,7 +1395,7 @@ static int basicUnitTests(U32 seed, double compressibility)
}
DISPLAYLEVEL(3, "OK \n");
DISPLAYLEVEL(3, "test%3i : Building cdict w/ ZSTD_dm_fullDict on a rawContent (must fail) : ", testNb++);
DISPLAYLEVEL(3, "test%3i : Building cdict w/ ZSTD_dct_fullDict on a rawContent (must fail) : ", testNb++);
{ ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBuffSize, dictSize);
ZSTD_CDict* const cdict = ZSTD_createCDict_advanced((const char*)dictBuffer+1, dictSize-1, ZSTD_dlm_byRef, ZSTD_dct_fullDict, cParams, ZSTD_defaultCMem);
if (cdict!=NULL) goto _output_error;
@ -1332,7 +1403,7 @@ static int basicUnitTests(U32 seed, double compressibility)
}
DISPLAYLEVEL(3, "OK \n");
DISPLAYLEVEL(3, "test%3i : Loading rawContent starting with dict header w/ ZSTD_dm_auto should fail : ", testNb++);
DISPLAYLEVEL(3, "test%3i : Loading rawContent starting with dict header w/ ZSTD_dct_auto should fail : ", testNb++);
{
size_t ret;
MEM_writeLE32((char*)dictBuffer+2, ZSTD_MAGIC_DICTIONARY);
@ -1346,7 +1417,7 @@ static int basicUnitTests(U32 seed, double compressibility)
}
DISPLAYLEVEL(3, "OK \n");
DISPLAYLEVEL(3, "test%3i : Loading rawContent starting with dict header w/ ZSTD_dm_rawContent should pass : ", testNb++);
DISPLAYLEVEL(3, "test%3i : Loading rawContent starting with dict header w/ ZSTD_dct_rawContent should pass : ", testNb++);
{
size_t ret;
MEM_writeLE32((char*)dictBuffer+2, ZSTD_MAGIC_DICTIONARY);
@ -1598,6 +1669,7 @@ static int basicUnitTests(U32 seed, double compressibility)
size_t const sampleUnitSize = 8 KB;
U32 const nbSamples = (U32)(totalSampleSize / sampleUnitSize);
size_t* const samplesSizes = (size_t*) malloc(nbSamples * sizeof(size_t));
U32 seed32 = seed;
ZDICT_cover_params_t params;
U32 dictID;
@ -1610,8 +1682,8 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(3, "test%3i : ZDICT_trainFromBuffer_cover : ", testNb++);
{ U32 u; for (u=0; u<nbSamples; u++) samplesSizes[u] = sampleUnitSize; }
memset(&params, 0, sizeof(params));
params.d = 1 + (FUZ_rand(&seed) % 16);
params.k = params.d + (FUZ_rand(&seed) % 256);
params.d = 1 + (FUZ_rand(&seed32) % 16);
params.k = params.d + (FUZ_rand(&seed32) % 256);
dictSize = ZDICT_trainFromBuffer_cover(dictBuffer, dictSize,
CNBuffer, samplesSizes, nbSamples,
params);
@ -1814,6 +1886,36 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(3, "streaming OK : regenerated %u bytes \n", (unsigned)out.pos);
}
/* basic block compression */
DISPLAYLEVEL(3, "test%3i : empty magic-less format test : ", testNb++);
CHECK( ZSTD_CCtx_setParameter(cctx, ZSTD_c_format, ZSTD_f_zstd1_magicless) );
{ ZSTD_inBuffer in = { CNBuffer, 0, 0 };
ZSTD_outBuffer out = { compressedBuffer, ZSTD_compressBound(0), 0 };
size_t const result = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
if (result != 0) goto _output_error;
if (in.pos != in.size) goto _output_error;
cSize = out.pos;
}
DISPLAYLEVEL(3, "OK (compress : %u -> %u bytes)\n", (unsigned)0, (unsigned)cSize);
DISPLAYLEVEL(3, "test%3i : decompress of empty magic-less frame : ", testNb++);
ZSTD_DCtx_reset(dctx, ZSTD_reset_session_and_parameters);
CHECK( ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, ZSTD_f_zstd1_magicless) );
/* one shot */
{ size_t const result = ZSTD_decompressDCtx(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize);
if (result != 0) goto _output_error;
DISPLAYLEVEL(3, "one-shot OK, ");
}
/* streaming */
{ ZSTD_inBuffer in = { compressedBuffer, cSize, 0 };
ZSTD_outBuffer out = { decodedBuffer, CNBuffSize, 0 };
size_t const result = ZSTD_decompressStream(dctx, &out, &in);
if (result != 0) goto _output_error;
if (in.pos != in.size) goto _output_error;
if (out.pos != 0) goto _output_error;
DISPLAYLEVEL(3, "streaming OK : regenerated %u bytes \n", (unsigned)out.pos);
}
ZSTD_freeCCtx(cctx);
ZSTD_freeDCtx(dctx);
}
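The magic-less format simply drops the 4-byte frame magic number; since nothing in the bitstream then identifies the format, compressor and decompressor must both opt in explicitly. A minimal round-trip sketch (ZSTD_f_zstd1_magicless and ZSTD_d_format are experimental API, hence the static-linking define):

    #define ZSTD_STATIC_LINKING_ONLY   /* magicless format is experimental API */
    #include "zstd.h"
    #include <string.h>

    /* Round-trips "hello" through the magic-less frame format; returns 1 on success. */
    static int magiclessRoundTrip(void)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        ZSTD_DCtx* const dctx = ZSTD_createDCtx();
        char cBuf[128], rBuf[128];
        size_t cSize, rSize;
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_format, ZSTD_f_zstd1_magicless);
        cSize = ZSTD_compress2(cctx, cBuf, sizeof cBuf, "hello", 5);
        if (ZSTD_isError(cSize)) { ZSTD_freeCCtx(cctx); ZSTD_freeDCtx(dctx); return 0; }
        /* The decoder cannot sniff the format: it must be told as well. */
        ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, ZSTD_f_zstd1_magicless);
        rSize = ZSTD_decompressDCtx(dctx, rBuf, sizeof rBuf, cBuf, cSize);
        ZSTD_freeCCtx(cctx);
        ZSTD_freeDCtx(dctx);
        return rSize == 5 && memcmp(rBuf, "hello", 5) == 0;
    }

Without the ZSTD_d_format setting, the same input would be rejected as an unknown frame prefix.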
@ -1830,13 +1932,12 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(3, "test%3i : Block compression test : ", testNb++);
CHECK( ZSTD_compressBegin(cctx, 5) );
CHECK( ZSTD_getBlockSize(cctx) >= blockSize);
cSize = ZSTD_compressBlock(cctx, compressedBuffer, ZSTD_compressBound(blockSize), CNBuffer, blockSize);
if (ZSTD_isError(cSize)) goto _output_error;
CHECK_VAR(cSize, ZSTD_compressBlock(cctx, compressedBuffer, ZSTD_compressBound(blockSize), CNBuffer, blockSize) );
DISPLAYLEVEL(3, "OK \n");
DISPLAYLEVEL(3, "test%3i : Block decompression test : ", testNb++);
CHECK( ZSTD_decompressBegin(dctx) );
{ CHECK_V(r, ZSTD_decompressBlock(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize) );
{ CHECK_NEWV(r, ZSTD_decompressBlock(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize) );
if (r != blockSize) goto _output_error; }
DISPLAYLEVEL(3, "OK \n");
@ -1851,30 +1952,36 @@ static int basicUnitTests(U32 seed, double compressibility)
assert(blockCSize != 0);
if (ZSTD_isError(blockCSize)) goto _output_error;
compressed += blockCSize;
}
}
} }
DISPLAYLEVEL(3, "OK \n");
/* dictionary block compression */
DISPLAYLEVEL(3, "test%3i : Dictionary Block compression test : ", testNb++);
CHECK( ZSTD_compressBegin_usingDict(cctx, CNBuffer, dictSize, 5) );
cSize = ZSTD_compressBlock(cctx, compressedBuffer, ZSTD_compressBound(blockSize), (char*)CNBuffer+dictSize, blockSize);
if (ZSTD_isError(cSize)) goto _output_error;
cSize2 = ZSTD_compressBlock(cctx, (char*)compressedBuffer+cSize, ZSTD_compressBound(blockSize), (char*)CNBuffer+dictSize+blockSize, blockSize);
if (ZSTD_isError(cSize2)) goto _output_error;
memcpy((char*)compressedBuffer+cSize, (char*)CNBuffer+dictSize+blockSize, blockSize); /* fake non-compressed block */
cSize2 = ZSTD_compressBlock(cctx, (char*)compressedBuffer+cSize+blockSize, ZSTD_compressBound(blockSize),
(char*)CNBuffer+dictSize+2*blockSize, blockSize);
if (ZSTD_isError(cSize2)) goto _output_error;
CHECK_VAR(cSize, ZSTD_compressBlock(cctx, compressedBuffer, ZSTD_compressBound(blockSize), (char*)CNBuffer+dictSize, blockSize));
RDG_genBuffer((char*)CNBuffer+dictSize+blockSize, blockSize, 0.0, 0.0, seed); /* create a non-compressible second block */
{ CHECK_NEWV(r, ZSTD_compressBlock(cctx, (char*)compressedBuffer+cSize, ZSTD_compressBound(blockSize), (char*)CNBuffer+dictSize+blockSize, blockSize) ); /* for cctx history consistency */
assert(r == 0); /* non-compressible block */ }
memcpy((char*)compressedBuffer+cSize, (char*)CNBuffer+dictSize+blockSize, blockSize); /* send non-compressed block (without header) */
CHECK_VAR(cSize2, ZSTD_compressBlock(cctx, (char*)compressedBuffer+cSize+blockSize, ZSTD_compressBound(blockSize),
(char*)CNBuffer+dictSize+2*blockSize, blockSize));
DISPLAYLEVEL(3, "OK \n");
DISPLAYLEVEL(3, "test%3i : Dictionary Block decompression test : ", testNb++);
CHECK( ZSTD_decompressBegin_usingDict(dctx, CNBuffer, dictSize) );
{ CHECK_V( r, ZSTD_decompressBlock(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize) );
if (r != blockSize) goto _output_error; }
{ CHECK_NEWV( r, ZSTD_decompressBlock(dctx, decodedBuffer, blockSize, compressedBuffer, cSize) );
if (r != blockSize) {
DISPLAYLEVEL(1, "ZSTD_decompressBlock() with _usingDict() fails : %u, instead of %u expected \n", (unsigned)r, (unsigned)blockSize);
goto _output_error;
} }
memcpy((char*)decodedBuffer+blockSize, (char*)compressedBuffer+cSize, blockSize);
ZSTD_insertBlock(dctx, (char*)decodedBuffer+blockSize, blockSize); /* insert non-compressed block into dctx history */
{ CHECK_V( r, ZSTD_decompressBlock(dctx, (char*)decodedBuffer+2*blockSize, CNBuffSize, (char*)compressedBuffer+cSize+blockSize, cSize2) );
if (r != blockSize) goto _output_error; }
{ CHECK_NEWV( r, ZSTD_decompressBlock(dctx, (char*)decodedBuffer+2*blockSize, blockSize, (char*)compressedBuffer+cSize+blockSize, cSize2) );
if (r != blockSize) {
DISPLAYLEVEL(1, "ZSTD_decompressBlock() with _usingDict() and after insertBlock() fails : %u, instead of %u expected \n", (unsigned)r, (unsigned)blockSize);
goto _output_error;
} }
assert(!memcmp((char*)CNBuffer+dictSize, decodedBuffer, blockSize*3)); /* ensure regenerated content is identical to origin */
DISPLAYLEVEL(3, "OK \n");
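The subtlety these strengthened checks cover: ZSTD_compressBlock() returns 0 for a non-compressible block rather than expanding it, and the block-level API leaves all framing to the caller. The caller must therefore transmit such a block raw, and on the decoding side hand that raw block to ZSTD_insertBlock() so the dctx history window stays consistent before decoding the next compressed block; otherwise later matches could reference data the decoder never saw.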
DISPLAYLEVEL(3, "test%3i : Block compression with CDict : ", testNb++);
@ -1900,21 +2007,59 @@ static int basicUnitTests(U32 seed, double compressibility)
sampleSize += 96 KB;
cSize = ZSTD_compress(compressedBuffer, ZSTD_compressBound(sampleSize), CNBuffer, sampleSize, 1);
if (ZSTD_isError(cSize)) goto _output_error;
{ CHECK_V(regenSize, ZSTD_decompress(decodedBuffer, sampleSize, compressedBuffer, cSize));
{ CHECK_NEWV(regenSize, ZSTD_decompress(decodedBuffer, sampleSize, compressedBuffer, cSize));
if (regenSize!=sampleSize) goto _output_error; }
DISPLAYLEVEL(3, "OK \n");
}
DISPLAYLEVEL(3, "test%3i : ZSTD_getSequences decode from sequences test : ", testNb++);
{
size_t srcSize = 100 KB;
BYTE* src = (BYTE*)CNBuffer;
BYTE* decoded = (BYTE*)compressedBuffer;
ZSTD_CCtx* cctx = ZSTD_createCCtx();
ZSTD_Sequence* seqs = (ZSTD_Sequence*)malloc(srcSize * sizeof(ZSTD_Sequence));
size_t seqsSize;
if (seqs == NULL) goto _output_error;
assert(cctx != NULL);
/* Populate src with random data */
RDG_genBuffer(CNBuffer, srcSize, compressibility, 0., seed);
/* get the sequences */
seqsSize = ZSTD_getSequences(cctx, seqs, srcSize, src, srcSize);
/* "decode" and compare the sequences */
FUZ_decodeSequences(decoded, seqs, seqsSize, src, srcSize);
assert(!memcmp(CNBuffer, compressedBuffer, srcSize));
ZSTD_freeCCtx(cctx);
free(seqs);
}
/* Multiple blocks of zeros test */
#define LONGZEROSLENGTH 1000000 /* 1MB of zeros */
DISPLAYLEVEL(3, "test%3i : compress %u zeroes : ", testNb++, LONGZEROSLENGTH);
memset(CNBuffer, 0, LONGZEROSLENGTH);
CHECK_VAR(cSize, ZSTD_compress(compressedBuffer, ZSTD_compressBound(LONGZEROSLENGTH), CNBuffer, LONGZEROSLENGTH, 1) );
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/LONGZEROSLENGTH*100);
DISPLAYLEVEL(3, "test%3i : decompress %u zeroes : ", testNb++, LONGZEROSLENGTH);
{ CHECK_NEWV(r, ZSTD_decompress(decodedBuffer, LONGZEROSLENGTH, compressedBuffer, cSize) );
if (r != LONGZEROSLENGTH) goto _output_error; }
DISPLAYLEVEL(3, "OK \n");
/* All zeroes test (test bug #137) */
#define ZEROESLENGTH 100
DISPLAYLEVEL(3, "test%3i : compress %u zeroes : ", testNb++, ZEROESLENGTH);
memset(CNBuffer, 0, ZEROESLENGTH);
{ CHECK_V(r, ZSTD_compress(compressedBuffer, ZSTD_compressBound(ZEROESLENGTH), CNBuffer, ZEROESLENGTH, 1) );
cSize = r; }
CHECK_VAR(cSize, ZSTD_compress(compressedBuffer, ZSTD_compressBound(ZEROESLENGTH), CNBuffer, ZEROESLENGTH, 1) );
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/ZEROESLENGTH*100);
DISPLAYLEVEL(3, "test%3i : decompress %u zeroes : ", testNb++, ZEROESLENGTH);
{ CHECK_V(r, ZSTD_decompress(decodedBuffer, ZEROESLENGTH, compressedBuffer, cSize) );
{ CHECK_NEWV(r, ZSTD_decompress(decodedBuffer, ZEROESLENGTH, compressedBuffer, cSize) );
if (r != ZEROESLENGTH) goto _output_error; }
DISPLAYLEVEL(3, "OK \n");
@ -1962,13 +2107,12 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(3, "OK \n");
DISPLAYLEVEL(3, "test%3i : compress lots 3-bytes sequences : ", testNb++);
{ CHECK_V(r, ZSTD_compress(compressedBuffer, ZSTD_compressBound(_3BYTESTESTLENGTH),
CNBuffer, _3BYTESTESTLENGTH, 19) );
cSize = r; }
CHECK_VAR(cSize, ZSTD_compress(compressedBuffer, ZSTD_compressBound(_3BYTESTESTLENGTH),
CNBuffer, _3BYTESTESTLENGTH, 19) );
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/_3BYTESTESTLENGTH*100);
DISPLAYLEVEL(3, "test%3i : decompress lots 3-bytes sequence : ", testNb++);
{ CHECK_V(r, ZSTD_decompress(decodedBuffer, _3BYTESTESTLENGTH, compressedBuffer, cSize) );
{ CHECK_NEWV(r, ZSTD_decompress(decodedBuffer, _3BYTESTESTLENGTH, compressedBuffer, cSize) );
if (r != _3BYTESTESTLENGTH) goto _output_error; }
DISPLAYLEVEL(3, "OK \n");
@ -2084,6 +2228,79 @@ static int basicUnitTests(U32 seed, double compressibility)
}
DISPLAYLEVEL(3, "OK \n");
DISPLAYLEVEL(3, "test%3i : table cleanliness through index reduction : ", testNb++);
{
int cLevel;
size_t approxIndex = 0;
size_t maxIndex = ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX)); /* ZSTD_CURRENT_MAX from zstd_compress_internal.h */
/* Provision enough space in a static context so that we can do all
* this without ever reallocating, which would reset the indices. */
size_t const staticCCtxSize = ZSTD_estimateCStreamSize(22);
void* const staticCCtxBuffer = malloc(staticCCtxSize);
ZSTD_CCtx* cctx = ZSTD_initStaticCCtx(staticCCtxBuffer, staticCCtxSize);
/* bump the indices so the following compressions happen at high
* indices. */
{
ZSTD_outBuffer out = { compressedBuffer, compressedBufferSize, 0 };
ZSTD_inBuffer in = { CNBuffer, CNBuffSize, 0 };
ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);
CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, -500));
while (approxIndex <= (maxIndex / 4) * 3) {
CHECK_Z(ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_flush));
approxIndex += in.pos;
CHECK(in.pos == in.size);
in.pos = 0;
out.pos = 0;
}
CHECK_Z(ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end));
}
/* spew a bunch of stuff into the table area */
for (cLevel = 1; cLevel <= 22; cLevel++) {
ZSTD_outBuffer out = { compressedBuffer, compressedBufferSize / cLevel, 0 };
ZSTD_inBuffer in = { CNBuffer, CNBuffSize, 0 };
ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);
CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, cLevel));
CHECK_Z(ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_flush));
CHECK_Z(ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end));
approxIndex += in.pos;
}
/* now crank the indices so we overflow */
{
ZSTD_outBuffer out = { compressedBuffer, compressedBufferSize, 0 };
ZSTD_inBuffer in = { CNBuffer, CNBuffSize, 0 };
ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);
CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, -500));
while (approxIndex <= maxIndex) {
CHECK_Z(ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_flush));
approxIndex += in.pos;
CHECK(in.pos == in.size);
in.pos = 0;
out.pos = 0;
}
CHECK_Z(ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end));
}
/* do a bunch of compressions again in low indices and ensure we don't
* hit untracked invalid indices */
for (cLevel = 1; cLevel <= 22; cLevel++) {
ZSTD_outBuffer out = { compressedBuffer, compressedBufferSize / cLevel, 0 };
ZSTD_inBuffer in = { CNBuffer, CNBuffSize, 0 };
ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);
CHECK_Z(ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, cLevel));
CHECK_Z(ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_flush));
CHECK_Z(ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end));
approxIndex += in.pos;
}
ZSTD_freeCCtx(cctx);
free(staticCCtxBuffer);
}
DISPLAYLEVEL(3, "OK \n");
_end:
free(CNBuffer);
free(compressedBuffer);

Some files were not shown because too many files have changed in this diff.